#include "dm.h"
#include "dm-bio-list.h"
+#include "dm-uevent.h"
#include <linux/init.h>
#include <linux/module.h>
*/
atomic_t event_nr;
wait_queue_head_t eventq;
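+ /* uevent state: a sequence counter plus a queue of pending events */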
+ atomic_t uevent_seq;
+ struct list_head uevent_list;
+ spinlock_t uevent_lock; /* Protect access to uevent_list */
/*
* freeze/thaw support requires holding onto a super block
return -ENOMEM;
}
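+ /* Bring up the uevent subsystem; undo the cache creation on failure */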
+ r = dm_uevent_init();
+ if (r) {
+ kmem_cache_destroy(_tio_cache);
+ kmem_cache_destroy(_io_cache);
+ return r;
+ }
+
_major = major;
r = register_blkdev(_major, _name);
if (r < 0) {
kmem_cache_destroy(_tio_cache);
kmem_cache_destroy(_io_cache);
+ dm_uevent_exit();
return r;
}
kmem_cache_destroy(_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
+ dm_uevent_exit();
_major = 0;
return clone;
}
-static void __clone_and_map(struct clone_info *ci)
+static int __clone_and_map(struct clone_info *ci)
{
struct bio *clone, *bio = ci->bio;
- struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
- sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
+ struct dm_target *ti;
+ sector_t len = 0, max;
struct dm_target_io *tio;
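+
+ /* Fail the io with -EIO if the sector does not map to a valid target */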
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
+
+ max = max_io_len(ci->md, ci->sector, ti);
+
/*
* Allocate a target io object.
*/
do {
if (offset) {
ti = dm_table_find_target(ci->map, ci->sector);
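+ /* Revalidate the target after moving to the next sector range */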
+ if (!dm_target_is_valid(ti))
+ return -EIO;
+
max = max_io_len(ci->md, ci->sector, ti);
tio = alloc_tio(ci->md);
ci->idx++;
}
+
+ return 0;
}
/*
static int __split_bio(struct mapped_device *md, struct bio *bio)
{
struct clone_info ci;
+ int error = 0;
ci.map = dm_get_table(md);
if (unlikely(!ci.map))
ci.idx = bio->bi_idx;
start_io_acct(ci.io);
- while (ci.sector_count)
- __clone_and_map(&ci);
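+ /* Stop splitting on the first error; dec_pending() reports it */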
+ while (ci.sector_count && !error)
+ error = __clone_and_map(&ci);
/* drop the extra reference count */
- dec_pending(ci.io, 0);
+ dec_pending(ci.io, error);
dm_table_put(ci.map);
return 0;
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
atomic_set(&md->event_nr, 0);
+ atomic_set(&md->uevent_seq, 0);
+ INIT_LIST_HEAD(&md->uevent_list);
+ spin_lock_init(&md->uevent_lock);
md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
*/
static void event_callback(void *context)
{
+ unsigned long flags;
+ LIST_HEAD(uevents);
struct mapped_device *md = (struct mapped_device *) context;
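+
+ /* Detach queued uevents under the lock, then deliver them */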
+ spin_lock_irqsave(&md->uevent_lock, flags);
+ list_splice_init(&md->uevent_list, &uevents);
+ spin_unlock_irqrestore(&md->uevent_lock, flags);
+
+ dm_send_uevents(&uevents, &md->disk->dev.kobj);
+
atomic_inc(&md->event_nr);
wake_up(&md->eventq);
}
dm_table_unplug_all(map);
- kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
+ dm_kobject_uevent(md);
r = 0;
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
+void dm_kobject_uevent(struct mapped_device *md)
+{
+ kobject_uevent(&md->disk->dev.kobj, KOBJ_CHANGE);
+}
+
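+/*
+ * Return the next uevent sequence number for this device so that
+ * events can be ordered by their consumers.
+ */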
+uint32_t dm_next_uevent_seq(struct mapped_device *md)
+{
+ return atomic_add_return(1, &md->uevent_seq);
+}
+
uint32_t dm_get_event_nr(struct mapped_device *md)
{
return atomic_read(&md->event_nr);
(event_nr != atomic_read(&md->event_nr)));
}
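+/*
+ * Queue an event on the device's uevent list; event_callback() will
+ * pass it to dm_send_uevents() when the next device event fires.
+ */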
+void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&md->uevent_lock, flags);
+ list_add(elist, &md->uevent_list);
+ spin_unlock_irqrestore(&md->uevent_lock, flags);
+}
+
/*
* The gendisk is only valid as long as you have a reference
* count on 'md'.