 * Copyright (C) 2003 Sistina Software Limited.
 * This file is released under the GPL.

#include "dm-bio-list.h"
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define DM_MSG_PREFIX "raid1"

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 * clean_regions: Regions on this list have no io pending to
 * them, they are in sync, we are no longer interested in them,
 * they are dull.  rh_update_states() will remove them from the
 * hash table.
 *
 * quiesced_regions: These regions have been spun down, ready
 * for recovery.  rh_recovery_start() will remove regions from
 * this list and hand them to kmirrord, which will schedule the
 * recovery io with kcopyd.
 *
 * recovered_regions: Regions that kcopyd has successfully
 * recovered.  rh_update_states() will now schedule any delayed
 * io, up the recovery_count, and remove the region from the
 * hash.
 *
 * A rw spin lock 'hash_lock' protects just the hash table; this
 * is never held in write mode from interrupt context, which I
 * believe means that we only have to disable irqs when doing a
 * write lock.
 *
 * An ordinary spin lock 'region_lock' protects the three
 * lists in the region_hash, along with the 'state', 'list' and
 * 'delayed_bios' fields of the regions.  This is used from irq
 * context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
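
/*
 * Illustrative sketch of the state/list transitions implemented below
 * (a summary only; rh_inc(), rh_dec(), rh_recovery_prepare() and
 * rh_update_states() are the authoritative logic):
 *
 *   write arrives on a clean region  -> RH_DIRTY, log->type->mark_region()
 *   last pending write completes     -> RH_CLEAN, back on clean_regions
 *   log hands out resync work        -> RH_RECOVERING, then quiesced_regions
 *                                       once the pending count drops to zero
 *   kcopyd finishes the copy         -> recovered_regions; delayed bios are
 *                                       dispatched by rh_update_states()
 */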
	struct mirror_set *ms;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	mempool_t *region_pool;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;

	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	struct list_head hash_list;
	struct list_head list;
	struct bio_list delayed_bios;

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
	atomic_t error_count;

	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	struct mirror *default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
	return region << rh->region_shift;

static void wake(struct mirror_set *ms)
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
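/*
 * Note (a summary of how the constants above are used by the code below):
 * rh_start_recovery() ups the recovery_count semaphore MAX_RECOVERY times
 * and rh_recovery_prepare() down_trylock()s it once per region it hands
 * out, so at most MAX_RECOVERY regions are being recovered by kcopyd at
 * any one time; the region mempool is created with MIN_REGIONS entries so
 * that at least that many struct regions can always be allocated.
 */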
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
	unsigned int nr_buckets, max_buckets;

	/*
	 * Calculate a suitable number of buckets for our hash table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)

	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
		DMERR("unable to allocate region hash memory");

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
static void rh_exit(struct region_hash *rh)
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);

		dm_destroy_dirty_log(rh->log);
		mempool_destroy(rh->region_pool);

#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)

static void __rh_insert(struct region_hash *rh, struct region *reg)
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);

static struct region *__rh_alloc(struct region_hash *rh, region_t region)
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      RH_CLEAN : RH_NOSYNC;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);

	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
	reg = __rh_lookup(rh, region);
		reg = __rh_alloc(rh, region);
static int rh_state(struct region_hash *rh, region_t region, int may_block)
	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	/*
	 * The region wasn't in the hash, so we fall back to the dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);

static void complete_resync_work(struct region *reg, int success)
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);

static void rh_update_states(struct region_hash *rh)
	struct region *reg, *next;

	LIST_HEAD(recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry (reg, &clean, list) {
			rh->log->type->clear_region(rh->log, reg->key);
			list_del(&reg->hash_list);

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);

	if (!list_empty(&recovered))
		rh->log->type->flush(rh->log);

	list_for_each_entry_safe (reg, next, &clean, list)
		mempool_free(reg, rh->region_pool);
static void rh_inc(struct region_hash *rh, region_t region)
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));

static void rh_dec(struct region_hash *rh, region_t region)
	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for the next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * the clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);

	spin_unlock_irqrestore(&rh->region_lock, flags);

 * Starts quiescing a region in preparation for recovery.
static int __rh_recovery_prepare(struct region_hash *rh)
	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);
static void rh_recovery_prepare(struct region_hash *rh)
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);

 * Returns any quiesced regions.
static struct region *rh_recovery_start(struct region_hash *rh)
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	spin_unlock_irq(&rh->region_lock);

/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	list_add(&reg->list, &reg->rh->recovered_regions);
	spin_unlock_irq(&rh->region_lock);

static void rh_flush(struct region_hash *rh)
	rh->log->type->flush(rh->log);

static void rh_delay(struct region_hash *rh, struct bio *bio)
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);

static void rh_stop_recovery(struct region_hash *rh)
	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);

static void rh_start_recovery(struct region_hash *rh)
	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);
 * Every mirror should look like this one.
#define DEFAULT_MIRROR 0

 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
static struct mirror_set *bio_get_ms(struct bio *bio)
	return (struct mirror_set *) bio->bi_next;

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
	bio->bi_next = (struct bio *) ms;
/*-----------------------------------------------------------------
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
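/*
 * Rough call flow (a summary of the code below, not additional logic):
 * kmirrord runs do_recovery(), which asks rh_recovery_prepare() to start
 * quiescing regions, pulls quiesced regions with rh_recovery_start(), and
 * hands each one to recover(), which issues the copy via kcopyd_copy().
 * recovery_complete() then reports the result through rh_recovery_end().
 */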
static void recovery_complete(int read_err, unsigned int write_err,
	struct region *reg = (struct region *) context;

	/* FIXME: better error handling */
	rh_recovery_end(reg, !(read_err || write_err));

static int recover(struct mirror_set *ms, struct region *reg)
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;

	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
			from.count = reg->rh->region_size;
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;

	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

static void do_recovery(struct mirror_set *ms)
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
			rh_recovery_end(reg, 0);

	/*
	 * Update the in sync flag.
	 */
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
	/* FIXME: add read balancing */
	return ms->default_mirror;

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 0))
			m = choose_mirror(ms, bio->bi_sector);
			m = ms->default_mirror;

		generic_make_request(bio);
/*-----------------------------------------------------------------
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
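/*
 * For example (an illustrative walk-through of do_writes() below): a write
 * that lands in a region still being resynced is put on the 'recover' list
 * and parked with rh_delay(); it is only dispatched later, by
 * rh_update_states(), once kcopyd has finished recovering that region.
 */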
static void write_callback(unsigned long error, void *context)
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	/*
	 * only error the io if all mirrors failed.
	 */
	for (i = 0; i < ms->nr_mirrors; i++)
		if (!test_bit(i, &error)) {

	bio_endio(bio, bio->bi_size, 0);

static void do_write(struct mirror_set *ms, struct bio *bio)
	struct io_region io[KCOPYD_MAX_REGIONS + 1];

	for (i = 0; i < ms->nr_mirrors; i++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;

	dm_io_async_bvec(ms->nr_mirrors, io, WRITE,
			 bio->bi_io_vec + bio->bi_idx,
			 write_callback, bio);

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
	struct bio_list sync, nosync, recover, *this_list = NULL;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
			this_list = &recover;
		bio_list_add(this_list, bio);

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);

	while ((bio = bio_list_pop(&sync)))

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
	struct mirror_set *ms = container_of(work, struct mirror_set,
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
		ti->error = "Cannot allocate mirror context";

	spin_lock_init(&ms->lock);

	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";

static void free_context(struct mirror_set *ms, struct dm_target *ti,
		dm_put_device(ti, ms->mirror[m].dev);

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";

	ms->mirror[mirror].offset = offset;
 * Create dirty log: log_type #log_params <log_params>
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
	unsigned int param_count;
	struct dirty_log *dl;

		ti->error = "Insufficient mirror log arguments";

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
		ti->error = "Error creating mirror dirty log";

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
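 *
 * For example (illustrative only; the device names and sizes are made up):
 * a 1 GiB two-way mirror with a core log and 1024-sector regions could be
 * loaded with a table line such as
 *
 *   0 2097152 mirror core 1 1024 2 /dev/sdb1 0 /dev/sdc1 0
 *
 * e.g.  dmsetup create mirrored --table \
 *       "0 2097152 mirror core 1 1024 2 /dev/sdb1 0 /dev/sdc1 0"
 */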
#define DM_IO_PAGES 64

static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);

	if (argc != nr_mirrors * 2) {
		ti->error = "Wrong number of mirror arguments";
		dm_destroy_dirty_log(dl);

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
		dm_destroy_dirty_log(dl);

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
			free_context(ms, ti, m);

	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		free_context(ms, ti, m);
	INIT_WORK(&ms->kmirrord_work, do_mirror);

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
static void mirror_dtr(struct dm_target *ti)
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

 * Mirror mapping function
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
	int r, rw = bio_rw(bio);

	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)

		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;

	m = choose_mirror(ms, bio->bi_sector);

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
		rh_dec(&ms->rh, region);

static void mirror_postsuspend(struct dm_target *ti)
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");

static void mirror_resume(struct dm_target *ti)
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);
		       (unsigned long long)ms->rh.log->type->
			get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);

	case STATUSTYPE_TABLE:
		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

static struct target_type mirror_target = {
	.version = {1, 0, 2},
	.module = THIS_MODULE,
	.end_io = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume = mirror_resume,
	.status = mirror_status,
static int __init dm_mirror_init(void)
	r = dm_dirty_log_init();

	r = dm_register_target(&mirror_target);
		DMERR("%s: Failed to register mirror target",
		      mirror_target.name);
		dm_dirty_log_exit();

static void __exit dm_mirror_exit(void)
	r = dm_unregister_target(&mirror_target);
		DMERR("%s: unregister failed %d", mirror_target.name, r);

	dm_dirty_log_exit();

module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");