/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/log2.h>
#include <linux/hardirq.h>
#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of four lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash table.
 *
 *   failed_recovered_regions: Regions that kcopyd failed to
 *   recover.  rh_update_states() marks these not-in-sync when
 *   errors are being handled, completes the resync work and
 *   removes them from the hash table.
 *
 * There are two locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing the lookups.
 *
 *   An ordinary spin lock 'region_lock' that protects the region
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
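/*
 * Summary of the region state transitions implemented below:
 *
 *   RH_CLEAN -> RH_DIRTY	rh_inc(): first write becomes pending,
 *				mark_region() in the dirty log
 *   RH_DIRTY -> RH_CLEAN	rh_dec(): last pending write completes
 *   RH_CLEAN/RH_NOSYNC -> RH_RECOVERING
 *				__rh_recovery_prepare()
 *   RH_RECOVERING -> (freed)	rh_recovery_end() queues the region on
 *				recovered_regions (success) or, with the
 *				state set back to RH_NOSYNC, on
 *				failed_recovered_regions (failure);
 *				rh_update_states() then releases it.
 */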
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
};
enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};
/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_SYNC_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;	/* bitmask of dm_raid1_error values seen */
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct work_struct trigger_event;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}
/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}
#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}
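/*
 * This is a Knuth-style multiplicative hash: the constant scrambles
 * the region number and the shift discards the low-order bits of the
 * product before the bucket mask is applied.  For example, with the
 * smallest table of 64 buckets, rh->mask is 0x3f and only the low six
 * bits of the shifted product select a bucket.
 */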
static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}
static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}
static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}
static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (e.g. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}
static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}
static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}
static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}
static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry (reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice(&rh->failed_recovered_regions, &failed_recovered);
		INIT_LIST_HEAD(&rh->failed_recovered_regions);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe (reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}
static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}
static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for the
		 * next action.  At this point, the region is not yet
		 * connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * the clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}
/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/* Ask the dirty log what's next. */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);
	spin_unlock_irq(&rh->region_lock);

	return 1;
}
static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}
/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else {
		reg->state = RH_NOSYNC;
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}
static int rh_flush(struct region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}
static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}
/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0
/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}
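/*
 * The two halves of this trick pair up across do_write() and
 * write_callback(): do_write() stores the mirror_set with
 * bio_set_ms() just before issuing the dm_io, and write_callback()
 * retrieves it with bio_get_ms() and immediately clears it again,
 * before bi_next is needed for genuine bio chaining.
 */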
static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}
/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum dm_raid1_error values
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	if (!errors_handled(ms))
		return;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	/* If the default mirror fails, change it. */
	if (!ms->in_sync) {
		/*
		 * Better to issue requests to the same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
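/*
 * Illustrative call flow, derived from the code above and below:
 * write_callback() calls fail_mirror() once per mirror whose bit is
 * set in the dm_io error mask, and recovery_complete() does the same
 * for sync errors.  The first occurrence of each error type raises a
 * dm event via trigger_event(), so userspace (e.g. dmeventd) can
 * react.
 */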
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct region *reg = (struct region *)context;
	struct mirror_set *ms = reg->rh->ms;
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	rh_recovery_end(reg, !(read_err || write_err));
}
static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}
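/*
 * For example, in a 3-way mirror whose default mirror is mirror[0],
 * 'from' above describes mirror 0 and to[0]/to[1] describe mirrors 1
 * and 2, so the single kcopyd_copy() performs one read and two
 * writes.  KCOPYD_IGNORE_ERROR is set so that a failing destination
 * does not abort the copy; write errors are instead reported through
 * the per-destination bits that recovery_complete() inspects.
 */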
static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/* Start quiescing some regions. */
	rh_recovery_prepare(&ms->rh);

	/* Copy any already quiesced regions. */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/* Update the in sync flag. */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}
/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return get_default_mirror(ms);
}
/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}
static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/* We can only read balance if the region is in sync. */
		if (rh_in_sync(&ms->rh, region, 1))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = get_default_mirror(ms);

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}
/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm_io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
/*
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
static void __bio_mark_nosync(struct mirror_set *ms,
			      struct bio *bio, unsigned done, int error)
{
	unsigned long flags;
	struct region_hash *rh = &ms->rh;
	struct dirty_log *log = ms->rh.log;
	struct region *reg;
	region_t region = bio_to_region(rh, bio);
	int recovering = 0;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);
	ms->in_sync = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) RH_DIRTY
	 *   2) RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) RH_RECOVERING: flushing pending writes
	 * In any case, the region should not be connected to any list.
	 */
	recovering = (reg->state == RH_RECOVERING);
	reg->state = RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	bio_endio(bio, error);
	if (recovering)
		complete_resync_work(reg, 0);
}
static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's end_io function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/* Raising an event can block, so do it in the main thread. */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wake(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}
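/*
 * One dm_io request fans the bio out to every mirror at once; the
 * io_regions share the bio's own pages (DM_IO_BVEC), so no data is
 * copied.  write_callback() then receives a bitmask with one bit per
 * mirror, set for each leg whose write failed.
 */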
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/* Classify each write. */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

	/* Dispatch io. */
	if (unlikely(ms->log_failure))
		while ((bio = bio_list_pop(&sync)))
			bio_endio(bio, -EIO);
	else while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	while ((bio = bio_list_pop(failures)))
		__bio_mark_nosync(ms, bio, bio->bi_size, 0);
}
static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}
/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static int _do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	return (ms->failures.head) ? 1 : 0;
}
static void do_mirror(struct work_struct *work)
{
	/*
	 * If _do_mirror returns 1, we give it
	 * another shot.  This helps for cases like
	 * 'suspend' where we call flush_workqueue
	 * and expect all work to be finished.  If
	 * a failure happens during a suspend, we
	 * couldn't issue a 'wake' because it would
	 * not be honored.  Therefore, we return '1'
	 * from _do_mirror, and retry here.
	 */
	while (_do_mirror(work))
		schedule();
}
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}
static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) ||
		 size > ti->len);
}
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}
/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}
/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, the only supported feature is "handle_errors".
 */
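/*
 * An illustrative table line (hypothetical devices and sizes):
 *
 *   0 2097152 mirror core 2 1024 nosync 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors
 *
 * i.e. a 1GiB mirror using the in-core log with a region size of
 * 1024 sectors, two legs starting at offset 0, and the optional
 * handle_errors feature enabled.
 */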
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the DM_RAID1_HANDLE_ERRORS
	 * flag being present: the decision to balance depends on a region's
	 * sync state, which is unreliable when errors are being ignored.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wake(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wake(ms);
}
/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}
static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}
static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu 0 ",
		       (unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);

		sz += ms->rh.log->type->status(ms->rh.log, type, result+sz,
					       maxlen-sz);
		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
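/*
 * Illustrative STATUSTYPE_INFO output for a hypothetical two-leg
 * mirror with 127 of 128 regions in sync (trailing log status
 * elided):
 *
 *   2 253:3 253:4 127/128 0 ...
 */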
static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 3},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};
static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		dm_dirty_log_exit();
	}

	return r;
}
static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("unregister failed %d", r);

	dm_dirty_log_exit();
}
/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");