2 * raid5.c : Multiple Devices driver for Linux
3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4 * Copyright (C) 1999, 2000 Ingo Molnar
6 * RAID-5 management functions.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
13 * You should have received a copy of the GNU General Public License
14 * (for example /usr/src/linux/COPYING); if not, write to the Free
15 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 #include <linux/config.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/raid/raid5.h>
23 #include <linux/highmem.h>
24 #include <linux/bitops.h>
25 #include <asm/atomic.h>
27 #include <linux/raid/bitmap.h>
33 #define NR_STRIPES 256
34 #define STRIPE_SIZE PAGE_SIZE
35 #define STRIPE_SHIFT (PAGE_SHIFT - 9)
36 #define STRIPE_SECTORS (STRIPE_SIZE>>9)
37 #define IO_THRESHOLD 1
38 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
39 #define HASH_MASK (NR_HASH - 1)
41 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
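/*
 * Example (illustrative, assuming 4K pages and 64-bit pointers):
 * NR_HASH is 4096 / 8 = 512 buckets and STRIPE_SHIFT is 12 - 9 = 3,
 * so a stripe with base sector 1000 hashes to bucket
 * (1000 >> 3) & 511 = 125.
 */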
43 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
44 * order without overlap. There may be several bio's per stripe+device, and
45 * a bio could span several devices.
46 * When walking this list for a particular stripe+device, we must never proceed
47 * beyond a bio that extends past this device, as the next bio might no longer be valid.
49 * This macro is used to determine the 'next' bio in the list, given the sector
50 * of the current stripe+device
52 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
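/*
 * Example (illustrative): with 4K pages STRIPE_SECTORS is 8, so the
 * stripe+device chunk starting at sector 64 covers sectors 64-71.
 * A bio at sector 66 with bi_size 1024 (2 sectors) ends at 68 < 72,
 * so the walk follows bi_next; a bio at sector 70 with bi_size 2048
 * (4 sectors) ends at 74 >= 72, so r5_next_bio() returns NULL and
 * the walk stops.
 */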
54 * The following can be used to debug the driver
57 #define RAID5_PARANOIA 1
58 #if RAID5_PARANOIA && defined(CONFIG_SMP)
59 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
61 # define CHECK_DEVLOCK()
64 #define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))
70 static void print_raid5_conf (raid5_conf_t *conf);
72 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
74 if (atomic_dec_and_test(&sh->count)) {
75 if (!list_empty(&sh->lru))
77 if (atomic_read(&conf->active_stripes)==0)
79 if (test_bit(STRIPE_HANDLE, &sh->state)) {
80 if (test_bit(STRIPE_DELAYED, &sh->state))
81 list_add_tail(&sh->lru, &conf->delayed_list);
82 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
83 conf->seq_write == sh->bm_seq)
84 list_add_tail(&sh->lru, &conf->bitmap_list);
86 clear_bit(STRIPE_BIT_DELAY, &sh->state);
87 list_add_tail(&sh->lru, &conf->handle_list);
89 md_wakeup_thread(conf->mddev->thread);
91 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
92 atomic_dec(&conf->preread_active_stripes);
93 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
94 md_wakeup_thread(conf->mddev->thread);
96 atomic_dec(&conf->active_stripes);
97 if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
98 list_add_tail(&sh->lru, &conf->inactive_list);
99 wake_up(&conf->wait_for_stripe);
104 static void release_stripe(struct stripe_head *sh)
106 raid5_conf_t *conf = sh->raid_conf;
109 spin_lock_irqsave(&conf->device_lock, flags);
110 __release_stripe(conf, sh);
111 spin_unlock_irqrestore(&conf->device_lock, flags);
114 static inline void remove_hash(struct stripe_head *sh)
116 PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
118 hlist_del_init(&sh->hash);
121 static void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
123 struct hlist_head *hp = stripe_hash(conf, sh->sector);
125 PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
128 hlist_add_head(&sh->hash, hp);
132 /* find an idle stripe, make sure it is unhashed, and return it. */
133 static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
135 struct stripe_head *sh = NULL;
136 struct list_head *first;
139 if (list_empty(&conf->inactive_list))
141 first = conf->inactive_list.next;
142 sh = list_entry(first, struct stripe_head, lru);
143 list_del_init(first);
145 atomic_inc(&conf->active_stripes);
150 static void shrink_buffers(struct stripe_head *sh, int num)
155 for (i=0; i<num ; i++) {
159 sh->dev[i].page = NULL;
164 static int grow_buffers(struct stripe_head *sh, int num)
168 for (i=0; i<num; i++) {
171 if (!(page = alloc_page(GFP_KERNEL))) {
174 sh->dev[i].page = page;
179 static void raid5_build_block (struct stripe_head *sh, int i);
181 static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks)
183 raid5_conf_t *conf = sh->raid_conf;
186 if (atomic_read(&sh->count) != 0)
188 if (test_bit(STRIPE_HANDLE, &sh->state))
192 PRINTK("init_stripe called, stripe %llu\n",
193 (unsigned long long)sh->sector);
203 for (i = sh->disks; i--; ) {
204 struct r5dev *dev = &sh->dev[i];
206 if (dev->toread || dev->towrite || dev->written ||
207 test_bit(R5_LOCKED, &dev->flags)) {
208 printk("sector=%llx i=%d %p %p %p %d\n",
209 (unsigned long long)sh->sector, i, dev->toread,
210 dev->towrite, dev->written,
211 test_bit(R5_LOCKED, &dev->flags));
215 raid5_build_block(sh, i);
217 insert_hash(conf, sh);
220 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks)
222 struct stripe_head *sh;
223 struct hlist_node *hn;
226 PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
227 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
228 if (sh->sector == sector && sh->disks == disks)
230 PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
234 static void unplug_slaves(mddev_t *mddev);
235 static void raid5_unplug_device(request_queue_t *q);
237 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
238 int pd_idx, int noblock)
240 struct stripe_head *sh;
242 PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);
244 spin_lock_irq(&conf->device_lock);
247 wait_event_lock_irq(conf->wait_for_stripe,
249 conf->device_lock, /* nothing */);
250 sh = __find_stripe(conf, sector, disks);
252 if (!conf->inactive_blocked)
253 sh = get_free_stripe(conf);
254 if (noblock && sh == NULL)
257 conf->inactive_blocked = 1;
258 wait_event_lock_irq(conf->wait_for_stripe,
259 !list_empty(&conf->inactive_list) &&
260 (atomic_read(&conf->active_stripes)
261 < (conf->max_nr_stripes *3/4)
262 || !conf->inactive_blocked),
264 unplug_slaves(conf->mddev);
266 conf->inactive_blocked = 0;
268 init_stripe(sh, sector, pd_idx, disks);
270 if (atomic_read(&sh->count)) {
271 if (!list_empty(&sh->lru))
274 if (!test_bit(STRIPE_HANDLE, &sh->state))
275 atomic_inc(&conf->active_stripes);
276 if (!list_empty(&sh->lru))
277 list_del_init(&sh->lru);
280 } while (sh == NULL);
283 atomic_inc(&sh->count);
285 spin_unlock_irq(&conf->device_lock);
289 static int grow_one_stripe(raid5_conf_t *conf)
291 struct stripe_head *sh;
292 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
295 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
296 sh->raid_conf = conf;
297 spin_lock_init(&sh->lock);
299 if (grow_buffers(sh, conf->raid_disks)) {
300 shrink_buffers(sh, conf->raid_disks);
301 kmem_cache_free(conf->slab_cache, sh);
304 sh->disks = conf->raid_disks;
305 /* we just created an active stripe so... */
306 atomic_set(&sh->count, 1);
307 atomic_inc(&conf->active_stripes);
308 INIT_LIST_HEAD(&sh->lru);
313 static int grow_stripes(raid5_conf_t *conf, int num)
316 int devs = conf->raid_disks;
318 sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
319 sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev));
320 conf->active_name = 0;
321 sc = kmem_cache_create(conf->cache_name[conf->active_name],
322 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
326 conf->slab_cache = sc;
327 conf->pool_size = devs;
329 if (!grow_one_stripe(conf))
334 static int resize_stripes(raid5_conf_t *conf, int newsize)
336 /* Make all the stripes able to hold 'newsize' devices.
337 * New slots in each stripe get 'page' set to a new page.
339 * This happens in stages:
340 * 1/ create a new kmem_cache and allocate the required number of
342 * 2/ gather all the old stripe_heads and transfer the pages across
343 * to the new stripe_heads. This will have the side effect of
344 * freezing the array as once all stripe_heads have been collected,
345 * no IO will be possible. Old stripe heads are freed once their
346 * pages have been transferred over, and the old kmem_cache is
347 * freed when all stripes are done.
348 * 3/ reallocate conf->disks to be suitably bigger. If this fails,
349 * we simply return a failure status - no need to clean anything up.
350 * 4/ allocate new pages for the new slots in the new stripe_heads.
351 * If this fails, we don't bother trying to shrink the
352 * stripe_heads down again, we just leave them as they are.
353 * As each stripe_head is processed the new one is released into
356 * Once step2 is started, we cannot afford to wait for a write,
357 * so we use GFP_NOIO allocations.
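/*
 * (A GFP_KERNEL allocation here could recurse into writeback: with
 * every stripe held, a write to this array would block forever waiting
 * for a free stripe, hence GFP_NOIO.)
 */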
359 struct stripe_head *osh, *nsh;
360 LIST_HEAD(newstripes);
361 struct disk_info *ndisks;
366 if (newsize <= conf->pool_size)
367 return 0; /* never bother to shrink */
370 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
371 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
376 for (i = conf->max_nr_stripes; i; i--) {
377 nsh = kmem_cache_alloc(sc, GFP_KERNEL);
381 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
383 nsh->raid_conf = conf;
384 spin_lock_init(&nsh->lock);
386 list_add(&nsh->lru, &newstripes);
389 /* didn't get enough, give up */
390 while (!list_empty(&newstripes)) {
391 nsh = list_entry(newstripes.next, struct stripe_head, lru);
393 kmem_cache_free(sc, nsh);
395 kmem_cache_destroy(sc);
398 /* Step 2 - Must use GFP_NOIO now.
399 * OK, we have enough stripes, start collecting inactive
400 * stripes and copying them over
402 list_for_each_entry(nsh, &newstripes, lru) {
403 spin_lock_irq(&conf->device_lock);
404 wait_event_lock_irq(conf->wait_for_stripe,
405 !list_empty(&conf->inactive_list),
407 unplug_slaves(conf->mddev);
409 osh = get_free_stripe(conf);
410 spin_unlock_irq(&conf->device_lock);
411 atomic_set(&nsh->count, 1);
412 for(i=0; i<conf->pool_size; i++)
413 nsh->dev[i].page = osh->dev[i].page;
414 for( ; i<newsize; i++)
415 nsh->dev[i].page = NULL;
416 kmem_cache_free(conf->slab_cache, osh);
418 kmem_cache_destroy(conf->slab_cache);
421 * At this point, we are holding all the stripes so the array
422 * is completely stalled, so now is a good time to resize conf->disks.
425 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
427 for (i=0; i<conf->raid_disks; i++)
428 ndisks[i] = conf->disks[i];
430 conf->disks = ndisks;
434 /* Step 4, return new stripes to service */
435 while(!list_empty(&newstripes)) {
436 nsh = list_entry(newstripes.next, struct stripe_head, lru);
437 list_del_init(&nsh->lru);
438 for (i=conf->raid_disks; i < newsize; i++)
439 if (nsh->dev[i].page == NULL) {
440 struct page *p = alloc_page(GFP_NOIO);
441 nsh->dev[i].page = p;
447 /* critical section passed, GFP_NOIO no longer needed */
449 conf->slab_cache = sc;
450 conf->active_name = 1-conf->active_name;
451 conf->pool_size = newsize;
456 static int drop_one_stripe(raid5_conf_t *conf)
458 struct stripe_head *sh;
460 spin_lock_irq(&conf->device_lock);
461 sh = get_free_stripe(conf);
462 spin_unlock_irq(&conf->device_lock);
465 if (atomic_read(&sh->count))
467 shrink_buffers(sh, conf->pool_size);
468 kmem_cache_free(conf->slab_cache, sh);
469 atomic_dec(&conf->active_stripes);
473 static void shrink_stripes(raid5_conf_t *conf)
475 while (drop_one_stripe(conf))
478 if (conf->slab_cache)
479 kmem_cache_destroy(conf->slab_cache);
480 conf->slab_cache = NULL;
483 static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
486 struct stripe_head *sh = bi->bi_private;
487 raid5_conf_t *conf = sh->raid_conf;
488 int disks = sh->disks, i;
489 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
494 for (i=0 ; i<disks; i++)
495 if (bi == &sh->dev[i].req)
498 PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
499 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
510 spin_lock_irqsave(&conf->device_lock, flags);
511 /* we can return a buffer if we bypassed the cache or
512 * if the top buffer is not in highmem. If there are
513 * multiple buffers, leave the extra work to raid5d.
516 buffer = sh->bh_read[i];
518 (!PageHighMem(buffer->b_page)
519 || buffer->b_page == bh->b_page )
521 sh->bh_read[i] = buffer->b_reqnext;
522 buffer->b_reqnext = NULL;
525 spin_unlock_irqrestore(&conf->device_lock, flags);
526 if (sh->bh_page[i]==bh->b_page)
527 set_buffer_uptodate(bh);
529 if (buffer->b_page != bh->b_page)
530 memcpy(buffer->b_data, bh->b_data, bh->b_size);
531 buffer->b_end_io(buffer, 1);
534 set_bit(R5_UPTODATE, &sh->dev[i].flags);
536 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
537 printk(KERN_INFO "raid5: read error corrected!!\n");
538 clear_bit(R5_ReadError, &sh->dev[i].flags);
539 clear_bit(R5_ReWrite, &sh->dev[i].flags);
541 if (atomic_read(&conf->disks[i].rdev->read_errors))
542 atomic_set(&conf->disks[i].rdev->read_errors, 0);
545 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
546 atomic_inc(&conf->disks[i].rdev->read_errors);
547 if (conf->mddev->degraded)
548 printk(KERN_WARNING "raid5: read error not correctable.\n");
549 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
551 printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
552 else if (atomic_read(&conf->disks[i].rdev->read_errors)
553 > conf->max_nr_stripes)
555 "raid5: Too many read errors, failing device.\n");
559 set_bit(R5_ReadError, &sh->dev[i].flags);
561 clear_bit(R5_ReadError, &sh->dev[i].flags);
562 clear_bit(R5_ReWrite, &sh->dev[i].flags);
563 md_error(conf->mddev, conf->disks[i].rdev);
566 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
568 /* must restore b_page before unlocking buffer... */
569 if (sh->bh_page[i] != bh->b_page) {
570 bh->b_page = sh->bh_page[i];
571 bh->b_data = page_address(bh->b_page);
572 clear_buffer_uptodate(bh);
575 clear_bit(R5_LOCKED, &sh->dev[i].flags);
576 set_bit(STRIPE_HANDLE, &sh->state);
581 static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
584 struct stripe_head *sh = bi->bi_private;
585 raid5_conf_t *conf = sh->raid_conf;
586 int disks = sh->disks, i;
588 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
593 for (i=0 ; i<disks; i++)
594 if (bi == &sh->dev[i].req)
597 PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
598 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
605 spin_lock_irqsave(&conf->device_lock, flags);
607 md_error(conf->mddev, conf->disks[i].rdev);
609 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
611 clear_bit(R5_LOCKED, &sh->dev[i].flags);
612 set_bit(STRIPE_HANDLE, &sh->state);
613 __release_stripe(conf, sh);
614 spin_unlock_irqrestore(&conf->device_lock, flags);
619 static sector_t compute_blocknr(struct stripe_head *sh, int i);
621 static void raid5_build_block (struct stripe_head *sh, int i)
623 struct r5dev *dev = &sh->dev[i];
626 dev->req.bi_io_vec = &dev->vec;
628 dev->req.bi_max_vecs++;
629 dev->vec.bv_page = dev->page;
630 dev->vec.bv_len = STRIPE_SIZE;
631 dev->vec.bv_offset = 0;
633 dev->req.bi_sector = sh->sector;
634 dev->req.bi_private = sh;
638 dev->sector = compute_blocknr(sh, i);
641 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
643 char b[BDEVNAME_SIZE];
644 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
645 PRINTK("raid5: error called\n");
647 if (!test_bit(Faulty, &rdev->flags)) {
649 if (test_bit(In_sync, &rdev->flags)) {
650 conf->working_disks--;
652 conf->failed_disks++;
653 clear_bit(In_sync, &rdev->flags);
655 * if recovery was running, make sure it aborts.
657 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
659 set_bit(Faulty, &rdev->flags);
661 "raid5: Disk failure on %s, disabling device."
662 " Operation continuing on %d devices\n",
663 bdevname(rdev->bdev,b), conf->working_disks);
668 * Input: a 'big' sector number,
669 * Output: index of the data and parity disk, and the sector # in them.
671 static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
672 unsigned int data_disks, unsigned int * dd_idx,
673 unsigned int * pd_idx, raid5_conf_t *conf)
676 unsigned long chunk_number;
677 unsigned int chunk_offset;
679 int sectors_per_chunk = conf->chunk_size >> 9;
681 /* First compute the information on this sector */
684 * Compute the chunk number and the sector offset inside the chunk
686 chunk_offset = sector_div(r_sector, sectors_per_chunk);
687 chunk_number = r_sector;
688 BUG_ON(r_sector != chunk_number);
691 * Compute the stripe number
693 stripe = chunk_number / data_disks;
696 * Compute the data disk and parity disk indexes inside the stripe
698 *dd_idx = chunk_number % data_disks;
701 * Select the parity disk based on the user selected algorithm.
703 if (conf->level == 4)
704 *pd_idx = data_disks;
705 else switch (conf->algorithm) {
706 case ALGORITHM_LEFT_ASYMMETRIC:
707 *pd_idx = data_disks - stripe % raid_disks;
708 if (*dd_idx >= *pd_idx)
711 case ALGORITHM_RIGHT_ASYMMETRIC:
712 *pd_idx = stripe % raid_disks;
713 if (*dd_idx >= *pd_idx)
716 case ALGORITHM_LEFT_SYMMETRIC:
717 *pd_idx = data_disks - stripe % raid_disks;
718 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
720 case ALGORITHM_RIGHT_SYMMETRIC:
721 *pd_idx = stripe % raid_disks;
722 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
725 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
730 * Finally, compute the new sector number
732 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
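/*
 * Worked example (illustrative values): raid_disks = 5, data_disks = 4,
 * chunk_size = 64KiB so sectors_per_chunk = 128, LEFT_SYMMETRIC layout.
 * For r_sector = 1000: chunk_offset = 104, chunk_number = 7, stripe = 1,
 * initial dd_idx = 3; then pd_idx = 4 - (1 % 5) = 3 and
 * dd_idx = (3 + 1 + 3) % 5 = 2, so new_sector = 1 * 128 + 104 = 232:
 * array sector 1000 lives at sector 232 of device 2, parity on device 3.
 */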
737 static sector_t compute_blocknr(struct stripe_head *sh, int i)
739 raid5_conf_t *conf = sh->raid_conf;
740 int raid_disks = sh->disks, data_disks = raid_disks - 1;
741 sector_t new_sector = sh->sector, check;
742 int sectors_per_chunk = conf->chunk_size >> 9;
745 int chunk_number, dummy1, dummy2, dd_idx = i;
748 chunk_offset = sector_div(new_sector, sectors_per_chunk);
750 BUG_ON(new_sector != stripe);
753 switch (conf->algorithm) {
754 case ALGORITHM_LEFT_ASYMMETRIC:
755 case ALGORITHM_RIGHT_ASYMMETRIC:
759 case ALGORITHM_LEFT_SYMMETRIC:
760 case ALGORITHM_RIGHT_SYMMETRIC:
763 i -= (sh->pd_idx + 1);
766 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
770 chunk_number = stripe * data_disks + i;
771 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
773 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
774 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
775 printk(KERN_ERR "compute_blocknr: map not correct\n");
784 * Copy data between a page in the stripe cache, and a bio.
785 * There are no alignment or size guarantees between the page or the
786 * bio except that there is some overlap.
787 * All iovecs in the bio must be considered.
789 static void copy_data(int frombio, struct bio *bio,
793 char *pa = page_address(page);
798 if (bio->bi_sector >= sector)
799 page_offset = (signed)(bio->bi_sector - sector) * 512;
801 page_offset = (signed)(sector - bio->bi_sector) * -512;
802 bio_for_each_segment(bvl, bio, i) {
803 int len = bio_iovec_idx(bio,i)->bv_len;
807 if (page_offset < 0) {
808 b_offset = -page_offset;
809 page_offset += b_offset;
813 if (len > 0 && page_offset + len > STRIPE_SIZE)
814 clen = STRIPE_SIZE - page_offset;
818 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
820 memcpy(pa+page_offset, ba+b_offset, clen);
822 memcpy(ba+b_offset, pa+page_offset, clen);
823 __bio_kunmap_atomic(ba, KM_USER0);
825 if (clen < len) /* hit end of page */
831 #define check_xor() do { \
832 if (count == MAX_XOR_BLOCKS) { \
833 xor_block(count, STRIPE_SIZE, ptr); \
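/*
 * check_xor() flushes a full batch: xor_block() folds ptr[1..count-1]
 * into ptr[0] and count restarts at 1, so the destination page keeps
 * accumulating across batches without ever passing more than
 * MAX_XOR_BLOCKS pages to a single call.
 */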
839 static void compute_block(struct stripe_head *sh, int dd_idx)
841 int i, count, disks = sh->disks;
842 void *ptr[MAX_XOR_BLOCKS], *p;
844 PRINTK("compute_block, stripe %llu, idx %d\n",
845 (unsigned long long)sh->sector, dd_idx);
847 ptr[0] = page_address(sh->dev[dd_idx].page);
848 memset(ptr[0], 0, STRIPE_SIZE);
850 for (i = disks ; i--; ) {
853 p = page_address(sh->dev[i].page);
854 if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
857 printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
858 " not present\n", dd_idx,
859 (unsigned long long)sh->sector, i);
864 xor_block(count, STRIPE_SIZE, ptr);
865 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
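/*
 * This works because of the RAID-5 identity P = D0 ^ D1 ^ ... ^ Dn-1:
 * xor'ing the parity with every surviving data block reproduces the
 * missing one, e.g. D1 = P ^ D0 ^ D2 ^ ... ^ Dn-1.
 */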
868 static void compute_parity(struct stripe_head *sh, int method)
870 raid5_conf_t *conf = sh->raid_conf;
871 int i, pd_idx = sh->pd_idx, disks = sh->disks, count;
872 void *ptr[MAX_XOR_BLOCKS];
875 PRINTK("compute_parity, stripe %llu, method %d\n",
876 (unsigned long long)sh->sector, method);
879 ptr[0] = page_address(sh->dev[pd_idx].page);
881 case READ_MODIFY_WRITE:
882 if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
884 for (i=disks ; i-- ;) {
887 if (sh->dev[i].towrite &&
888 test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
889 ptr[count++] = page_address(sh->dev[i].page);
890 chosen = sh->dev[i].towrite;
891 sh->dev[i].towrite = NULL;
893 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
894 wake_up(&conf->wait_for_overlap);
896 if (sh->dev[i].written) BUG();
897 sh->dev[i].written = chosen;
902 case RECONSTRUCT_WRITE:
903 memset(ptr[0], 0, STRIPE_SIZE);
904 for (i= disks; i-- ;)
905 if (i!=pd_idx && sh->dev[i].towrite) {
906 chosen = sh->dev[i].towrite;
907 sh->dev[i].towrite = NULL;
909 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
910 wake_up(&conf->wait_for_overlap);
912 if (sh->dev[i].written) BUG();
913 sh->dev[i].written = chosen;
920 xor_block(count, STRIPE_SIZE, ptr);
924 for (i = disks; i--;)
925 if (sh->dev[i].written) {
926 sector_t sector = sh->dev[i].sector;
927 struct bio *wbi = sh->dev[i].written;
928 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
929 copy_data(1, wbi, sh->dev[i].page, sector);
930 wbi = r5_next_bio(wbi, sector);
933 set_bit(R5_LOCKED, &sh->dev[i].flags);
934 set_bit(R5_UPTODATE, &sh->dev[i].flags);
938 case RECONSTRUCT_WRITE:
942 ptr[count++] = page_address(sh->dev[i].page);
946 case READ_MODIFY_WRITE:
947 for (i = disks; i--;)
948 if (sh->dev[i].written) {
949 ptr[count++] = page_address(sh->dev[i].page);
954 xor_block(count, STRIPE_SIZE, ptr);
956 if (method != CHECK_PARITY) {
957 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
958 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
960 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
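/*
 * Both write methods arrive at the same parity by different routes:
 * READ_MODIFY_WRITE starts from the old parity and xors out the old
 * data before xoring in the new (Pnew = Pold ^ Dold ^ Dnew), while
 * RECONSTRUCT_WRITE zeroes the parity page and xors in every data
 * block of the stripe from scratch.
 */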
964 * Each stripe/dev can have one or more bion attached.
965 * toread/towrite point to the first in a chain.
966 * The bi_next chain must be in order.
968 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
971 raid5_conf_t *conf = sh->raid_conf;
974 PRINTK("adding bh b#%llu to stripe s#%llu\n",
975 (unsigned long long)bi->bi_sector,
976 (unsigned long long)sh->sector);
979 spin_lock(&sh->lock);
980 spin_lock_irq(&conf->device_lock);
982 bip = &sh->dev[dd_idx].towrite;
983 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
986 bip = &sh->dev[dd_idx].toread;
987 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
988 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
990 bip = & (*bip)->bi_next;
992 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
995 if (*bip && bi->bi_next && (*bip) != bi->bi_next)
1000 bi->bi_phys_segments ++;
1001 spin_unlock_irq(&conf->device_lock);
1002 spin_unlock(&sh->lock);
1004 PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
1005 (unsigned long long)bi->bi_sector,
1006 (unsigned long long)sh->sector, dd_idx);
1008 if (conf->mddev->bitmap && firstwrite) {
1009 sh->bm_seq = conf->seq_write;
1010 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
1012 set_bit(STRIPE_BIT_DELAY, &sh->state);
1016 /* check if page is covered */
1017 sector_t sector = sh->dev[dd_idx].sector;
1018 for (bi=sh->dev[dd_idx].towrite;
1019 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
1020 bi && bi->bi_sector <= sector;
1021 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
1022 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
1023 sector = bi->bi_sector + (bi->bi_size>>9);
1025 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
1026 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
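/*
 * Coverage example (illustrative): with STRIPE_SECTORS = 8 and
 * dev->sector = 64, two queued writes spanning sectors 64-67 and
 * 68-71 advance 'sector' to 68 and then 72; 72 >= 64 + 8, so the
 * whole page will be overwritten and R5_OVERWRITE is set.
 */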
1031 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
1032 spin_unlock_irq(&conf->device_lock);
1033 spin_unlock(&sh->lock);
1037 static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks)
1039 int sectors_per_chunk = conf->chunk_size >> 9;
1040 sector_t x = stripe;
1042 int chunk_offset = sector_div(x, sectors_per_chunk);
1044 raid5_compute_sector(stripe*(disks-1)*sectors_per_chunk
1045 + chunk_offset, disks, disks-1, &dd_idx, &pd_idx, conf);
1051 * handle_stripe - do things to a stripe.
1053 * We lock the stripe and then examine the state of various bits
1054 * to see what needs to be done.
1056 * return some read requests which now have data
1057 * return some write requests which are safely on disc
1058 * schedule a read on some buffers
1059 * schedule a write of some buffers
1060 * return confirmation of parity correctness
1062 * Parity calculations are done inside the stripe lock
1063 * buffers are taken off read_list or write_list, and bh_cache buffers
1064 * get BH_Lock set before the stripe lock is released.
1068 static void handle_stripe(struct stripe_head *sh)
1070 raid5_conf_t *conf = sh->raid_conf;
1071 int disks = sh->disks;
1072 struct bio *return_bi= NULL;
1075 int syncing, expanding, expanded;
1076 int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
1077 int non_overwrite = 0;
1081 PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
1082 (unsigned long long)sh->sector, atomic_read(&sh->count),
1085 spin_lock(&sh->lock);
1086 clear_bit(STRIPE_HANDLE, &sh->state);
1087 clear_bit(STRIPE_DELAYED, &sh->state);
1089 syncing = test_bit(STRIPE_SYNCING, &sh->state);
1090 expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1091 expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
1092 /* Now to look around and see what can be done */
1095 for (i=disks; i--; ) {
1098 clear_bit(R5_Insync, &dev->flags);
1100 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
1101 i, dev->flags, dev->toread, dev->towrite, dev->written);
1102 /* maybe we can reply to a read */
1103 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
1104 struct bio *rbi, *rbi2;
1105 PRINTK("Return read for disc %d\n", i);
1106 spin_lock_irq(&conf->device_lock);
1109 if (test_and_clear_bit(R5_Overlap, &dev->flags))
1110 wake_up(&conf->wait_for_overlap);
1111 spin_unlock_irq(&conf->device_lock);
1112 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1113 copy_data(0, rbi, dev->page, dev->sector);
1114 rbi2 = r5_next_bio(rbi, dev->sector);
1115 spin_lock_irq(&conf->device_lock);
1116 if (--rbi->bi_phys_segments == 0) {
1117 rbi->bi_next = return_bi;
1120 spin_unlock_irq(&conf->device_lock);
1125 /* now count some things */
1126 if (test_bit(R5_LOCKED, &dev->flags)) locked++;
1127 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
1130 if (dev->toread) to_read++;
1133 if (!test_bit(R5_OVERWRITE, &dev->flags))
1136 if (dev->written) written++;
1137 rdev = rcu_dereference(conf->disks[i].rdev);
1138 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
1139 /* The ReadError flag will just be confusing now */
1140 clear_bit(R5_ReadError, &dev->flags);
1141 clear_bit(R5_ReWrite, &dev->flags);
1143 if (!rdev || !test_bit(In_sync, &rdev->flags)
1144 || test_bit(R5_ReadError, &dev->flags)) {
1148 set_bit(R5_Insync, &dev->flags);
1151 PRINTK("locked=%d uptodate=%d to_read=%d"
1152 " to_write=%d failed=%d failed_num=%d\n",
1153 locked, uptodate, to_read, to_write, failed, failed_num);
1154 /* check if the array has lost two devices and, if so, some requests might need to be failed
1157 if (failed > 1 && to_read+to_write+written) {
1158 for (i=disks; i--; ) {
1161 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1164 rdev = rcu_dereference(conf->disks[i].rdev);
1165 if (rdev && test_bit(In_sync, &rdev->flags))
1166 /* multiple read failures in one stripe */
1167 md_error(conf->mddev, rdev);
1171 spin_lock_irq(&conf->device_lock);
1172 /* fail all writes first */
1173 bi = sh->dev[i].towrite;
1174 sh->dev[i].towrite = NULL;
1175 if (bi) { to_write--; bitmap_end = 1; }
1177 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1178 wake_up(&conf->wait_for_overlap);
1180 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1181 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1182 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1183 if (--bi->bi_phys_segments == 0) {
1184 md_write_end(conf->mddev);
1185 bi->bi_next = return_bi;
1190 /* and fail all 'written' */
1191 bi = sh->dev[i].written;
1192 sh->dev[i].written = NULL;
1193 if (bi) bitmap_end = 1;
1194 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
1195 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1196 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1197 if (--bi->bi_phys_segments == 0) {
1198 md_write_end(conf->mddev);
1199 bi->bi_next = return_bi;
1205 /* fail any reads if this device is non-operational */
1206 if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
1207 test_bit(R5_ReadError, &sh->dev[i].flags)) {
1208 bi = sh->dev[i].toread;
1209 sh->dev[i].toread = NULL;
1210 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
1211 wake_up(&conf->wait_for_overlap);
1213 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1214 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1215 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1216 if (--bi->bi_phys_segments == 0) {
1217 bi->bi_next = return_bi;
1223 spin_unlock_irq(&conf->device_lock);
1225 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1226 STRIPE_SECTORS, 0, 0);
1229 if (failed > 1 && syncing) {
1230 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1231 clear_bit(STRIPE_SYNCING, &sh->state);
1235 /* might be able to return some write requests if the parity block
1236 * is safe, or on a failed drive
1238 dev = &sh->dev[sh->pd_idx];
1240 ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
1241 test_bit(R5_UPTODATE, &dev->flags))
1242 || (failed == 1 && failed_num == sh->pd_idx))
1244 /* any written block on an uptodate or failed drive can be returned.
1245 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
1246 * never LOCKED, so we don't need to test 'failed' directly.
1248 for (i=disks; i--; )
1249 if (sh->dev[i].written) {
1251 if (!test_bit(R5_LOCKED, &dev->flags) &&
1252 test_bit(R5_UPTODATE, &dev->flags) ) {
1253 /* We can return any write requests */
1254 struct bio *wbi, *wbi2;
1256 PRINTK("Return write for disc %d\n", i);
1257 spin_lock_irq(&conf->device_lock);
1259 dev->written = NULL;
1260 while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1261 wbi2 = r5_next_bio(wbi, dev->sector);
1262 if (--wbi->bi_phys_segments == 0) {
1263 md_write_end(conf->mddev);
1264 wbi->bi_next = return_bi;
1269 if (dev->towrite == NULL)
1271 spin_unlock_irq(&conf->device_lock);
1273 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
1275 !test_bit(STRIPE_DEGRADED, &sh->state), 0);
1280 /* Now we might consider reading some blocks, either to check/generate
1281 * parity, or to satisfy requests
1282 * or to load a block that is being partially written.
1284 if (to_read || non_overwrite || (syncing && (uptodate < disks)) || expanding) {
1285 for (i=disks; i--;) {
1287 if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1289 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1292 (failed && (sh->dev[failed_num].toread ||
1293 (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
1296 /* we would like to get this block, possibly
1297 * by computing it, but we might not be able to
1299 if (uptodate == disks-1) {
1300 PRINTK("Computing block %d\n", i);
1301 compute_block(sh, i);
1303 } else if (test_bit(R5_Insync, &dev->flags)) {
1304 set_bit(R5_LOCKED, &dev->flags);
1305 set_bit(R5_Wantread, &dev->flags);
1307 /* if I am just reading this block and we don't have
1308 a failed drive, or any pending writes, then sidestep the cache */
1309 if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1310 ! syncing && !failed && !to_write) {
1311 sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
1312 sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
1316 PRINTK("Reading block %d (sync=%d)\n",
1321 set_bit(STRIPE_HANDLE, &sh->state);
1324 /* now to consider writing and what else, if anything should be read */
1327 for (i=disks ; i--;) {
1328 /* would I have to read this buffer for read_modify_write */
1330 if ((dev->towrite || i == sh->pd_idx) &&
1331 (!test_bit(R5_LOCKED, &dev->flags)
1333 || sh->bh_page[i]!=bh->b_page
1336 !test_bit(R5_UPTODATE, &dev->flags)) {
1337 if (test_bit(R5_Insync, &dev->flags)
1338 /* && !(!mddev->insync && i == sh->pd_idx) */
1341 else rmw += 2*disks; /* cannot read it */
1343 /* Would I have to read this buffer for reconstruct_write */
1344 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1345 (!test_bit(R5_LOCKED, &dev->flags)
1347 || sh->bh_page[i] != bh->b_page
1350 !test_bit(R5_UPTODATE, &dev->flags)) {
1351 if (test_bit(R5_Insync, &dev->flags)) rcw++;
1352 else rcw += 2*disks;
1355 PRINTK("for sector %llu, rmw=%d rcw=%d\n",
1356 (unsigned long long)sh->sector, rmw, rcw);
1357 set_bit(STRIPE_HANDLE, &sh->state);
1358 if (rmw < rcw && rmw > 0)
1359 /* prefer read-modify-write, but need to get some data */
1360 for (i=disks; i--;) {
1362 if ((dev->towrite || i == sh->pd_idx) &&
1363 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1364 test_bit(R5_Insync, &dev->flags)) {
1365 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1367 PRINTK("Read_old block %d for r-m-w\n", i);
1368 set_bit(R5_LOCKED, &dev->flags);
1369 set_bit(R5_Wantread, &dev->flags);
1372 set_bit(STRIPE_DELAYED, &sh->state);
1373 set_bit(STRIPE_HANDLE, &sh->state);
1377 if (rcw <= rmw && rcw > 0)
1378 /* want reconstruct write, but need to get some data */
1379 for (i=disks; i--;) {
1381 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
1382 !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1383 test_bit(R5_Insync, &dev->flags)) {
1384 if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1386 PRINTK("Read_old block %d for Reconstruct\n", i);
1387 set_bit(R5_LOCKED, &dev->flags);
1388 set_bit(R5_Wantread, &dev->flags);
1391 set_bit(STRIPE_DELAYED, &sh->state);
1392 set_bit(STRIPE_HANDLE, &sh->state);
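/*
 * Cost example (illustrative): on a 5-disk array, updating a single
 * data block by read-modify-write needs the old data and old parity
 * (rmw = 2), while reconstruct-write needs the three untouched data
 * blocks (rcw = 3), so rmw wins; overwrite all four data blocks and
 * rcw drops to 0, so reconstruct-write is chosen instead.
 */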
1396 /* now if nothing is locked, and if we have enough data, we can start a write request */
1397 if (locked == 0 && (rcw == 0 ||rmw == 0) &&
1398 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
1399 PRINTK("Computing parity...\n");
1400 compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
1401 /* now every locked buffer is ready to be written */
1403 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1404 PRINTK("Writing block %d\n", i);
1406 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1407 if (!test_bit(R5_Insync, &sh->dev[i].flags)
1408 || (i==sh->pd_idx && failed == 0))
1409 set_bit(STRIPE_INSYNC, &sh->state);
1411 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1412 atomic_dec(&conf->preread_active_stripes);
1413 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1414 md_wakeup_thread(conf->mddev->thread);
1419 /* maybe we need to check and possibly fix the parity for this stripe
1420 * Any reads will already have been scheduled, so we just see if enough data is available.
1423 if (syncing && locked == 0 &&
1424 !test_bit(STRIPE_INSYNC, &sh->state)) {
1425 set_bit(STRIPE_HANDLE, &sh->state);
1428 if (uptodate != disks)
1430 compute_parity(sh, CHECK_PARITY);
1432 pagea = page_address(sh->dev[sh->pd_idx].page);
1433 if ((*(u32*)pagea) == 0 &&
1434 !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1435 /* parity is correct (on disc, not in buffer any more) */
1436 set_bit(STRIPE_INSYNC, &sh->state);
1438 conf->mddev->resync_mismatches += STRIPE_SECTORS;
1439 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
1440 /* don't try to repair!! */
1441 set_bit(STRIPE_INSYNC, &sh->state);
1443 compute_block(sh, sh->pd_idx);
1448 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1449 /* either failed parity check, or recovery is happening */
1451 failed_num = sh->pd_idx;
1452 dev = &sh->dev[failed_num];
1453 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
1454 BUG_ON(uptodate != disks);
1456 set_bit(R5_LOCKED, &dev->flags);
1457 set_bit(R5_Wantwrite, &dev->flags);
1458 clear_bit(STRIPE_DEGRADED, &sh->state);
1460 set_bit(STRIPE_INSYNC, &sh->state);
1463 if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1464 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1465 clear_bit(STRIPE_SYNCING, &sh->state);
1468 /* If the failed drive is just a ReadError, then we might need to progress
1469 * the repair/check process
1471 if (failed == 1 && ! conf->mddev->ro &&
1472 test_bit(R5_ReadError, &sh->dev[failed_num].flags)
1473 && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
1474 && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
1476 dev = &sh->dev[failed_num];
1477 if (!test_bit(R5_ReWrite, &dev->flags)) {
1478 set_bit(R5_Wantwrite, &dev->flags);
1479 set_bit(R5_ReWrite, &dev->flags);
1480 set_bit(R5_LOCKED, &dev->flags);
1483 /* let's read it back */
1484 set_bit(R5_Wantread, &dev->flags);
1485 set_bit(R5_LOCKED, &dev->flags);
1490 if (expanded && test_bit(STRIPE_EXPANDING, &sh->state)) {
1491 /* Need to write out all blocks after computing parity */
1492 sh->disks = conf->raid_disks;
1493 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, conf->raid_disks);
1494 compute_parity(sh, RECONSTRUCT_WRITE);
1495 for (i= conf->raid_disks; i--;) {
1496 set_bit(R5_LOCKED, &sh->dev[i].flags);
1498 set_bit(R5_Wantwrite, &sh->dev[i].flags);
1500 clear_bit(STRIPE_EXPANDING, &sh->state);
1501 } else if (expanded) {
1502 clear_bit(STRIPE_EXPAND_READY, &sh->state);
1503 wake_up(&conf->wait_for_overlap);
1504 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
1507 if (expanding && locked == 0) {
1508 /* We have read all the blocks in this stripe and now we need to
1509 * copy some of them into a target stripe for expand.
1511 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1512 for (i=0; i< sh->disks; i++)
1513 if (i != sh->pd_idx) {
1514 int dd_idx, pd_idx, j;
1515 struct stripe_head *sh2;
1517 sector_t bn = compute_blocknr(sh, i);
1518 sector_t s = raid5_compute_sector(bn, conf->raid_disks,
1520 &dd_idx, &pd_idx, conf);
1521 sh2 = get_active_stripe(conf, s, conf->raid_disks, pd_idx, 1);
1523 /* so far only the early blocks of this stripe
1524 * have been requested. When later blocks
1525 * get requested, we will try again
1528 if(!test_bit(STRIPE_EXPANDING, &sh2->state) ||
1529 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
1530 /* must have already done this block */
1531 release_stripe(sh2);
1534 memcpy(page_address(sh2->dev[dd_idx].page),
1535 page_address(sh->dev[i].page),
1537 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
1538 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
1539 for (j=0; j<conf->raid_disks; j++)
1540 if (j != sh2->pd_idx &&
1541 !test_bit(R5_Expanded, &sh2->dev[j].flags))
1543 if (j == conf->raid_disks) {
1544 set_bit(STRIPE_EXPAND_READY, &sh2->state);
1545 set_bit(STRIPE_HANDLE, &sh2->state);
1547 release_stripe(sh2);
1551 spin_unlock(&sh->lock);
1553 while ((bi=return_bi)) {
1554 int bytes = bi->bi_size;
1556 return_bi = bi->bi_next;
1559 bi->bi_end_io(bi, bytes, 0);
1561 for (i=disks; i-- ;) {
1565 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1567 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1572 bi = &sh->dev[i].req;
1576 bi->bi_end_io = raid5_end_write_request;
1578 bi->bi_end_io = raid5_end_read_request;
1581 rdev = rcu_dereference(conf->disks[i].rdev);
1582 if (rdev && test_bit(Faulty, &rdev->flags))
1585 atomic_inc(&rdev->nr_pending);
1589 if (syncing || expanding || expanded)
1590 md_sync_acct(rdev->bdev, STRIPE_SECTORS);
1592 bi->bi_bdev = rdev->bdev;
1593 PRINTK("for %llu schedule op %ld on disc %d\n",
1594 (unsigned long long)sh->sector, bi->bi_rw, i);
1595 atomic_inc(&sh->count);
1596 bi->bi_sector = sh->sector + rdev->data_offset;
1597 bi->bi_flags = 1 << BIO_UPTODATE;
1599 bi->bi_max_vecs = 1;
1601 bi->bi_io_vec = &sh->dev[i].vec;
1602 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1603 bi->bi_io_vec[0].bv_offset = 0;
1604 bi->bi_size = STRIPE_SIZE;
1607 test_bit(R5_ReWrite, &sh->dev[i].flags))
1608 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1609 generic_make_request(bi);
1612 set_bit(STRIPE_DEGRADED, &sh->state);
1613 PRINTK("skip op %ld on disc %d for sector %llu\n",
1614 bi->bi_rw, i, (unsigned long long)sh->sector);
1615 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1616 set_bit(STRIPE_HANDLE, &sh->state);
1621 static void raid5_activate_delayed(raid5_conf_t *conf)
1623 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
1624 while (!list_empty(&conf->delayed_list)) {
1625 struct list_head *l = conf->delayed_list.next;
1626 struct stripe_head *sh;
1627 sh = list_entry(l, struct stripe_head, lru);
1629 clear_bit(STRIPE_DELAYED, &sh->state);
1630 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1631 atomic_inc(&conf->preread_active_stripes);
1632 list_add_tail(&sh->lru, &conf->handle_list);
1637 static void activate_bit_delay(raid5_conf_t *conf)
1639 /* device_lock is held */
1640 struct list_head head;
1641 list_add(&head, &conf->bitmap_list);
1642 list_del_init(&conf->bitmap_list);
1643 while (!list_empty(&head)) {
1644 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
1645 list_del_init(&sh->lru);
1646 atomic_inc(&sh->count);
1647 __release_stripe(conf, sh);
1651 static void unplug_slaves(mddev_t *mddev)
1653 raid5_conf_t *conf = mddev_to_conf(mddev);
1657 for (i=0; i<mddev->raid_disks; i++) {
1658 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
1659 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
1660 request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
1662 atomic_inc(&rdev->nr_pending);
1665 if (r_queue->unplug_fn)
1666 r_queue->unplug_fn(r_queue);
1668 rdev_dec_pending(rdev, mddev);
1675 static void raid5_unplug_device(request_queue_t *q)
1677 mddev_t *mddev = q->queuedata;
1678 raid5_conf_t *conf = mddev_to_conf(mddev);
1679 unsigned long flags;
1681 spin_lock_irqsave(&conf->device_lock, flags);
1683 if (blk_remove_plug(q)) {
1685 raid5_activate_delayed(conf);
1687 md_wakeup_thread(mddev->thread);
1689 spin_unlock_irqrestore(&conf->device_lock, flags);
1691 unplug_slaves(mddev);
1694 static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
1695 sector_t *error_sector)
1697 mddev_t *mddev = q->queuedata;
1698 raid5_conf_t *conf = mddev_to_conf(mddev);
1702 for (i=0; i<mddev->raid_disks && ret == 0; i++) {
1703 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
1704 if (rdev && !test_bit(Faulty, &rdev->flags)) {
1705 struct block_device *bdev = rdev->bdev;
1706 request_queue_t *r_queue = bdev_get_queue(bdev);
1708 if (!r_queue->issue_flush_fn)
1711 atomic_inc(&rdev->nr_pending);
1713 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
1715 rdev_dec_pending(rdev, mddev);
1724 static inline void raid5_plug_device(raid5_conf_t *conf)
1726 spin_lock_irq(&conf->device_lock);
1727 blk_plug_device(conf->mddev->queue);
1728 spin_unlock_irq(&conf->device_lock);
1731 static int make_request(request_queue_t *q, struct bio * bi)
1733 mddev_t *mddev = q->queuedata;
1734 raid5_conf_t *conf = mddev_to_conf(mddev);
1735 unsigned int dd_idx, pd_idx;
1736 sector_t new_sector;
1737 sector_t logical_sector, last_sector;
1738 struct stripe_head *sh;
1739 const int rw = bio_data_dir(bi);
1741 if (unlikely(bio_barrier(bi))) {
1742 bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
1746 md_write_start(mddev, bi);
1748 disk_stat_inc(mddev->gendisk, ios[rw]);
1749 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));
1751 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
1752 last_sector = bi->bi_sector + (bi->bi_size>>9);
1754 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
1756 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
1761 if (likely(conf->expand_progress == MaxSector))
1762 disks = conf->raid_disks;
1764 spin_lock_irq(&conf->device_lock);
1765 disks = conf->raid_disks;
1766 if (logical_sector >= conf->expand_progress)
1767 disks = conf->previous_raid_disks;
1768 spin_unlock_irq(&conf->device_lock);
1770 new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
1771 &dd_idx, &pd_idx, conf);
1772 PRINTK("raid5: make_request, sector %llu logical %llu\n",
1773 (unsigned long long)new_sector,
1774 (unsigned long long)logical_sector);
1776 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
1777 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
1779 if (unlikely(conf->expand_progress != MaxSector)) {
1780 /* expansion might have moved on while waiting for a
1781 * stripe, so we must do the range check again.
1784 spin_lock_irq(&conf->device_lock);
1785 if (logical_sector < conf->expand_progress &&
1786 disks == conf->previous_raid_disks)
1787 /* mismatch, need to try again */
1789 spin_unlock_irq(&conf->device_lock);
1796 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
1797 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
1798 /* Stripe is busy expanding or
1799 * add failed due to overlap. Flush everything
1802 raid5_unplug_device(mddev->queue);
1807 finish_wait(&conf->wait_for_overlap, &w);
1808 raid5_plug_device(conf);
1812 /* cannot get stripe for read-ahead, just give-up */
1813 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1814 finish_wait(&conf->wait_for_overlap, &w);
1819 spin_lock_irq(&conf->device_lock);
1820 if (--bi->bi_phys_segments == 0) {
1821 int bytes = bi->bi_size;
1823 if ( bio_data_dir(bi) == WRITE )
1824 md_write_end(mddev);
1826 bi->bi_end_io(bi, bytes, 0);
1828 spin_unlock_irq(&conf->device_lock);
1832 /* FIXME go_faster isn't used */
1833 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1835 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
1836 struct stripe_head *sh;
1838 sector_t first_sector, last_sector;
1839 int raid_disks = conf->raid_disks;
1840 int data_disks = raid_disks-1;
1841 sector_t max_sector = mddev->size << 1;
1844 if (sector_nr >= max_sector) {
1845 /* just being told to finish up .. nothing much to do */
1846 unplug_slaves(mddev);
1848 if (mddev->curr_resync < max_sector) /* aborted */
1849 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1851 else /* completed sync */
1853 bitmap_close_sync(mddev->bitmap);
1858 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
1859 /* reshaping is quite different to recovery/resync so it is
1860 * handled quite separately ... here.
1862 * On each call to sync_request, we gather one chunk worth of
1863 * destination stripes and flag them as expanding.
1864 * Then we find all the source stripes and request reads.
1865 * As the reads complete, handle_stripe will copy the data
1866 * into the destination stripe and release that stripe.
1870 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
1873 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks);
1874 sh = get_active_stripe(conf, sector_nr+i,
1875 conf->raid_disks, pd_idx, 0);
1876 set_bit(STRIPE_EXPANDING, &sh->state);
1877 /* If any of this stripe is beyond the end of the old
1878 * array, then we need to zero those blocks
1880 for (j=sh->disks; j--;) {
1882 if (j == sh->pd_idx)
1884 s = compute_blocknr(sh, j);
1885 if (s < (mddev->array_size<<1)) {
1889 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
1890 set_bit(R5_Expanded, &sh->dev[j].flags);
1891 set_bit(R5_UPTODATE, &sh->dev[j].flags);
1894 set_bit(STRIPE_EXPAND_READY, &sh->state);
1895 set_bit(STRIPE_HANDLE, &sh->state);
1899 spin_lock_irq(&conf->device_lock);
1900 conf->expand_progress = (sector_nr + i)*(conf->raid_disks-1);
1901 spin_unlock_irq(&conf->device_lock);
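/*
 * expand_progress is kept in array-sector units: sector_nr counts
 * sectors per device, and in the new geometry each device sector
 * holds (raid_disks - 1) data sectors of the array, hence the
 * multiplication above.
 */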
1902 /* Ok, those stripes are ready. We can start scheduling
1903 * reads on the source stripes.
1904 * The source stripes are determined by mapping the first and last
1905 * block on the destination stripes.
1907 raid_disks = conf->previous_raid_disks;
1908 data_disks = raid_disks - 1;
1910 raid5_compute_sector(sector_nr*(conf->raid_disks-1),
1911 raid_disks, data_disks,
1912 &dd_idx, &pd_idx, conf);
1914 raid5_compute_sector((sector_nr+conf->chunk_size/512)
1915 *(conf->raid_disks-1) -1,
1916 raid_disks, data_disks,
1917 &dd_idx, &pd_idx, conf);
1918 if (last_sector >= (mddev->size<<1))
1919 last_sector = (mddev->size<<1)-1;
1920 while (first_sector <= last_sector) {
1921 pd_idx = stripe_to_pdidx(first_sector, conf, conf->previous_raid_disks);
1922 sh = get_active_stripe(conf, first_sector,
1923 conf->previous_raid_disks, pd_idx, 0);
1924 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
1925 set_bit(STRIPE_HANDLE, &sh->state);
1927 first_sector += STRIPE_SECTORS;
1929 return conf->chunk_size>>9;
1931 /* if there is 1 or more failed drives and we are trying
1932 * to resync, then assert that we are finished, because there is
1933 * nothing we can do.
1935 if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1936 sector_t rv = (mddev->size << 1) - sector_nr;
1940 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1941 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1942 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
1943 /* we can skip this block, and probably more */
1944 sync_blocks /= STRIPE_SECTORS;
1946 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
1949 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
1950 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
1952 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0);
1953 /* make sure we don't swamp the stripe cache if someone else
1954 * is trying to get access
1956 schedule_timeout_uninterruptible(1);
1958 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
1959 spin_lock(&sh->lock);
1960 set_bit(STRIPE_SYNCING, &sh->state);
1961 clear_bit(STRIPE_INSYNC, &sh->state);
1962 spin_unlock(&sh->lock);
1967 return STRIPE_SECTORS;
1971 * This is our raid5 kernel thread.
1973 * We scan the hash table for stripes which can be handled now.
1974 * During the scan, completed stripes are saved for us by the interrupt
1975 * handler, so that they will not have to wait for our next wakeup.
1977 static void raid5d (mddev_t *mddev)
1979 struct stripe_head *sh;
1980 raid5_conf_t *conf = mddev_to_conf(mddev);
1983 PRINTK("+++ raid5d active\n");
1985 md_check_recovery(mddev);
1988 spin_lock_irq(&conf->device_lock);
1990 struct list_head *first;
1992 if (conf->seq_flush - conf->seq_write > 0) {
1993 int seq = conf->seq_flush;
1994 spin_unlock_irq(&conf->device_lock);
1995 bitmap_unplug(mddev->bitmap);
1996 spin_lock_irq(&conf->device_lock);
1997 conf->seq_write = seq;
1998 activate_bit_delay(conf);
2001 if (list_empty(&conf->handle_list) &&
2002 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
2003 !blk_queue_plugged(mddev->queue) &&
2004 !list_empty(&conf->delayed_list))
2005 raid5_activate_delayed(conf);
2007 if (list_empty(&conf->handle_list))
2010 first = conf->handle_list.next;
2011 sh = list_entry(first, struct stripe_head, lru);
2013 list_del_init(first);
2014 atomic_inc(&sh->count);
2015 if (atomic_read(&sh->count)!= 1)
2017 spin_unlock_irq(&conf->device_lock);
2023 spin_lock_irq(&conf->device_lock);
2025 PRINTK("%d stripes handled\n", handled);
2027 spin_unlock_irq(&conf->device_lock);
2029 unplug_slaves(mddev);
2031 PRINTK("--- raid5d inactive\n");
2035 raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
2037 raid5_conf_t *conf = mddev_to_conf(mddev);
2039 return sprintf(page, "%d\n", conf->max_nr_stripes);
2045 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
2047 raid5_conf_t *conf = mddev_to_conf(mddev);
2050 if (len >= PAGE_SIZE)
2055 new = simple_strtoul(page, &end, 10);
2056 if (!*page || (*end && *end != '\n') )
2058 if (new <= 16 || new > 32768)
2060 while (new < conf->max_nr_stripes) {
2061 if (drop_one_stripe(conf))
2062 conf->max_nr_stripes--;
2066 while (new > conf->max_nr_stripes) {
2067 if (grow_one_stripe(conf))
2068 conf->max_nr_stripes++;
2074 static struct md_sysfs_entry
2075 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
2076 raid5_show_stripe_cache_size,
2077 raid5_store_stripe_cache_size);
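/*
 * The attribute appears as md/stripe_cache_size in the array's sysfs
 * directory, e.g. (illustrative):
 *     echo 512 > /sys/block/md0/md/stripe_cache_size
 * which grows or shrinks the cache one stripe at a time within the
 * 17..32768 bounds enforced above.
 */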
2080 stripe_cache_active_show(mddev_t *mddev, char *page)
2082 raid5_conf_t *conf = mddev_to_conf(mddev);
2084 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
2089 static struct md_sysfs_entry
2090 raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
2092 static struct attribute *raid5_attrs[] = {
2093 &raid5_stripecache_size.attr,
2094 &raid5_stripecache_active.attr,
2097 static struct attribute_group raid5_attrs_group = {
2099 .attrs = raid5_attrs,
2102 static int run(mddev_t *mddev)
2105 int raid_disk, memory;
2107 struct disk_info *disk;
2108 struct list_head *tmp;
2110 if (mddev->level != 5 && mddev->level != 4) {
2111 printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
2112 mdname(mddev), mddev->level);
2116 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
2117 if ((conf = mddev->private) == NULL)
2119 conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
2124 conf->mddev = mddev;
2126 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
2129 spin_lock_init(&conf->device_lock);
2130 init_waitqueue_head(&conf->wait_for_stripe);
2131 init_waitqueue_head(&conf->wait_for_overlap);
2132 INIT_LIST_HEAD(&conf->handle_list);
2133 INIT_LIST_HEAD(&conf->delayed_list);
2134 INIT_LIST_HEAD(&conf->bitmap_list);
2135 INIT_LIST_HEAD(&conf->inactive_list);
2136 atomic_set(&conf->active_stripes, 0);
2137 atomic_set(&conf->preread_active_stripes, 0);
2139 PRINTK("raid5: run(%s) called.\n", mdname(mddev));
2141 ITERATE_RDEV(mddev,rdev,tmp) {
2142 raid_disk = rdev->raid_disk;
2143 if (raid_disk >= mddev->raid_disks
2146 disk = conf->disks + raid_disk;
2150 if (test_bit(In_sync, &rdev->flags)) {
2151 char b[BDEVNAME_SIZE];
2152 printk(KERN_INFO "raid5: device %s operational as raid"
2153 " disk %d\n", bdevname(rdev->bdev,b),
2155 conf->working_disks++;
2159 conf->raid_disks = mddev->raid_disks;
2161 * 0 for a fully functional array, 1 for a degraded array.
2163 mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
2164 conf->mddev = mddev;
2165 conf->chunk_size = mddev->chunk_size;
2166 conf->level = mddev->level;
2167 conf->algorithm = mddev->layout;
2168 conf->max_nr_stripes = NR_STRIPES;
2169 conf->expand_progress = MaxSector;
2171 /* device size must be a multiple of chunk size */
2172 mddev->size &= ~(mddev->chunk_size/1024 -1);
2173 mddev->resync_max_sectors = mddev->size << 1;
2175 if (!conf->chunk_size || conf->chunk_size % 4) {
2176 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
2177 conf->chunk_size, mdname(mddev));
2180 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
2182 "raid5: unsupported parity algorithm %d for %s\n",
2183 conf->algorithm, mdname(mddev));
2186 if (mddev->degraded > 1) {
2187 printk(KERN_ERR "raid5: not enough operational devices for %s"
2188 " (%d/%d failed)\n",
2189 mdname(mddev), conf->failed_disks, conf->raid_disks);
2193 if (mddev->degraded == 1 &&
2194 mddev->recovery_cp != MaxSector) {
2195 if (mddev->ok_start_degraded)
2197 "raid5: starting dirty degraded array: %s"
2198 "- data corruption possible.\n",
2202 "raid5: cannot start dirty degraded array for %s\n",
2209 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
2210 if (!mddev->thread) {
2212 "raid5: couldn't allocate thread for %s\n",
2217 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
2218 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
2219 if (grow_stripes(conf, conf->max_nr_stripes)) {
2221 "raid5: couldn't allocate %dkB for buffers\n", memory);
2222 shrink_stripes(conf);
2223 md_unregister_thread(mddev->thread);
2226 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
2227 memory, mdname(mddev));
2229 if (mddev->degraded == 0)
2230 printk("raid5: raid level %d set %s active with %d out of %d"
2231 " devices, algorithm %d\n", conf->level, mdname(mddev),
2232 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
2235 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
2236 " out of %d devices, algorithm %d\n", conf->level,
2237 mdname(mddev), mddev->raid_disks - mddev->degraded,
2238 mddev->raid_disks, conf->algorithm);
2240 print_raid5_conf(conf);
2242 /* read-ahead size must cover two whole stripes, which is
2243 * 2 * (n-1) * chunksize where 'n' is the number of raid devices
2246 int stripe = (mddev->raid_disks-1) * mddev->chunk_size
2248 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
2249 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
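/*
 * Example (illustrative): 5 raid disks with a 64KiB chunk give a
 * stripe of (5-1) * 64KiB = 256KiB = 64 pages of data, so ra_pages
 * is raised to at least 128 pages (512KiB) to span two full stripes.
 */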
2252 /* Ok, everything is just fine now */
2253 sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
2255 mddev->queue->unplug_fn = raid5_unplug_device;
2256 mddev->queue->issue_flush_fn = raid5_issue_flush;
2258 mddev->array_size = mddev->size * (mddev->raid_disks - 1);
2262 print_raid5_conf(conf);
2264 kfree(conf->stripe_hashtbl);
2267 mddev->private = NULL;
2268 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
2274 static int stop(mddev_t *mddev)
2276 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2278 md_unregister_thread(mddev->thread);
2279 mddev->thread = NULL;
2280 shrink_stripes(conf);
2281 kfree(conf->stripe_hashtbl);
2282 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2283 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
2286 mddev->private = NULL;
2291 static void print_sh (struct stripe_head *sh)
2295 printk("sh %llu, pd_idx %d, state %ld.\n",
2296 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
2297 printk("sh %llu, count %d.\n",
2298 (unsigned long long)sh->sector, atomic_read(&sh->count));
2299 printk("sh %llu, ", (unsigned long long)sh->sector);
2300 for (i = 0; i < sh->disks; i++) {
2301 printk("(cache%d: %p %ld) ",
2302 i, sh->dev[i].page, sh->dev[i].flags);
2307 static void printall (raid5_conf_t *conf)
2309 struct stripe_head *sh;
2310 struct hlist_node *hn;
2313 spin_lock_irq(&conf->device_lock);
2314 for (i = 0; i < NR_HASH; i++) {
2315 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
2316 if (sh->raid_conf != conf)
2321 spin_unlock_irq(&conf->device_lock);
2325 static void status (struct seq_file *seq, mddev_t *mddev)
2327 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
2330 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
2331 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
2332 for (i = 0; i < conf->raid_disks; i++)
2333 seq_printf (seq, "%s",
2334 conf->disks[i].rdev &&
2335 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
2336 seq_printf (seq, "]");
2339 seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
2344 static void print_raid5_conf (raid5_conf_t *conf)
2347 struct disk_info *tmp;
2349 printk("RAID5 conf printout:\n");
2351 printk("(conf==NULL)\n");
2354 printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
2355 conf->working_disks, conf->failed_disks);
2357 for (i = 0; i < conf->raid_disks; i++) {
2358 char b[BDEVNAME_SIZE];
2359 tmp = conf->disks + i;
2361 printk(" disk %d, o:%d, dev:%s\n",
2362 i, !test_bit(Faulty, &tmp->rdev->flags),
2363 bdevname(tmp->rdev->bdev,b));
2367 static int raid5_spare_active(mddev_t *mddev)
2370 raid5_conf_t *conf = mddev->private;
2371 struct disk_info *tmp;
2373 for (i = 0; i < conf->raid_disks; i++) {
2374 tmp = conf->disks + i;
2376 && !test_bit(Faulty, &tmp->rdev->flags)
2377 && !test_bit(In_sync, &tmp->rdev->flags)) {
2379 conf->failed_disks--;
2380 conf->working_disks++;
2381 set_bit(In_sync, &tmp->rdev->flags);
2384 print_raid5_conf(conf);
2388 static int raid5_remove_disk(mddev_t *mddev, int number)
2390 raid5_conf_t *conf = mddev->private;
2393 struct disk_info *p = conf->disks + number;
2395 print_raid5_conf(conf);
2398 if (test_bit(In_sync, &rdev->flags) ||
2399 atomic_read(&rdev->nr_pending)) {
2405 if (atomic_read(&rdev->nr_pending)) {
2406 /* lost the race, try later */
2413 print_raid5_conf(conf);
2417 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
2419 raid5_conf_t *conf = mddev->private;
2422 struct disk_info *p;
2424 if (mddev->degraded > 1)
2425 /* no point adding a device */
2431 for (disk=0; disk < mddev->raid_disks; disk++)
2432 if ((p=conf->disks + disk)->rdev == NULL) {
2433 clear_bit(In_sync, &rdev->flags);
2434 rdev->raid_disk = disk;
2436 if (rdev->saved_raid_disk != disk)
2438 rcu_assign_pointer(p->rdev, rdev);
2441 print_raid5_conf(conf);
2445 static int raid5_resize(mddev_t *mddev, sector_t sectors)
2447 /* no resync is happening, and there is enough space
2448 * on all devices, so we can resize.
2449 * We need to make sure resync covers any new space.
2450 * If the array is shrinking we should possibly wait until
2451 * any io in the removed space completes, but it hardly seems worth it.
2454 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
2455 mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
2456 set_capacity(mddev->gendisk, mddev->array_size << 1);
2458 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
2459 mddev->recovery_cp = mddev->size << 1;
2460 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2462 mddev->size = sectors /2;
2463 mddev->resync_max_sectors = sectors;
2467 static void raid5_quiesce(mddev_t *mddev, int state)
2469 raid5_conf_t *conf = mddev_to_conf(mddev);
2472 case 1: /* stop all writes */
2473 spin_lock_irq(&conf->device_lock);
2475 wait_event_lock_irq(conf->wait_for_stripe,
2476 atomic_read(&conf->active_stripes) == 0,
2477 conf->device_lock, /* nothing */);
2478 spin_unlock_irq(&conf->device_lock);
2481 case 0: /* re-enable writes */
2482 spin_lock_irq(&conf->device_lock);
2484 wake_up(&conf->wait_for_stripe);
2485 spin_unlock_irq(&conf->device_lock);
2490 static struct mdk_personality raid5_personality =
2494 .owner = THIS_MODULE,
2495 .make_request = make_request,
2499 .error_handler = error,
2500 .hot_add_disk = raid5_add_disk,
2501 .hot_remove_disk= raid5_remove_disk,
2502 .spare_active = raid5_spare_active,
2503 .sync_request = sync_request,
2504 .resize = raid5_resize,
2505 .quiesce = raid5_quiesce,
2508 static struct mdk_personality raid4_personality =
2512 .owner = THIS_MODULE,
2513 .make_request = make_request,
2517 .error_handler = error,
2518 .hot_add_disk = raid5_add_disk,
2519 .hot_remove_disk= raid5_remove_disk,
2520 .spare_active = raid5_spare_active,
2521 .sync_request = sync_request,
2522 .resize = raid5_resize,
2523 .quiesce = raid5_quiesce,
2526 static int __init raid5_init(void)
2528 register_md_personality(&raid5_personality);
2529 register_md_personality(&raid4_personality);
2533 static void raid5_exit(void)
2535 unregister_md_personality(&raid5_personality);
2536 unregister_md_personality(&raid4_personality);
2539 module_init(raid5_init);
2540 module_exit(raid5_exit);
2541 MODULE_LICENSE("GPL");
2542 MODULE_ALIAS("md-personality-4"); /* RAID5 */
2543 MODULE_ALIAS("md-raid5");
2544 MODULE_ALIAS("md-raid4");
2545 MODULE_ALIAS("md-level-5");
2546 MODULE_ALIAS("md-level-4");