diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 20ae32d67e21786de09be86275583ab20819adbc..31843604049cdb7d6f916fee3861c1994869cf6d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -73,10 +73,8 @@ static void print_raid5_conf (raid5_conf_t *conf);
 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 {
        if (atomic_dec_and_test(&sh->count)) {
-               if (!list_empty(&sh->lru))
-                       BUG();
-               if (atomic_read(&conf->active_stripes)==0)
-                       BUG();
+               BUG_ON(!list_empty(&sh->lru));
+               BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
                        if (test_bit(STRIPE_DELAYED, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
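[Note: this hunk, like several below, folds open-coded "if (cond) BUG();" tests into BUG_ON().  The kernel's generic fallback for the macro is roughly "do { if (unlikely(cond)) BUG(); } while (0)"; the exact definition is per-architecture.  A minimal userspace sketch of the equivalence, with assert() standing in for BUG() (none of this is part of the patch):

#include <assert.h>

#define BUG()        assert(0)                        /* userspace stand-in for the kernel's BUG() */
#define BUG_ON(cond) do { if (cond) BUG(); } while (0)

int main(void)
{
        int count = 1;

        if (count == 0)         /* old style, as removed above */
                BUG();

        BUG_ON(count == 0);     /* new style, same trap condition */

        return 0;
}
]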
@@ -184,10 +182,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
        raid5_conf_t *conf = sh->raid_conf;
        int i;
 
-       if (atomic_read(&sh->count) != 0)
-               BUG();
-       if (test_bit(STRIPE_HANDLE, &sh->state))
-               BUG();
+       BUG_ON(atomic_read(&sh->count) != 0);
+       BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
        
        CHECK_DEVLOCK();
        PRINTK("init_stripe called, stripe %llu\n", 
@@ -262,15 +258,14 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
                                                     < (conf->max_nr_stripes *3/4)
                                                     || !conf->inactive_blocked),
                                                    conf->device_lock,
-                                                   unplug_slaves(conf->mddev);
+                                                   unplug_slaves(conf->mddev)
                                        );
                                conf->inactive_blocked = 0;
                        } else
                                init_stripe(sh, sector, pd_idx, disks);
                } else {
                        if (atomic_read(&sh->count)) {
-                               if (!list_empty(&sh->lru))
-                                       BUG();
+                       BUG_ON(!list_empty(&sh->lru));
                        } else {
                                if (!test_bit(STRIPE_HANDLE, &sh->state))
                                        atomic_inc(&conf->active_stripes);
@@ -407,7 +402,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
                wait_event_lock_irq(conf->wait_for_stripe,
                                    !list_empty(&conf->inactive_list),
                                    conf->device_lock,
-                                   unplug_slaves(conf->mddev);
+                                   unplug_slaves(conf->mddev)
                        );
                osh = get_free_stripe(conf);
                spin_unlock_irq(&conf->device_lock);
@@ -465,8 +460,7 @@ static int drop_one_stripe(raid5_conf_t *conf)
        spin_unlock_irq(&conf->device_lock);
        if (!sh)
                return 0;
-       if (atomic_read(&sh->count))
-               BUG();
+       BUG_ON(atomic_read(&sh->count));
        shrink_buffers(sh, conf->pool_size);
        kmem_cache_free(conf->slab_cache, sh);
        atomic_dec(&conf->active_stripes);
@@ -882,8 +876,7 @@ static void compute_parity(struct stripe_head *sh, int method)
        ptr[0] = page_address(sh->dev[pd_idx].page);
        switch(method) {
        case READ_MODIFY_WRITE:
-               if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
-                       BUG();
+               BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
                for (i=disks ; i-- ;) {
                        if (i==pd_idx)
                                continue;
@@ -896,7 +889,7 @@ static void compute_parity(struct stripe_head *sh, int method)
                                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                        wake_up(&conf->wait_for_overlap);
 
-                               if (sh->dev[i].written) BUG();
+                               BUG_ON(sh->dev[i].written);
                                sh->dev[i].written = chosen;
                                check_xor();
                        }
@@ -912,7 +905,7 @@ static void compute_parity(struct stripe_head *sh, int method)
                                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                        wake_up(&conf->wait_for_overlap);
 
-                               if (sh->dev[i].written) BUG();
+                               BUG_ON(sh->dev[i].written);
                                sh->dev[i].written = chosen;
                        }
                break;
@@ -995,8 +988,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
        if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
                goto overlap;
 
-       if (*bip && bi->bi_next && (*bip) != bi->bi_next)
-               BUG();
+       BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
        if (*bip)
                bi->bi_next = *bip;
        *bip = bi;
@@ -1430,8 +1422,7 @@ static void handle_stripe(struct stripe_head *sh)
                set_bit(STRIPE_HANDLE, &sh->state);
                if (failed == 0) {
                        char *pagea;
-                       if (uptodate != disks)
-                               BUG();
+                       BUG_ON(uptodate != disks);
                        compute_parity(sh, CHECK_PARITY);
                        uptodate--;
                        pagea = page_address(sh->dev[sh->pd_idx].page);
@@ -1743,6 +1734,7 @@ static int make_request(request_queue_t *q, struct bio * bi)
        sector_t logical_sector, last_sector;
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
+       int remaining;
 
        if (unlikely(bio_barrier(bi))) {
                bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
@@ -1762,15 +1754,31 @@ static int make_request(request_queue_t *q, struct bio * bi)
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                DEFINE_WAIT(w);
                int disks;
-               
+
        retry:
+               prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
                if (likely(conf->expand_progress == MaxSector))
                        disks = conf->raid_disks;
                else {
+                       /* The spinlock is needed as expand_progress may be
+                        * 64bit on a 32bit platform, and so it might be
+                        * possible to see a half-updated value otherwise.
+                        * Of course expand_progress could change after
+                        * the lock is dropped, so once we get a reference
+                        * to the stripe that we think is the right one, we
+                        * will have to check again.
+                        */
                        spin_lock_irq(&conf->device_lock);
                        disks = conf->raid_disks;
                        if (logical_sector >= conf->expand_progress)
                                disks = conf->previous_raid_disks;
+                       else {
+                               if (logical_sector >= conf->expand_lo) {
+                                       spin_unlock_irq(&conf->device_lock);
+                                       schedule();
+                                       goto retry;
+                               }
+                       }
                        spin_unlock_irq(&conf->device_lock);
                }
                new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
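[Note: the comment added above is about torn reads.  On a 32-bit machine a 64-bit sector_t update is two word-sized stores, so a reader that skips device_lock can observe one old word and one new word.  A self-contained userspace sketch of that hazard and of the locked read used here, with hypothetical names and a pthread mutex standing in for the spinlock (not part of the patch):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant bit of raid5_conf_t. */
struct fake_conf {
        pthread_mutex_t device_lock;
        union {
                uint64_t whole;         /* what sector_t holds                  */
                uint32_t half[2];       /* how a 32-bit CPU actually stores it  */
        } expand_progress;
};

/* A 64-bit store on a 32-bit machine is two word stores; written out
 * explicitly here so the unsafe window is visible on any host.
 */
static void unsafe_advance(struct fake_conf *conf, uint64_t newval)
{
        union { uint64_t whole; uint32_t half[2]; } n = { .whole = newval };

        conf->expand_progress.half[0] = n.half[0];
        /* a lockless reader here can see one old word and one new word */
        conf->expand_progress.half[1] = n.half[1];
}

/* What make_request() does instead: read under device_lock. */
static uint64_t safe_read(struct fake_conf *conf)
{
        uint64_t v;

        pthread_mutex_lock(&conf->device_lock);
        v = conf->expand_progress.whole;
        pthread_mutex_unlock(&conf->device_lock);
        return v;
}

int main(void)
{
        struct fake_conf conf = { PTHREAD_MUTEX_INITIALIZER, { 0 } };

        unsafe_advance(&conf, 0x100000000ULL);  /* crosses the 32-bit boundary */
        printf("expand_progress = %llu\n", (unsigned long long)safe_read(&conf));
        return 0;
}
]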
@@ -1779,12 +1787,16 @@ static int make_request(request_queue_t *q, struct bio * bi)
                        (unsigned long long)new_sector, 
                        (unsigned long long)logical_sector);
 
-               prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
                sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
                if (sh) {
                        if (unlikely(conf->expand_progress != MaxSector)) {
                                /* expansion might have moved on while waiting for a
-                                * stripe, so we much do the range check again.
+                                * stripe, so we must do the range check again.
+                                * Expansion could still move past after this
+                                * test, but as we are holding a reference to
+                                * 'sh', we know that if that happens,
+                                *  STRIPE_EXPANDING will get set and the expansion
+                                * won't proceed until we finish with the stripe.
                                 */
                                int must_retry = 0;
                                spin_lock_irq(&conf->device_lock);
@@ -1798,6 +1810,15 @@ static int make_request(request_queue_t *q, struct bio * bi)
                                        goto retry;
                                }
                        }
+                       /* FIXME what if we get a false positive because these
+                        * are being updated.
+                        */
+                       if (logical_sector >= mddev->suspend_lo &&
+                           logical_sector < mddev->suspend_hi) {
+                               release_stripe(sh);
+                               schedule();
+                               goto retry;
+                       }
 
                        if (test_bit(STRIPE_EXPANDING, &sh->state) ||
                            !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
@@ -1823,7 +1844,9 @@ static int make_request(request_queue_t *q, struct bio * bi)
                        
        }
        spin_lock_irq(&conf->device_lock);
-       if (--bi->bi_phys_segments == 0) {
+       remaining = --bi->bi_phys_segments;
+       spin_unlock_irq(&conf->device_lock);
+       if (remaining == 0) {
                int bytes = bi->bi_size;
 
                if ( bio_data_dir(bi) == WRITE )
@@ -1831,7 +1854,6 @@ static int make_request(request_queue_t *q, struct bio * bi)
                bi->bi_size = 0;
                bi->bi_end_io(bi, bytes, 0);
        }
-       spin_unlock_irq(&conf->device_lock);
        return 0;
 }
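[Note: the last two hunks also narrow the device_lock critical section: the remaining segment count is sampled while the lock is held, but bi_end_io() is now called only after the lock (and its irq disable) has been dropped.  A self-contained sketch of that pattern, with hypothetical names and a pthread mutex in place of the spinlock (not part of the patch):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical request context; 'remaining' plays the role of
 * bi->bi_phys_segments and the mutex stands in for device_lock.
 */
struct request_ctx {
        pthread_mutex_t lock;
        int remaining;
};

static void complete_request(struct request_ctx *ctx)
{
        /* stand-in for bi->bi_end_io(): runs with no lock held */
        printf("request complete\n");
}

static void put_segment(struct request_ctx *ctx)
{
        int remaining;

        /* only the counter update happens under the lock ... */
        pthread_mutex_lock(&ctx->lock);
        remaining = --ctx->remaining;
        pthread_mutex_unlock(&ctx->lock);

        /* ... the completion callback is invoked after dropping it */
        if (remaining == 0)
                complete_request(ctx);
}

int main(void)
{
        struct request_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 2 };

        put_segment(&ctx);
        put_segment(&ctx);      /* second call drops the count to 0 */
        return 0;
}
]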
 
@@ -1877,6 +1899,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                 */
                int i;
                int dd_idx;
+               sector_t writepos, safepos, gap;
 
                if (sector_nr == 0 &&
                    conf->expand_progress != 0) {
@@ -1887,15 +1910,36 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                        return sector_nr;
                }
 
-               /* Cannot proceed until we've updated the superblock... */
-               wait_event(conf->wait_for_overlap,
-                          atomic_read(&conf->reshape_stripes)==0);
-               mddev->reshape_position = conf->expand_progress;
-
-               mddev->sb_dirty = 1;
-               md_wakeup_thread(mddev->thread);
-               wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
-                       kthread_should_stop());
+               /* We update the metadata when there is more than 3Meg
+                * of unsaved progress in the block range (that is rather
+                * arbitrary; it should probably be time based), or when
+                * the data about to be copied would overwrite the source
+                * of the data at the front of the range, i.e. when the
+                * position one full (new) stripe ahead of expand_progress,
+                * mapped with the new geometry, lands at or beyond where
+                * expand_lo maps with the old geometry.
+                */
+               writepos = conf->expand_progress +
+                       conf->chunk_size/512*(conf->raid_disks-1);
+               sector_div(writepos, conf->raid_disks-1);
+               safepos = conf->expand_lo;
+               sector_div(safepos, conf->previous_raid_disks-1);
+               gap = conf->expand_progress - conf->expand_lo;
+
+               if (writepos >= safepos ||
+                   gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
+                       /* Cannot proceed until we've updated the superblock... */
+                       wait_event(conf->wait_for_overlap,
+                                  atomic_read(&conf->reshape_stripes)==0);
+                       mddev->reshape_position = conf->expand_progress;
+                       mddev->sb_dirty = 1;
+                       md_wakeup_thread(mddev->thread);
+                       wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
+                                  kthread_should_stop());
+                       spin_lock_irq(&conf->device_lock);
+                       conf->expand_lo = mddev->reshape_position;
+                       spin_unlock_irq(&conf->device_lock);
+                       wake_up(&conf->wait_for_overlap);
+               }
 
                for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
                        int j;
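[Note: to make the throttle above concrete, here is a small userspace sketch of the same arithmetic with made-up numbers (a reshape from 3 to 4 disks, 64KiB chunks).  All values are in 512-byte sectors and the variable names mirror the kernel code, but nothing here is part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* made-up reshape state: growing from 3 to 4 disks, 64KiB chunks */
        uint64_t chunk_size = 64 * 1024;        /* bytes */
        int raid_disks = 4, previous_raid_disks = 3;
        uint64_t expand_progress = 3840;        /* array sectors, new layout */
        uint64_t expand_lo = 3072;              /* last value saved in the superblock */

        /* per-device offset one full (new) stripe ahead of expand_progress */
        uint64_t writepos = (expand_progress +
                             chunk_size / 512 * (raid_disks - 1)) / (raid_disks - 1);
        /* per-device offset that expand_lo maps to in the old layout */
        uint64_t safepos = expand_lo / (previous_raid_disks - 1);
        uint64_t gap = expand_progress - expand_lo;

        printf("writepos=%llu safepos=%llu gap=%llu\n",
               (unsigned long long)writepos, (unsigned long long)safepos,
               (unsigned long long)gap);

        if (writepos >= safepos || gap > (uint64_t)(raid_disks - 1) * 3000 * 2)
                printf("update the superblock before copying any further\n");
        else
                printf("keep copying; no metadata update needed yet\n");
        return 0;
}

With these numbers writepos (1408) is still below safepos (1536) and the gap (768 sectors) is well under the 3Meg limit, so the loop keeps copying without touching the superblock.]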
@@ -2043,8 +2087,7 @@ static void raid5d (mddev_t *mddev)
 
                list_del_init(first);
                atomic_inc(&sh->count);
-               if (atomic_read(&sh->count)!= 1)
-                       BUG();
+               BUG_ON(atomic_read(&sh->count)!= 1);
                spin_unlock_irq(&conf->device_lock);
                
                handled++;
@@ -2322,6 +2365,7 @@ static int run(mddev_t *mddev)
 
        if (conf->expand_progress != MaxSector) {
                printk("...ok start reshape thread\n");
+               conf->expand_lo = conf->expand_progress;
                atomic_set(&conf->reshape_stripes, 0);
                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -2560,21 +2604,15 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
 }
 
 #ifdef CONFIG_MD_RAID5_RESHAPE
-static int raid5_reshape(mddev_t *mddev, int raid_disks)
+static int raid5_check_reshape(mddev_t *mddev)
 {
        raid5_conf_t *conf = mddev_to_conf(mddev);
        int err;
-       mdk_rdev_t *rdev;
-       struct list_head *rtmp;
-       int spares = 0;
-       int added_devices = 0;
 
-       if (mddev->degraded ||
-           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
-               return -EBUSY;
-       if (conf->raid_disks > raid_disks)
-               return -EINVAL; /* Cannot shrink array yet */
-       if (conf->raid_disks == raid_disks)
+       if (mddev->delta_disks < 0 ||
+           mddev->new_level != mddev->level)
+               return -EINVAL; /* Cannot shrink array or change level yet */
+       if (mddev->delta_disks == 0)
                return 0; /* nothing to do */
 
        /* Can only proceed if there are plenty of stripe_heads.
@@ -2585,31 +2623,50 @@ static int raid5_reshape(mddev_t *mddev, int raid_disks)
         * If the chunk size is greater, user-space should request more
         * stripe_heads first.
         */
-       if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
+       if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
+           (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
                printk(KERN_WARNING "raid5: reshape: not enough stripes.  Needed %lu\n",
                       (mddev->chunk_size / STRIPE_SIZE)*4);
                return -ENOSPC;
        }
 
+       err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
+       if (err)
+               return err;
+
+       /* looks like we might be able to manage this */
+       return 0;
+}
+
+static int raid5_start_reshape(mddev_t *mddev)
+{
+       raid5_conf_t *conf = mddev_to_conf(mddev);
+       mdk_rdev_t *rdev;
+       struct list_head *rtmp;
+       int spares = 0;
+       int added_devices = 0;
+
+       if (mddev->degraded ||
+           test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+               return -EBUSY;
+
        ITERATE_RDEV(mddev, rdev, rtmp)
                if (rdev->raid_disk < 0 &&
                    !test_bit(Faulty, &rdev->flags))
                        spares++;
-       if (conf->raid_disks + spares < raid_disks-1)
+
+       if (spares < mddev->delta_disks-1)
                /* Not enough devices even to make a degraded array
                 * of that size
                 */
                return -EINVAL;
 
-       err = resize_stripes(conf, raid_disks);
-       if (err)
-               return err;
-
        atomic_set(&conf->reshape_stripes, 0);
        spin_lock_irq(&conf->device_lock);
        conf->previous_raid_disks = conf->raid_disks;
-       conf->raid_disks = raid_disks;
+       conf->raid_disks += mddev->delta_disks;
        conf->expand_progress = 0;
+       conf->expand_lo = 0;
        spin_unlock_irq(&conf->device_lock);
 
        /* Add some new drives, as many as will fit.
@@ -2629,12 +2686,8 @@ static int raid5_reshape(mddev_t *mddev, int raid_disks)
                                break;
                }
 
-       mddev->degraded = (raid_disks - conf->previous_raid_disks) - added_devices;
-       mddev->new_chunk = mddev->chunk_size;
-       mddev->new_layout = mddev->layout;
-       mddev->new_level = mddev->level;
-       mddev->raid_disks = raid_disks;
-       mddev->delta_disks = raid_disks - conf->previous_raid_disks;
+       mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices;
+       mddev->raid_disks = conf->raid_disks;
        mddev->reshape_position = 0;
        mddev->sb_dirty = 1;
 
@@ -2648,7 +2701,6 @@ static int raid5_reshape(mddev_t *mddev, int raid_disks)
                mddev->recovery = 0;
                spin_lock_irq(&conf->device_lock);
                mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
-               mddev->delta_disks = 0;
                conf->expand_progress = MaxSector;
                spin_unlock_irq(&conf->device_lock);
                return -EAGAIN;
@@ -2687,6 +2739,10 @@ static void raid5_quiesce(mddev_t *mddev, int state)
        raid5_conf_t *conf = mddev_to_conf(mddev);
 
        switch(state) {
+       case 2: /* resume for a suspend */
+               wake_up(&conf->wait_for_overlap);
+               break;
+
        case 1: /* stop all writes */
                spin_lock_irq(&conf->device_lock);
                conf->quiesce = 1;
@@ -2700,6 +2756,7 @@ static void raid5_quiesce(mddev_t *mddev, int state)
                spin_lock_irq(&conf->device_lock);
                conf->quiesce = 0;
                wake_up(&conf->wait_for_stripe);
+               wake_up(&conf->wait_for_overlap);
                spin_unlock_irq(&conf->device_lock);
                break;
        }
@@ -2721,7 +2778,8 @@ static struct mdk_personality raid5_personality =
        .sync_request   = sync_request,
        .resize         = raid5_resize,
 #ifdef CONFIG_MD_RAID5_RESHAPE
-       .reshape        = raid5_reshape,
+       .check_reshape  = raid5_check_reshape,
+       .start_reshape  = raid5_start_reshape,
 #endif
        .quiesce        = raid5_quiesce,
 };