#include <linux/raid/raid5.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
+#include <linux/kthread.h>
#include <asm/atomic.h>
#include <linux/raid/bitmap.h>
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
if (atomic_dec_and_test(&sh->count)) {
- if (!list_empty(&sh->lru))
- BUG();
- if (atomic_read(&conf->active_stripes)==0)
- BUG();
+ BUG_ON(!list_empty(&sh->lru));
+ BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
raid5_conf_t *conf = sh->raid_conf;
int i;
- if (atomic_read(&sh->count) != 0)
- BUG();
- if (test_bit(STRIPE_HANDLE, &sh->state))
- BUG();
+ BUG_ON(atomic_read(&sh->count) != 0);
+ BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
CHECK_DEVLOCK();
PRINTK("init_stripe called, stripe %llu\n",
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
conf->device_lock,
- unplug_slaves(conf->mddev);
+ unplug_slaves(conf->mddev)
);
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, pd_idx, disks);
} else {
if (atomic_read(&sh->count)) {
- if (!list_empty(&sh->lru))
- BUG();
+ BUG_ON(!list_empty(&sh->lru));
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list),
conf->device_lock,
- unplug_slaves(conf->mddev);
+ unplug_slaves(conf->mddev)
);
osh = get_free_stripe(conf);
spin_unlock_irq(&conf->device_lock);
spin_unlock_irq(&conf->device_lock);
if (!sh)
return 0;
- if (atomic_read(&sh->count))
- BUG();
+ BUG_ON(atomic_read(&sh->count));
shrink_buffers(sh, conf->pool_size);
kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
ptr[0] = page_address(sh->dev[pd_idx].page);
switch(method) {
case READ_MODIFY_WRITE:
- if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
- BUG();
+ BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
for (i=disks ; i-- ;) {
if (i==pd_idx)
continue;
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- if (sh->dev[i].written) BUG();
+ BUG_ON(sh->dev[i].written);
sh->dev[i].written = chosen;
check_xor();
}
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
- if (sh->dev[i].written) BUG();
+ BUG_ON(sh->dev[i].written);
sh->dev[i].written = chosen;
}
break;
if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
goto overlap;
- if (*bip && bi->bi_next && (*bip) != bi->bi_next)
- BUG();
+ BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
if (*bip)
bi->bi_next = *bip;
*bip = bi;
set_bit(STRIPE_HANDLE, &sh->state);
if (failed == 0) {
char *pagea;
- if (uptodate != disks)
- BUG();
+ BUG_ON(uptodate != disks);
compute_parity(sh, CHECK_PARITY);
uptodate--;
pagea = page_address(sh->dev[sh->pd_idx].page);
clear_bit(STRIPE_EXPANDING, &sh->state);
} else if (expanded) {
clear_bit(STRIPE_EXPAND_READY, &sh->state);
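+ /* drop the count taken in sync_request(); the metadata checkpoint
+ * waits for reshape_stripes to reach zero before writing the superblock */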
+ atomic_dec(&conf->reshape_stripes);
wake_up(&conf->wait_for_overlap);
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
}
sector_t logical_sector, last_sector;
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
+ int remaining;
if (unlikely(bio_barrier(bi))) {
bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
int disks;
-
+
retry:
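+ /* join the waitqueue before sampling expand/suspend state so a
+ * wakeup between those checks and schedule() cannot be missed */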
+ prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
if (likely(conf->expand_progress == MaxSector))
disks = conf->raid_disks;
else {
+ /* spinlock is needed as expand_progress may be
+ * 64bit on a 32bit platform, and so it might be
+ * possible to see a half-updated value
+ * Of course expand_progress could change after
+ * the lock is dropped, so once we get a reference
+ * to the stripe that we think we need, we will
+ * have to check again.
+ */
spin_lock_irq(&conf->device_lock);
disks = conf->raid_disks;
if (logical_sector >= conf->expand_progress)
disks = conf->previous_raid_disks;
+ else {
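+ /* sectors between expand_lo and expand_progress have been
+ * relocated but not yet recorded in the superblock; a write
+ * here could be lost if the reshape restarts, so wait for
+ * the checkpoint to catch up */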
+ if (logical_sector >= conf->expand_lo) {
+ spin_unlock_irq(&conf->device_lock);
+ schedule();
+ goto retry;
+ }
+ }
spin_unlock_irq(&conf->device_lock);
}
new_sector = raid5_compute_sector(logical_sector, disks, disks - 1,
(unsigned long long)new_sector,
(unsigned long long)logical_sector);
- prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK));
if (sh) {
if (unlikely(conf->expand_progress != MaxSector)) {
/* expansion might have moved on while waiting for a
- * stripe, so we much do the range check again.
+ * stripe, so we must do the range check again.
+ * Expansion could still move past after this
+ * test, but as we are holding a reference to
+ * 'sh', we know that if that happens,
+ * STRIPE_EXPANDING will get set and the expansion
+ * won't proceed until we finish with the stripe.
*/
int must_retry = 0;
spin_lock_irq(&conf->device_lock);
goto retry;
}
}
+ /* FIXME what if we get a false positive because these
+ * are being updated.
+ */
+ if (logical_sector >= mddev->suspend_lo &&
+ logical_sector < mddev->suspend_hi) {
+ release_stripe(sh);
+ schedule();
+ goto retry;
+ }
if (test_bit(STRIPE_EXPANDING, &sh->state) ||
!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
}
spin_lock_irq(&conf->device_lock);
- if (--bi->bi_phys_segments == 0) {
+ remaining = --bi->bi_phys_segments;
+ spin_unlock_irq(&conf->device_lock);
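+ /* if that was the last segment, complete the bio without
+ * holding device_lock */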
+ if (remaining == 0) {
int bytes = bi->bi_size;
if ( bio_data_dir(bi) == WRITE )
bi->bi_size = 0;
bi->bi_end_io(bi, bytes, 0);
}
- spin_unlock_irq(&conf->device_lock);
return 0;
}
*/
int i;
int dd_idx;
+ sector_t writepos, safepos, gap;
+
+ if (sector_nr == 0 &&
+ conf->expand_progress != 0) {
+ /* restarting in the middle, skip the initial sectors */
+ sector_nr = conf->expand_progress;
+ sector_div(sector_nr, conf->raid_disks-1);
+ *skipped = 1;
+ return sector_nr;
+ }
+
+ /* we update the metadata when there is more than 3Meg
+ * in the block range (that is rather arbitrary, should
+ * probably be time based) or when the data about to be
+ * copied would over-write the source of the data at
+ * the front of the range.
+ * i.e. one new-layout stripe beyond expand_progress maps to a
+ * device offset at or past where expand_lo maps in the old layout.
+ */
+ writepos = conf->expand_progress +
+ conf->chunk_size/512*(conf->raid_disks-1);
+ sector_div(writepos, conf->raid_disks-1);
+ safepos = conf->expand_lo;
+ sector_div(safepos, conf->previous_raid_disks-1);
+ gap = conf->expand_progress - conf->expand_lo;
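+ /* writepos: per-device offset just past where the next copied stripe
+ * will land in the new layout; safepos: per-device offset, in the old
+ * layout, of the first data not yet recorded as relocated */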
+
+ if (writepos >= safepos ||
+ gap > (conf->raid_disks-1)*3000*2 /*3Meg*/) {
+ /* Cannot proceed until we've updated the superblock... */
+ wait_event(conf->wait_for_overlap,
+ atomic_read(&conf->reshape_stripes)==0);
+ mddev->reshape_position = conf->expand_progress;
+ mddev->sb_dirty = 1;
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait, mddev->sb_dirty == 0 ||
+ kthread_should_stop());
+ spin_lock_irq(&conf->device_lock);
+ conf->expand_lo = mddev->reshape_position;
+ spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_for_overlap);
+ }
+
for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) {
int j;
int skipped = 0;
sh = get_active_stripe(conf, sector_nr+i,
conf->raid_disks, pd_idx, 0);
set_bit(STRIPE_EXPANDING, &sh->state);
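+ /* account for this stripe so the checkpoint above can wait for
+ * all in-flight reshape stripes to complete */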
+ atomic_inc(&conf->reshape_stripes);
/* If any of this stripe is beyond the end of the old
* array, then we need to zero those blocks
*/
list_del_init(first);
atomic_inc(&sh->count);
- if (atomic_read(&sh->count)!= 1)
- BUG();
+ BUG_ON(atomic_read(&sh->count)!= 1);
spin_unlock_irq(&conf->device_lock);
handled++;
return -EIO;
}
+ if (mddev->reshape_position != MaxSector) {
+ /* Check that we can continue the reshape.
+ * Currently only the number of disks can change, it must
+ * increase, and the reshape must already be past the point
+ * where a stripe would over-write itself
+ */
+ sector_t here_new, here_old;
+ int old_disks;
+
+ if (mddev->new_level != mddev->level ||
+ mddev->new_layout != mddev->layout ||
+ mddev->new_chunk != mddev->chunk_size) {
+ printk(KERN_ERR "raid5: %s: unsupported reshape required - aborting.\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+ if (mddev->delta_disks <= 0) {
+ printk(KERN_ERR "raid5: %s: unsupported reshape (reduce disks) required - aborting.\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+ old_disks = mddev->raid_disks - mddev->delta_disks;
+ /* reshape_position must be on a new-stripe boundary, and one
+ * further up in new geometry must map after here in old geometry.
+ */
+ here_new = mddev->reshape_position;
+ if (sector_div(here_new, (mddev->chunk_size>>9)*(mddev->raid_disks-1))) {
+ printk(KERN_ERR "raid5: reshape_position not on a stripe boundary\n");
+ return -EINVAL;
+ }
+ /* here_new is the stripe we will write to */
+ here_old = mddev->reshape_position;
+ sector_div(here_old, (mddev->chunk_size>>9)*(old_disks-1));
+ /* here_old is the first stripe that we might need to read from */
+ if (here_new >= here_old) {
+ /* Reading from the same stripe as writing to - bad */
+ printk(KERN_ERR "raid5: reshape_position too early for auto-recovery - aborting.\n");
+ return -EINVAL;
+ }
+ printk(KERN_INFO "raid5: reshape will continue\n");
+ /* OK, we should be able to continue; */
+ }
+
+
mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL);
if ((conf = mddev->private) == NULL)
goto abort;
- conf->disks = kzalloc(mddev->raid_disks * sizeof(struct disk_info),
+ if (mddev->reshape_position == MaxSector) {
+ conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks;
+ } else {
+ conf->raid_disks = mddev->raid_disks;
+ conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
+ }
+
+ conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
GFP_KERNEL);
if (!conf->disks)
goto abort;
ITERATE_RDEV(mddev,rdev,tmp) {
raid_disk = rdev->raid_disk;
- if (raid_disk >= mddev->raid_disks
+ if (raid_disk >= conf->raid_disks
|| raid_disk < 0)
continue;
disk = conf->disks + raid_disk;
}
}
- conf->raid_disks = mddev->raid_disks;
/*
* 0 for a fully functional array, 1 for a degraded array.
*/
conf->level = mddev->level;
conf->algorithm = mddev->layout;
conf->max_nr_stripes = NR_STRIPES;
- conf->expand_progress = MaxSector;
+ conf->expand_progress = mddev->reshape_position;
/* device size must be a multiple of chunk size */
mddev->size &= ~(mddev->chunk_size/1024 -1);
print_raid5_conf(conf);
+ if (conf->expand_progress != MaxSector) {
+ printk("...ok start reshape thread\n");
+ conf->expand_lo = conf->expand_progress;
+ atomic_set(&conf->reshape_stripes, 0);
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
+ "%s_reshape");
+ /* FIXME if md_register_thread fails?? */
+ md_wakeup_thread(mddev->sync_thread);
+
+ }
+
/* read-ahead size must cover two whole stripes, which is
* 2 * (n-1) * chunksize where 'n' is the number of raid devices
*/
mddev->queue->unplug_fn = raid5_unplug_device;
mddev->queue->issue_flush_fn = raid5_issue_flush;
- mddev->array_size = mddev->size * (mddev->raid_disks - 1);
+ mddev->array_size = mddev->size * (conf->previous_raid_disks - 1);
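+ /* array_size stays at the pre-reshape size; end_reshape() grows it
+ * once the reshape completes */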
return 0;
abort:
if (conf) {
/*
* find the disk ...
*/
- for (disk=0; disk < mddev->raid_disks; disk++)
+ for (disk=0; disk < conf->raid_disks; disk++)
if ((p=conf->disks + disk)->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->raid_disk = disk;
}
#ifdef CONFIG_MD_RAID5_RESHAPE
-static int raid5_reshape(mddev_t *mddev, int raid_disks)
+static int raid5_check_reshape(mddev_t *mddev)
{
raid5_conf_t *conf = mddev_to_conf(mddev);
int err;
- mdk_rdev_t *rdev;
- struct list_head *rtmp;
- int spares = 0;
- int added_devices = 0;
- if (mddev->degraded ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- return -EBUSY;
- if (conf->raid_disks > raid_disks)
- return -EINVAL; /* Cannot shrink array yet */
- if (conf->raid_disks == raid_disks)
+ if (mddev->delta_disks < 0 ||
+ mddev->new_level != mddev->level)
+ return -EINVAL; /* Cannot shrink array or change level yet */
+ if (mddev->delta_disks == 0)
return 0; /* nothing to do */
/* Can only proceed if there are plenty of stripe_heads.
* If the chunk size is too large for the current stripe cache,
* user-space should request more stripe_heads first.
*/
- if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
+ if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
+ (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
(mddev->chunk_size / STRIPE_SIZE)*4);
return -ENOSPC;
}
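+ /* re-allocate every stripe_head with room for the new number of devices */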
+ err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
+ if (err)
+ return err;
+
+ /* looks like we might be able to manage this */
+ return 0;
+}
+
+static int raid5_start_reshape(mddev_t *mddev)
+{
+ raid5_conf_t *conf = mddev_to_conf(mddev);
+ mdk_rdev_t *rdev;
+ struct list_head *rtmp;
+ int spares = 0;
+ int added_devices = 0;
+
+ if (mddev->degraded ||
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ return -EBUSY;
+
ITERATE_RDEV(mddev, rdev, rtmp)
if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags))
spares++;
- if (conf->raid_disks + spares < raid_disks-1)
+
+ if (spares < mddev->delta_disks-1)
/* Not enough devices even to make a degraded array
* of that size
*/
return -EINVAL;
- err = resize_stripes(conf, raid_disks);
- if (err)
- return err;
-
+ atomic_set(&conf->reshape_stripes, 0);
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
- mddev->raid_disks = conf->raid_disks = raid_disks;
+ conf->raid_disks += mddev->delta_disks;
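+ /* expand_progress tracks how far the reshape has actually gone;
+ * expand_lo tracks how far the superblock records it as safe */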
conf->expand_progress = 0;
+ conf->expand_lo = 0;
spin_unlock_irq(&conf->device_lock);
/* Add some new drives, as many as will fit.
break;
}
- mddev->degraded = (raid_disks - conf->previous_raid_disks) - added_devices;
+ mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices;
+ mddev->raid_disks = conf->raid_disks;
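+ /* record the reshape in the superblock so it can be resumed
+ * after a crash or clean shutdown */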
+ mddev->reshape_position = 0;
+ mddev->sb_dirty = 1;
+
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
{
struct block_device *bdev;
- conf->mddev->array_size = conf->mddev->size * (conf->mddev->raid_disks-1);
- set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
- conf->mddev->changed = 1;
-
- bdev = bdget_disk(conf->mddev->gendisk, 0);
- if (bdev) {
- mutex_lock(&bdev->bd_inode->i_mutex);
- i_size_write(bdev->bd_inode, conf->mddev->array_size << 10);
- mutex_unlock(&bdev->bd_inode->i_mutex);
- bdput(bdev);
+ if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+ conf->mddev->array_size = conf->mddev->size * (conf->raid_disks-1);
+ set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1);
+ conf->mddev->changed = 1;
+
+ bdev = bdget_disk(conf->mddev->gendisk, 0);
+ if (bdev) {
+ mutex_lock(&bdev->bd_inode->i_mutex);
+ i_size_write(bdev->bd_inode, conf->mddev->array_size << 10);
+ mutex_unlock(&bdev->bd_inode->i_mutex);
+ bdput(bdev);
+ }
+ spin_lock_irq(&conf->device_lock);
+ conf->expand_progress = MaxSector;
+ spin_unlock_irq(&conf->device_lock);
+ conf->mddev->reshape_position = MaxSector;
}
- spin_lock_irq(&conf->device_lock);
- conf->expand_progress = MaxSector;
- spin_unlock_irq(&conf->device_lock);
}
static void raid5_quiesce(mddev_t *mddev, int state)
raid5_conf_t *conf = mddev_to_conf(mddev);
switch(state) {
+ case 2: /* resume for a suspend */
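+ /* let make_request() waiters re-check suspend_lo/suspend_hi */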
+ wake_up(&conf->wait_for_overlap);
+ break;
+
case 1: /* stop all writes */
spin_lock_irq(&conf->device_lock);
conf->quiesce = 1;
spin_lock_irq(&conf->device_lock);
conf->quiesce = 0;
wake_up(&conf->wait_for_stripe);
+ wake_up(&conf->wait_for_overlap);
spin_unlock_irq(&conf->device_lock);
break;
}
.sync_request = sync_request,
.resize = raid5_resize,
#ifdef CONFIG_MD_RAID5_RESHAPE
- .reshape = raid5_reshape,
+ .check_reshape = raid5_check_reshape,
+ .start_reshape = raid5_start_reshape,
#endif
.quiesce = raid5_quiesce,
};