2 md.c : Multiple Devices driver for Linux
3 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 completely rewritten, based on the MD driver code from Marc Zyngier
9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13 - kmod support by: Cyrus Durgin
14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17 - lots of fixes and improvements to the RAID1/RAID5 and generic
18 RAID code (such as request based resynchronization):
20 Neil Brown <neilb@cse.unsw.edu.au>.
22 - persistent bitmap code
23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25 This program is free software; you can redistribute it and/or modify
26 it under the terms of the GNU General Public License as published by
27 the Free Software Foundation; either version 2, or (at your option)
28 any later version.
30 You should have received a copy of the GNU General Public License
31 (for example /usr/src/linux/COPYING); if not, write to the Free
32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/kthread.h>
38 #include <linux/linkage.h>
39 #include <linux/raid/md.h>
40 #include <linux/raid/bitmap.h>
41 #include <linux/sysctl.h>
42 #include <linux/buffer_head.h> /* for invalidate_bdev */
43 #include <linux/poll.h>
44 #include <linux/mutex.h>
45 #include <linux/ctype.h>
46 #include <linux/freezer.h>
48 #include <linux/init.h>
50 #include <linux/file.h>
53 #include <linux/kmod.h>
56 #include <asm/unaligned.h>
58 #define MAJOR_NR MD_MAJOR
61 /* 63 partitions with the alternate major number (mdp) */
62 #define MdpMinorShift 6
65 #define dprintk(x...) ((void)(DEBUG && printk(x)))
69 static void autostart_arrays (int part);
72 static LIST_HEAD(pers_list);
73 static DEFINE_SPINLOCK(pers_lock);
75 static void md_print_devices(void);
77 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
80 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
81 * is 1000 KB/sec, so the extra system load does not show up that much.
82 * Increase it if you want to have more _guaranteed_ speed. Note that
83 * the RAID driver will use the maximum available bandwidth if the IO
84 * subsystem is idle. There is also an 'absolute maximum' reconstruction
85 * speed limit - in case reconstruction slows down your system despite compiled-in defaults.
88 * You can change these limits via /proc/sys/dev/raid/speed_limit_{min,max}
89 * or /sys/block/mdX/md/sync_speed_{min,max}.
92 static int sysctl_speed_limit_min = 1000;
93 static int sysctl_speed_limit_max = 200000;
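/*
 * Tuning sketch (the md0 name is illustrative): the limits above can be
 * overridden per array or system wide, e.g.
 *
 *   echo 5000 > /sys/block/md0/md/sync_speed_min
 *   echo 5000 > /proc/sys/dev/raid/speed_limit_min
 *
 * A per-array value of 0 means "use the system-wide limit", which is
 * exactly what speed_min()/speed_max() below implement.
 */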
94 static inline int speed_min(mddev_t *mddev)
96 return mddev->sync_speed_min ?
97 mddev->sync_speed_min : sysctl_speed_limit_min;
100 static inline int speed_max(mddev_t *mddev)
102 return mddev->sync_speed_max ?
103 mddev->sync_speed_max : sysctl_speed_limit_max;
106 static struct ctl_table_header *raid_table_header;
108 static ctl_table raid_table[] = {
110 .ctl_name = DEV_RAID_SPEED_LIMIT_MIN,
111 .procname = "speed_limit_min",
112 .data = &sysctl_speed_limit_min,
113 .maxlen = sizeof(int),
114 .mode = S_IRUGO|S_IWUSR,
115 .proc_handler = &proc_dointvec,
118 .ctl_name = DEV_RAID_SPEED_LIMIT_MAX,
119 .procname = "speed_limit_max",
120 .data = &sysctl_speed_limit_max,
121 .maxlen = sizeof(int),
122 .mode = S_IRUGO|S_IWUSR,
123 .proc_handler = &proc_dointvec,
128 static ctl_table raid_dir_table[] = {
130 .ctl_name = DEV_RAID,
133 .mode = S_IRUGO|S_IXUGO,
139 static ctl_table raid_root_table[] = {
145 .child = raid_dir_table,
150 static struct block_device_operations md_fops;
152 static int start_readonly;
155 * We have a system wide 'event count' that is incremented
156 * on any 'interesting' event, and readers of /proc/mdstat
157 * can use 'poll' or 'select' to find out when the event count increases.  Events are:
161 * start array, stop array, error, add device, remove device,
162 * start build, activate spare
164 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
165 static atomic_t md_event_count;
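/*
 * Userspace sketch of the consumer side described above (includes and
 * error handling omitted): read /proc/mdstat once, poll() for an
 * exceptional event, then re-read to see what changed.
 *
 *   int fd = open("/proc/mdstat", O_RDONLY);
 *   char buf[4096];
 *   struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *   read(fd, buf, sizeof(buf));
 *   poll(&pfd, 1, -1);           wakes up when md_event_count moves
 *   lseek(fd, 0, SEEK_SET);
 *   read(fd, buf, sizeof(buf));
 */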
166 void md_new_event(mddev_t *mddev)
168 atomic_inc(&md_event_count);
169 wake_up(&md_event_waiters);
170 sysfs_notify(&mddev->kobj, NULL, "sync_action");
172 EXPORT_SYMBOL_GPL(md_new_event);
174 /* Alternate version that can be called from interrupts
175 * when calling sysfs_notify isn't needed.
177 static void md_new_event_inintr(mddev_t *mddev)
179 atomic_inc(&md_event_count);
180 wake_up(&md_event_waiters);
184 * Enables iteration over all existing md arrays.
185 * all_mddevs_lock protects this list.
187 static LIST_HEAD(all_mddevs);
188 static DEFINE_SPINLOCK(all_mddevs_lock);
192 * iterates through all used mddevs in the system.
193 * We take care to grab the all_mddevs_lock whenever navigating
194 * the list, and to always hold a refcount when unlocked.
195 * Any code which breaks out of this loop while owning
196 * a reference to the current mddev must mddev_put it.
198 #define ITERATE_MDDEV(mddev,tmp) \
200 for (({ spin_lock(&all_mddevs_lock); \
201 tmp = all_mddevs.next; \
202 mddev = NULL;}); \
203 ({ if (tmp != &all_mddevs) \
204 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
205 spin_unlock(&all_mddevs_lock); \
206 if (mddev) mddev_put(mddev); \
207 mddev = list_entry(tmp, mddev_t, all_mddevs); \
208 tmp != &all_mddevs;}); \
209 ({ spin_lock(&all_mddevs_lock); \
210 tmp = tmp->next;}) \
211 )
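/*
 * Usage sketch ("interesting" is a hypothetical predicate): the macro
 * hands us a counted reference each iteration, so breaking out early
 * means we must drop it ourselves, as the comment above warns.
 *
 *   mddev_t *mddev;
 *   struct list_head *tmp;
 *
 *   ITERATE_MDDEV(mddev,tmp) {
 *           if (interesting(mddev)) {
 *                   mddev_put(mddev);
 *                   break;
 *           }
 *   }
 */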
214 static int md_fail_request (request_queue_t *q, struct bio *bio)
216 bio_io_error(bio, bio->bi_size);
220 static inline mddev_t *mddev_get(mddev_t *mddev)
222 atomic_inc(&mddev->active);
226 static void mddev_put(mddev_t *mddev)
228 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
230 if (!mddev->raid_disks && list_empty(&mddev->disks)) {
231 list_del(&mddev->all_mddevs);
232 spin_unlock(&all_mddevs_lock);
233 blk_cleanup_queue(mddev->queue);
234 kobject_unregister(&mddev->kobj);
236 spin_unlock(&all_mddevs_lock);
239 static mddev_t * mddev_find(dev_t unit)
241 mddev_t *mddev, *new = NULL;
244 spin_lock(&all_mddevs_lock);
245 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
246 if (mddev->unit == unit) {
248 spin_unlock(&all_mddevs_lock);
254 list_add(&new->all_mddevs, &all_mddevs);
255 spin_unlock(&all_mddevs_lock);
258 spin_unlock(&all_mddevs_lock);
260 new = kzalloc(sizeof(*new), GFP_KERNEL);
265 if (MAJOR(unit) == MD_MAJOR)
266 new->md_minor = MINOR(unit);
268 new->md_minor = MINOR(unit) >> MdpMinorShift;
270 mutex_init(&new->reconfig_mutex);
271 INIT_LIST_HEAD(&new->disks);
272 INIT_LIST_HEAD(&new->all_mddevs);
273 init_timer(&new->safemode_timer);
274 atomic_set(&new->active, 1);
275 spin_lock_init(&new->write_lock);
276 init_waitqueue_head(&new->sb_wait);
278 new->queue = blk_alloc_queue(GFP_KERNEL);
283 set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
285 blk_queue_make_request(new->queue, md_fail_request);
290 static inline int mddev_lock(mddev_t * mddev)
292 return mutex_lock_interruptible(&mddev->reconfig_mutex);
295 static inline int mddev_trylock(mddev_t * mddev)
297 return mutex_trylock(&mddev->reconfig_mutex);
300 static inline void mddev_unlock(mddev_t * mddev)
302 mutex_unlock(&mddev->reconfig_mutex);
304 md_wakeup_thread(mddev->thread);
307 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
310 struct list_head *tmp;
312 ITERATE_RDEV(mddev,rdev,tmp) {
313 if (rdev->desc_nr == nr)
319 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
321 struct list_head *tmp;
324 ITERATE_RDEV(mddev,rdev,tmp) {
325 if (rdev->bdev->bd_dev == dev)
331 static struct mdk_personality *find_pers(int level, char *clevel)
333 struct mdk_personality *pers;
334 list_for_each_entry(pers, &pers_list, list) {
335 if (level != LEVEL_NONE && pers->level == level)
337 if (strcmp(pers->name, clevel)==0)
343 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
345 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
346 return MD_NEW_SIZE_BLOCKS(size);
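/*
 * For reference, MD_NEW_SIZE_BLOCKS() (from md_p.h) is
 *   ((size & ~(MD_RESERVED_BLOCKS - 1)) - MD_RESERVED_BLOCKS)
 * with MD_RESERVED_BLOCKS = 64K/BLOCK_SIZE: the 0.90 superblock sits in
 * the last 64K-aligned 64K window of the device.  E.g. a 10000 KB
 * device puts the superblock at offset 9920 KB.
 */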
349 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
353 size = rdev->sb_offset;
356 size &= ~((sector_t)chunk_size/1024 - 1);
360 static int alloc_disk_sb(mdk_rdev_t * rdev)
365 rdev->sb_page = alloc_page(GFP_KERNEL);
366 if (!rdev->sb_page) {
367 printk(KERN_ALERT "md: out of memory.\n");
374 static void free_disk_sb(mdk_rdev_t * rdev)
377 put_page(rdev->sb_page);
379 rdev->sb_page = NULL;
386 static int super_written(struct bio *bio, unsigned int bytes_done, int error)
388 mdk_rdev_t *rdev = bio->bi_private;
389 mddev_t *mddev = rdev->mddev;
393 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
394 printk("md: super_written gets error=%d, uptodate=%d\n",
395 error, test_bit(BIO_UPTODATE, &bio->bi_flags));
396 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
397 md_error(mddev, rdev);
400 if (atomic_dec_and_test(&mddev->pending_writes))
401 wake_up(&mddev->sb_wait);
406 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
408 struct bio *bio2 = bio->bi_private;
409 mdk_rdev_t *rdev = bio2->bi_private;
410 mddev_t *mddev = rdev->mddev;
414 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
415 error == -EOPNOTSUPP) {
417 /* barriers don't appear to be supported :-( */
418 set_bit(BarriersNotsupp, &rdev->flags);
419 mddev->barriers_work = 0;
420 spin_lock_irqsave(&mddev->write_lock, flags);
421 bio2->bi_next = mddev->biolist;
422 mddev->biolist = bio2;
423 spin_unlock_irqrestore(&mddev->write_lock, flags);
424 wake_up(&mddev->sb_wait);
429 bio->bi_private = rdev;
430 return super_written(bio, bytes_done, error);
433 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
434 sector_t sector, int size, struct page *page)
436 /* write first size bytes of page to sector of rdev
437 * Increment mddev->pending_writes before returning
438 * and decrement it on completion, waking up sb_wait
439 * if zero is reached.
440 * If an error occurred, call md_error
442 * As we might need to resubmit the request if BIO_RW_BARRIER
443 * causes ENOTSUPP, we allocate a spare bio...
445 struct bio *bio = bio_alloc(GFP_NOIO, 1);
446 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
448 bio->bi_bdev = rdev->bdev;
449 bio->bi_sector = sector;
450 bio_add_page(bio, page, size, 0);
451 bio->bi_private = rdev;
452 bio->bi_end_io = super_written;
455 atomic_inc(&mddev->pending_writes);
456 if (!test_bit(BarriersNotsupp, &rdev->flags)) {
458 rw |= (1<<BIO_RW_BARRIER);
459 rbio = bio_clone(bio, GFP_NOIO);
460 rbio->bi_private = bio;
461 rbio->bi_end_io = super_written_barrier;
462 submit_bio(rw, rbio);
467 void md_super_wait(mddev_t *mddev)
469 /* wait for all superblock writes that were scheduled to complete.
470 * if any had to be retried (due to BARRIER problems), retry them
474 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
475 if (atomic_read(&mddev->pending_writes)==0)
477 while (mddev->biolist) {
479 spin_lock_irq(&mddev->write_lock);
480 bio = mddev->biolist;
481 mddev->biolist = bio->bi_next ;
483 spin_unlock_irq(&mddev->write_lock);
484 submit_bio(bio->bi_rw, bio);
488 finish_wait(&mddev->sb_wait, &wq);
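/*
 * The barrier fallback protocol, in short: md_super_write() submits the
 * superblock write as a BIO_RW_BARRIER clone first; if the device fails
 * it with -EOPNOTSUPP, super_written_barrier() records BarriersNotsupp,
 * parks the plain (never submitted) original bio on mddev->biolist, and
 * md_super_wait() above resubmits it without the barrier flag.
 */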
491 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
496 complete((struct completion*)bio->bi_private);
500 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
501 struct page *page, int rw)
503 struct bio *bio = bio_alloc(GFP_NOIO, 1);
504 struct completion event;
507 rw |= (1 << BIO_RW_SYNC);
510 bio->bi_sector = sector;
511 bio_add_page(bio, page, size, 0);
512 init_completion(&event);
513 bio->bi_private = &event;
514 bio->bi_end_io = bi_complete;
516 wait_for_completion(&event);
518 ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
522 EXPORT_SYMBOL_GPL(sync_page_io);
524 static int read_disk_sb(mdk_rdev_t * rdev, int size)
526 char b[BDEVNAME_SIZE];
527 if (!rdev->sb_page) {
535 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
541 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
542 bdevname(rdev->bdev,b));
546 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
548 if ( (sb1->set_uuid0 == sb2->set_uuid0) &&
549 (sb1->set_uuid1 == sb2->set_uuid1) &&
550 (sb1->set_uuid2 == sb2->set_uuid2) &&
551 (sb1->set_uuid3 == sb2->set_uuid3))
559 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
562 mdp_super_t *tmp1, *tmp2;
564 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
565 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
567 if (!tmp1 || !tmp2) {
569 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
577 * nr_disks is not constant
582 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
593 static unsigned int calc_sb_csum(mdp_super_t * sb)
595 unsigned int disk_csum, csum;
597 disk_csum = sb->sb_csum;
599 csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
600 sb->sb_csum = disk_csum;
606 * Handle superblock details.
607 * We want to be able to handle multiple superblock formats
608 * so we have a common interface to them all, and an array of
609 * different handlers.
610 * We rely on user-space to write the initial superblock, and support
611 * reading and updating of superblocks.
612 * Interface methods are:
613 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
614 * loads and validates a superblock on dev.
615 * if refdev != NULL, compare superblocks on both devices
617 * 0 - dev has a superblock that is compatible with refdev
618 * 1 - dev has a superblock that is compatible and newer than refdev
619 * so dev should be used as the refdev in future
620 * -EINVAL superblock incompatible or invalid
621 * -othererror e.g. -EIO
623 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
624 * Verify that dev is acceptable into mddev.
625 * The first time, mddev->raid_disks will be 0, and data from
626 * dev should be merged in. Subsequent calls check that dev
627 * is new enough. Return 0 or -EINVAL
629 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
630 * Update the superblock for rdev with data in mddev
631 * This does not write to disc.
637 struct module *owner;
638 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
639 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
640 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
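/*
 * Dispatch sketch: callers index super_types[] by the array's major
 * version; this is the pattern analyze_sbs() uses below.
 *
 *   switch (super_types[mddev->major_version].
 *           load_super(rdev, freshest, mddev->minor_version)) {
 *   case 1:                     newest superblock so far
 *           freshest = rdev;
 *           break;
 *   case 0:                     compatible with current refdev
 *           break;
 *   default:                    invalid/incompatible: drop the device
 *           kick_rdev_from_array(rdev);
 *   }
 */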
644 * load_super for 0.90.0
646 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
648 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
654 * Calculate the position of the superblock:
655 * it's at the end of the disk.
657 * It also happens to be a multiple of 4Kb.
659 sb_offset = calc_dev_sboffset(rdev->bdev);
660 rdev->sb_offset = sb_offset;
662 ret = read_disk_sb(rdev, MD_SB_BYTES);
667 bdevname(rdev->bdev, b);
668 sb = (mdp_super_t*)page_address(rdev->sb_page);
670 if (sb->md_magic != MD_SB_MAGIC) {
671 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
676 if (sb->major_version != 0 ||
677 sb->minor_version < 90 ||
678 sb->minor_version > 91) {
679 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
680 sb->major_version, sb->minor_version,
685 if (sb->raid_disks <= 0)
688 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
689 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
694 rdev->preferred_minor = sb->md_minor;
695 rdev->data_offset = 0;
696 rdev->sb_size = MD_SB_BYTES;
698 if (sb->level == LEVEL_MULTIPATH)
701 rdev->desc_nr = sb->this_disk.number;
707 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
708 if (!uuid_equal(refsb, sb)) {
709 printk(KERN_WARNING "md: %s has different UUID to %s\n",
710 b, bdevname(refdev->bdev,b2));
713 if (!sb_equal(refsb, sb)) {
714 printk(KERN_WARNING "md: %s has same UUID"
715 " but different superblock to %s\n",
716 b, bdevname(refdev->bdev, b2));
720 ev2 = md_event(refsb);
726 rdev->size = calc_dev_size(rdev, sb->chunk_size);
728 if (rdev->size < sb->size && sb->level > 1)
729 /* "this cannot possibly happen" ... */
737 * validate_super for 0.90.0
739 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
742 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
743 __u64 ev1 = md_event(sb);
745 rdev->raid_disk = -1;
747 if (mddev->raid_disks == 0) {
748 mddev->major_version = 0;
749 mddev->minor_version = sb->minor_version;
750 mddev->patch_version = sb->patch_version;
751 mddev->persistent = ! sb->not_persistent;
752 mddev->chunk_size = sb->chunk_size;
753 mddev->ctime = sb->ctime;
754 mddev->utime = sb->utime;
755 mddev->level = sb->level;
756 mddev->clevel[0] = 0;
757 mddev->layout = sb->layout;
758 mddev->raid_disks = sb->raid_disks;
759 mddev->size = sb->size;
761 mddev->bitmap_offset = 0;
762 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
764 if (mddev->minor_version >= 91) {
765 mddev->reshape_position = sb->reshape_position;
766 mddev->delta_disks = sb->delta_disks;
767 mddev->new_level = sb->new_level;
768 mddev->new_layout = sb->new_layout;
769 mddev->new_chunk = sb->new_chunk;
771 mddev->reshape_position = MaxSector;
772 mddev->delta_disks = 0;
773 mddev->new_level = mddev->level;
774 mddev->new_layout = mddev->layout;
775 mddev->new_chunk = mddev->chunk_size;
778 if (sb->state & (1<<MD_SB_CLEAN))
779 mddev->recovery_cp = MaxSector;
781 if (sb->events_hi == sb->cp_events_hi &&
782 sb->events_lo == sb->cp_events_lo) {
783 mddev->recovery_cp = sb->recovery_cp;
785 mddev->recovery_cp = 0;
788 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
789 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
790 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
791 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
793 mddev->max_disks = MD_SB_DISKS;
795 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
796 mddev->bitmap_file == NULL) {
797 if (mddev->level != 1 && mddev->level != 4
798 && mddev->level != 5 && mddev->level != 6
799 && mddev->level != 10) {
800 /* FIXME use a better test */
801 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
804 mddev->bitmap_offset = mddev->default_bitmap_offset;
807 } else if (mddev->pers == NULL) {
808 /* Insist on a good event counter while assembling */
810 if (ev1 < mddev->events)
812 } else if (mddev->bitmap) {
813 /* if adding to array with a bitmap, then we can accept an
814 * older device ... but not too old.
816 if (ev1 < mddev->bitmap->events_cleared)
819 if (ev1 < mddev->events)
820 /* just a hot-add of a new device, leave raid_disk at -1 */
824 if (mddev->level != LEVEL_MULTIPATH) {
825 desc = sb->disks + rdev->desc_nr;
827 if (desc->state & (1<<MD_DISK_FAULTY))
828 set_bit(Faulty, &rdev->flags);
829 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
830 desc->raid_disk < mddev->raid_disks */) {
831 set_bit(In_sync, &rdev->flags);
832 rdev->raid_disk = desc->raid_disk;
834 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
835 set_bit(WriteMostly, &rdev->flags);
836 } else /* MULTIPATH are always insync */
837 set_bit(In_sync, &rdev->flags);
842 * sync_super for 0.90.0
844 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
847 struct list_head *tmp;
849 int next_spare = mddev->raid_disks;
852 /* make rdev->sb match mddev data:
854 * 1/ zero out disks
855 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
856 * 3/ any empty disks < next_spare become removed
858 * disks[0] gets initialised to REMOVED because
859 * we cannot be sure from other fields if it has
860 * been initialised or not.
863 int active=0, working=0,failed=0,spare=0,nr_disks=0;
865 rdev->sb_size = MD_SB_BYTES;
867 sb = (mdp_super_t*)page_address(rdev->sb_page);
869 memset(sb, 0, sizeof(*sb));
871 sb->md_magic = MD_SB_MAGIC;
872 sb->major_version = mddev->major_version;
873 sb->patch_version = mddev->patch_version;
874 sb->gvalid_words = 0; /* ignored */
875 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
876 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
877 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
878 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
880 sb->ctime = mddev->ctime;
881 sb->level = mddev->level;
882 sb->size = mddev->size;
883 sb->raid_disks = mddev->raid_disks;
884 sb->md_minor = mddev->md_minor;
885 sb->not_persistent = !mddev->persistent;
886 sb->utime = mddev->utime;
888 sb->events_hi = (mddev->events>>32);
889 sb->events_lo = (u32)mddev->events;
891 if (mddev->reshape_position == MaxSector)
892 sb->minor_version = 90;
894 sb->minor_version = 91;
895 sb->reshape_position = mddev->reshape_position;
896 sb->new_level = mddev->new_level;
897 sb->delta_disks = mddev->delta_disks;
898 sb->new_layout = mddev->new_layout;
899 sb->new_chunk = mddev->new_chunk;
901 mddev->minor_version = sb->minor_version;
904 sb->recovery_cp = mddev->recovery_cp;
905 sb->cp_events_hi = (mddev->events>>32);
906 sb->cp_events_lo = (u32)mddev->events;
907 if (mddev->recovery_cp == MaxSector)
908 sb->state = (1<< MD_SB_CLEAN);
912 sb->layout = mddev->layout;
913 sb->chunk_size = mddev->chunk_size;
915 if (mddev->bitmap && mddev->bitmap_file == NULL)
916 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
918 sb->disks[0].state = (1<<MD_DISK_REMOVED);
919 ITERATE_RDEV(mddev,rdev2,tmp) {
922 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
923 && !test_bit(Faulty, &rdev2->flags))
924 desc_nr = rdev2->raid_disk;
926 desc_nr = next_spare++;
927 rdev2->desc_nr = desc_nr;
928 d = &sb->disks[rdev2->desc_nr];
930 d->number = rdev2->desc_nr;
931 d->major = MAJOR(rdev2->bdev->bd_dev);
932 d->minor = MINOR(rdev2->bdev->bd_dev);
933 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
934 && !test_bit(Faulty, &rdev2->flags))
935 d->raid_disk = rdev2->raid_disk;
937 d->raid_disk = rdev2->desc_nr; /* compatibility */
938 if (test_bit(Faulty, &rdev2->flags))
939 d->state = (1<<MD_DISK_FAULTY);
940 else if (test_bit(In_sync, &rdev2->flags)) {
941 d->state = (1<<MD_DISK_ACTIVE);
942 d->state |= (1<<MD_DISK_SYNC);
950 if (test_bit(WriteMostly, &rdev2->flags))
951 d->state |= (1<<MD_DISK_WRITEMOSTLY);
953 /* now set the "removed" and "faulty" bits on any missing devices */
954 for (i=0 ; i < mddev->raid_disks ; i++) {
955 mdp_disk_t *d = &sb->disks[i];
956 if (d->state == 0 && d->number == 0) {
959 d->state = (1<<MD_DISK_REMOVED);
960 d->state |= (1<<MD_DISK_FAULTY);
964 sb->nr_disks = nr_disks;
965 sb->active_disks = active;
966 sb->working_disks = working;
967 sb->failed_disks = failed;
968 sb->spare_disks = spare;
970 sb->this_disk = sb->disks[rdev->desc_nr];
971 sb->sb_csum = calc_sb_csum(sb);
975 * version 1 superblock
978 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
982 unsigned long long newcsum;
983 int size = 256 + le32_to_cpu(sb->max_dev)*2;
984 __le32 *isuper = (__le32*)sb;
987 disk_csum = sb->sb_csum;
990 for (i=0; size>=4; size -= 4 )
991 newcsum += le32_to_cpu(*isuper++);
994 newcsum += le16_to_cpu(*(__le16*) isuper);
996 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
997 sb->sb_csum = disk_csum;
998 return cpu_to_le32(csum);
1001 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1003 struct mdp_superblock_1 *sb;
1006 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1010 * Calculate the position of the superblock.
1011 * It is always aligned to a 4K boundary and
1012 * depending on minor_version, it can be:
1013 * 0: At least 8K, but less than 12K, from end of device
1014 * 1: At start of device
1015 * 2: 4K from start of device.
1017 switch(minor_version) {
1019 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1021 sb_offset &= ~(sector_t)(4*2-1);
1022 /* convert from sectors to K */
1034 rdev->sb_offset = sb_offset;
1036 /* superblock is rarely larger than 1K, but it can be larger,
1037 * and it is safe to read 4k, so we do that
1039 ret = read_disk_sb(rdev, 4096);
1040 if (ret) return ret;
1043 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1045 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1046 sb->major_version != cpu_to_le32(1) ||
1047 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1048 le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1049 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1052 if (calc_sb_1_csum(sb) != sb->sb_csum) {
1053 printk("md: invalid superblock checksum on %s\n",
1054 bdevname(rdev->bdev,b));
1057 if (le64_to_cpu(sb->data_size) < 10) {
1058 printk("md: data_size too small on %s\n",
1059 bdevname(rdev->bdev,b));
1062 rdev->preferred_minor = 0xffff;
1063 rdev->data_offset = le64_to_cpu(sb->data_offset);
1064 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1066 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1067 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1068 if (rdev->sb_size & bmask)
1069 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1071 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1074 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1080 struct mdp_superblock_1 *refsb =
1081 (struct mdp_superblock_1*)page_address(refdev->sb_page);
1083 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1084 sb->level != refsb->level ||
1085 sb->layout != refsb->layout ||
1086 sb->chunksize != refsb->chunksize) {
1087 printk(KERN_WARNING "md: %s has strangely different"
1088 " superblock to %s\n",
1089 bdevname(rdev->bdev,b),
1090 bdevname(refdev->bdev,b2));
1093 ev1 = le64_to_cpu(sb->events);
1094 ev2 = le64_to_cpu(refsb->events);
1102 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1104 rdev->size = rdev->sb_offset;
1105 if (rdev->size < le64_to_cpu(sb->data_size)/2)
1107 rdev->size = le64_to_cpu(sb->data_size)/2;
1108 if (le32_to_cpu(sb->chunksize))
1109 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1111 if (le64_to_cpu(sb->size) > rdev->size*2)
1116 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1118 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1119 __u64 ev1 = le64_to_cpu(sb->events);
1121 rdev->raid_disk = -1;
1123 if (mddev->raid_disks == 0) {
1124 mddev->major_version = 1;
1125 mddev->patch_version = 0;
1126 mddev->persistent = 1;
1127 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1128 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1129 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1130 mddev->level = le32_to_cpu(sb->level);
1131 mddev->clevel[0] = 0;
1132 mddev->layout = le32_to_cpu(sb->layout);
1133 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1134 mddev->size = le64_to_cpu(sb->size)/2;
1135 mddev->events = ev1;
1136 mddev->bitmap_offset = 0;
1137 mddev->default_bitmap_offset = 1024 >> 9;
1139 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1140 memcpy(mddev->uuid, sb->set_uuid, 16);
1142 mddev->max_disks = (4096-256)/2;
1144 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1145 mddev->bitmap_file == NULL ) {
1146 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1147 && mddev->level != 10) {
1148 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1151 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1153 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1154 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1155 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1156 mddev->new_level = le32_to_cpu(sb->new_level);
1157 mddev->new_layout = le32_to_cpu(sb->new_layout);
1158 mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1160 mddev->reshape_position = MaxSector;
1161 mddev->delta_disks = 0;
1162 mddev->new_level = mddev->level;
1163 mddev->new_layout = mddev->layout;
1164 mddev->new_chunk = mddev->chunk_size;
1167 } else if (mddev->pers == NULL) {
1168 /* Insist on a good event counter while assembling */
1170 if (ev1 < mddev->events)
1172 } else if (mddev->bitmap) {
1173 /* If adding to array with a bitmap, then we can accept an
1174 * older device, but not too old.
1176 if (ev1 < mddev->bitmap->events_cleared)
1179 if (ev1 < mddev->events)
1180 /* just a hot-add of a new device, leave raid_disk at -1 */
1183 if (mddev->level != LEVEL_MULTIPATH) {
1185 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1187 case 0xffff: /* spare */
1189 case 0xfffe: /* faulty */
1190 set_bit(Faulty, &rdev->flags);
1193 if ((le32_to_cpu(sb->feature_map) &
1194 MD_FEATURE_RECOVERY_OFFSET))
1195 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1197 set_bit(In_sync, &rdev->flags);
1198 rdev->raid_disk = role;
1201 if (sb->devflags & WriteMostly1)
1202 set_bit(WriteMostly, &rdev->flags);
1203 } else /* MULTIPATH are always insync */
1204 set_bit(In_sync, &rdev->flags);
1209 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1211 struct mdp_superblock_1 *sb;
1212 struct list_head *tmp;
1215 /* make rdev->sb match mddev and rdev data. */
1217 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1219 sb->feature_map = 0;
1221 sb->recovery_offset = cpu_to_le64(0);
1222 memset(sb->pad1, 0, sizeof(sb->pad1));
1223 memset(sb->pad2, 0, sizeof(sb->pad2));
1224 memset(sb->pad3, 0, sizeof(sb->pad3));
1226 sb->utime = cpu_to_le64((__u64)mddev->utime);
1227 sb->events = cpu_to_le64(mddev->events);
1229 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1231 sb->resync_offset = cpu_to_le64(0);
1233 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1235 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1236 sb->size = cpu_to_le64(mddev->size<<1);
1238 if (mddev->bitmap && mddev->bitmap_file == NULL) {
1239 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1240 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1243 if (rdev->raid_disk >= 0 &&
1244 !test_bit(In_sync, &rdev->flags) &&
1245 rdev->recovery_offset > 0) {
1246 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1247 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1250 if (mddev->reshape_position != MaxSector) {
1251 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1252 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1253 sb->new_layout = cpu_to_le32(mddev->new_layout);
1254 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1255 sb->new_level = cpu_to_le32(mddev->new_level);
1256 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1260 ITERATE_RDEV(mddev,rdev2,tmp)
1261 if (rdev2->desc_nr+1 > max_dev)
1262 max_dev = rdev2->desc_nr+1;
1264 sb->max_dev = cpu_to_le32(max_dev);
1265 for (i=0; i<max_dev;i++)
1266 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1268 ITERATE_RDEV(mddev,rdev2,tmp) {
1270 if (test_bit(Faulty, &rdev2->flags))
1271 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1272 else if (test_bit(In_sync, &rdev2->flags))
1273 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1274 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1275 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1277 sb->dev_roles[i] = cpu_to_le16(0xffff);
1280 sb->sb_csum = calc_sb_1_csum(sb);
1284 static struct super_type super_types[] = {
1287 .owner = THIS_MODULE,
1288 .load_super = super_90_load,
1289 .validate_super = super_90_validate,
1290 .sync_super = super_90_sync,
1294 .owner = THIS_MODULE,
1295 .load_super = super_1_load,
1296 .validate_super = super_1_validate,
1297 .sync_super = super_1_sync,
1301 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1303 struct list_head *tmp, *tmp2;
1304 mdk_rdev_t *rdev, *rdev2;
1306 ITERATE_RDEV(mddev1,rdev,tmp)
1307 ITERATE_RDEV(mddev2, rdev2, tmp2)
1308 if (rdev->bdev->bd_contains ==
1309 rdev2->bdev->bd_contains)
1315 static LIST_HEAD(pending_raid_disks);
1317 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1319 char b[BDEVNAME_SIZE];
1328 /* make sure rdev->size exceeds mddev->size */
1329 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1331 /* Cannot change size, so fail */
1334 mddev->size = rdev->size;
1337 /* Verify rdev->desc_nr is unique.
1338 * If it is -1, assign a free number, else
1339 * check number is not in use
1341 if (rdev->desc_nr < 0) {
1343 if (mddev->pers) choice = mddev->raid_disks;
1344 while (find_rdev_nr(mddev, choice))
1346 rdev->desc_nr = choice;
1348 if (find_rdev_nr(mddev, rdev->desc_nr))
1351 bdevname(rdev->bdev,b);
1352 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
1354 while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
1357 rdev->mddev = mddev;
1358 printk(KERN_INFO "md: bind<%s>\n", b);
1360 rdev->kobj.parent = &mddev->kobj;
1361 if ((err = kobject_add(&rdev->kobj)))
1364 if (rdev->bdev->bd_part)
1365 ko = &rdev->bdev->bd_part->kobj;
1367 ko = &rdev->bdev->bd_disk->kobj;
1368 if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1369 kobject_del(&rdev->kobj);
1372 list_add(&rdev->same_set, &mddev->disks);
1373 bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
1377 printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1382 static void delayed_delete(struct work_struct *ws)
1384 mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1385 kobject_del(&rdev->kobj);
1388 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1390 char b[BDEVNAME_SIZE];
1395 bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1396 list_del_init(&rdev->same_set);
1397 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1399 sysfs_remove_link(&rdev->kobj, "block");
1401 /* We need to delay this, otherwise we can deadlock when
1402 * writing 'remove' to "dev/state".
1404 INIT_WORK(&rdev->del_work, delayed_delete);
1405 schedule_work(&rdev->del_work);
1409 * prevent the device from being mounted, repartitioned or
1410 * otherwise reused by a RAID array (or any other kernel
1411 * subsystem), by bd_claiming the device.
1413 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1416 struct block_device *bdev;
1417 char b[BDEVNAME_SIZE];
1419 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1421 printk(KERN_ERR "md: could not open %s.\n",
1422 __bdevname(dev, b));
1423 return PTR_ERR(bdev);
1425 err = bd_claim(bdev, rdev);
1427 printk(KERN_ERR "md: could not bd_claim %s.\n",
1436 static void unlock_rdev(mdk_rdev_t *rdev)
1438 struct block_device *bdev = rdev->bdev;
1446 void md_autodetect_dev(dev_t dev);
1448 static void export_rdev(mdk_rdev_t * rdev)
1450 char b[BDEVNAME_SIZE];
1451 printk(KERN_INFO "md: export_rdev(%s)\n",
1452 bdevname(rdev->bdev,b));
1456 list_del_init(&rdev->same_set);
1458 md_autodetect_dev(rdev->bdev->bd_dev);
1461 kobject_put(&rdev->kobj);
1464 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1466 unbind_rdev_from_array(rdev);
1470 static void export_array(mddev_t *mddev)
1472 struct list_head *tmp;
1475 ITERATE_RDEV(mddev,rdev,tmp) {
1480 kick_rdev_from_array(rdev);
1482 if (!list_empty(&mddev->disks))
1484 mddev->raid_disks = 0;
1485 mddev->major_version = 0;
1488 static void print_desc(mdp_disk_t *desc)
1490 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1491 desc->major,desc->minor,desc->raid_disk,desc->state);
1494 static void print_sb(mdp_super_t *sb)
1499 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1500 sb->major_version, sb->minor_version, sb->patch_version,
1501 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1503 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1504 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1505 sb->md_minor, sb->layout, sb->chunk_size);
1506 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
1507 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1508 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1509 sb->failed_disks, sb->spare_disks,
1510 sb->sb_csum, (unsigned long)sb->events_lo);
1513 for (i = 0; i < MD_SB_DISKS; i++) {
1516 desc = sb->disks + i;
1517 if (desc->number || desc->major || desc->minor ||
1518 desc->raid_disk || (desc->state && (desc->state != 4))) {
1519 printk(" D %2d: ", i);
1523 printk(KERN_INFO "md: THIS: ");
1524 print_desc(&sb->this_disk);
1528 static void print_rdev(mdk_rdev_t *rdev)
1530 char b[BDEVNAME_SIZE];
1531 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1532 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1533 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1535 if (rdev->sb_loaded) {
1536 printk(KERN_INFO "md: rdev superblock:\n");
1537 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1539 printk(KERN_INFO "md: no rdev superblock!\n");
1542 static void md_print_devices(void)
1544 struct list_head *tmp, *tmp2;
1547 char b[BDEVNAME_SIZE];
1550 printk("md: **********************************\n");
1551 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
1552 printk("md: **********************************\n");
1553 ITERATE_MDDEV(mddev,tmp) {
1556 bitmap_print_sb(mddev->bitmap);
1558 printk("%s: ", mdname(mddev));
1559 ITERATE_RDEV(mddev,rdev,tmp2)
1560 printk("<%s>", bdevname(rdev->bdev,b));
1563 ITERATE_RDEV(mddev,rdev,tmp2)
1566 printk("md: **********************************\n");
1571 static void sync_sbs(mddev_t * mddev, int nospares)
1573 /* Update each superblock (in-memory image), but
1574 * if we are allowed to, skip spares which already
1575 * have the right event counter, or have one earlier
1576 * (which would mean they aren't being marked as dirty
1577 * with the rest of the array)
1580 struct list_head *tmp;
1582 ITERATE_RDEV(mddev,rdev,tmp) {
1583 if (rdev->sb_events == mddev->events ||
1584 (nospares &&
1585 rdev->raid_disk < 0 &&
1586 (rdev->sb_events&1)==0 &&
1587 rdev->sb_events+1 == mddev->events)) {
1588 /* Don't update this superblock */
1589 rdev->sb_loaded = 2;
1591 super_types[mddev->major_version].
1592 sync_super(mddev, rdev);
1593 rdev->sb_loaded = 1;
1598 static void md_update_sb(mddev_t * mddev, int force_change)
1601 struct list_head *tmp;
1607 spin_lock_irq(&mddev->write_lock);
1609 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1610 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1611 force_change = 1;
1612 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1613 /* just a clean <-> dirty transition, possibly leave spares alone,
1614 * though if events isn't the right even/odd, we will have to do spares after all */
1617 nospares = 1;
1618 if (force_change)
1619 nospares = 0;
1620 if (mddev->degraded)
1621 /* If the array is degraded, then skipping spares is both
1622 * dangerous and fairly pointless.
1623 * Dangerous because a device that was removed from the array
1624 * might have an event_count that still looks up-to-date,
1625 * so it can be re-added without a resync.
1626 * Pointless because if there are any spares to skip,
1627 * then a recovery will happen and soon that array won't
1628 * be degraded any more and the spare can go back to sleep then.
1632 sync_req = mddev->in_sync;
1633 mddev->utime = get_seconds();
1635 /* If this is just a dirty<->clean transition, and the array is clean
1636 * and 'events' is odd, we can roll back to the previous clean state */
1637 if (nospares
1638 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1639 && (mddev->events & 1)
1640 && mddev->events != 1)
1641 mddev->events--;
1642 else {
1643 /* otherwise we have to go forward and ... */
1644 mddev->events ++;
1645 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1646 /* .. if the array isn't clean, insist on an odd 'events' */
1647 if ((mddev->events&1)==0) {
1652 /* otherwise insist on an even 'events' (for clean states) */
1653 if ((mddev->events&1)) {
1660 if (!mddev->events) {
1662 * oops, this 64-bit counter should never wrap.
1663 * Either we are in around ~1 trillion A.C., assuming
1664 * 1 reboot per second, or we have a bug:
1669 sync_sbs(mddev, nospares);
1672 * do not write anything to disk if using
1673 * nonpersistent superblocks
1675 if (!mddev->persistent) {
1676 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1677 spin_unlock_irq(&mddev->write_lock);
1678 wake_up(&mddev->sb_wait);
1681 spin_unlock_irq(&mddev->write_lock);
1684 "md: updating %s RAID superblock on device (in sync %d)\n",
1685 mdname(mddev),mddev->in_sync);
1687 err = bitmap_update_sb(mddev->bitmap);
1688 ITERATE_RDEV(mddev,rdev,tmp) {
1689 char b[BDEVNAME_SIZE];
1690 dprintk(KERN_INFO "md: ");
1691 if (rdev->sb_loaded != 1)
1692 continue; /* no noise on spare devices */
1693 if (test_bit(Faulty, &rdev->flags))
1694 dprintk("(skipping faulty ");
1696 dprintk("%s ", bdevname(rdev->bdev,b));
1697 if (!test_bit(Faulty, &rdev->flags)) {
1698 md_super_write(mddev,rdev,
1699 rdev->sb_offset<<1, rdev->sb_size,
1701 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1702 bdevname(rdev->bdev,b),
1703 (unsigned long long)rdev->sb_offset);
1704 rdev->sb_events = mddev->events;
1708 if (mddev->level == LEVEL_MULTIPATH)
1709 /* only need to write one superblock... */
1712 md_super_wait(mddev);
1713 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1715 spin_lock_irq(&mddev->write_lock);
1716 if (mddev->in_sync != sync_req ||
1717 test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1718 /* have to write it out again */
1719 spin_unlock_irq(&mddev->write_lock);
1722 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1723 spin_unlock_irq(&mddev->write_lock);
1724 wake_up(&mddev->sb_wait);
1728 /* words written to sysfs files may, or may not, be \n terminated.
1729 * We want to accept either case. For this we use cmd_match.
1731 static int cmd_match(const char *cmd, const char *str)
1733 /* See if cmd, written into a sysfs file, matches
1734 * str. They must either be the same, or cmd can
1735 * have a trailing newline
1737 while (*cmd && *str && *cmd == *str) {
1738 cmd++; str++;
1740 }
1741 if (*cmd == '\n')
1742 cmd++;
1743 return (!*cmd && !*str);
1746 }
1748 struct rdev_sysfs_entry {
1749 struct attribute attr;
1750 ssize_t (*show)(mdk_rdev_t *, char *);
1751 ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1755 state_show(mdk_rdev_t *rdev, char *page)
1760 if (test_bit(Faulty, &rdev->flags)) {
1761 len+= sprintf(page+len, "%sfaulty",sep);
1764 if (test_bit(In_sync, &rdev->flags)) {
1765 len += sprintf(page+len, "%sin_sync",sep);
1768 if (test_bit(WriteMostly, &rdev->flags)) {
1769 len += sprintf(page+len, "%swrite_mostly",sep);
1772 if (!test_bit(Faulty, &rdev->flags) &&
1773 !test_bit(In_sync, &rdev->flags)) {
1774 len += sprintf(page+len, "%sspare", sep);
1777 return len+sprintf(page+len, "\n");
1781 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1784 * faulty - simulates an error
1785 * remove - disconnects the device
1786 * writemostly - sets write_mostly
1787 * -writemostly - clears write_mostly
1790 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1791 md_error(rdev->mddev, rdev);
1793 } else if (cmd_match(buf, "remove")) {
1794 if (rdev->raid_disk >= 0)
1797 mddev_t *mddev = rdev->mddev;
1798 kick_rdev_from_array(rdev);
1800 md_update_sb(mddev, 1);
1801 md_new_event(mddev);
1804 } else if (cmd_match(buf, "writemostly")) {
1805 set_bit(WriteMostly, &rdev->flags);
1807 } else if (cmd_match(buf, "-writemostly")) {
1808 clear_bit(WriteMostly, &rdev->flags);
1811 return err ? err : len;
1813 static struct rdev_sysfs_entry rdev_state =
1814 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
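/*
 * Example (array and device names illustrative):
 *   echo faulty       > /sys/block/md0/md/dev-sda1/state
 *   echo -writemostly > /sys/block/md0/md/dev-sda1/state
 */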
1817 super_show(mdk_rdev_t *rdev, char *page)
1819 if (rdev->sb_loaded && rdev->sb_size) {
1820 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1821 return rdev->sb_size;
1825 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1828 errors_show(mdk_rdev_t *rdev, char *page)
1830 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1834 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1837 unsigned long n = simple_strtoul(buf, &e, 10);
1838 if (*buf && (*e == 0 || *e == '\n')) {
1839 atomic_set(&rdev->corrected_errors, n);
1844 static struct rdev_sysfs_entry rdev_errors =
1845 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1848 slot_show(mdk_rdev_t *rdev, char *page)
1850 if (rdev->raid_disk < 0)
1851 return sprintf(page, "none\n");
1853 return sprintf(page, "%d\n", rdev->raid_disk);
1857 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1860 int slot = simple_strtoul(buf, &e, 10);
1861 if (strncmp(buf, "none", 4)==0)
1862 slot = -1;
1863 else if (e==buf || (*e && *e!= '\n'))
1865 if (rdev->mddev->pers)
1866 /* Cannot set slot in active array (yet) */
1868 if (slot >= rdev->mddev->raid_disks)
1870 rdev->raid_disk = slot;
1871 /* assume it is working */
1873 set_bit(In_sync, &rdev->flags);
1878 static struct rdev_sysfs_entry rdev_slot =
1879 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
1882 offset_show(mdk_rdev_t *rdev, char *page)
1884 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1888 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1891 unsigned long long offset = simple_strtoull(buf, &e, 10);
1892 if (e==buf || (*e && *e != '\n'))
1894 if (rdev->mddev->pers)
1896 rdev->data_offset = offset;
1900 static struct rdev_sysfs_entry rdev_offset =
1901 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
1904 rdev_size_show(mdk_rdev_t *rdev, char *page)
1906 return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1910 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1913 unsigned long long size = simple_strtoull(buf, &e, 10);
1914 if (e==buf || (*e && *e != '\n'))
1916 if (rdev->mddev->pers)
1919 if (size < rdev->mddev->size || rdev->mddev->size == 0)
1920 rdev->mddev->size = size;
1924 static struct rdev_sysfs_entry rdev_size =
1925 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
1927 static struct attribute *rdev_default_attrs[] = {
1937 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1939 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1940 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1944 return entry->show(rdev, page);
1948 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1949 const char *page, size_t length)
1951 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1952 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1956 if (!capable(CAP_SYS_ADMIN))
1958 return entry->store(rdev, page, length);
1961 static void rdev_free(struct kobject *ko)
1963 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1966 static struct sysfs_ops rdev_sysfs_ops = {
1967 .show = rdev_attr_show,
1968 .store = rdev_attr_store,
1970 static struct kobj_type rdev_ktype = {
1971 .release = rdev_free,
1972 .sysfs_ops = &rdev_sysfs_ops,
1973 .default_attrs = rdev_default_attrs,
1977 * Import a device. If 'super_format' >= 0, then sanity check the superblock
1979 * mark the device faulty if:
1981 * - the device is nonexistent (zero size)
1982 * - the device has no valid superblock
1984 * a faulty rdev _never_ has rdev->sb set.
1986 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1988 char b[BDEVNAME_SIZE];
1993 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1995 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1996 return ERR_PTR(-ENOMEM);
1999 if ((err = alloc_disk_sb(rdev)))
2002 err = lock_rdev(rdev, newdev);
2006 rdev->kobj.parent = NULL;
2007 rdev->kobj.ktype = &rdev_ktype;
2008 kobject_init(&rdev->kobj);
2011 rdev->saved_raid_disk = -1;
2012 rdev->raid_disk = -1;
2014 rdev->data_offset = 0;
2015 rdev->sb_events = 0;
2016 atomic_set(&rdev->nr_pending, 0);
2017 atomic_set(&rdev->read_errors, 0);
2018 atomic_set(&rdev->corrected_errors, 0);
2020 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2023 "md: %s has zero or unknown size, marking faulty!\n",
2024 bdevname(rdev->bdev,b));
2029 if (super_format >= 0) {
2030 err = super_types[super_format].
2031 load_super(rdev, NULL, super_minor);
2032 if (err == -EINVAL) {
2034 "md: %s has invalid sb, not importing!\n",
2035 bdevname(rdev->bdev,b));
2040 "md: could not read %s's sb, not importing!\n",
2041 bdevname(rdev->bdev,b));
2045 INIT_LIST_HEAD(&rdev->same_set);
2050 if (rdev->sb_page) {
2056 return ERR_PTR(err);
2060 * Check a full RAID array for plausibility
2064 static void analyze_sbs(mddev_t * mddev)
2067 struct list_head *tmp;
2068 mdk_rdev_t *rdev, *freshest;
2069 char b[BDEVNAME_SIZE];
2072 ITERATE_RDEV(mddev,rdev,tmp)
2073 switch (super_types[mddev->major_version].
2074 load_super(rdev, freshest, mddev->minor_version)) {
2082 "md: fatal superblock inconsistency in %s"
2083 " -- removing from array\n",
2084 bdevname(rdev->bdev,b));
2085 kick_rdev_from_array(rdev);
2089 super_types[mddev->major_version].
2090 validate_super(mddev, freshest);
2093 ITERATE_RDEV(mddev,rdev,tmp) {
2094 if (rdev != freshest)
2095 if (super_types[mddev->major_version].
2096 validate_super(mddev, rdev)) {
2097 printk(KERN_WARNING "md: kicking non-fresh %s"
2099 bdevname(rdev->bdev,b));
2100 kick_rdev_from_array(rdev);
2103 if (mddev->level == LEVEL_MULTIPATH) {
2104 rdev->desc_nr = i++;
2105 rdev->raid_disk = rdev->desc_nr;
2106 set_bit(In_sync, &rdev->flags);
2112 if (mddev->recovery_cp != MaxSector &&
2114 printk(KERN_ERR "md: %s: raid array is not clean"
2115 " -- starting background reconstruction\n",
2121 safe_delay_show(mddev_t *mddev, char *page)
2123 int msec = (mddev->safemode_delay*1000)/HZ;
2124 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2127 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2135 /* remove a period, and count digits after it */
2136 if (len >= sizeof(buf))
2137 return -EINVAL;
2138 strlcpy(buf, cbuf, len);
2140 for (i=0; i<len; i++) {
2142 if (isdigit(buf[i])) {
2147 } else if (buf[i] == '.') {
2152 msec = simple_strtoul(buf, &e, 10);
2153 if (e == buf || (*e && *e != '\n'))
2155 msec = (msec * 1000) / scale;
2157 mddev->safemode_delay = 0;
2159 mddev->safemode_delay = (msec*HZ)/1000;
2160 if (mddev->safemode_delay == 0)
2161 mddev->safemode_delay = 1;
2165 static struct md_sysfs_entry md_safe_delay =
2166 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
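/*
 * Example (md0 illustrative): a 200 millisecond safemode delay
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
 */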
2169 level_show(mddev_t *mddev, char *page)
2171 struct mdk_personality *p = mddev->pers;
2173 return sprintf(page, "%s\n", p->name);
2174 else if (mddev->clevel[0])
2175 return sprintf(page, "%s\n", mddev->clevel);
2176 else if (mddev->level != LEVEL_NONE)
2177 return sprintf(page, "%d\n", mddev->level);
2183 level_store(mddev_t *mddev, const char *buf, size_t len)
2190 if (len >= sizeof(mddev->clevel))
2192 strncpy(mddev->clevel, buf, len);
2193 if (mddev->clevel[len-1] == '\n')
2194 len--;
2195 mddev->clevel[len] = 0;
2196 mddev->level = LEVEL_NONE;
2200 static struct md_sysfs_entry md_level =
2201 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2205 layout_show(mddev_t *mddev, char *page)
2207 /* just a number, not meaningful for all levels */
2208 return sprintf(page, "%d\n", mddev->layout);
2212 layout_store(mddev_t *mddev, const char *buf, size_t len)
2215 unsigned long n = simple_strtoul(buf, &e, 10);
2219 if (!*buf || (*e && *e != '\n'))
2225 static struct md_sysfs_entry md_layout =
2226 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2230 raid_disks_show(mddev_t *mddev, char *page)
2232 if (mddev->raid_disks == 0)
2234 return sprintf(page, "%d\n", mddev->raid_disks);
2237 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2240 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2244 unsigned long n = simple_strtoul(buf, &e, 10);
2246 if (!*buf || (*e && *e != '\n'))
2250 rv = update_raid_disks(mddev, n);
2252 mddev->raid_disks = n;
2253 return rv ? rv : len;
2255 static struct md_sysfs_entry md_raid_disks =
2256 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2259 chunk_size_show(mddev_t *mddev, char *page)
2261 return sprintf(page, "%d\n", mddev->chunk_size);
2265 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2267 /* can only set chunk_size if array is not yet active */
2269 unsigned long n = simple_strtoul(buf, &e, 10);
2273 if (!*buf || (*e && *e != '\n'))
2276 mddev->chunk_size = n;
2279 static struct md_sysfs_entry md_chunk_size =
2280 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2283 resync_start_show(mddev_t *mddev, char *page)
2285 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2289 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2291 /* can only set resync_start if array is not yet active */
2293 unsigned long long n = simple_strtoull(buf, &e, 10);
2297 if (!*buf || (*e && *e != '\n'))
2300 mddev->recovery_cp = n;
2303 static struct md_sysfs_entry md_resync_start =
2304 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2307 * The array state can be:
2310 * No devices, no size, no level
2311 * Equivalent to STOP_ARRAY ioctl
2313 * May have some settings, but array is not active
2314 * all IO results in error
2315 * When written, doesn't tear down array, but just stops it
2316 * suspended (not supported yet)
2317 * All IO requests will block. The array can be reconfigured.
2318 * Writing this, if accepted, will block until array is quiescent
2320 * no resync can happen. no superblocks get written.
2321 * write requests fail
2323 * like readonly, but behaves like 'clean' on a write request.
2325 * clean - no pending writes, but otherwise active.
2326 * When written to inactive array, starts without resync
2327 * If a write request arrives then
2328 * if metadata is known, mark 'dirty' and switch to 'active'.
2329 * if not known, block and switch to write-pending
2330 * If written to an active array that has pending writes, then fails.
2332 * fully active: IO and resync can be happening.
2333 * When written to inactive array, starts with resync
2336 * clean, but writes are blocked waiting for 'active' to be written.
2339 * like active, but no writes have been seen for a while (100msec).
2342 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2343 write_pending, active_idle, bad_word};
2344 static char *array_states[] = {
2345 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2346 "write-pending", "active-idle", NULL };
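/*
 * Example transitions (md0 illustrative), per the table above:
 *   echo inactive > /sys/block/md0/md/array_state    stop, keep config
 *   echo clean    > /sys/block/md0/md/array_state    start, skip resync
 *   echo active   > /sys/block/md0/md/array_state    start, with resync
 */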
2348 static int match_word(const char *word, char **list)
2351 for (n=0; list[n]; n++)
2352 if (cmd_match(word, list[n]))
2358 array_state_show(mddev_t *mddev, char *page)
2360 enum array_state st = inactive;
2373 else if (mddev->safemode)
2379 if (list_empty(&mddev->disks) &&
2380 mddev->raid_disks == 0 &&
2386 return sprintf(page, "%s\n", array_states[st]);
2389 static int do_md_stop(mddev_t * mddev, int ro);
2390 static int do_md_run(mddev_t * mddev);
2391 static int restart_array(mddev_t *mddev);
2394 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2397 enum array_state st = match_word(buf, array_states);
2402 /* stopping an active array */
2404 if (atomic_read(&mddev->active) > 1)
2406 err = do_md_stop(mddev, 0);
2410 /* stopping an active array */
2412 if (atomic_read(&mddev->active) > 1)
2414 err = do_md_stop(mddev, 2);
2418 break; /* not supported yet */
2421 err = do_md_stop(mddev, 1);
2424 err = do_md_run(mddev);
2428 /* stopping an active array */
2430 err = do_md_stop(mddev, 1);
2432 mddev->ro = 2; /* FIXME mark devices writable */
2435 err = do_md_run(mddev);
2440 restart_array(mddev);
2441 spin_lock_irq(&mddev->write_lock);
2442 if (atomic_read(&mddev->writes_pending) == 0) {
2444 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
2446 spin_unlock_irq(&mddev->write_lock);
2449 mddev->recovery_cp = MaxSector;
2450 err = do_md_run(mddev);
2455 restart_array(mddev);
2456 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2457 wake_up(&mddev->sb_wait);
2461 err = do_md_run(mddev);
2466 /* these cannot be set */
2474 static struct md_sysfs_entry md_array_state =
2475 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
2478 null_show(mddev_t *mddev, char *page)
2484 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2486 /* buf must be %d:%d\n? giving major and minor numbers */
2487 /* The new device is added to the array.
2488 * If the array has a persistent superblock, we read the
2489 * superblock to initialise info and check validity.
2490 * Otherwise, the only checking done is that in bind_rdev_to_array,
2491 * which mainly checks size.
2494 int major = simple_strtoul(buf, &e, 10);
2500 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2502 minor = simple_strtoul(e+1, &e, 10);
2503 if (*e && *e != '\n')
2505 dev = MKDEV(major, minor);
2506 if (major != MAJOR(dev) ||
2507 minor != MINOR(dev))
2511 if (mddev->persistent) {
2512 rdev = md_import_device(dev, mddev->major_version,
2513 mddev->minor_version);
2514 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2515 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2516 mdk_rdev_t, same_set);
2517 err = super_types[mddev->major_version]
2518 .load_super(rdev, rdev0, mddev->minor_version);
2523 rdev = md_import_device(dev, -1, -1);
2526 return PTR_ERR(rdev);
2527 err = bind_rdev_to_array(rdev, mddev);
2531 return err ? err : len;
2534 static struct md_sysfs_entry md_new_device =
2535 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
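/*
 * Example (names illustrative): add the device with major:minor 8:17
 * (typically sdb1) to the array
 *   echo 8:17 > /sys/block/md0/md/new_dev
 */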
2538 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2541 unsigned long chunk, end_chunk;
2545 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2547 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2548 if (buf == end) break;
2549 if (*end == '-') { /* range */
2551 end_chunk = simple_strtoul(buf, &end, 0);
2552 if (buf == end) break;
2554 if (*end && !isspace(*end)) break;
2555 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2557 while (isspace(*buf)) buf++;
2559 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2564 static struct md_sysfs_entry md_bitmap =
2565 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
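/*
 * Editor's note (illustrative): bitmap_store() accepts a whitespace-
 * separated list of chunk numbers and ranges.  A hypothetical write of
 *
 *	"100 200-205 4000"
 *
 * dirties chunk 100, chunks 200 through 205 and chunk 4000, after
 * which bitmap_unplug() flushes the newly dirtied bits to disk.
 */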
2568 size_show(mddev_t *mddev, char *page)
2570 return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2573 static int update_size(mddev_t *mddev, unsigned long size);
2576 size_store(mddev_t *mddev, const char *buf, size_t len)
2578 /* If array is inactive, we can reduce the component size, but
2579 * not increase it (except from 0).
2580 * If array is active, we can try an on-line resize
2584 unsigned long long size = simple_strtoull(buf, &e, 10);
2585 if (!*buf || *buf == '\n' ||
2590 err = update_size(mddev, size);
2591 md_update_sb(mddev, 1);
2593 if (mddev->size == 0 ||
2599 return err ? err : len;
2602 static struct md_sysfs_entry md_size =
2603 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
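/*
 * Editor's note (worked example, hypothetical numbers): component_size
 * is the KiB used per device.  On an active array whose smallest member
 * can offer 1048576 KiB, writing "0" asks update_size() below to grow
 * each component to that maximum, while writing "524288" shrinks the
 * used area of every member to 512 MiB.
 */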
2607 * This is either 'none' for arrays with externally managed metadata,
2608 * or N.M for internally known formats
2611 metadata_show(mddev_t *mddev, char *page)
2613 if (mddev->persistent)
2614 return sprintf(page, "%d.%d\n",
2615 mddev->major_version, mddev->minor_version);
2617 return sprintf(page, "none\n");
2621 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2625 if (!list_empty(&mddev->disks))
2628 if (cmd_match(buf, "none")) {
2629 mddev->persistent = 0;
2630 mddev->major_version = 0;
2631 mddev->minor_version = 90;
2634 major = simple_strtoul(buf, &e, 10);
2635 if (e==buf || *e != '.')
2637 buf = e+1;
2638 minor = simple_strtoul(buf, &e, 10);
2639 if (e==buf || (*e && *e != '\n') )
2641 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2643 mddev->major_version = major;
2644 mddev->minor_version = minor;
2645 mddev->persistent = 1;
2649 static struct md_sysfs_entry md_metadata =
2650 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
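/*
 * Editor's note (illustrative): metadata_store() accepts either "none"
 * (metadata managed outside the kernel) or "major.minor" such as
 * "0.90" or "1.2".  Only major versions with an entry in super_types[]
 * are accepted, so a value like "2.0" is rejected here.
 */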
2653 action_show(mddev_t *mddev, char *page)
2655 char *type = "idle";
2656 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2657 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2658 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2660 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2661 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2663 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2670 return sprintf(page, "%s\n", type);
2674 action_store(mddev_t *mddev, const char *page, size_t len)
2676 if (!mddev->pers || !mddev->pers->sync_request)
2679 if (cmd_match(page, "idle")) {
2680 if (mddev->sync_thread) {
2681 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2682 md_unregister_thread(mddev->sync_thread);
2683 mddev->sync_thread = NULL;
2684 mddev->recovery = 0;
2686 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2687 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2689 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2690 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2691 else if (cmd_match(page, "reshape")) {
2693 if (mddev->pers->start_reshape == NULL)
2695 err = mddev->pers->start_reshape(mddev);
2699 if (cmd_match(page, "check"))
2700 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2701 else if (!cmd_match(page, "repair"))
2703 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2704 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2706 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2707 md_wakeup_thread(mddev->thread);
2712 mismatch_cnt_show(mddev_t *mddev, char *page)
2714 return sprintf(page, "%llu\n",
2715 (unsigned long long) mddev->resync_mismatches);
2718 static struct md_sysfs_entry md_scan_mode =
2719 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
2722 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
2725 sync_min_show(mddev_t *mddev, char *page)
2727 return sprintf(page, "%d (%s)\n", speed_min(mddev),
2728 mddev->sync_speed_min ? "local": "system");
2732 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2736 if (strncmp(buf, "system", 6)==0) {
2737 mddev->sync_speed_min = 0;
2740 min = simple_strtoul(buf, &e, 10);
2741 if (buf == e || (*e && *e != '\n') || min <= 0)
2743 mddev->sync_speed_min = min;
2747 static struct md_sysfs_entry md_sync_min =
2748 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2751 sync_max_show(mddev_t *mddev, char *page)
2753 return sprintf(page, "%d (%s)\n", speed_max(mddev),
2754 mddev->sync_speed_max ? "local": "system");
2758 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2762 if (strncmp(buf, "system", 6)==0) {
2763 mddev->sync_speed_max = 0;
2766 max = simple_strtoul(buf, &e, 10);
2767 if (buf == e || (*e && *e != '\n') || max <= 0)
2769 mddev->sync_speed_max = max;
2773 static struct md_sysfs_entry md_sync_max =
2774 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
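/*
 * Editor's note (illustrative): both sync_speed_min and sync_speed_max
 * accept a number in KB/sec or the literal "system".  Writing "system"
 * zeroes the per-array value, so speed_min()/speed_max() fall back to
 * the global sysctl limits; e.g. writing "50000" to sync_speed_max
 * caps this one array's resync at 50000 KB/sec.
 */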
2778 sync_speed_show(mddev_t *mddev, char *page)
2780 unsigned long resync, dt, db;
2781 resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
2782 dt = ((jiffies - mddev->resync_mark) / HZ);
2783 if (!dt) dt++; /* guard the division below */
2784 db = resync - (mddev->resync_mark_cnt);
2785 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2788 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
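/*
 * Editor's note (worked example, hypothetical numbers): sync_speed
 * reports sectors completed since the last rate mark, halved into
 * KB/sec.  If db = 40960 sectors were synced over dt = 10 seconds,
 * sync_speed prints 40960 / 10 / 2 = 2048 (KB/sec), two 512-byte
 * sectors making up each KB.
 */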
2791 sync_completed_show(mddev_t *mddev, char *page)
2793 unsigned long max_blocks, resync;
2795 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2796 max_blocks = mddev->resync_max_sectors;
2798 max_blocks = mddev->size << 1;
2800 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2801 return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2804 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
2807 suspend_lo_show(mddev_t *mddev, char *page)
2809 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2813 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2816 unsigned long long new = simple_strtoull(buf, &e, 10);
2818 if (mddev->pers->quiesce == NULL)
2820 if (buf == e || (*e && *e != '\n'))
2822 if (new >= mddev->suspend_hi ||
2823 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2824 mddev->suspend_lo = new;
2825 mddev->pers->quiesce(mddev, 2);
2830 static struct md_sysfs_entry md_suspend_lo =
2831 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2835 suspend_hi_show(mddev_t *mddev, char *page)
2837 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2841 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2844 unsigned long long new = simple_strtoull(buf, &e, 10);
2846 if (mddev->pers->quiesce == NULL)
2848 if (buf == e || (*e && *e != '\n'))
2850 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2851 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2852 mddev->suspend_hi = new;
2853 mddev->pers->quiesce(mddev, 1);
2854 mddev->pers->quiesce(mddev, 0);
2859 static struct md_sysfs_entry md_suspend_hi =
2860 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
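/*
 * Editor's note (illustrative): suspend_lo and suspend_hi bound a
 * sector window [suspend_lo, suspend_hi) in which the personality is
 * asked to quiesce I/O.  A hypothetical sequence: write 1000000 to
 * suspend_hi to quiesce sectors below 1000000, then raise suspend_lo
 * to 500000 to shrink the window to [500000, 1000000).
 */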
2863 static struct attribute *md_default_attrs[] = {
2866 &md_raid_disks.attr,
2867 &md_chunk_size.attr,
2869 &md_resync_start.attr,
2871 &md_new_device.attr,
2872 &md_safe_delay.attr,
2873 &md_array_state.attr,
2877 static struct attribute *md_redundancy_attrs[] = {
2879 &md_mismatches.attr,
2882 &md_sync_speed.attr,
2883 &md_sync_completed.attr,
2884 &md_suspend_lo.attr,
2885 &md_suspend_hi.attr,
2889 static struct attribute_group md_redundancy_group = {
2891 .attrs = md_redundancy_attrs,
2896 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2898 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2899 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2904 rv = mddev_lock(mddev);
2906 rv = entry->show(mddev, page);
2907 mddev_unlock(mddev);
2913 md_attr_store(struct kobject *kobj, struct attribute *attr,
2914 const char *page, size_t length)
2916 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2917 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2922 if (!capable(CAP_SYS_ADMIN))
2924 rv = mddev_lock(mddev);
2926 rv = entry->store(mddev, page, length);
2927 mddev_unlock(mddev);
2932 static void md_free(struct kobject *ko)
2934 mddev_t *mddev = container_of(ko, mddev_t, kobj);
2938 static struct sysfs_ops md_sysfs_ops = {
2939 .show = md_attr_show,
2940 .store = md_attr_store,
2942 static struct kobj_type md_ktype = {
2944 .sysfs_ops = &md_sysfs_ops,
2945 .default_attrs = md_default_attrs,
2950 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2952 static DEFINE_MUTEX(disks_mutex);
2953 mddev_t *mddev = mddev_find(dev);
2954 struct gendisk *disk;
2955 int partitioned = (MAJOR(dev) != MD_MAJOR);
2956 int shift = partitioned ? MdpMinorShift : 0;
2957 int unit = MINOR(dev) >> shift;
2962 mutex_lock(&disks_mutex);
2963 if (mddev->gendisk) {
2964 mutex_unlock(&disks_mutex);
2968 disk = alloc_disk(1 << shift);
2970 mutex_unlock(&disks_mutex);
2974 disk->major = MAJOR(dev);
2975 disk->first_minor = unit << shift;
2977 sprintf(disk->disk_name, "md_d%d", unit);
2979 sprintf(disk->disk_name, "md%d", unit);
2980 disk->fops = &md_fops;
2981 disk->private_data = mddev;
2982 disk->queue = mddev->queue;
2984 mddev->gendisk = disk;
2985 mutex_unlock(&disks_mutex);
2986 mddev->kobj.parent = &disk->kobj;
2987 mddev->kobj.k_name = NULL;
2988 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2989 mddev->kobj.ktype = &md_ktype;
2990 if (kobject_register(&mddev->kobj))
2991 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
2996 static void md_safemode_timeout(unsigned long data)
2998 mddev_t *mddev = (mddev_t *) data;
3000 mddev->safemode = 1;
3001 md_wakeup_thread(mddev->thread);
3004 static int start_dirty_degraded;
3006 static int do_md_run(mddev_t * mddev)
3010 struct list_head *tmp;
3012 struct gendisk *disk;
3013 struct mdk_personality *pers;
3014 char b[BDEVNAME_SIZE];
3016 if (list_empty(&mddev->disks))
3017 /* cannot run an array with no devices.. */
3024 * Analyze all RAID superblock(s)
3026 if (!mddev->raid_disks)
3029 chunk_size = mddev->chunk_size;
3032 if (chunk_size > MAX_CHUNK_SIZE) {
3033 printk(KERN_ERR "too big chunk_size: %d > %d\n",
3034 chunk_size, MAX_CHUNK_SIZE);
3038 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
3040 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3041 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3044 if (chunk_size < PAGE_SIZE) {
3045 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3046 chunk_size, PAGE_SIZE);
3050 /* devices must have a minimum size of one chunk */
3051 ITERATE_RDEV(mddev,rdev,tmp) {
3052 if (test_bit(Faulty, &rdev->flags))
3054 if (rdev->size < chunk_size / 1024) {
3056 "md: Dev %s smaller than chunk_size:"
3058 bdevname(rdev->bdev,b),
3059 (unsigned long long)rdev->size,
3067 if (mddev->level != LEVEL_NONE)
3068 request_module("md-level-%d", mddev->level);
3069 else if (mddev->clevel[0])
3070 request_module("md-%s", mddev->clevel);
3074 * Drop all container device buffers, from now on
3075 * the only valid external interface is through the md
3076 * device.
3077 * Also find largest hardsector size
3079 ITERATE_RDEV(mddev,rdev,tmp) {
3080 if (test_bit(Faulty, &rdev->flags))
3082 sync_blockdev(rdev->bdev);
3083 invalidate_bdev(rdev->bdev);
3086 md_probe(mddev->unit, NULL, NULL);
3087 disk = mddev->gendisk;
3091 spin_lock(&pers_lock);
3092 pers = find_pers(mddev->level, mddev->clevel);
3093 if (!pers || !try_module_get(pers->owner)) {
3094 spin_unlock(&pers_lock);
3095 if (mddev->level != LEVEL_NONE)
3096 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3099 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3104 spin_unlock(&pers_lock);
3105 mddev->level = pers->level;
3106 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3108 if (mddev->reshape_position != MaxSector &&
3109 pers->start_reshape == NULL) {
3110 /* This personality cannot handle reshaping... */
3112 module_put(pers->owner);
3116 if (pers->sync_request) {
3117 /* Warn if this is a potentially silly
3118 * configuration.
3119 */
3120 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3122 struct list_head *tmp2;
3124 ITERATE_RDEV(mddev, rdev, tmp) {
3125 ITERATE_RDEV(mddev, rdev2, tmp2) {
3127 rdev->bdev->bd_contains ==
3128 rdev2->bdev->bd_contains) {
3130 "%s: WARNING: %s appears to be"
3131 " on the same physical disk as"
3134 bdevname(rdev->bdev,b),
3135 bdevname(rdev2->bdev,b2));
3142 "True protection against single-disk"
3143 " failure might be compromised.\n");
3146 mddev->recovery = 0;
3147 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3148 mddev->barriers_work = 1;
3149 mddev->ok_start_degraded = start_dirty_degraded;
3152 mddev->ro = 2; /* read-only, but switch on first write */
3154 err = mddev->pers->run(mddev);
3155 if (!err && mddev->pers->sync_request) {
3156 err = bitmap_create(mddev);
3158 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3159 mdname(mddev), err);
3160 mddev->pers->stop(mddev);
3164 printk(KERN_ERR "md: pers->run() failed ...\n");
3165 module_put(mddev->pers->owner);
3167 bitmap_destroy(mddev);
3170 if (mddev->pers->sync_request) {
3171 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3173 "md: cannot register extra attributes for %s\n",
3175 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
3178 atomic_set(&mddev->writes_pending,0);
3179 mddev->safemode = 0;
3180 mddev->safemode_timer.function = md_safemode_timeout;
3181 mddev->safemode_timer.data = (unsigned long) mddev;
3182 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3185 ITERATE_RDEV(mddev,rdev,tmp)
3186 if (rdev->raid_disk >= 0) {
3188 sprintf(nm, "rd%d", rdev->raid_disk);
3189 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3190 printk("md: cannot register %s for %s\n",
3194 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3197 md_update_sb(mddev, 0);
3199 set_capacity(disk, mddev->array_size<<1);
3201 /* If we call blk_queue_make_request here, it will
3202 * re-initialise max_sectors etc which may have been
3203 * refined inside ->run(). So just set the bits we need to set.
3204 * Most initialisation happened when we called
3205 * blk_queue_make_request(..., md_fail_request)
3208 mddev->queue->queuedata = mddev;
3209 mddev->queue->make_request_fn = mddev->pers->make_request;
3211 /* If there is a partially-recovered drive we need to
3212 * start recovery here. If we leave it to md_check_recovery,
3213 * it will remove the drives and not do the right thing
3215 if (mddev->degraded && !mddev->sync_thread) {
3216 struct list_head *rtmp;
3218 ITERATE_RDEV(mddev,rdev,rtmp)
3219 if (rdev->raid_disk >= 0 &&
3220 !test_bit(In_sync, &rdev->flags) &&
3221 !test_bit(Faulty, &rdev->flags))
3222 /* complete an interrupted recovery */
3223 spares++;
3224 if (spares && mddev->pers->sync_request) {
3225 mddev->recovery = 0;
3226 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3227 mddev->sync_thread = md_register_thread(md_do_sync,
3230 if (!mddev->sync_thread) {
3231 printk(KERN_ERR "%s: could not start resync"
3234 /* leave the spares where they are, it shouldn't hurt */
3235 mddev->recovery = 0;
3239 md_wakeup_thread(mddev->thread);
3240 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3243 md_new_event(mddev);
3244 kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
3248 static int restart_array(mddev_t *mddev)
3250 struct gendisk *disk = mddev->gendisk;
3254 * Complain if it has no devices
3257 if (list_empty(&mddev->disks))
3265 mddev->safemode = 0;
3267 set_disk_ro(disk, 0);
3269 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3272 * Kick recovery or resync if necessary
3274 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3275 md_wakeup_thread(mddev->thread);
3276 md_wakeup_thread(mddev->sync_thread);
3285 /* similar to deny_write_access, but accounts for our holding a reference
3286 * to the file ourselves */
3287 static int deny_bitmap_write_access(struct file * file)
3289 struct inode *inode = file->f_mapping->host;
3291 spin_lock(&inode->i_lock);
3292 if (atomic_read(&inode->i_writecount) > 1) {
3293 spin_unlock(&inode->i_lock);
3296 atomic_set(&inode->i_writecount, -1);
3297 spin_unlock(&inode->i_lock);
3302 static void restore_bitmap_write_access(struct file *file)
3304 struct inode *inode = file->f_mapping->host;
3306 spin_lock(&inode->i_lock);
3307 atomic_set(&inode->i_writecount, 1);
3308 spin_unlock(&inode->i_lock);
3312 * 0 - completely stop and disassemble array
3313 * 1 - switch to readonly
3314 * 2 - stop but do not disassemble array
3316 static int do_md_stop(mddev_t * mddev, int mode)
3319 struct gendisk *disk = mddev->gendisk;
3322 if (atomic_read(&mddev->active)>2) {
3323 printk("md: %s still in use.\n",mdname(mddev));
3327 if (mddev->sync_thread) {
3328 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3329 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3330 md_unregister_thread(mddev->sync_thread);
3331 mddev->sync_thread = NULL;
3334 del_timer_sync(&mddev->safemode_timer);
3336 invalidate_partition(disk, 0);
3339 case 1: /* readonly */
3345 case 0: /* disassemble */
3347 bitmap_flush(mddev);
3348 md_super_wait(mddev);
3350 set_disk_ro(disk, 0);
3351 blk_queue_make_request(mddev->queue, md_fail_request);
3352 mddev->pers->stop(mddev);
3353 mddev->queue->merge_bvec_fn = NULL;
3354 mddev->queue->unplug_fn = NULL;
3355 mddev->queue->issue_flush_fn = NULL;
3356 mddev->queue->backing_dev_info.congested_fn = NULL;
3357 if (mddev->pers->sync_request)
3358 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3360 module_put(mddev->pers->owner);
3363 set_capacity(disk, 0);
3369 if (!mddev->in_sync || mddev->flags) {
3370 /* mark array as shutdown cleanly */
3372 md_update_sb(mddev, 1);
3375 set_disk_ro(disk, 1);
3376 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3380 * Free resources if final stop
3384 struct list_head *tmp;
3386 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3388 bitmap_destroy(mddev);
3389 if (mddev->bitmap_file) {
3390 restore_bitmap_write_access(mddev->bitmap_file);
3391 fput(mddev->bitmap_file);
3392 mddev->bitmap_file = NULL;
3394 mddev->bitmap_offset = 0;
3396 ITERATE_RDEV(mddev,rdev,tmp)
3397 if (rdev->raid_disk >= 0) {
3399 sprintf(nm, "rd%d", rdev->raid_disk);
3400 sysfs_remove_link(&mddev->kobj, nm);
3403 /* make sure all delayed_delete calls have finished */
3404 flush_scheduled_work();
3406 export_array(mddev);
3408 mddev->array_size = 0;
3410 mddev->raid_disks = 0;
3411 mddev->recovery_cp = 0;
3413 } else if (mddev->pers)
3414 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3417 md_new_event(mddev);
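/*
 * Editor's recap (illustrative): the three do_md_stop() modes seen
 * above are
 *
 *	do_md_stop(mddev, 0)	stop, disassemble and free everything
 *	do_md_stop(mddev, 1)	stop writes and mark the disk read-only
 *	do_md_stop(mddev, 2)	stop the array but keep it assembled
 *
 * and array_state_store() earlier issues exactly these three calls.
 */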
3423 static void autorun_array(mddev_t *mddev)
3426 struct list_head *tmp;
3429 if (list_empty(&mddev->disks))
3432 printk(KERN_INFO "md: running: ");
3434 ITERATE_RDEV(mddev,rdev,tmp) {
3435 char b[BDEVNAME_SIZE];
3436 printk("<%s>", bdevname(rdev->bdev,b));
3440 err = do_md_run (mddev);
3442 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3443 do_md_stop (mddev, 0);
3448 * let's try to run arrays based on all disks that have arrived
3449 * until now. (those are in pending_raid_disks)
3451 * the method: pick the first pending disk, collect all disks with
3452 * the same UUID, remove all from the pending list and put them into
3453 * the 'same_array' list. Then order this list based on superblock
3454 * update time (freshest comes first), kick out 'old' disks and
3455 * compare superblocks. If everything's fine then run it.
3457 * If "unit" is allocated, then bump its reference count
3459 static void autorun_devices(int part)
3461 struct list_head *tmp;
3462 mdk_rdev_t *rdev0, *rdev;
3464 char b[BDEVNAME_SIZE];
3466 printk(KERN_INFO "md: autorun ...\n");
3467 while (!list_empty(&pending_raid_disks)) {
3470 LIST_HEAD(candidates);
3471 rdev0 = list_entry(pending_raid_disks.next,
3472 mdk_rdev_t, same_set);
3474 printk(KERN_INFO "md: considering %s ...\n",
3475 bdevname(rdev0->bdev,b));
3476 INIT_LIST_HEAD(&candidates);
3477 ITERATE_RDEV_PENDING(rdev,tmp)
3478 if (super_90_load(rdev, rdev0, 0) >= 0) {
3479 printk(KERN_INFO "md: adding %s ...\n",
3480 bdevname(rdev->bdev,b));
3481 list_move(&rdev->same_set, &candidates);
3484 * now we have a set of devices, with all of them having
3485 * mostly sane superblocks. It's time to allocate the
3486 * array.
3487 */
3489 dev = MKDEV(mdp_major,
3490 rdev0->preferred_minor << MdpMinorShift);
3491 unit = MINOR(dev) >> MdpMinorShift;
3493 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3496 if (rdev0->preferred_minor != unit) {
3497 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3498 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3502 md_probe(dev, NULL, NULL);
3503 mddev = mddev_find(dev);
3506 "md: cannot allocate memory for md drive.\n");
3509 if (mddev_lock(mddev))
3510 printk(KERN_WARNING "md: %s locked, cannot run\n",
3512 else if (mddev->raid_disks || mddev->major_version
3513 || !list_empty(&mddev->disks)) {
3515 "md: %s already running, cannot run %s\n",
3516 mdname(mddev), bdevname(rdev0->bdev,b));
3517 mddev_unlock(mddev);
3519 printk(KERN_INFO "md: created %s\n", mdname(mddev));
3520 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3521 list_del_init(&rdev->same_set);
3522 if (bind_rdev_to_array(rdev, mddev))
3525 autorun_array(mddev);
3526 mddev_unlock(mddev);
3528 /* on success, candidates will be empty, on error
3529 * it won't...
3530 */
3531 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3535 printk(KERN_INFO "md: ... autorun DONE.\n");
3537 #endif /* !MODULE */
3539 static int get_version(void __user * arg)
3543 ver.major = MD_MAJOR_VERSION;
3544 ver.minor = MD_MINOR_VERSION;
3545 ver.patchlevel = MD_PATCHLEVEL_VERSION;
3547 if (copy_to_user(arg, &ver, sizeof(ver)))
3553 static int get_array_info(mddev_t * mddev, void __user * arg)
3555 mdu_array_info_t info;
3556 int nr,working,active,failed,spare;
3558 struct list_head *tmp;
3560 nr=working=active=failed=spare=0;
3561 ITERATE_RDEV(mddev,rdev,tmp) {
3563 if (test_bit(Faulty, &rdev->flags))
3567 if (test_bit(In_sync, &rdev->flags))
3574 info.major_version = mddev->major_version;
3575 info.minor_version = mddev->minor_version;
3576 info.patch_version = MD_PATCHLEVEL_VERSION;
3577 info.ctime = mddev->ctime;
3578 info.level = mddev->level;
3579 info.size = mddev->size;
3580 if (info.size != mddev->size) /* overflow */
3583 info.raid_disks = mddev->raid_disks;
3584 info.md_minor = mddev->md_minor;
3585 info.not_persistent= !mddev->persistent;
3587 info.utime = mddev->utime;
3590 info.state = (1<<MD_SB_CLEAN);
3591 if (mddev->bitmap && mddev->bitmap_offset)
3592 info.state = (1<<MD_SB_BITMAP_PRESENT);
3593 info.active_disks = active;
3594 info.working_disks = working;
3595 info.failed_disks = failed;
3596 info.spare_disks = spare;
3598 info.layout = mddev->layout;
3599 info.chunk_size = mddev->chunk_size;
3601 if (copy_to_user(arg, &info, sizeof(info)))
3607 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3609 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3610 char *ptr, *buf = NULL;
3613 md_allow_write(mddev);
3615 file = kmalloc(sizeof(*file), GFP_KERNEL);
3619 /* bitmap disabled, zero the first byte and copy out */
3620 if (!mddev->bitmap || !mddev->bitmap->file) {
3621 file->pathname[0] = '\0';
3625 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3629 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3633 strcpy(file->pathname, ptr);
3637 if (copy_to_user(arg, file, sizeof(*file)))
3645 static int get_disk_info(mddev_t * mddev, void __user * arg)
3647 mdu_disk_info_t info;
3651 if (copy_from_user(&info, arg, sizeof(info)))
3656 rdev = find_rdev_nr(mddev, nr);
3658 info.major = MAJOR(rdev->bdev->bd_dev);
3659 info.minor = MINOR(rdev->bdev->bd_dev);
3660 info.raid_disk = rdev->raid_disk;
3662 if (test_bit(Faulty, &rdev->flags))
3663 info.state |= (1<<MD_DISK_FAULTY);
3664 else if (test_bit(In_sync, &rdev->flags)) {
3665 info.state |= (1<<MD_DISK_ACTIVE);
3666 info.state |= (1<<MD_DISK_SYNC);
3668 if (test_bit(WriteMostly, &rdev->flags))
3669 info.state |= (1<<MD_DISK_WRITEMOSTLY);
3671 info.major = info.minor = 0;
3672 info.raid_disk = -1;
3673 info.state = (1<<MD_DISK_REMOVED);
3676 if (copy_to_user(arg, &info, sizeof(info)))
3682 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3684 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3686 dev_t dev = MKDEV(info->major,info->minor);
3688 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3691 if (!mddev->raid_disks) {
3693 /* expecting a device which has a superblock */
3694 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3697 "md: md_import_device returned %ld\n",
3699 return PTR_ERR(rdev);
3701 if (!list_empty(&mddev->disks)) {
3702 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3703 mdk_rdev_t, same_set);
3704 int err = super_types[mddev->major_version]
3705 .load_super(rdev, rdev0, mddev->minor_version);
3708 "md: %s has different UUID to %s\n",
3709 bdevname(rdev->bdev,b),
3710 bdevname(rdev0->bdev,b2));
3715 err = bind_rdev_to_array(rdev, mddev);
3722 * add_new_disk can be used once the array is assembled
3723 * to add "hot spares".  They must already have a superblock
3724 * written.
3725 */
3728 if (!mddev->pers->hot_add_disk) {
3730 "%s: personality does not support diskops!\n",
3734 if (mddev->persistent)
3735 rdev = md_import_device(dev, mddev->major_version,
3736 mddev->minor_version);
3738 rdev = md_import_device(dev, -1, -1);
3741 "md: md_import_device returned %ld\n",
3743 return PTR_ERR(rdev);
3745 /* set saved_raid_disk if appropriate */
3746 if (!mddev->persistent) {
3747 if (info->state & (1<<MD_DISK_SYNC) &&
3748 info->raid_disk < mddev->raid_disks)
3749 rdev->raid_disk = info->raid_disk;
3751 rdev->raid_disk = -1;
3753 super_types[mddev->major_version].
3754 validate_super(mddev, rdev);
3755 rdev->saved_raid_disk = rdev->raid_disk;
3757 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3758 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3759 set_bit(WriteMostly, &rdev->flags);
3761 rdev->raid_disk = -1;
3762 err = bind_rdev_to_array(rdev, mddev);
3763 if (!err && !mddev->pers->hot_remove_disk) {
3764 /* If there is hot_add_disk but no hot_remove_disk
3765 * then added disks are for geometry changes,
3766 * and should be added immediately.
3768 super_types[mddev->major_version].
3769 validate_super(mddev, rdev);
3770 err = mddev->pers->hot_add_disk(mddev, rdev);
3772 unbind_rdev_from_array(rdev);
3777 md_update_sb(mddev, 1);
3778 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3779 md_wakeup_thread(mddev->thread);
3783 /* otherwise, add_new_disk is only allowed
3784 * for major_version==0 superblocks
3786 if (mddev->major_version != 0) {
3787 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3792 if (!(info->state & (1<<MD_DISK_FAULTY))) {
3794 rdev = md_import_device (dev, -1, 0);
3797 "md: error, md_import_device() returned %ld\n",
3799 return PTR_ERR(rdev);
3801 rdev->desc_nr = info->number;
3802 if (info->raid_disk < mddev->raid_disks)
3803 rdev->raid_disk = info->raid_disk;
3805 rdev->raid_disk = -1;
3809 if (rdev->raid_disk < mddev->raid_disks)
3810 if (info->state & (1<<MD_DISK_SYNC))
3811 set_bit(In_sync, &rdev->flags);
3813 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3814 set_bit(WriteMostly, &rdev->flags);
3816 if (!mddev->persistent) {
3817 printk(KERN_INFO "md: nonpersistent superblock ...\n");
3818 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3820 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3821 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3823 err = bind_rdev_to_array(rdev, mddev);
3833 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3835 char b[BDEVNAME_SIZE];
3841 rdev = find_rdev(mddev, dev);
3845 if (rdev->raid_disk >= 0)
3848 kick_rdev_from_array(rdev);
3849 md_update_sb(mddev, 1);
3850 md_new_event(mddev);
3854 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3855 bdevname(rdev->bdev,b), mdname(mddev));
3859 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3861 char b[BDEVNAME_SIZE];
3869 if (mddev->major_version != 0) {
3870 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3871 " version-0 superblocks.\n",
3875 if (!mddev->pers->hot_add_disk) {
3877 "%s: personality does not support diskops!\n",
3882 rdev = md_import_device (dev, -1, 0);
3885 "md: error, md_import_device() returned %ld\n",
3890 if (mddev->persistent)
3891 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3894 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3896 size = calc_dev_size(rdev, mddev->chunk_size);
3899 if (test_bit(Faulty, &rdev->flags)) {
3901 "md: can not hot-add faulty %s disk to %s!\n",
3902 bdevname(rdev->bdev,b), mdname(mddev));
3906 clear_bit(In_sync, &rdev->flags);
3908 rdev->saved_raid_disk = -1;
3909 err = bind_rdev_to_array(rdev, mddev);
3914 * The rest had better be atomic; we can have disk failures
3915 * noticed in interrupt contexts ...
3918 if (rdev->desc_nr == mddev->max_disks) {
3919 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3922 goto abort_unbind_export;
3925 rdev->raid_disk = -1;
3927 md_update_sb(mddev, 1);
3930 * Kick recovery, maybe this spare has to be added to the
3931 * array immediately.
3933 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3934 md_wakeup_thread(mddev->thread);
3935 md_new_event(mddev);
3938 abort_unbind_export:
3939 unbind_rdev_from_array(rdev);
3946 static int set_bitmap_file(mddev_t *mddev, int fd)
3951 if (!mddev->pers->quiesce)
3953 if (mddev->recovery || mddev->sync_thread)
3955 /* we should be able to change the bitmap.. */
3961 return -EEXIST; /* cannot add when bitmap is present */
3962 mddev->bitmap_file = fget(fd);
3964 if (mddev->bitmap_file == NULL) {
3965 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3970 err = deny_bitmap_write_access(mddev->bitmap_file);
3972 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3974 fput(mddev->bitmap_file);
3975 mddev->bitmap_file = NULL;
3978 mddev->bitmap_offset = 0; /* file overrides offset */
3979 } else if (mddev->bitmap == NULL)
3980 return -ENOENT; /* cannot remove what isn't there */
3983 mddev->pers->quiesce(mddev, 1);
3985 err = bitmap_create(mddev);
3986 if (fd < 0 || err) {
3987 bitmap_destroy(mddev);
3988 fd = -1; /* make sure to put the file */
3990 mddev->pers->quiesce(mddev, 0);
3993 if (mddev->bitmap_file) {
3994 restore_bitmap_write_access(mddev->bitmap_file);
3995 fput(mddev->bitmap_file);
3997 mddev->bitmap_file = NULL;
4004 * set_array_info is used two different ways
4005 * The original usage is when creating a new array.
4006 * In this usage, raid_disks is > 0 and it together with
4007 * level, size, not_persistent, layout, chunksize determine the
4008 * shape of the array.
4009 * This will always create an array with a type-0.90.0 superblock.
4010 * The newer usage is when assembling an array.
4011 * In this case raid_disks will be 0, and the major_version field is
4012 * used to determine which style super-blocks are to be found on the devices.
4013 * The minor and patch _version numbers are also kept in case the
4014 * super_block handler wishes to interpret them.
4016 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4019 if (info->raid_disks == 0) {
4020 /* just setting version number for superblock loading */
4021 if (info->major_version < 0 ||
4022 info->major_version >= ARRAY_SIZE(super_types) ||
4023 super_types[info->major_version].name == NULL) {
4024 /* maybe try to auto-load a module? */
4026 "md: superblock version %d not known\n",
4027 info->major_version);
4030 mddev->major_version = info->major_version;
4031 mddev->minor_version = info->minor_version;
4032 mddev->patch_version = info->patch_version;
4033 mddev->persistent = !info->not_persistent;
4036 mddev->major_version = MD_MAJOR_VERSION;
4037 mddev->minor_version = MD_MINOR_VERSION;
4038 mddev->patch_version = MD_PATCHLEVEL_VERSION;
4039 mddev->ctime = get_seconds();
4041 mddev->level = info->level;
4042 mddev->clevel[0] = 0;
4043 mddev->size = info->size;
4044 mddev->raid_disks = info->raid_disks;
4045 /* don't set md_minor, it is determined by which /dev/md* was
4046 * opened.
4047 */
4048 if (info->state & (1<<MD_SB_CLEAN))
4049 mddev->recovery_cp = MaxSector;
4051 mddev->recovery_cp = 0;
4052 mddev->persistent = ! info->not_persistent;
4054 mddev->layout = info->layout;
4055 mddev->chunk_size = info->chunk_size;
4057 mddev->max_disks = MD_SB_DISKS;
4060 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4062 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4063 mddev->bitmap_offset = 0;
4065 mddev->reshape_position = MaxSector;
4068 * Generate a 128 bit UUID
4070 get_random_bytes(mddev->uuid, 16);
4072 mddev->new_level = mddev->level;
4073 mddev->new_chunk = mddev->chunk_size;
4074 mddev->new_layout = mddev->layout;
4075 mddev->delta_disks = 0;
4080 static int update_size(mddev_t *mddev, unsigned long size)
4084 struct list_head *tmp;
4085 int fit = (size == 0);
4087 if (mddev->pers->resize == NULL)
4089 /* The "size" is the amount of each device that is used.
4090 * This can only make sense for arrays with redundancy.
4091 * linear and raid0 always use whatever space is available
4092 * We can only consider changing the size if no resync
4093 * or reconstruction is happening, and if the new size
4094 * is acceptable. It must fit before the sb_offset or,
4095 * if that is <data_offset, it must fit before the
4096 * size of each device.
4097 * If size is zero, we find the largest size that fits.
4099 if (mddev->sync_thread)
4101 ITERATE_RDEV(mddev,rdev,tmp) {
4103 avail = rdev->size * 2;
4105 if (fit && (size == 0 || size > avail/2))
4106 	size = avail/2; /* size == 0 means "largest size that fits" */
4107 if (avail < ((sector_t)size << 1))
4108 	return -ENOSPC;
4110 rv = mddev->pers->resize(mddev, (sector_t)size *2);
4112 struct block_device *bdev;
4114 bdev = bdget_disk(mddev->gendisk, 0);
4116 mutex_lock(&bdev->bd_inode->i_mutex);
4117 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4118 mutex_unlock(&bdev->bd_inode->i_mutex);
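/*
 * Editor's note (worked example, hypothetical numbers): "size" is KiB
 * per component.  If the smallest member offers avail = 2097152
 * sectors, a request of size = 0 "fits" and becomes avail/2 = 1048576
 * KiB, so ->resize() is asked for size * 2 = 2097152 sectors; any
 * request above 1048576 KiB fails the avail < (size << 1) check.
 */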
4125 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4128 /* change the number of raid disks */
4129 if (mddev->pers->check_reshape == NULL)
4131 if (raid_disks <= 0 ||
4132 raid_disks >= mddev->max_disks)
4134 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4136 mddev->delta_disks = raid_disks - mddev->raid_disks;
4138 rv = mddev->pers->check_reshape(mddev);
4144 * update_array_info is used to change the configuration of an
4146 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
4147 * fields in the info are checked against the array.
4148 * Any differences that cannot be handled will cause an error.
4149 * Normally, only one change can be managed at a time.
4151 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4157 /* calculate expected state, ignoring low bits */
4158 if (mddev->bitmap && mddev->bitmap_offset)
4159 state |= (1 << MD_SB_BITMAP_PRESENT);
4161 if (mddev->major_version != info->major_version ||
4162 mddev->minor_version != info->minor_version ||
4163 /* mddev->patch_version != info->patch_version || */
4164 mddev->ctime != info->ctime ||
4165 mddev->level != info->level ||
4166 /* mddev->layout != info->layout || */
4167 !mddev->persistent != info->not_persistent||
4168 mddev->chunk_size != info->chunk_size ||
4169 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4170 ((state^info->state) & 0xfffffe00)
4173 /* Check there is only one change */
4174 if (info->size >= 0 && mddev->size != info->size) cnt++;
4175 if (mddev->raid_disks != info->raid_disks) cnt++;
4176 if (mddev->layout != info->layout) cnt++;
4177 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4178 if (cnt == 0) return 0;
4179 if (cnt > 1) return -EINVAL;
4181 if (mddev->layout != info->layout) {
4183 * we don't need to do anything at the md level, the
4184 * personality will take care of it all.
4186 if (mddev->pers->reconfig == NULL)
4189 return mddev->pers->reconfig(mddev, info->layout, -1);
4191 if (info->size >= 0 && mddev->size != info->size)
4192 rv = update_size(mddev, info->size);
4194 if (mddev->raid_disks != info->raid_disks)
4195 rv = update_raid_disks(mddev, info->raid_disks);
4197 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4198 if (mddev->pers->quiesce == NULL)
4200 if (mddev->recovery || mddev->sync_thread)
4202 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4203 /* add the bitmap */
4206 if (mddev->default_bitmap_offset == 0)
4208 mddev->bitmap_offset = mddev->default_bitmap_offset;
4209 mddev->pers->quiesce(mddev, 1);
4210 rv = bitmap_create(mddev);
4212 bitmap_destroy(mddev);
4213 mddev->pers->quiesce(mddev, 0);
4215 /* remove the bitmap */
4218 if (mddev->bitmap->file)
4220 mddev->pers->quiesce(mddev, 1);
4221 bitmap_destroy(mddev);
4222 mddev->pers->quiesce(mddev, 0);
4223 mddev->bitmap_offset = 0;
4226 md_update_sb(mddev, 1);
4230 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4234 if (mddev->pers == NULL)
4237 rdev = find_rdev(mddev, dev);
4241 md_error(mddev, rdev);
4245 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4247 mddev_t *mddev = bdev->bd_disk->private_data;
4251 geo->cylinders = get_capacity(mddev->gendisk) / 8;
4255 static int md_ioctl(struct inode *inode, struct file *file,
4256 unsigned int cmd, unsigned long arg)
4259 void __user *argp = (void __user *)arg;
4260 mddev_t *mddev = NULL;
4262 if (!capable(CAP_SYS_ADMIN))
4266 * Commands dealing with the RAID driver but not any
4272 err = get_version(argp);
4275 case PRINT_RAID_DEBUG:
4283 autostart_arrays(arg);
4290 * Commands creating/starting a new array:
4293 mddev = inode->i_bdev->bd_disk->private_data;
4300 err = mddev_lock(mddev);
4303 "md: ioctl lock interrupted, reason %d, cmd %d\n",
4310 case SET_ARRAY_INFO:
4312 mdu_array_info_t info;
4314 memset(&info, 0, sizeof(info));
4315 else if (copy_from_user(&info, argp, sizeof(info))) {
4320 err = update_array_info(mddev, &info);
4322 printk(KERN_WARNING "md: couldn't update"
4323 " array info. %d\n", err);
4328 if (!list_empty(&mddev->disks)) {
4330 "md: array %s already has disks!\n",
4335 if (mddev->raid_disks) {
4337 "md: array %s already initialised!\n",
4342 err = set_array_info(mddev, &info);
4344 printk(KERN_WARNING "md: couldn't set"
4345 " array info. %d\n", err);
4355 * Commands querying/configuring an existing array:
4357 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4358 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4359 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4360 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4361 && cmd != GET_BITMAP_FILE) {
4367 * Commands even a read-only array can execute:
4371 case GET_ARRAY_INFO:
4372 err = get_array_info(mddev, argp);
4375 case GET_BITMAP_FILE:
4376 err = get_bitmap_file(mddev, argp);
4380 err = get_disk_info(mddev, argp);
4383 case RESTART_ARRAY_RW:
4384 err = restart_array(mddev);
4388 err = do_md_stop (mddev, 0);
4392 err = do_md_stop (mddev, 1);
4396 * We have a problem here : there is no easy way to give a CHS
4397 * virtual geometry. We currently pretend that we have 2 heads and
4398 * 4 sectors (with a BIG number of cylinders...). This drives
4399 * dosfs just mad... ;-)
4404 * The remaining ioctls are changing the state of the
4405 * superblock, so we do not allow them on read-only arrays.
4406 * However non-MD ioctls (e.g. get-size) will still come through
4407 * here and hit the 'default' below, so only disallow
4408 * 'md' ioctls, and switch to rw mode if started auto-readonly.
4410 if (_IOC_TYPE(cmd) == MD_MAJOR &&
4411 mddev->ro && mddev->pers) {
4412 if (mddev->ro == 2) {
4413 	mddev->ro = 0;
4414 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4415 md_wakeup_thread(mddev->thread);
4427 mdu_disk_info_t info;
4428 if (copy_from_user(&info, argp, sizeof(info)))
4431 err = add_new_disk(mddev, &info);
4435 case HOT_REMOVE_DISK:
4436 err = hot_remove_disk(mddev, new_decode_dev(arg));
4440 err = hot_add_disk(mddev, new_decode_dev(arg));
4443 case SET_DISK_FAULTY:
4444 err = set_disk_faulty(mddev, new_decode_dev(arg));
4448 err = do_md_run (mddev);
4451 case SET_BITMAP_FILE:
4452 err = set_bitmap_file(mddev, (int)arg);
4462 mddev_unlock(mddev);
4472 static int md_open(struct inode *inode, struct file *file)
4475 * Succeed if we can lock the mddev, which confirms that
4476 * it isn't being stopped right now.
4478 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4481 if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
4486 mddev_unlock(mddev);
4488 check_disk_change(inode->i_bdev);
4493 static int md_release(struct inode *inode, struct file * file)
4495 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4503 static int md_media_changed(struct gendisk *disk)
4505 mddev_t *mddev = disk->private_data;
4507 return mddev->changed;
4510 static int md_revalidate(struct gendisk *disk)
4512 mddev_t *mddev = disk->private_data;
4517 static struct block_device_operations md_fops =
4519 .owner = THIS_MODULE,
4521 .release = md_release,
4523 .getgeo = md_getgeo,
4524 .media_changed = md_media_changed,
4525 .revalidate_disk= md_revalidate,
4528 static int md_thread(void * arg)
4530 mdk_thread_t *thread = arg;
4533 * md_thread is a 'system-thread'; its priority should be very
4534 * high. We avoid resource deadlocks individually in each
4535 * raid personality. (RAID5 does preallocation) We also use RR and
4536 * the very same RT priority as kswapd, thus we will never get
4537 * into a priority inversion deadlock.
4539 * we definitely have to have equal or higher priority than
4540 * bdflush, otherwise bdflush will deadlock if there are too
4541 * many dirty RAID5 blocks.
4544 current->flags |= PF_NOFREEZE;
4545 allow_signal(SIGKILL);
4546 while (!kthread_should_stop()) {
4548 /* We need to wait INTERRUPTIBLE so that
4549 * we don't add to the load-average.
4550 * That means we need to be sure no signals are
4551 * pending.
4552 */
4553 if (signal_pending(current))
4554 flush_signals(current);
4556 wait_event_interruptible_timeout
4558 test_bit(THREAD_WAKEUP, &thread->flags)
4559 || kthread_should_stop(),
4562 clear_bit(THREAD_WAKEUP, &thread->flags);
4564 thread->run(thread->mddev);
4570 void md_wakeup_thread(mdk_thread_t *thread)
4573 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4574 set_bit(THREAD_WAKEUP, &thread->flags);
4575 wake_up(&thread->wqueue);
4579 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4582 mdk_thread_t *thread;
4584 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4588 init_waitqueue_head(&thread->wqueue);
4591 thread->mddev = mddev;
4592 thread->timeout = MAX_SCHEDULE_TIMEOUT;
4593 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4594 if (IS_ERR(thread->tsk)) {
4601 void md_unregister_thread(mdk_thread_t *thread)
4603 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
4605 kthread_stop(thread->tsk);
4609 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4616 if (!rdev || test_bit(Faulty, &rdev->flags))
4619 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4621 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4622 __builtin_return_address(0),__builtin_return_address(1),
4623 __builtin_return_address(2),__builtin_return_address(3));
4627 if (!mddev->pers->error_handler)
4629 mddev->pers->error_handler(mddev,rdev);
4630 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4631 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4632 md_wakeup_thread(mddev->thread);
4633 md_new_event_inintr(mddev);
4636 /* seq_file implementation for /proc/mdstat */
4638 static void status_unused(struct seq_file *seq)
4642 struct list_head *tmp;
4644 seq_printf(seq, "unused devices: ");
4646 ITERATE_RDEV_PENDING(rdev,tmp) {
4647 char b[BDEVNAME_SIZE];
4649 seq_printf(seq, "%s ",
4650 bdevname(rdev->bdev,b));
4653 seq_printf(seq, "<none>");
4655 seq_printf(seq, "\n");
4659 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4661 sector_t max_blocks, resync, res;
4662 unsigned long dt, db, rt;
4664 unsigned int per_milli;
4666 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4668 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4669 max_blocks = mddev->resync_max_sectors >> 1;
4671 max_blocks = mddev->size;
4674 * Should not happen.
4680 /* Pick 'scale' such that (resync>>scale)*1000 will fit
4681 * in a sector_t, and (max_blocks>>scale) will fit in a
4682 * u32, as those are the requirements for sector_div.
4683 * Thus 'scale' must be at least 10
4686 if (sizeof(sector_t) > sizeof(unsigned long)) {
4687 while ( max_blocks/2 > (1ULL<<(scale+32)))
4690 res = (resync>>scale)*1000;
4691 sector_div(res, (u32)((max_blocks>>scale)+1));
4693 per_milli = res;
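/*
 * Editor's note (worked example, hypothetical numbers): with
 * resync = 300000 and max_blocks = 1000000 blocks, scale stays at 10
 * and
 *
 *	res = (300000 >> 10) * 1000 = 292000
 *	res /= (1000000 >> 10) + 1 = 977   ->  per_milli = 298
 *
 * which is printed below as "29.8%".
 */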
4695 int i, x = per_milli/50, y = 20-x;
4696 seq_printf(seq, "[");
4697 for (i = 0; i < x; i++)
4698 seq_printf(seq, "=");
4699 seq_printf(seq, ">");
4700 for (i = 0; i < y; i++)
4701 seq_printf(seq, ".");
4702 seq_printf(seq, "] ");
4704 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4705 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4707 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
4709 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4710 "resync" : "recovery"))),
4711 per_milli/10, per_milli % 10,
4712 (unsigned long long) resync,
4713 (unsigned long long) max_blocks);
4716 * We do not want to overflow, so the order of operands and
4717 * the * 100 / 100 trick are important. We do a +1 to be
4718 * safe against division by zero. We only estimate anyway.
4720 * dt: time from mark until now
4721 * db: blocks written from mark until now
4722 * rt: remaining time
4724 dt = ((jiffies - mddev->resync_mark) / HZ);
4726 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
4727 - mddev->resync_mark_cnt;
4728 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
4730 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4732 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
4735 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4737 struct list_head *tmp;
4747 spin_lock(&all_mddevs_lock);
4748 list_for_each(tmp,&all_mddevs)
4750 mddev = list_entry(tmp, mddev_t, all_mddevs);
4752 spin_unlock(&all_mddevs_lock);
4755 spin_unlock(&all_mddevs_lock);
4757 return (void*)2; /* tail */
4761 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4763 struct list_head *tmp;
4764 mddev_t *next_mddev, *mddev = v;
4770 spin_lock(&all_mddevs_lock);
4772 tmp = all_mddevs.next;
4774 tmp = mddev->all_mddevs.next;
4775 if (tmp != &all_mddevs)
4776 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4778 next_mddev = (void*)2;
4781 spin_unlock(&all_mddevs_lock);
4789 static void md_seq_stop(struct seq_file *seq, void *v)
4793 if (mddev && v != (void*)1 && v != (void*)2)
4797 struct mdstat_info {
4801 static int md_seq_show(struct seq_file *seq, void *v)
4805 struct list_head *tmp2;
4807 struct mdstat_info *mi = seq->private;
4808 struct bitmap *bitmap;
4810 if (v == (void*)1) {
4811 struct mdk_personality *pers;
4812 seq_printf(seq, "Personalities : ");
4813 spin_lock(&pers_lock);
4814 list_for_each_entry(pers, &pers_list, list)
4815 seq_printf(seq, "[%s] ", pers->name);
4817 spin_unlock(&pers_lock);
4818 seq_printf(seq, "\n");
4819 mi->event = atomic_read(&md_event_count);
4822 if (v == (void*)2) {
4827 if (mddev_lock(mddev) < 0)
4830 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4831 seq_printf(seq, "%s : %sactive", mdname(mddev),
4832 mddev->pers ? "" : "in");
4835 seq_printf(seq, " (read-only)");
4837 seq_printf(seq, "(auto-read-only)");
4838 seq_printf(seq, " %s", mddev->pers->name);
4842 ITERATE_RDEV(mddev,rdev,tmp2) {
4843 char b[BDEVNAME_SIZE];
4844 seq_printf(seq, " %s[%d]",
4845 bdevname(rdev->bdev,b), rdev->desc_nr);
4846 if (test_bit(WriteMostly, &rdev->flags))
4847 seq_printf(seq, "(W)");
4848 if (test_bit(Faulty, &rdev->flags)) {
4849 seq_printf(seq, "(F)");
4851 } else if (rdev->raid_disk < 0)
4852 seq_printf(seq, "(S)"); /* spare */
4856 if (!list_empty(&mddev->disks)) {
4858 seq_printf(seq, "\n %llu blocks",
4859 (unsigned long long)mddev->array_size);
4861 seq_printf(seq, "\n %llu blocks",
4862 (unsigned long long)size);
4864 if (mddev->persistent) {
4865 if (mddev->major_version != 0 ||
4866 mddev->minor_version != 90) {
4867 seq_printf(seq," super %d.%d",
4868 mddev->major_version,
4869 mddev->minor_version);
4872 seq_printf(seq, " super non-persistent");
4875 mddev->pers->status (seq, mddev);
4876 seq_printf(seq, "\n ");
4877 if (mddev->pers->sync_request) {
4878 if (mddev->curr_resync > 2) {
4879 status_resync (seq, mddev);
4880 seq_printf(seq, "\n ");
4881 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4882 seq_printf(seq, "\tresync=DELAYED\n ");
4883 else if (mddev->recovery_cp < MaxSector)
4884 seq_printf(seq, "\tresync=PENDING\n ");
4887 seq_printf(seq, "\n ");
4889 if ((bitmap = mddev->bitmap)) {
4890 unsigned long chunk_kb;
4891 unsigned long flags;
4892 spin_lock_irqsave(&bitmap->lock, flags);
4893 chunk_kb = bitmap->chunksize >> 10;
4894 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4896 bitmap->pages - bitmap->missing_pages,
4898 (bitmap->pages - bitmap->missing_pages)
4899 << (PAGE_SHIFT - 10),
4900 chunk_kb ? chunk_kb : bitmap->chunksize,
4901 chunk_kb ? "KB" : "B");
4903 seq_printf(seq, ", file: ");
4904 seq_path(seq, bitmap->file->f_path.mnt,
4905 bitmap->file->f_path.dentry," \t\n");
4908 seq_printf(seq, "\n");
4909 spin_unlock_irqrestore(&bitmap->lock, flags);
4912 seq_printf(seq, "\n");
4914 mddev_unlock(mddev);
4919 static struct seq_operations md_seq_ops = {
4920 .start = md_seq_start,
4921 .next = md_seq_next,
4922 .stop = md_seq_stop,
4923 .show = md_seq_show,
4926 static int md_seq_open(struct inode *inode, struct file *file)
4929 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4933 error = seq_open(file, &md_seq_ops);
4937 struct seq_file *p = file->private_data;
4939 mi->event = atomic_read(&md_event_count);
4944 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4946 struct seq_file *m = filp->private_data;
4947 struct mdstat_info *mi = m->private;
4950 poll_wait(filp, &md_event_waiters, wait);
4952 /* always allow read */
4953 mask = POLLIN | POLLRDNORM;
4955 if (mi->event != atomic_read(&md_event_count))
4956 mask |= POLLERR | POLLPRI;
4960 static const struct file_operations md_seq_fops = {
4961 .owner = THIS_MODULE,
4962 .open = md_seq_open,
4964 .llseek = seq_lseek,
4965 .release = seq_release_private,
4966 .poll = mdstat_poll,
4969 int register_md_personality(struct mdk_personality *p)
4971 spin_lock(&pers_lock);
4972 list_add_tail(&p->list, &pers_list);
4973 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
4974 spin_unlock(&pers_lock);
4978 int unregister_md_personality(struct mdk_personality *p)
4980 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
4981 spin_lock(&pers_lock);
4982 list_del_init(&p->list);
4983 spin_unlock(&pers_lock);
4987 static int is_mddev_idle(mddev_t *mddev)
4990 struct list_head *tmp;
4992 unsigned long curr_events;
4995 ITERATE_RDEV(mddev,rdev,tmp) {
4996 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
4997 curr_events = disk_stat_read(disk, sectors[0]) +
4998 disk_stat_read(disk, sectors[1]) -
4999 atomic_read(&disk->sync_io);
5000 /* The difference between curr_events and last_events
5001 * will be affected by any new non-sync IO (making
5002 * curr_events bigger) and any difference in the amount of
5003 * in-flight sync IO (making curr_events bigger or smaller)
5004 * The amount in-flight is currently limited to
5005 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
5006 * which is at most 4096 sectors.
5007 * These numbers are fairly fragile and should be made
5008 * more robust, probably by enforcing the
5009 * 'window size' that md_do_sync sort-of uses.
5011 * Note: the following is an unsigned comparison.
5013 if ((curr_events - rdev->last_events + 4096) > 8192) {
5014 rdev->last_events = curr_events;
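/*
 * Editor's note (worked example, hypothetical numbers): if a member
 * disk shows curr_events = 10000 while rdev->last_events = 9000, then
 * 10000 - 9000 + 4096 = 5096 <= 8192, so up to 4096 sectors of drift
 * (the possible in-flight sync IO) still counts as idle; only a larger
 * burst of non-sync IO marks the array busy and throttles the resync.
 */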
5021 void md_done_sync(mddev_t *mddev, int blocks, int ok)
5023 /* another "blocks" 512-byte blocks have been synced */
5024 atomic_sub(blocks, &mddev->recovery_active);
5025 wake_up(&mddev->recovery_wait);
5027 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5028 md_wakeup_thread(mddev->thread);
5029 /* stop recovery, signal do_sync ... */
5034 /* md_write_start(mddev, bi)
5035 * If we need to update some array metadata (e.g. 'active' flag
5036 * in superblock) before writing, schedule a superblock update
5037 * and wait for it to complete.
5039 void md_write_start(mddev_t *mddev, struct bio *bi)
5041 if (bio_data_dir(bi) != WRITE)
5044 BUG_ON(mddev->ro == 1);
5045 if (mddev->ro == 2) {
5046 /* need to switch to read/write */
5048 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5049 md_wakeup_thread(mddev->thread);
5051 atomic_inc(&mddev->writes_pending);
5052 if (mddev->in_sync) {
5053 spin_lock_irq(&mddev->write_lock);
5054 if (mddev->in_sync) {
5056 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5057 md_wakeup_thread(mddev->thread);
5059 spin_unlock_irq(&mddev->write_lock);
5061 wait_event(mddev->sb_wait, mddev->flags==0);
5064 void md_write_end(mddev_t *mddev)
5066 if (atomic_dec_and_test(&mddev->writes_pending)) {
5067 if (mddev->safemode == 2)
5068 md_wakeup_thread(mddev->thread);
5069 else if (mddev->safemode_delay)
5070 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
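/*
 * Editor's note (illustrative sketch, not a real call site): a
 * personality brackets each array write with this pair, roughly:
 *
 *	md_write_start(mddev, bio);	marks the array active, may block
 *					waiting for the superblock update
 *	... submit the translated bios to the member disks ...
 *	md_write_end(mddev);		the last writer out arms the
 *					safemode timer so the array can
 *					go back to 'clean'
 */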
5074 /* md_allow_write(mddev)
5075 * Calling this ensures that the array is marked 'active' so that writes
5076 * may proceed without blocking. It is important to call this before
5077 * attempting a GFP_KERNEL allocation while holding the mddev lock.
5078 * Must be called with mddev_lock held.
5080 void md_allow_write(mddev_t *mddev)
5087 spin_lock_irq(&mddev->write_lock);
5088 if (mddev->in_sync) {
5090 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5091 if (mddev->safemode_delay &&
5092 mddev->safemode == 0)
5093 mddev->safemode = 1;
5094 spin_unlock_irq(&mddev->write_lock);
5095 md_update_sb(mddev, 0);
5097 spin_unlock_irq(&mddev->write_lock);
5099 EXPORT_SYMBOL_GPL(md_allow_write);
5101 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
5103 #define SYNC_MARKS 10
5104 #define SYNC_MARK_STEP (3*HZ)
5105 void md_do_sync(mddev_t *mddev)
5108 unsigned int currspeed = 0,
5110 sector_t max_sectors,j, io_sectors;
5111 unsigned long mark[SYNC_MARKS];
5112 sector_t mark_cnt[SYNC_MARKS];
5114 struct list_head *tmp;
5115 sector_t last_check;
5117 struct list_head *rtmp;
5121 /* just in case the thread restarts... */
5122 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5124 if (mddev->ro) /* never try to sync a read-only array */
5127 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5128 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5129 desc = "data-check";
5130 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5131 desc = "requested-resync";
5134 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5139 /* we overload curr_resync somewhat here.
5140 * 0 == not engaged in resync at all
5141 * 2 == checking that there is no conflict with another sync
5142 * 1 == like 2, but have yielded to allow conflicting resync to
5143 *      pass
5144 * other == active in resync - this many blocks
5146 * Before starting a resync we must have set curr_resync to
5147 * 2, and then checked that every "conflicting" array has curr_resync
5148 * less than ours. When we find one that is the same or higher
5149 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
5150 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
5151 * This will mean we have to start checking from the beginning again.
5156 mddev->curr_resync = 2;
5159 if (kthread_should_stop()) {
5160 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5163 ITERATE_MDDEV(mddev2,tmp) {
5164 if (mddev2 == mddev)
5166 if (mddev2->curr_resync &&
5167 match_mddev_units(mddev,mddev2)) {
5169 if (mddev < mddev2 && mddev->curr_resync == 2) {
5170 /* arbitrarily yield */
5171 mddev->curr_resync = 1;
5172 wake_up(&resync_wait);
5174 if (mddev > mddev2 && mddev->curr_resync == 1)
5175 /* no need to wait here, we can wait the next
5176 * time 'round when curr_resync == 2
5179 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5180 if (!kthread_should_stop() &&
5181 mddev2->curr_resync >= mddev->curr_resync) {
5182 printk(KERN_INFO "md: delaying %s of %s"
5183 " until %s has finished (they"
5184 " share one or more physical units)\n",
5185 desc, mdname(mddev), mdname(mddev2));
5188 finish_wait(&resync_wait, &wq);
5191 finish_wait(&resync_wait, &wq);
5194 } while (mddev->curr_resync < 2);
	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (!mddev->bitmap &&
		    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->recovery_cp;
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->size << 1;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->size << 1;
		j = MaxSector;
		ITERATE_RDEV(mddev,rdev,rtmp)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
	}
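
	/* Unit note (added commentary): mddev->size is in 1K blocks, so
	 * "<< 1" converts it to 512-byte sectors, matching max_sectors
	 * and the rdev recovery_offset values used above.
	 */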
	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ speed:"
	       " %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev); /* this also initializes IO event counters */
	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
	       window/2, (unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	init_waitqueue_head(&mddev->recovery_wait);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}
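
	/*
	 * (Added commentary:) mark[]/mark_cnt[] form a ring of SYNC_MARKS
	 * timestamped I/O counts, advanced every SYNC_MARK_STEP jiffies in
	 * the loop below, so the currspeed computation averages over
	 * roughly the last SYNC_MARKS * SYNC_MARK_STEP = 30 seconds.
	 */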
	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;
		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (kthread_should_stop()) {
			/*
			 * got a signal, exit.
			 */
			printk(KERN_INFO
			       "md: md_do_sync() got signal ... exiting\n");
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}
		/*
		 * this loop exits only if we are slower than the 'hard'
		 * speed limit, or the system was IO-idle for a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		mddev->queue->unplug_fn(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;
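
		/*
		 * Unit note (added commentary): io_sectors and
		 * resync_mark_cnt count 512-byte sectors, so "/2" yields
		 * KiB; dividing by elapsed seconds ("+1" avoids division
		 * by zero) gives KB/sec, directly comparable with
		 * speed_min()/speed_max().
		 */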
		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
			    !is_mddev_idle(mddev)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	mddev->queue->unplug_fn(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			ITERATE_RDEV(mddev,rdev,rtmp)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	mddev->curr_resync = 0;
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;
	int spares = 0;

	ITERATE_RDEV(mddev,rdev,rtmp)
		if (rdev->raid_disk >= 0 &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {
				char nm[20];
				sprintf(nm,"rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded) {
		ITERATE_RDEV(mddev,rdev,rtmp)
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->hot_add_disk(mddev,rdev)) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						printk(KERN_WARNING
						       "md: cannot register "
						       "%s for %s\n",
						       nm, mdname(mddev));
					spares++;
					md_new_event(mddev);
				} else
					break;
			}
	}
	return spares;
}
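
/*
 * (Added commentary:) the return value is the number of spare devices
 * successfully hot-added; md_check_recovery() below uses a non-zero
 * result to choose recovery over a plain resync.
 */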
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set
 * MD_RECOVERY_ERR) and wakes up this thread, which will reap the thread
 * and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If the array is degraded, try to add spare devices.
 *  6/ If the array has spares or is not in-sync, start a resync thread.
 */
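/*
 * (Added commentary, not in the original source:) in practice this is
 * called from each personality's daemon loop -- e.g. raid1d and raid5d
 * invoke md_check_recovery() on every wakeup -- so the early-exit tests
 * below keep the common "nothing to do" case cheap.
 */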
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (mddev->ro)
		return;

	if (signal_pending(current)) {
		if (mddev->pers->sync_request) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if ( ! (
		mddev->flags ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		spin_lock_irq(&mddev->write_lock);
		if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
		    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
			mddev->in_sync = 1;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		}
		if (mddev->safemode == 1)
			mddev->safemode = 0;
		spin_unlock_irq(&mddev->write_lock);

		if (mddev->flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				/* success... activate any spares */
				mddev->pers->spare_active(mddev);
			}
			md_update_sb(mddev, 1);

			/* if array is no longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				ITERATE_RDEV(mddev,rdev,rtmp)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_new_event(mddev);
			goto unlock;
		}
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			md_new_event(mddev);
		}
	unlock:
		mddev_unlock(mddev);
	}
}
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		ITERATE_MDDEV(mddev,tmp)
			if (mddev_trylock(mddev)) {
				do_md_stop (mddev, 1);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	struct proc_dir_entry *p;

	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	p = create_proc_entry("mdstat", S_IRUGO, NULL);
	if (p)
		p->proc_fops = &md_seq_fops;
}
static int __init md_init(void)
{
	if (register_blkdev(MAJOR_NR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MAJOR_NR, "md");
		return -1;
	}
	blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
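
	/*
	 * (Added commentary:) blk_register_region() installs md_probe()
	 * as the probe callback for every minor under each major, so the
	 * gendisk for an array is created lazily on first open of the
	 * corresponding device node rather than at module load.
	 */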
	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return (0);
}

#ifndef MODULE
/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */
static dev_t detected_devices[128];
static int dev_cnt;

void md_autodetect_dev(dev_t dev)
{
	if (dev_cnt >= 0 && dev_cnt < 127)
		detected_devices[dev_cnt++] = dev;
}
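
/*
 * (Added commentary:) note the asymmetry above: detected_devices[] has
 * 128 slots but the guard stops at 127 entries, so the last slot is
 * never used and any devices detected beyond that are silently dropped.
 */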
static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	int i;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	for (i = 0; i < dev_cnt; i++) {
		dev_t dev = detected_devices[i];

		rdev = md_import_device(dev,0, 0);
		if (IS_ERR(rdev))
			continue;
		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		list_add(&rdev->same_set, &pending_raid_disks);
	}
	dev_cnt = 0;

	autorun_devices(part);
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MAJOR_NR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	ITERATE_MDDEV(mddev,tmp) {
		struct gendisk *disk = mddev->gendisk;
		if (!disk)
			continue;
		export_array(mddev);
		del_gendisk(disk);
		put_disk(disk);
		mddev->gendisk = NULL;
		mddev_put(mddev);
	}
}
module_init(md_init)
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
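
/*
 * (Added commentary; paths assumed rather than taken from this file:)
 * when md is built as a module (md-mod.ko), these parameters appear as
 * /sys/module/md_mod/parameters/start_ro and .../start_dirty_degraded,
 * and can be set at load time, e.g. "modprobe md-mod start_ro=1".
 */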
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);