/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/kmod.h>

#include <asm/unaligned.h>
#define MAJOR_NR MD_MAJOR

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))

static void autostart_arrays(int part);

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change them via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
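
/*
 * A minimal sketch of how the resync loop is expected to honour these
 * limits (simplified from md_do_sync() elsewhere in this file;
 * illustrative only).  'currspeed' is the measured resync rate in
 * KB/sec:
 *
 *	if (currspeed > speed_min(mddev)) {
 *		if ((currspeed > speed_max(mddev)) ||
 *		    !is_mddev_idle(mddev)) {
 *			msleep(500);		back off, then retry
 *			goto repeat;
 *		}
 *	}
 */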
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;

static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};
static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
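
/*
 * Userspace side of that interface, as a sketch (not part of this
 * driver; mdadm does something similar).  Assumes the usual proc-file
 * poll semantics where an event is signalled via POLLPRI:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	char buf[4096];
 *
 *	read(fd, buf, sizeof(buf));		consume current state
 *	while (poll(&pfd, 1, -1) > 0) {		wake on md_event_count
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));	re-read the new state
 *	}
 */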
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
	sysfs_notify(&mddev->kobj, NULL, "sync_action");
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Lets us iterate over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define ITERATE_MDDEV(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
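
/*
 * A usage sketch (hypothetical helper, not in the original source):
 * the get/put dance above means the loop body runs without the
 * spinlock held, so it may sleep.
 *
 *	static int count_running_arrays(void)
 *	{
 *		mddev_t *mddev;
 *		struct list_head *tmp;
 *		int cnt = 0;
 *
 *		ITERATE_MDDEV(mddev,tmp) {
 *			if (mddev->pers)	personality set => running
 *				cnt++;
 *		}
 *		return cnt;
 *	}
 */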
static int md_fail_request(request_queue_t *q, struct bio *bio)
{
	bio_io_error(bio, bio->bi_size);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		spin_unlock(&all_mddevs_lock);
		blk_cleanup_queue(mddev->queue);
		kobject_unregister(&mddev->kobj);
	} else
		spin_unlock(&all_mddevs_lock);
}
static mddev_t *mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}
	set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}
static inline int mddev_lock(mddev_t *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t *mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t *find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}

static mdk_rdev_t *find_rdev(mddev_t *mddev, dev_t dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t size;

	size = rdev->sb_offset;

	if (chunk_size)
		size &= ~((sector_t)chunk_size/1024 - 1);
	return size;
}

static int alloc_disk_sb(mdk_rdev_t *rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -EINVAL;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_offset = 0;
		rdev->size = 0;
	}
}
static int super_written(struct bio *bio, unsigned int bytes_done, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
		md_error(mddev, rdev);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
	return 0;
}

static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
		return 0;
	}
	bio_put(bio2);
	bio->bi_private = rdev;
	return super_written(bio, bytes_done, error);
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	complete((struct completion*)bio->bi_private);
	return 0;
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t *rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	if ((sb1->set_uuid0 == sb2->set_uuid0) &&
	    (sb1->set_uuid1 == sb2->set_uuid1) &&
	    (sb1->set_uuid2 == sb2->set_uuid2) &&
	    (sb1->set_uuid3 == sb2->set_uuid3))
		return 1;
	return 0;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
		ret = 0;
	else
		ret = 1;

abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
	sb->sb_csum = disk_csum;
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */

struct super_type  {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};
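
/*
 * Dispatch through this table is always indexed by the array's major
 * metadata version, e.g. (as analyze_sbs() and md_update_sb() below do):
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 */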
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;
	sector_t sb_offset;

	/*
	 * Calculate the position of the superblock,
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	sb_offset = calc_dev_sboffset(rdev->bdev);
	rdev->sb_offset = sb_offset;

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->size = calc_dev_size(rdev, sb->chunk_size);

	if (rdev->size < sb->size && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->persistent = ! sb->not_persistent;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk = sb->new_chunk;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL) {
			if (mddev->level != 1 && mddev->level != 4
			    && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				/* FIXME use a better test */
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = mddev->default_bitmap_offset;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling */
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0, failed=0, spare=0, nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = !mddev->persistent;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	ITERATE_RDEV(mddev,rdev2,tmp) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags)) {
			d->state = (1<<MD_DISK_FAULTY);
			failed++;
		} else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	unsigned int disk_csum, csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	unsigned int *isuper = (unsigned int*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(unsigned short*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_offset;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
		sb_offset -= 8*2;
		sb_offset &= ~(sector_t)(4*2-1);
		/* convert from sectors to K */
		sb_offset /= 2;
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_offset = sb_offset;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask)+1;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

	if (le32_to_cpu(sb->size) > rdev->size*2)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->persistent = 1;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
		}
		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling */
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int max_dev, i;

	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->size<<1);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags) &&
	    rdev->recovery_offset > 0) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
	}

	max_dev = 0;
	ITERATE_RDEV(mddev,rdev2,tmp)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	ITERATE_RDEV(mddev,rdev2,tmp) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	= super_90_load,
		.validate_super	= super_90_validate,
		.sync_super	= super_90_sync,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	= super_1_load,
		.validate_super	= super_1_validate,
		.sync_super	= super_1_sync,
	},
};
static mdk_rdev_t *match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp)
		if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
			return rdev;

	return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev1,rdev,tmp)
		if (match_dev_unit(mddev2, rdev))
			return 1;

	return 0;
}

static LIST_HEAD(pending_raid_disks);
static int bind_rdev_to_array(mdk_rdev_t *rdev, mddev_t *mddev)
{
	mdk_rdev_t *same_pdev;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}
	/* make sure rdev->size exceeds mddev->size */
	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
		if (mddev->pers)
			/* Cannot change size, so fail */
			return -ENOSPC;
		else
			mddev->size = rdev->size;
	}
	same_pdev = match_dev_unit(mddev, rdev);
	if (same_pdev)
		printk(KERN_WARNING
			"%s: WARNING: %s appears to be on the same physical"
			" disk as %s.\n"
			"True protection against single-disk failure might be"
			" compromised.\n",
			mdname(mddev), bdevname(rdev->bdev,b),
			bdevname(same_pdev->bdev,b2));

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
		return -ENOMEM;
	while ((s=strchr(rdev->kobj.k_name, '/')) != NULL)
		*s = '!';

	list_add(&rdev->same_set, &mddev->disks);
	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	rdev->kobj.parent = &mddev->kobj;
	kobject_add(&rdev->kobj);

	if (rdev->bdev->bd_part)
		ko = &rdev->bdev->bd_part->kobj;
	else
		ko = &rdev->bdev->bd_disk->kobj;
	sysfs_create_link(&rdev->kobj, ko, "block");
	bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
	return 0;
}
static void unbind_rdev_from_array(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_init(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	kobject_del(&rdev->kobj);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_partition_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put_partition(bdev);
		return err;
	}
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put_partition(bdev);
}
void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
	list_del_init(&rdev->same_set);
#ifndef MODULE
	md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t *rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}

static void print_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock:\n");
		print_sb((mdp_super_t*)page_address(rdev->sb_page));
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	ITERATE_MDDEV(mddev,tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		ITERATE_RDEV(mddev,rdev,tmp2)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		ITERATE_RDEV(mddev,rdev,tmp2)
			print_rdev(rdev);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t *mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t *mddev, int force_change)
{
	int err;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * it anyway.
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1))
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, insist on an odd 'events' */
			if ((mddev->events&1)==0) {
				mddev->events++;
				nospares = 0;
			}
		} else {
			/* otherwise insist on an even 'events' (for clean states) */
			if ((mddev->events&1)) {
				mddev->events++;
				nospares = 0;
			}
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
	sync_sbs(mddev, nospares);

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		clear_bit(MD_CHANGE_PENDING, &mddev->flags);
		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	err = bitmap_update_sb(mddev->bitmap);
	ITERATE_RDEV(mddev,rdev,tmp) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
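
/*
 * So, for example, cmd_match("idle\n", "idle") and cmd_match("idle",
 * "idle") both return 1, while cmd_match("idle2", "idle") returns 0.
 */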
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	int len=0;
	if (test_bit(Faulty, &rdev->flags)) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	}
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
static ssize_t
super_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->sb_loaded && rdev->sb_size) {
		memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
		return rdev->sb_size;
	} else
		return 0;
}
static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);

static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers)
		/* Cannot set slot in active array (yet) */
		return -EBUSY;
	if (slot >= rdev->mddev->raid_disks)
		return -ENOSPC;
	rdev->raid_disk = slot;
	/* assume it is working */
	rdev->flags = 0;
	set_bit(In_sync, &rdev->flags);
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers)
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);

static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
}

static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long size = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers)
		return -EBUSY;
	rdev->size = size;
	if (size < rdev->mddev->size || rdev->mddev->size == 0)
		rdev->mddev->size = size;
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_super.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(rdev, page);
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	return entry->store(rdev, page, length);
}

static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev);
	if (err)
		goto abort_free;

	rdev->kobj.parent = NULL;
	rdev->kobj.ktype = &rdev_ktype;
	kobject_init(&rdev->kobj);

	rdev->desc_nr = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s has invalid sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	INIT_LIST_HEAD(&rdev->same_set);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */

static void analyze_sbs(mddev_t *mddev)
{
	int i;
	struct list_head *tmp;
	mdk_rdev_t *rdev, *freshest;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	ITERATE_RDEV(mddev,rdev,tmp)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk(KERN_ERR
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		}
	}

	if (mddev->recovery_cp != MaxSector &&
	    mddev->level >= 1)
		printk(KERN_ERR "md: %s: raid array is not clean"
			" -- starting background reconstruction\n",
			mdname(mddev));
}
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
	int scale=1;
	int dot=0;
	int i;
	unsigned long msec;
	char buf[30];
	char *e;
	/* remove a period, and count digits after it */
	if (len >= sizeof(buf))
		return -EINVAL;
	strlcpy(buf, cbuf, len);
	buf[len] = 0;
	for (i=0; i<len; i++) {
		if (dot) {
			if (isdigit(buf[i])) {
				buf[i-1] = buf[i];
				scale *= 10;
			}
			buf[i] = 0;
		} else if (buf[i] == '.') {
			dot=1;
			buf[i] = 0;
		}
	}
	msec = simple_strtoul(buf, &e, 10);
	if (e == buf || (*e && *e != '\n'))
		return -EINVAL;
	msec = (msec * 1000) / scale;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
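
/*
 * Worked example: writing "0.350" shuffles the digits after the period
 * down so buf becomes "0350" with scale == 1000, giving
 * msec = (350 * 1000) / 1000 = 350, i.e. a safemode_delay of 350ms
 * worth of jiffies (clamped to at least 1 jiffy).
 */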
static ssize_t
level_show(mddev_t *mddev, char *page)
{
	struct mdk_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
	int rv = len;
	if (mddev->pers)
		return -EBUSY;
	if (len == 0)
		return 0;
	if (len >= sizeof(mddev->clevel))
		return -ENOSPC;
	strncpy(mddev->clevel, buf, len);
	if (mddev->clevel[len-1] == '\n')
		len--;
	mddev->clevel[len] = 0;
	mddev->level = LEVEL_NONE;
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (mddev->pers)
		return -EBUSY;

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->layout = n;
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set raid_disks if array is not yet active */
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->chunk_size);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set chunk_size if array is not yet active */
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->chunk_size = n;
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set resync_start if array is not yet active */
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->size == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}
static int do_md_stop(mddev_t *mddev, int ro);
static int do_md_run(mddev_t *mddev);
static int restart_array(mddev_t *mddev);

static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->active) > 1)
				return -EBUSY;
			err = do_md_stop(mddev, 0);
		}
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->active) > 1)
				return -EBUSY;
			err = do_md_stop(mddev, 2);
		}
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = do_md_stop(mddev, 1);
		else {
			mddev->ro = 1;
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		/* stopping an active array */
		if (mddev->pers) {
			err = do_md_stop(mddev, 1);
			if (err == 0)
				mddev->ro = 2; /* FIXME mark devices writable */
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				mddev->in_sync = 1;
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			err = 0;
			spin_unlock_irq(&mddev->write_lock);
		} else {
			mddev->ro = 0;
			mddev->recovery_cp = MaxSector;
			err = do_md_run(mddev);
		}
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else
		return len;
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
null_show(mddev_t *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	mdk_rdev_t *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
						       mdk_rdev_t, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
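
/*
 * Example: "echo 8:16 > /sys/block/md0/md/new_dev" adds the whole disk
 * with major 8, minor 16 (conventionally /dev/sdb) to md0, reading and
 * validating its superblock first when the array metadata is
 * persistent.
 */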
static ssize_t
bitmap_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;

	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = end;
		while (isspace(*buf)) buf++;
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
2557 size_show(mddev_t *mddev, char *page)
2559 return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2562 static int update_size(mddev_t *mddev, unsigned long size);
2565 size_store(mddev_t *mddev, const char *buf, size_t len)
2567 /* If array is inactive, we can reduce the component size, but
2568 * not increase it (except from 0).
2569 * If array is active, we can try an on-line resize
2573 unsigned long long size = simple_strtoull(buf, &e, 10);
2574 if (!*buf || *buf == '\n' ||
2579 err = update_size(mddev, size);
2580 md_update_sb(mddev, 1);
2582 if (mddev->size == 0 ||
2588 return err ? err : len;
2591 static struct md_sysfs_entry md_size =
2592 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
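/* Example (illustrative): the value is a per-component size in KB;
 * writing 0 asks for the largest size that fits:
 *     echo 1048576 > /sys/block/md0/md/component_size
 */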
2596 * This is either 'none' for arrays with externally managed metadata,
2597 * or N.M for internally known formats
2600 metadata_show(mddev_t *mddev, char *page)
2602 if (mddev->persistent)
2603 return sprintf(page, "%d.%d\n",
2604 mddev->major_version, mddev->minor_version);
2606 return sprintf(page, "none\n");
2610 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2614 if (!list_empty(&mddev->disks))
2617 if (cmd_match(buf, "none")) {
2618 mddev->persistent = 0;
2619 mddev->major_version = 0;
2620 mddev->minor_version = 90;
2623 major = simple_strtoul(buf, &e, 10);
2624 if (e==buf || *e != '.')
2627 minor = simple_strtoul(buf, &e, 10);
2628 if (e==buf || *e != '\n')
2630 if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2631 super_types[major].name == NULL)
2633 mddev->major_version = major;
2634 mddev->minor_version = minor;
2635 mddev->persistent = 1;
2639 static struct md_sysfs_entry md_metadata =
2640 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
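/* Example (illustrative): on an array that has not yet been given any
 * devices, the metadata format can be declared or cleared:
 *     echo 0.90 > /sys/block/md0/md/metadata_version
 *     echo none > /sys/block/md0/md/metadata_version
 */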
2643 action_show(mddev_t *mddev, char *page)
2645 char *type = "idle";
2646 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2647 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2648 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2650 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2651 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2653 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2660 return sprintf(page, "%s\n", type);
2664 action_store(mddev_t *mddev, const char *page, size_t len)
2666 if (!mddev->pers || !mddev->pers->sync_request)
2669 if (cmd_match(page, "idle")) {
2670 if (mddev->sync_thread) {
2671 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2672 md_unregister_thread(mddev->sync_thread);
2673 mddev->sync_thread = NULL;
2674 mddev->recovery = 0;
2676 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2677 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2679 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2680 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2681 else if (cmd_match(page, "reshape")) {
2683 if (mddev->pers->start_reshape == NULL)
2685 err = mddev->pers->start_reshape(mddev);
2689 if (cmd_match(page, "check"))
2690 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2691 else if (!cmd_match(page, "repair"))
2693 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2694 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2696 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2697 md_wakeup_thread(mddev->thread);
2702 mismatch_cnt_show(mddev_t *mddev, char *page)
2704 return sprintf(page, "%llu\n",
2705 (unsigned long long) mddev->resync_mismatches);
2708 static struct md_sysfs_entry md_scan_mode =
2709 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
2712 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
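/* Example (illustrative): start a background consistency check and
 * read back the mismatch count afterwards:
 *     echo check > /sys/block/md0/md/sync_action
 *     cat /sys/block/md0/md/mismatch_cnt
 */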
2715 sync_min_show(mddev_t *mddev, char *page)
2717 return sprintf(page, "%d (%s)\n", speed_min(mddev),
2718 mddev->sync_speed_min ? "local": "system");
2722 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2726 if (strncmp(buf, "system", 6)==0) {
2727 mddev->sync_speed_min = 0;
2730 min = simple_strtoul(buf, &e, 10);
2731 if (buf == e || (*e && *e != '\n') || min <= 0)
2733 mddev->sync_speed_min = min;
2737 static struct md_sysfs_entry md_sync_min =
2738 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2741 sync_max_show(mddev_t *mddev, char *page)
2743 return sprintf(page, "%d (%s)\n", speed_max(mddev),
2744 mddev->sync_speed_max ? "local": "system");
2748 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2752 if (strncmp(buf, "system", 6)==0) {
2753 mddev->sync_speed_max = 0;
2756 max = simple_strtoul(buf, &e, 10);
2757 if (buf == e || (*e && *e != '\n') || max <= 0)
2759 mddev->sync_speed_max = max;
2763 static struct md_sysfs_entry md_sync_max =
2764 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
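/* Example (illustrative): give this array a local 5000 KB/sec minimum
 * and fall back to the system-wide maximum:
 *     echo 5000   > /sys/block/md0/md/sync_speed_min
 *     echo system > /sys/block/md0/md/sync_speed_max
 */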
2768 sync_speed_show(mddev_t *mddev, char *page)
2770 unsigned long resync, dt, db;
2771 resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
2772 dt = ((jiffies - mddev->resync_mark) / HZ);
2774 db = resync - (mddev->resync_mark_cnt);
2775 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2778 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
2781 sync_completed_show(mddev_t *mddev, char *page)
2783 unsigned long max_blocks, resync;
2785 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2786 max_blocks = mddev->resync_max_sectors;
2788 max_blocks = mddev->size << 1;
2790 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2791 return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2794 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
2797 suspend_lo_show(mddev_t *mddev, char *page)
2799 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2803 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2806 unsigned long long new = simple_strtoull(buf, &e, 10);
2808 if (mddev->pers->quiesce == NULL)
2810 if (buf == e || (*e && *e != '\n'))
2812 if (new >= mddev->suspend_hi ||
2813 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2814 mddev->suspend_lo = new;
2815 mddev->pers->quiesce(mddev, 2);
2820 static struct md_sysfs_entry md_suspend_lo =
2821 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2825 suspend_hi_show(mddev_t *mddev, char *page)
2827 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2831 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2834 unsigned long long new = simple_strtoull(buf, &e, 10);
2836 if (mddev->pers->quiesce == NULL)
2838 if (buf == e || (*e && *e != '\n'))
2840 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2841 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2842 mddev->suspend_hi = new;
2843 mddev->pers->quiesce(mddev, 1);
2844 mddev->pers->quiesce(mddev, 0);
2849 static struct md_sysfs_entry md_suspend_hi =
2850 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
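/* Example (illustrative): suspend I/O to sectors [1000, 2000) by
 * raising the lower bound first, then the upper bound:
 *     echo 1000 > /sys/block/md0/md/suspend_lo
 *     echo 2000 > /sys/block/md0/md/suspend_hi
 */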
2853 static struct attribute *md_default_attrs[] = {
2856 &md_raid_disks.attr,
2857 &md_chunk_size.attr,
2859 &md_resync_start.attr,
2861 &md_new_device.attr,
2862 &md_safe_delay.attr,
2863 &md_array_state.attr,
2867 static struct attribute *md_redundancy_attrs[] = {
2869 &md_mismatches.attr,
2872 &md_sync_speed.attr,
2873 &md_sync_completed.attr,
2874 &md_suspend_lo.attr,
2875 &md_suspend_hi.attr,
2879 static struct attribute_group md_redundancy_group = {
2881 .attrs = md_redundancy_attrs,
2886 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2888 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2889 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2894 rv = mddev_lock(mddev);
2896 rv = entry->show(mddev, page);
2897 mddev_unlock(mddev);
2903 md_attr_store(struct kobject *kobj, struct attribute *attr,
2904 const char *page, size_t length)
2906 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2907 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2912 if (!capable(CAP_SYS_ADMIN))
2914 rv = mddev_lock(mddev);
2916 rv = entry->store(mddev, page, length);
2917 mddev_unlock(mddev);
2922 static void md_free(struct kobject *ko)
2924 mddev_t *mddev = container_of(ko, mddev_t, kobj);
2928 static struct sysfs_ops md_sysfs_ops = {
2929 .show = md_attr_show,
2930 .store = md_attr_store,
2932 static struct kobj_type md_ktype = {
2934 .sysfs_ops = &md_sysfs_ops,
2935 .default_attrs = md_default_attrs,
2940 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2942 static DEFINE_MUTEX(disks_mutex);
2943 mddev_t *mddev = mddev_find(dev);
2944 struct gendisk *disk;
2945 int partitioned = (MAJOR(dev) != MD_MAJOR);
2946 int shift = partitioned ? MdpMinorShift : 0;
2947 int unit = MINOR(dev) >> shift;
2952 mutex_lock(&disks_mutex);
2953 if (mddev->gendisk) {
2954 mutex_unlock(&disks_mutex);
2958 disk = alloc_disk(1 << shift);
2960 mutex_unlock(&disks_mutex);
2964 disk->major = MAJOR(dev);
2965 disk->first_minor = unit << shift;
2967 sprintf(disk->disk_name, "md_d%d", unit);
2969 sprintf(disk->disk_name, "md%d", unit);
2970 disk->fops = &md_fops;
2971 disk->private_data = mddev;
2972 disk->queue = mddev->queue;
2974 mddev->gendisk = disk;
2975 mutex_unlock(&disks_mutex);
2976 mddev->kobj.parent = &disk->kobj;
2977 mddev->kobj.k_name = NULL;
2978 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2979 mddev->kobj.ktype = &md_ktype;
2980 kobject_register(&mddev->kobj);
2984 static void md_safemode_timeout(unsigned long data)
2986 mddev_t *mddev = (mddev_t *) data;
2988 mddev->safemode = 1;
2989 md_wakeup_thread(mddev->thread);
2992 static int start_dirty_degraded;
2994 static int do_md_run(mddev_t * mddev)
2998 struct list_head *tmp;
3000 struct gendisk *disk;
3001 struct mdk_personality *pers;
3002 char b[BDEVNAME_SIZE];
3004 if (list_empty(&mddev->disks))
3005 /* cannot run an array with no devices.. */
3012 * Analyze all RAID superblock(s)
3014 if (!mddev->raid_disks)
3017 chunk_size = mddev->chunk_size;
3020 if (chunk_size > MAX_CHUNK_SIZE) {
3021 printk(KERN_ERR "too big chunk_size: %d > %d\n",
3022 chunk_size, MAX_CHUNK_SIZE);
3026 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
3028 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3029 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3032 if (chunk_size < PAGE_SIZE) {
3033 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3034 chunk_size, PAGE_SIZE);
3038 /* devices must have minimum size of one chunk */
3039 ITERATE_RDEV(mddev,rdev,tmp) {
3040 if (test_bit(Faulty, &rdev->flags))
3042 if (rdev->size < chunk_size / 1024) {
3044 "md: Dev %s smaller than chunk_size:"
3046 bdevname(rdev->bdev,b),
3047 (unsigned long long)rdev->size,
3055 if (mddev->level != LEVEL_NONE)
3056 request_module("md-level-%d", mddev->level);
3057 else if (mddev->clevel[0])
3058 request_module("md-%s", mddev->clevel);
3062 * Drop all container device buffers, from now on
3063 * the only valid external interface is through the md device.
3065 * Also find largest hardsector size
3067 ITERATE_RDEV(mddev,rdev,tmp) {
3068 if (test_bit(Faulty, &rdev->flags))
3070 sync_blockdev(rdev->bdev);
3071 invalidate_bdev(rdev->bdev, 0);
3074 md_probe(mddev->unit, NULL, NULL);
3075 disk = mddev->gendisk;
3079 spin_lock(&pers_lock);
3080 pers = find_pers(mddev->level, mddev->clevel);
3081 if (!pers || !try_module_get(pers->owner)) {
3082 spin_unlock(&pers_lock);
3083 if (mddev->level != LEVEL_NONE)
3084 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3087 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3092 spin_unlock(&pers_lock);
3093 mddev->level = pers->level;
3094 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3096 if (mddev->reshape_position != MaxSector &&
3097 pers->start_reshape == NULL) {
3098 /* This personality cannot handle reshaping... */
3100 module_put(pers->owner);
3104 mddev->recovery = 0;
3105 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3106 mddev->barriers_work = 1;
3107 mddev->ok_start_degraded = start_dirty_degraded;
3110 mddev->ro = 2; /* read-only, but switch on first write */
3112 err = mddev->pers->run(mddev);
3113 if (!err && mddev->pers->sync_request) {
3114 err = bitmap_create(mddev);
3116 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3117 mdname(mddev), err);
3118 mddev->pers->stop(mddev);
3122 printk(KERN_ERR "md: pers->run() failed ...\n");
3123 module_put(mddev->pers->owner);
3125 bitmap_destroy(mddev);
3128 if (mddev->pers->sync_request)
3129 sysfs_create_group(&mddev->kobj, &md_redundancy_group);
3130 else if (mddev->ro == 2) /* auto-readonly not meaningful */
3133 atomic_set(&mddev->writes_pending,0);
3134 mddev->safemode = 0;
3135 mddev->safemode_timer.function = md_safemode_timeout;
3136 mddev->safemode_timer.data = (unsigned long) mddev;
3137 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3140 ITERATE_RDEV(mddev,rdev,tmp)
3141 if (rdev->raid_disk >= 0) {
3143 sprintf(nm, "rd%d", rdev->raid_disk);
3144 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
3147 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3150 md_update_sb(mddev, 0);
3152 set_capacity(disk, mddev->array_size<<1);
3154 /* If we call blk_queue_make_request here, it will
3155 * re-initialise max_sectors etc which may have been
3156 * refined inside ->run(). So just set the bits we need to set.
3157 * Most initialisation happened when we called
3158 * blk_queue_make_request(..., md_fail_request)
3161 mddev->queue->queuedata = mddev;
3162 mddev->queue->make_request_fn = mddev->pers->make_request;
3164 /* If there is a partially-recovered drive we need to
3165 * start recovery here. If we leave it to md_check_recovery,
3166 * it will remove the drives and not do the right thing
3168 if (mddev->degraded && !mddev->sync_thread) {
3169 struct list_head *rtmp;
3171 ITERATE_RDEV(mddev,rdev,rtmp)
3172 if (rdev->raid_disk >= 0 &&
3173 !test_bit(In_sync, &rdev->flags) &&
3174 !test_bit(Faulty, &rdev->flags))
3175 /* complete an interrupted recovery */
3177 if (spares && mddev->pers->sync_request) {
3178 mddev->recovery = 0;
3179 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3180 mddev->sync_thread = md_register_thread(md_do_sync,
3183 if (!mddev->sync_thread) {
3184 printk(KERN_ERR "%s: could not start resync"
3187 /* leave the spares where they are, it shouldn't hurt */
3188 mddev->recovery = 0;
3192 md_wakeup_thread(mddev->thread);
3193 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3196 md_new_event(mddev);
3200 static int restart_array(mddev_t *mddev)
3202 struct gendisk *disk = mddev->gendisk;
3206 * Complain if it has no devices
3209 if (list_empty(&mddev->disks))
3217 mddev->safemode = 0;
3219 set_disk_ro(disk, 0);
3221 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3224 * Kick recovery or resync if necessary
3226 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3227 md_wakeup_thread(mddev->thread);
3228 md_wakeup_thread(mddev->sync_thread);
3237 /* similar to deny_write_access, but accounts for our holding a reference
3238 * to the file ourselves */
3239 static int deny_bitmap_write_access(struct file * file)
3241 struct inode *inode = file->f_mapping->host;
3243 spin_lock(&inode->i_lock);
3244 if (atomic_read(&inode->i_writecount) > 1) {
3245 spin_unlock(&inode->i_lock);
3248 atomic_set(&inode->i_writecount, -1);
3249 spin_unlock(&inode->i_lock);
3254 static void restore_bitmap_write_access(struct file *file)
3256 struct inode *inode = file->f_mapping->host;
3258 spin_lock(&inode->i_lock);
3259 atomic_set(&inode->i_writecount, 1);
3260 spin_unlock(&inode->i_lock);
3264 * 0 - completely stop and disassemble the array
3265 * 1 - switch to readonly
3266 * 2 - stop but do not disassemble array
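 *
 * For reference (illustrative mapping, from the call sites below): the
 * STOP_ARRAY ioctl stops with mode 0 and STOP_ARRAY_RO with mode 1;
 * array_state_store() uses mode 2 to stop an array while keeping it
 * assembled.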
3268 static int do_md_stop(mddev_t * mddev, int mode)
3271 struct gendisk *disk = mddev->gendisk;
3274 if (atomic_read(&mddev->active)>2) {
3275 printk("md: %s still in use.\n",mdname(mddev));
3279 if (mddev->sync_thread) {
3280 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3281 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3282 md_unregister_thread(mddev->sync_thread);
3283 mddev->sync_thread = NULL;
3286 del_timer_sync(&mddev->safemode_timer);
3288 invalidate_partition(disk, 0);
3291 case 1: /* readonly */
3297 case 0: /* disassemble */
3299 bitmap_flush(mddev);
3300 md_super_wait(mddev);
3302 set_disk_ro(disk, 0);
3303 blk_queue_make_request(mddev->queue, md_fail_request);
3304 mddev->pers->stop(mddev);
3305 if (mddev->pers->sync_request)
3306 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3308 module_put(mddev->pers->owner);
3313 if (!mddev->in_sync || mddev->flags) {
3314 /* mark array as shutdown cleanly */
3316 md_update_sb(mddev, 1);
3319 set_disk_ro(disk, 1);
3320 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3324 * Free resources if final stop
3328 struct list_head *tmp;
3329 struct gendisk *disk;
3330 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3332 bitmap_destroy(mddev);
3333 if (mddev->bitmap_file) {
3334 restore_bitmap_write_access(mddev->bitmap_file);
3335 fput(mddev->bitmap_file);
3336 mddev->bitmap_file = NULL;
3338 mddev->bitmap_offset = 0;
3340 ITERATE_RDEV(mddev,rdev,tmp)
3341 if (rdev->raid_disk >= 0) {
3343 sprintf(nm, "rd%d", rdev->raid_disk);
3344 sysfs_remove_link(&mddev->kobj, nm);
3347 export_array(mddev);
3349 mddev->array_size = 0;
3351 mddev->raid_disks = 0;
3352 mddev->recovery_cp = 0;
3354 disk = mddev->gendisk;
3356 set_capacity(disk, 0);
3358 } else if (mddev->pers)
3359 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3362 md_new_event(mddev);
3367 static void autorun_array(mddev_t *mddev)
3370 struct list_head *tmp;
3373 if (list_empty(&mddev->disks))
3376 printk(KERN_INFO "md: running: ");
3378 ITERATE_RDEV(mddev,rdev,tmp) {
3379 char b[BDEVNAME_SIZE];
3380 printk("<%s>", bdevname(rdev->bdev,b));
3384 err = do_md_run (mddev);
3386 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3387 do_md_stop (mddev, 0);
3392 * let's try to run arrays based on all disks that have arrived
3393 * until now. (those are in pending_raid_disks)
3395 * the method: pick the first pending disk, collect all disks with
3396 * the same UUID, remove all from the pending list and put them into
3397 * the 'same_array' list. Then order this list based on superblock
3398 * update time (freshest comes first), kick out 'old' disks and
3399 * compare superblocks. If everything's fine then run it.
3401 * If "unit" is allocated, then bump its reference count
3403 static void autorun_devices(int part)
3405 struct list_head *tmp;
3406 mdk_rdev_t *rdev0, *rdev;
3408 char b[BDEVNAME_SIZE];
3410 printk(KERN_INFO "md: autorun ...\n");
3411 while (!list_empty(&pending_raid_disks)) {
3414 LIST_HEAD(candidates);
3415 rdev0 = list_entry(pending_raid_disks.next,
3416 mdk_rdev_t, same_set);
3418 printk(KERN_INFO "md: considering %s ...\n",
3419 bdevname(rdev0->bdev,b));
3420 INIT_LIST_HEAD(&candidates);
3421 ITERATE_RDEV_PENDING(rdev,tmp)
3422 if (super_90_load(rdev, rdev0, 0) >= 0) {
3423 printk(KERN_INFO "md: adding %s ...\n",
3424 bdevname(rdev->bdev,b));
3425 list_move(&rdev->same_set, &candidates);
3428 * now we have a set of devices, with all of them having
3429 * mostly sane superblocks. It's time to allocate the
3433 dev = MKDEV(mdp_major,
3434 rdev0->preferred_minor << MdpMinorShift);
3435 unit = MINOR(dev) >> MdpMinorShift;
3437 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3440 if (rdev0->preferred_minor != unit) {
3441 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3442 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3446 md_probe(dev, NULL, NULL);
3447 mddev = mddev_find(dev);
3450 "md: cannot allocate memory for md drive.\n");
3453 if (mddev_lock(mddev))
3454 printk(KERN_WARNING "md: %s locked, cannot run\n",
3456 else if (mddev->raid_disks || mddev->major_version
3457 || !list_empty(&mddev->disks)) {
3459 "md: %s already running, cannot run %s\n",
3460 mdname(mddev), bdevname(rdev0->bdev,b));
3461 mddev_unlock(mddev);
3463 printk(KERN_INFO "md: created %s\n", mdname(mddev));
3464 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3465 list_del_init(&rdev->same_set);
3466 if (bind_rdev_to_array(rdev, mddev))
3469 autorun_array(mddev);
3470 mddev_unlock(mddev);
3472 /* on success, candidates will be empty, on error
3475 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3479 printk(KERN_INFO "md: ... autorun DONE.\n");
3482 static int get_version(void __user * arg)
3486 ver.major = MD_MAJOR_VERSION;
3487 ver.minor = MD_MINOR_VERSION;
3488 ver.patchlevel = MD_PATCHLEVEL_VERSION;
3490 if (copy_to_user(arg, &ver, sizeof(ver)))
3496 static int get_array_info(mddev_t * mddev, void __user * arg)
3498 mdu_array_info_t info;
3499 int nr,working,active,failed,spare;
3501 struct list_head *tmp;
3503 nr=working=active=failed=spare=0;
3504 ITERATE_RDEV(mddev,rdev,tmp) {
3506 if (test_bit(Faulty, &rdev->flags))
3510 if (test_bit(In_sync, &rdev->flags))
3517 info.major_version = mddev->major_version;
3518 info.minor_version = mddev->minor_version;
3519 info.patch_version = MD_PATCHLEVEL_VERSION;
3520 info.ctime = mddev->ctime;
3521 info.level = mddev->level;
3522 info.size = mddev->size;
3523 if (info.size != mddev->size) /* overflow */
3526 info.raid_disks = mddev->raid_disks;
3527 info.md_minor = mddev->md_minor;
3528 info.not_persistent= !mddev->persistent;
3530 info.utime = mddev->utime;
3533 info.state = (1<<MD_SB_CLEAN);
3534 if (mddev->bitmap && mddev->bitmap_offset)
3535 info.state = (1<<MD_SB_BITMAP_PRESENT);
3536 info.active_disks = active;
3537 info.working_disks = working;
3538 info.failed_disks = failed;
3539 info.spare_disks = spare;
3541 info.layout = mddev->layout;
3542 info.chunk_size = mddev->chunk_size;
3544 if (copy_to_user(arg, &info, sizeof(info)))
3550 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3552 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3553 char *ptr, *buf = NULL;
3556 file = kmalloc(sizeof(*file), GFP_KERNEL);
3560 /* bitmap disabled, zero the first byte and copy out */
3561 if (!mddev->bitmap || !mddev->bitmap->file) {
3562 file->pathname[0] = '\0';
3566 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3570 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3574 strcpy(file->pathname, ptr);
3578 if (copy_to_user(arg, file, sizeof(*file)))
3586 static int get_disk_info(mddev_t * mddev, void __user * arg)
3588 mdu_disk_info_t info;
3592 if (copy_from_user(&info, arg, sizeof(info)))
3597 rdev = find_rdev_nr(mddev, nr);
3599 info.major = MAJOR(rdev->bdev->bd_dev);
3600 info.minor = MINOR(rdev->bdev->bd_dev);
3601 info.raid_disk = rdev->raid_disk;
3603 if (test_bit(Faulty, &rdev->flags))
3604 info.state |= (1<<MD_DISK_FAULTY);
3605 else if (test_bit(In_sync, &rdev->flags)) {
3606 info.state |= (1<<MD_DISK_ACTIVE);
3607 info.state |= (1<<MD_DISK_SYNC);
3609 if (test_bit(WriteMostly, &rdev->flags))
3610 info.state |= (1<<MD_DISK_WRITEMOSTLY);
3612 info.major = info.minor = 0;
3613 info.raid_disk = -1;
3614 info.state = (1<<MD_DISK_REMOVED);
3617 if (copy_to_user(arg, &info, sizeof(info)))
3623 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3625 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3627 dev_t dev = MKDEV(info->major,info->minor);
3629 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3632 if (!mddev->raid_disks) {
3634 /* expecting a device which has a superblock */
3635 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3638 "md: md_import_device returned %ld\n",
3640 return PTR_ERR(rdev);
3642 if (!list_empty(&mddev->disks)) {
3643 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3644 mdk_rdev_t, same_set);
3645 int err = super_types[mddev->major_version]
3646 .load_super(rdev, rdev0, mddev->minor_version);
3649 "md: %s has different UUID to %s\n",
3650 bdevname(rdev->bdev,b),
3651 bdevname(rdev0->bdev,b2));
3656 err = bind_rdev_to_array(rdev, mddev);
3663 * add_new_disk can be used once the array is assembled
3664 * to add "hot spares". They must already have a superblock
3669 if (!mddev->pers->hot_add_disk) {
3671 "%s: personality does not support diskops!\n",
3675 if (mddev->persistent)
3676 rdev = md_import_device(dev, mddev->major_version,
3677 mddev->minor_version);
3679 rdev = md_import_device(dev, -1, -1);
3682 "md: md_import_device returned %ld\n",
3684 return PTR_ERR(rdev);
3686 /* set save_raid_disk if appropriate */
3687 if (!mddev->persistent) {
3688 if (info->state & (1<<MD_DISK_SYNC) &&
3689 info->raid_disk < mddev->raid_disks)
3690 rdev->raid_disk = info->raid_disk;
3692 rdev->raid_disk = -1;
3694 super_types[mddev->major_version].
3695 validate_super(mddev, rdev);
3696 rdev->saved_raid_disk = rdev->raid_disk;
3698 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3699 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3700 set_bit(WriteMostly, &rdev->flags);
3702 rdev->raid_disk = -1;
3703 err = bind_rdev_to_array(rdev, mddev);
3704 if (!err && !mddev->pers->hot_remove_disk) {
3705 /* If there is hot_add_disk but no hot_remove_disk
3706 * then added disks are for geometry changes
3707 * and should be added immediately.
3709 super_types[mddev->major_version].
3710 validate_super(mddev, rdev);
3711 err = mddev->pers->hot_add_disk(mddev, rdev);
3713 unbind_rdev_from_array(rdev);
3718 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3719 md_wakeup_thread(mddev->thread);
3723 /* otherwise, add_new_disk is only allowed
3724 * for major_version==0 superblocks
3726 if (mddev->major_version != 0) {
3727 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3732 if (!(info->state & (1<<MD_DISK_FAULTY))) {
3734 rdev = md_import_device (dev, -1, 0);
3737 "md: error, md_import_device() returned %ld\n",
3739 return PTR_ERR(rdev);
3741 rdev->desc_nr = info->number;
3742 if (info->raid_disk < mddev->raid_disks)
3743 rdev->raid_disk = info->raid_disk;
3745 rdev->raid_disk = -1;
3749 if (rdev->raid_disk < mddev->raid_disks)
3750 if (info->state & (1<<MD_DISK_SYNC))
3751 set_bit(In_sync, &rdev->flags);
3753 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3754 set_bit(WriteMostly, &rdev->flags);
3756 if (!mddev->persistent) {
3757 printk(KERN_INFO "md: nonpersistent superblock ...\n");
3758 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3760 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3761 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3763 err = bind_rdev_to_array(rdev, mddev);
3773 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3775 char b[BDEVNAME_SIZE];
3781 rdev = find_rdev(mddev, dev);
3785 if (rdev->raid_disk >= 0)
3788 kick_rdev_from_array(rdev);
3789 md_update_sb(mddev, 1);
3790 md_new_event(mddev);
3794 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3795 bdevname(rdev->bdev,b), mdname(mddev));
3799 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3801 char b[BDEVNAME_SIZE];
3809 if (mddev->major_version != 0) {
3810 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3811 " version-0 superblocks.\n",
3815 if (!mddev->pers->hot_add_disk) {
3817 "%s: personality does not support diskops!\n",
3822 rdev = md_import_device (dev, -1, 0);
3825 "md: error, md_import_device() returned %ld\n",
3830 if (mddev->persistent)
3831 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3834 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3836 size = calc_dev_size(rdev, mddev->chunk_size);
3839 if (test_bit(Faulty, &rdev->flags)) {
3841 "md: can not hot-add faulty %s disk to %s!\n",
3842 bdevname(rdev->bdev,b), mdname(mddev));
3846 clear_bit(In_sync, &rdev->flags);
3848 err = bind_rdev_to_array(rdev, mddev);
3853 * The rest should better be atomic, we can have disk failures
3854 * noticed in interrupt contexts ...
3857 if (rdev->desc_nr == mddev->max_disks) {
3858 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3861 goto abort_unbind_export;
3864 rdev->raid_disk = -1;
3866 md_update_sb(mddev, 1);
3869 * Kick recovery, maybe this spare has to be added to the
3870 * array immediately.
3872 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3873 md_wakeup_thread(mddev->thread);
3874 md_new_event(mddev);
3877 abort_unbind_export:
3878 unbind_rdev_from_array(rdev);
3885 static int set_bitmap_file(mddev_t *mddev, int fd)
3890 if (!mddev->pers->quiesce)
3892 if (mddev->recovery || mddev->sync_thread)
3894 /* we should be able to change the bitmap.. */
3900 return -EEXIST; /* cannot add when bitmap is present */
3901 mddev->bitmap_file = fget(fd);
3903 if (mddev->bitmap_file == NULL) {
3904 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3909 err = deny_bitmap_write_access(mddev->bitmap_file);
3911 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3913 fput(mddev->bitmap_file);
3914 mddev->bitmap_file = NULL;
3917 mddev->bitmap_offset = 0; /* file overrides offset */
3918 } else if (mddev->bitmap == NULL)
3919 return -ENOENT; /* cannot remove what isn't there */
3922 mddev->pers->quiesce(mddev, 1);
3924 err = bitmap_create(mddev);
3925 if (fd < 0 || err) {
3926 bitmap_destroy(mddev);
3927 fd = -1; /* make sure to put the file */
3929 mddev->pers->quiesce(mddev, 0);
3932 if (mddev->bitmap_file) {
3933 restore_bitmap_write_access(mddev->bitmap_file);
3934 fput(mddev->bitmap_file);
3936 mddev->bitmap_file = NULL;
3943 * set_array_info is used in two different ways.
3944 * The original usage is when creating a new array.
3945 * In this usage, raid_disks is > 0 and it, together with
3946 * level, size, not_persistent, layout and chunksize, determines the
3947 * shape of the array.
3948 * This will always create an array with a type-0.90.0 superblock.
3949 * The newer usage is when assembling an array.
3950 * In this case raid_disks will be 0, and the major_version field is
3951 * used to determine which style super-blocks are to be found on the devices.
3952 * The minor and patch _version numbers are also kept in case the
3953 * super_block handler wishes to interpret them.
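 *
 * Example (illustrative, user space): preparing to assemble an array
 * with 0.90 superblocks:
 *     mdu_array_info_t info = { 0 };   /* raid_disks == 0: assembly */
 *     info.major_version = 0;
 *     info.minor_version = 90;
 *     ioctl(fd, SET_ARRAY_INFO, &info);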
3955 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3958 if (info->raid_disks == 0) {
3959 /* just setting version number for superblock loading */
3960 if (info->major_version < 0 ||
3961 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
3962 super_types[info->major_version].name == NULL) {
3963 /* maybe try to auto-load a module? */
3965 "md: superblock version %d not known\n",
3966 info->major_version);
3969 mddev->major_version = info->major_version;
3970 mddev->minor_version = info->minor_version;
3971 mddev->patch_version = info->patch_version;
3974 mddev->major_version = MD_MAJOR_VERSION;
3975 mddev->minor_version = MD_MINOR_VERSION;
3976 mddev->patch_version = MD_PATCHLEVEL_VERSION;
3977 mddev->ctime = get_seconds();
3979 mddev->level = info->level;
3980 mddev->clevel[0] = 0;
3981 mddev->size = info->size;
3982 mddev->raid_disks = info->raid_disks;
3983 /* don't set md_minor, it is determined by which /dev/md* was opened */
3986 if (info->state & (1<<MD_SB_CLEAN))
3987 mddev->recovery_cp = MaxSector;
3989 mddev->recovery_cp = 0;
3990 mddev->persistent = ! info->not_persistent;
3992 mddev->layout = info->layout;
3993 mddev->chunk_size = info->chunk_size;
3995 mddev->max_disks = MD_SB_DISKS;
3998 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4000 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4001 mddev->bitmap_offset = 0;
4003 mddev->reshape_position = MaxSector;
4006 * Generate a 128 bit UUID
4008 get_random_bytes(mddev->uuid, 16);
4010 mddev->new_level = mddev->level;
4011 mddev->new_chunk = mddev->chunk_size;
4012 mddev->new_layout = mddev->layout;
4013 mddev->delta_disks = 0;
4018 static int update_size(mddev_t *mddev, unsigned long size)
4022 struct list_head *tmp;
4023 int fit = (size == 0);
4025 if (mddev->pers->resize == NULL)
4027 /* The "size" is the amount of each device that is used.
4028 * This can only make sense for arrays with redundancy.
4029 * linear and raid0 always use whatever space is available
4030 * We can only consider changing the size if no resync
4031 * or reconstruction is happening, and if the new size
4032 * is acceptable. It must fit before the sb_offset or,
4033 * if that is <data_offset, it must fit before the
4034 * size of each device.
4035 * If size is zero, we find the largest size that fits.
4037 if (mddev->sync_thread)
4039 ITERATE_RDEV(mddev,rdev,tmp) {
4041 if (rdev->sb_offset > rdev->data_offset)
4042 avail = (rdev->sb_offset*2) - rdev->data_offset;
4044 avail = get_capacity(rdev->bdev->bd_disk)
4045 - rdev->data_offset;
4046 if (fit && (size == 0 || size > avail/2))
4048 if (avail < ((sector_t)size << 1))
4051 rv = mddev->pers->resize(mddev, (sector_t)size *2);
4053 struct block_device *bdev;
4055 bdev = bdget_disk(mddev->gendisk, 0);
4057 mutex_lock(&bdev->bd_inode->i_mutex);
4058 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4059 mutex_unlock(&bdev->bd_inode->i_mutex);
4066 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4069 /* change the number of raid disks */
4070 if (mddev->pers->check_reshape == NULL)
4072 if (raid_disks <= 0 ||
4073 raid_disks >= mddev->max_disks)
4075 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4077 mddev->delta_disks = raid_disks - mddev->raid_disks;
4079 rv = mddev->pers->check_reshape(mddev);
4085 * update_array_info is used to change the configuration of an active array.
4087 * The version, ctime, level, size, raid_disks, not_persistent, layout and
4088 * chunk_size fields in the info are checked against the array.
4089 * Any differences that cannot be handled will cause an error.
4090 * Normally, only one change can be managed at a time.
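/* Example (illustrative): growing from 4 to 5 raid_disks passes an info
 * block that differs from the array only in raid_disks; if size, layout
 * or the bitmap state changed as well, -EINVAL is returned.
 */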
4092 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4098 /* calculate expected state, ignoring low bits */
4099 if (mddev->bitmap && mddev->bitmap_offset)
4100 state |= (1 << MD_SB_BITMAP_PRESENT);
4102 if (mddev->major_version != info->major_version ||
4103 mddev->minor_version != info->minor_version ||
4104 /* mddev->patch_version != info->patch_version || */
4105 mddev->ctime != info->ctime ||
4106 mddev->level != info->level ||
4107 /* mddev->layout != info->layout || */
4108 !mddev->persistent != info->not_persistent||
4109 mddev->chunk_size != info->chunk_size ||
4110 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4111 ((state^info->state) & 0xfffffe00)
4114 /* Check there is only one change */
4115 if (info->size >= 0 && mddev->size != info->size) cnt++;
4116 if (mddev->raid_disks != info->raid_disks) cnt++;
4117 if (mddev->layout != info->layout) cnt++;
4118 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4119 if (cnt == 0) return 0;
4120 if (cnt > 1) return -EINVAL;
4122 if (mddev->layout != info->layout) {
4124 * we don't need to do anything at the md level, the
4125 * personality will take care of it all.
4127 if (mddev->pers->reconfig == NULL)
4130 return mddev->pers->reconfig(mddev, info->layout, -1);
4132 if (info->size >= 0 && mddev->size != info->size)
4133 rv = update_size(mddev, info->size);
4135 if (mddev->raid_disks != info->raid_disks)
4136 rv = update_raid_disks(mddev, info->raid_disks);
4138 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4139 if (mddev->pers->quiesce == NULL)
4141 if (mddev->recovery || mddev->sync_thread)
4143 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4144 /* add the bitmap */
4147 if (mddev->default_bitmap_offset == 0)
4149 mddev->bitmap_offset = mddev->default_bitmap_offset;
4150 mddev->pers->quiesce(mddev, 1);
4151 rv = bitmap_create(mddev);
4153 bitmap_destroy(mddev);
4154 mddev->pers->quiesce(mddev, 0);
4156 /* remove the bitmap */
4159 if (mddev->bitmap->file)
4161 mddev->pers->quiesce(mddev, 1);
4162 bitmap_destroy(mddev);
4163 mddev->pers->quiesce(mddev, 0);
4164 mddev->bitmap_offset = 0;
4167 md_update_sb(mddev, 1);
4171 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4175 if (mddev->pers == NULL)
4178 rdev = find_rdev(mddev, dev);
4182 md_error(mddev, rdev);
4186 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4188 mddev_t *mddev = bdev->bd_disk->private_data;
4192 geo->cylinders = get_capacity(mddev->gendisk) / 8;
4196 static int md_ioctl(struct inode *inode, struct file *file,
4197 unsigned int cmd, unsigned long arg)
4200 void __user *argp = (void __user *)arg;
4201 mddev_t *mddev = NULL;
4203 if (!capable(CAP_SYS_ADMIN))
4207 * Commands dealing with the RAID driver but not any
4213 err = get_version(argp);
4216 case PRINT_RAID_DEBUG:
4224 autostart_arrays(arg);
4231 * Commands creating/starting a new array:
4234 mddev = inode->i_bdev->bd_disk->private_data;
4241 err = mddev_lock(mddev);
4244 "md: ioctl lock interrupted, reason %d, cmd %d\n",
4251 case SET_ARRAY_INFO:
4253 mdu_array_info_t info;
4255 memset(&info, 0, sizeof(info));
4256 else if (copy_from_user(&info, argp, sizeof(info))) {
4261 err = update_array_info(mddev, &info);
4263 printk(KERN_WARNING "md: couldn't update"
4264 " array info. %d\n", err);
4269 if (!list_empty(&mddev->disks)) {
4271 "md: array %s already has disks!\n",
4276 if (mddev->raid_disks) {
4278 "md: array %s already initialised!\n",
4283 err = set_array_info(mddev, &info);
4285 printk(KERN_WARNING "md: couldn't set"
4286 " array info. %d\n", err);
4296 * Commands querying/configuring an existing array:
4298 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4299 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
4300 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4301 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
4307 * Commands even a read-only array can execute:
4311 case GET_ARRAY_INFO:
4312 err = get_array_info(mddev, argp);
4315 case GET_BITMAP_FILE:
4316 err = get_bitmap_file(mddev, argp);
4320 err = get_disk_info(mddev, argp);
4323 case RESTART_ARRAY_RW:
4324 err = restart_array(mddev);
4328 err = do_md_stop (mddev, 0);
4332 err = do_md_stop (mddev, 1);
4336 * We have a problem here: there is no easy way to give a CHS
4337 * virtual geometry. We currently pretend that we have 2 heads,
4338 * 4 sectors (with a BIG number of cylinders...). This drives
4339 * dosfs just mad... ;-)
4344 * The remaining ioctls are changing the state of the
4345 * superblock, so we do not allow them on read-only arrays.
4346 * However non-MD ioctls (e.g. get-size) will still come through
4347 * here and hit the 'default' below, so only disallow
4348 * 'md' ioctls, and switch to rw mode if started auto-readonly.
4350 if (_IOC_TYPE(cmd) == MD_MAJOR &&
4351 mddev->ro && mddev->pers) {
4352 if (mddev->ro == 2) {
4354 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4355 md_wakeup_thread(mddev->thread);
4367 mdu_disk_info_t info;
4368 if (copy_from_user(&info, argp, sizeof(info)))
4371 err = add_new_disk(mddev, &info);
4375 case HOT_REMOVE_DISK:
4376 err = hot_remove_disk(mddev, new_decode_dev(arg));
4380 err = hot_add_disk(mddev, new_decode_dev(arg));
4383 case SET_DISK_FAULTY:
4384 err = set_disk_faulty(mddev, new_decode_dev(arg));
4388 err = do_md_run (mddev);
4391 case SET_BITMAP_FILE:
4392 err = set_bitmap_file(mddev, (int)arg);
4402 mddev_unlock(mddev);
4412 static int md_open(struct inode *inode, struct file *file)
4415 * Succeed if we can lock the mddev, which confirms that
4416 * it isn't being stopped right now.
4418 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4421 if ((err = mddev_lock(mddev)))
4426 mddev_unlock(mddev);
4428 check_disk_change(inode->i_bdev);
4433 static int md_release(struct inode *inode, struct file * file)
4435 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4444 static int md_media_changed(struct gendisk *disk)
4446 mddev_t *mddev = disk->private_data;
4448 return mddev->changed;
4451 static int md_revalidate(struct gendisk *disk)
4453 mddev_t *mddev = disk->private_data;
4458 static struct block_device_operations md_fops =
4460 .owner = THIS_MODULE,
4462 .release = md_release,
4464 .getgeo = md_getgeo,
4465 .media_changed = md_media_changed,
4466 .revalidate_disk= md_revalidate,
4469 static int md_thread(void * arg)
4471 mdk_thread_t *thread = arg;
4474 * md_thread is a 'system-thread', its priority should be very
4475 * high. We avoid resource deadlocks individually in each
4476 * raid personality. (RAID5 does preallocation) We also use RR and
4477 * the very same RT priority as kswapd, thus we will never get
4478 * into a priority inversion deadlock.
4480 * we definitely have to have equal or higher priority than
4481 * bdflush, otherwise bdflush will deadlock if there are too
4482 * many dirty RAID5 blocks.
4485 allow_signal(SIGKILL);
4486 while (!kthread_should_stop()) {
4488 /* We need to wait INTERRUPTIBLE so that
4489 * we don't add to the load-average.
4490 * That means we need to be sure no signals are pending.
4493 if (signal_pending(current))
4494 flush_signals(current);
4496 wait_event_interruptible_timeout
4498 test_bit(THREAD_WAKEUP, &thread->flags)
4499 || kthread_should_stop(),
4503 clear_bit(THREAD_WAKEUP, &thread->flags);
4505 thread->run(thread->mddev);
4511 void md_wakeup_thread(mdk_thread_t *thread)
4514 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4515 set_bit(THREAD_WAKEUP, &thread->flags);
4516 wake_up(&thread->wqueue);
4520 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4523 mdk_thread_t *thread;
4525 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4529 init_waitqueue_head(&thread->wqueue);
4532 thread->mddev = mddev;
4533 thread->timeout = MAX_SCHEDULE_TIMEOUT;
4534 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4535 if (IS_ERR(thread->tsk)) {
4542 void md_unregister_thread(mdk_thread_t *thread)
4544 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
4546 kthread_stop(thread->tsk);
4550 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4557 if (!rdev || test_bit(Faulty, &rdev->flags))
4560 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4562 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4563 __builtin_return_address(0),__builtin_return_address(1),
4564 __builtin_return_address(2),__builtin_return_address(3));
4568 if (!mddev->pers->error_handler)
4570 mddev->pers->error_handler(mddev,rdev);
4571 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4572 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4573 md_wakeup_thread(mddev->thread);
4574 md_new_event_inintr(mddev);
4577 /* seq_file implementation for /proc/mdstat */
4579 static void status_unused(struct seq_file *seq)
4583 struct list_head *tmp;
4585 seq_printf(seq, "unused devices: ");
4587 ITERATE_RDEV_PENDING(rdev,tmp) {
4588 char b[BDEVNAME_SIZE];
4590 seq_printf(seq, "%s ",
4591 bdevname(rdev->bdev,b));
4594 seq_printf(seq, "<none>");
4596 seq_printf(seq, "\n");
4600 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4602 sector_t max_blocks, resync, res;
4603 unsigned long dt, db, rt;
4605 unsigned int per_milli;
4607 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4609 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4610 max_blocks = mddev->resync_max_sectors >> 1;
4612 max_blocks = mddev->size;
4615 * Should not happen.
4621 /* Pick 'scale' such that (resync>>scale)*1000 will fit
4622 * in a sector_t, and (max_blocks>>scale) will fit in a
4623 * u32, as those are the requirements for sector_div.
4624 * Thus 'scale' must be at least 10
4627 if (sizeof(sector_t) > sizeof(unsigned long)) {
4628 while ( max_blocks/2 > (1ULL<<(scale+32)))
4631 res = (resync>>scale)*1000;
4632 sector_div(res, (u32)((max_blocks>>scale)+1));
4636 int i, x = per_milli/50, y = 20-x;
4637 seq_printf(seq, "[");
4638 for (i = 0; i < x; i++)
4639 seq_printf(seq, "=");
4640 seq_printf(seq, ">");
4641 for (i = 0; i < y; i++)
4642 seq_printf(seq, ".");
4643 seq_printf(seq, "] ");
4645 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4646 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4648 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
4650 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4651 "resync" : "recovery"))),
4652 per_milli/10, per_milli % 10,
4653 (unsigned long long) resync,
4654 (unsigned long long) max_blocks);
4657 * We do not want to overflow, so the order of operands and
4658 * the * 100 / 100 trick are important. We do a +1 to be
4659 * safe against division by zero. We only estimate anyway.
4661 * dt: time from mark until now
4662 * db: blocks written from mark until now
4663 * rt: remaining time
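 *
 * Worked example (illustrative): with dt = 30s, db = 10240 sectors and
 * 1000000 blocks remaining, db/2/100+1 = 52, so
 * rt = (30 * (1000000 / 52)) / 100 = 5769 seconds, about 96 minutes.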
4665 dt = ((jiffies - mddev->resync_mark) / HZ);
4667 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
4668 - mddev->resync_mark_cnt;
4669 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
4671 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4673 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
4676 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4678 struct list_head *tmp;
4688 spin_lock(&all_mddevs_lock);
4689 list_for_each(tmp,&all_mddevs)
4691 mddev = list_entry(tmp, mddev_t, all_mddevs);
4693 spin_unlock(&all_mddevs_lock);
4696 spin_unlock(&all_mddevs_lock);
4698 return (void*)2;/* tail */
4702 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4704 struct list_head *tmp;
4705 mddev_t *next_mddev, *mddev = v;
4711 spin_lock(&all_mddevs_lock);
4713 tmp = all_mddevs.next;
4715 tmp = mddev->all_mddevs.next;
4716 if (tmp != &all_mddevs)
4717 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4719 next_mddev = (void*)2;
4722 spin_unlock(&all_mddevs_lock);
4730 static void md_seq_stop(struct seq_file *seq, void *v)
4734 if (mddev && v != (void*)1 && v != (void*)2)
4738 struct mdstat_info {
4742 static int md_seq_show(struct seq_file *seq, void *v)
4746 struct list_head *tmp2;
4748 struct mdstat_info *mi = seq->private;
4749 struct bitmap *bitmap;
4751 if (v == (void*)1) {
4752 struct mdk_personality *pers;
4753 seq_printf(seq, "Personalities : ");
4754 spin_lock(&pers_lock);
4755 list_for_each_entry(pers, &pers_list, list)
4756 seq_printf(seq, "[%s] ", pers->name);
4758 spin_unlock(&pers_lock);
4759 seq_printf(seq, "\n");
4760 mi->event = atomic_read(&md_event_count);
4763 if (v == (void*)2) {
4768 if (mddev_lock(mddev) < 0)
4771 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4772 seq_printf(seq, "%s : %sactive", mdname(mddev),
4773 mddev->pers ? "" : "in");
4776 seq_printf(seq, " (read-only)");
4778 seq_printf(seq, "(auto-read-only)");
4779 seq_printf(seq, " %s", mddev->pers->name);
4783 ITERATE_RDEV(mddev,rdev,tmp2) {
4784 char b[BDEVNAME_SIZE];
4785 seq_printf(seq, " %s[%d]",
4786 bdevname(rdev->bdev,b), rdev->desc_nr);
4787 if (test_bit(WriteMostly, &rdev->flags))
4788 seq_printf(seq, "(W)");
4789 if (test_bit(Faulty, &rdev->flags)) {
4790 seq_printf(seq, "(F)");
4792 } else if (rdev->raid_disk < 0)
4793 seq_printf(seq, "(S)"); /* spare */
4797 if (!list_empty(&mddev->disks)) {
4799 seq_printf(seq, "\n %llu blocks",
4800 (unsigned long long)mddev->array_size);
4802 seq_printf(seq, "\n %llu blocks",
4803 (unsigned long long)size);
4805 if (mddev->persistent) {
4806 if (mddev->major_version != 0 ||
4807 mddev->minor_version != 90) {
4808 seq_printf(seq," super %d.%d",
4809 mddev->major_version,
4810 mddev->minor_version);
4813 seq_printf(seq, " super non-persistent");
4816 mddev->pers->status (seq, mddev);
4817 seq_printf(seq, "\n ");
4818 if (mddev->pers->sync_request) {
4819 if (mddev->curr_resync > 2) {
4820 status_resync (seq, mddev);
4821 seq_printf(seq, "\n ");
4822 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4823 seq_printf(seq, "\tresync=DELAYED\n ");
4824 else if (mddev->recovery_cp < MaxSector)
4825 seq_printf(seq, "\tresync=PENDING\n ");
4828 seq_printf(seq, "\n ");
4830 if ((bitmap = mddev->bitmap)) {
4831 unsigned long chunk_kb;
4832 unsigned long flags;
4833 spin_lock_irqsave(&bitmap->lock, flags);
4834 chunk_kb = bitmap->chunksize >> 10;
4835 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4837 bitmap->pages - bitmap->missing_pages,
4839 (bitmap->pages - bitmap->missing_pages)
4840 << (PAGE_SHIFT - 10),
4841 chunk_kb ? chunk_kb : bitmap->chunksize,
4842 chunk_kb ? "KB" : "B");
4844 seq_printf(seq, ", file: ");
4845 seq_path(seq, bitmap->file->f_vfsmnt,
4846 bitmap->file->f_dentry," \t\n");
4849 seq_printf(seq, "\n");
4850 spin_unlock_irqrestore(&bitmap->lock, flags);
4853 seq_printf(seq, "\n");
4855 mddev_unlock(mddev);
4860 static struct seq_operations md_seq_ops = {
4861 .start = md_seq_start,
4862 .next = md_seq_next,
4863 .stop = md_seq_stop,
4864 .show = md_seq_show,
4867 static int md_seq_open(struct inode *inode, struct file *file)
4870 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4874 error = seq_open(file, &md_seq_ops);
4878 struct seq_file *p = file->private_data;
4880 mi->event = atomic_read(&md_event_count);
4885 static int md_seq_release(struct inode *inode, struct file *file)
4887 struct seq_file *m = file->private_data;
4888 struct mdstat_info *mi = m->private;
4891 return seq_release(inode, file);
4894 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4896 struct seq_file *m = filp->private_data;
4897 struct mdstat_info *mi = m->private;
4900 poll_wait(filp, &md_event_waiters, wait);
4902 /* always allow read */
4903 mask = POLLIN | POLLRDNORM;
4905 if (mi->event != atomic_read(&md_event_count))
4906 mask |= POLLERR | POLLPRI;
4910 static struct file_operations md_seq_fops = {
4911 .open = md_seq_open,
4913 .llseek = seq_lseek,
4914 .release = md_seq_release,
4915 .poll = mdstat_poll,
4918 int register_md_personality(struct mdk_personality *p)
4920 spin_lock(&pers_lock);
4921 list_add_tail(&p->list, &pers_list);
4922 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
4923 spin_unlock(&pers_lock);
4927 int unregister_md_personality(struct mdk_personality *p)
4929 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
4930 spin_lock(&pers_lock);
4931 list_del_init(&p->list);
4932 spin_unlock(&pers_lock);
4936 static int is_mddev_idle(mddev_t *mddev)
4939 struct list_head *tmp;
4941 unsigned long curr_events;
4944 ITERATE_RDEV(mddev,rdev,tmp) {
4945 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
4946 curr_events = disk_stat_read(disk, sectors[0]) +
4947 disk_stat_read(disk, sectors[1]) -
4948 atomic_read(&disk->sync_io);
4949 /* The difference between curr_events and last_events
4950 * will be affected by any new non-sync IO (making
4951 * curr_events bigger) and any difference in the amount of
4952 * in-flight sync IO (making curr_events bigger or smaller)
4953 * The amount in-flight is currently limited to
4954 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
4955 * which is at most 4096 sectors.
4956 * These numbers are fairly fragile and should be made
4957 * more robust, probably by enforcing the
4958 * 'window size' that md_do_sync sort-of uses.
4960 * Note: the following is an unsigned comparison.
4962 if ((curr_events - rdev->last_events + 4096) > 8192) {
4963 rdev->last_events = curr_events;
4970 void md_done_sync(mddev_t *mddev, int blocks, int ok)
4972 /* another "blocks" (512byte) blocks have been synced */
4973 atomic_sub(blocks, &mddev->recovery_active);
4974 wake_up(&mddev->recovery_wait);
4976 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4977 md_wakeup_thread(mddev->thread);
4978 // stop recovery, signal do_sync ....
4983 /* md_write_start(mddev, bi)
4984 * If we need to update some array metadata (e.g. 'active' flag
4985 * in superblock) before writing, schedule a superblock update
4986 * and wait for it to complete.
4988 void md_write_start(mddev_t *mddev, struct bio *bi)
4990 if (bio_data_dir(bi) != WRITE)
4993 BUG_ON(mddev->ro == 1);
4994 if (mddev->ro == 2) {
4995 /* need to switch to read/write */
4997 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4998 md_wakeup_thread(mddev->thread);
5000 atomic_inc(&mddev->writes_pending);
5001 if (mddev->in_sync) {
5002 spin_lock_irq(&mddev->write_lock);
5003 if (mddev->in_sync) {
5005 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5006 md_wakeup_thread(mddev->thread);
5008 spin_unlock_irq(&mddev->write_lock);
5010 wait_event(mddev->sb_wait, mddev->flags==0);
5013 void md_write_end(mddev_t *mddev)
5015 if (atomic_dec_and_test(&mddev->writes_pending)) {
5016 if (mddev->safemode == 2)
5017 md_wakeup_thread(mddev->thread);
5018 else if (mddev->safemode_delay)
5019 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5023 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
5025 #define SYNC_MARKS 10
5026 #define SYNC_MARK_STEP (3*HZ)
5027 void md_do_sync(mddev_t *mddev)
5030 unsigned int currspeed = 0,
5032 sector_t max_sectors, j, io_sectors;
5033 unsigned long mark[SYNC_MARKS];
5034 sector_t mark_cnt[SYNC_MARKS];
5036 struct list_head *tmp;
5037 sector_t last_check;
5039 struct list_head *rtmp;
5043 /* just in case the thread restarts... */
5044 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5046 if (mddev->ro) /* never try to sync a read-only array */
5049 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5050 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5051 desc = "data-check";
5052 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5053 desc = "requested-resync";
5056 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5061 /* we overload curr_resync somewhat here.
5062 * 0 == not engaged in resync at all
5063 * 2 == checking that there is no conflict with another sync
5064 * 1 == like 2, but have yielded to allow a conflicting resync to commence
5066 * other == active in resync - this many blocks
5068 * Before starting a resync we must have set curr_resync to
5069 * 2, and then checked that every "conflicting" array has curr_resync
5070 * less than ours. When we find one that is the same or higher
5071 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
5072 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
5073 * This will mean we have to start checking from the beginning again.
5078 mddev->curr_resync = 2;
5081 if (kthread_should_stop()) {
5082 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5085 ITERATE_MDDEV(mddev2,tmp) {
5086 if (mddev2 == mddev)
5088 if (mddev2->curr_resync &&
5089 match_mddev_units(mddev,mddev2)) {
5091 if (mddev < mddev2 && mddev->curr_resync == 2) {
5092 /* arbitrarily yield */
5093 mddev->curr_resync = 1;
5094 wake_up(&resync_wait);
5096 if (mddev > mddev2 && mddev->curr_resync == 1)
5097 /* no need to wait here, we can wait the next
5098 * time 'round when curr_resync == 2
5101 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5102 if (!kthread_should_stop() &&
5103 mddev2->curr_resync >= mddev->curr_resync) {
5104 printk(KERN_INFO "md: delaying %s of %s"
5105 " until %s has finished (they"
5106 " share one or more physical units)\n",
5107 desc, mdname(mddev), mdname(mddev2));
5110 finish_wait(&resync_wait, &wq);
5113 finish_wait(&resync_wait, &wq);
5116 } while (mddev->curr_resync < 2);
5119 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5120 /* resync follows the size requested by the personality,
5121 * which defaults to physical size, but can be virtual size
5123 max_sectors = mddev->resync_max_sectors;
5124 mddev->resync_mismatches = 0;
5125 /* we don't use the checkpoint if there's a bitmap */
5126 if (!mddev->bitmap &&
5127 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5128 j = mddev->recovery_cp;
5129 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5130 max_sectors = mddev->size << 1;
5132 /* recovery follows the physical size of devices */
5133 max_sectors = mddev->size << 1;
5135 ITERATE_RDEV(mddev,rdev,rtmp)
5136 if (rdev->raid_disk >= 0 &&
5137 !test_bit(Faulty, &rdev->flags) &&
5138 !test_bit(In_sync, &rdev->flags) &&
5139 rdev->recovery_offset < j)
5140 j = rdev->recovery_offset;
5143 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
5144 printk(KERN_INFO "md: minimum _guaranteed_ speed:"
5145 " %d KB/sec/disk.\n", speed_min(mddev));
5146 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5147 "(but not more than %d KB/sec) for %s.\n",
5148 speed_max(mddev), desc);
5150 is_mddev_idle(mddev); /* this also initializes IO event counters */
5153 for (m = 0; m < SYNC_MARKS; m++) {
5155 mark_cnt[m] = io_sectors;
5158 mddev->resync_mark = mark[last_mark];
5159 mddev->resync_mark_cnt = mark_cnt[last_mark];
5162 * Tune reconstruction:
5164 window = 32*(PAGE_SIZE/512);
5165 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5166 window/2,(unsigned long long) max_sectors/2);
5168 atomic_set(&mddev->recovery_active, 0);
5169 init_waitqueue_head(&mddev->recovery_wait);
5174 "md: resuming %s of %s from checkpoint.\n",
5175 desc, mdname(mddev));
5176 mddev->curr_resync = j;
	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;
		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
			break;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}
		if (kthread_should_stop()) {
			/* got a signal, exit. */
			printk(KERN_INFO
			       "md: md_do_sync() got signal ... exiting\n");
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}
		/*
		 * this loop exits only if we are slower than the 'hard'
		 * speed limit, or the system was IO-idle for a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		mddev->queue->unplug_fn(mddev->queue);
		cond_resched();
		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;
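		/*
		 * currspeed is in KB/sec: sectors completed since the
		 * oldest mark, divided by 2 (sectors -> KB) and by the
		 * elapsed seconds; the two +1 terms avoid division by
		 * zero and a reported speed of zero.  E.g. 20480 sectors
		 * in 10 seconds gives 10240/11 + 1 = 931 KB/sec.
		 */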
		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
			    !is_mddev_idle(mddev)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n", mdname(mddev), desc);
	/* this also signals 'finished resyncing' to md_stop */
 out:
	mddev->queue->unplug_fn(mddev->queue);
	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			ITERATE_RDEV(mddev,rdev,rtmp)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
		}
	}
 skip:
	mddev->curr_resync = 0;
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
}
EXPORT_SYMBOL_GPL(md_do_sync);
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
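/*
 * Illustrative usage (based on the raid1 personality, not part of this
 * file): an array's thread function calls this at the top of each pass,
 * e.g.
 *
 *	static void raid1d(mddev_t *mddev)
 *	{
 *		md_check_recovery(mddev);
 *		...
 *	}
 *
 * so superblock updates and resync decisions piggy-back on the
 * personality's own event loop.
 */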
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;
	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);
	if (signal_pending(current)) {
		if (mddev->pers->sync_request) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}
	if ( ! (
		mddev->flags ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;
	if (mddev_trylock(mddev)) {
		int spares = 0;

		spin_lock_irq(&mddev->write_lock);
		if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
		    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
			mddev->in_sync = 1;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		}
		if (mddev->safemode == 1)
			mddev->safemode = 0;
		spin_unlock_irq(&mddev->write_lock);

		if (mddev->flags)
			md_update_sb(mddev, 0);
		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				mddev->pers->spare_active(mddev);
			}
			md_update_sb(mddev, 1);

			/* if array is no longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				ITERATE_RDEV(mddev,rdev,rtmp)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_new_event(mddev);
			goto unlock;
		}
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */
		ITERATE_RDEV(mddev,rdev,rtmp)
			if (rdev->raid_disk >= 0 &&
			    (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
			    atomic_read(&rdev->nr_pending)==0) {
				if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
					char nm[20];
					sprintf(nm,"rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
					rdev->raid_disk = -1;
				}
			}
		if (mddev->degraded) {
			ITERATE_RDEV(mddev,rdev,rtmp)
				if (rdev->raid_disk < 0
				    && !test_bit(Faulty, &rdev->flags)) {
					rdev->recovery_offset = 0;
					if (mddev->pers->hot_add_disk(mddev,rdev)) {
						char nm[20];
						sprintf(nm, "rd%d", rdev->raid_disk);
						sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
						spares++;
						md_new_event(mddev);
					} else
						break;
				}
		}
		if (spares) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;
		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (spares && mddev->bitmap && !mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			md_new_event(mddev);
		}
	unlock:
		mddev_unlock(mddev);
	}
}
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		ITERATE_MDDEV(mddev,tmp)
			if (mddev_trylock(mddev)) {
				do_md_stop(mddev, 1);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	struct proc_dir_entry *p;

	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	p = create_proc_entry("mdstat", S_IRUGO, NULL);
	if (p)
		p->proc_fops = &md_seq_fops;
}
static int __init md_init(void)
{
	if (register_blkdev(MAJOR_NR, "md"))
		return -1;
	if ((mdp_major = register_blkdev(0, "mdp")) <= 0) {
		unregister_blkdev(MAJOR_NR, "md");
		return -1;
	}
	blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table, 1);

	md_geninit();
	return 0;
}
/* Searches all registered partitions for autorun RAID arrays. */
static dev_t detected_devices[128];
static int dev_cnt;

void md_autodetect_dev(dev_t dev)
{
	if (dev_cnt >= 0 && dev_cnt < 127)
		detected_devices[dev_cnt++] = dev;
}
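/*
 * When md is built into the kernel, the partition-scanning code calls
 * md_autodetect_dev() for each partition of type 0xfd ("Linux raid
 * autodetect"); the array above merely queues those devices (capped at
 * 127) until autostart_arrays() drains it at boot.
 */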
static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	int i;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
	for (i = 0; i < dev_cnt; i++) {
		dev_t dev = detected_devices[i];
		rdev = md_import_device(dev, 0, 0);
		if (IS_ERR(rdev))
			continue;
		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		list_add(&rdev->same_set, &pending_raid_disks);
	}
	dev_cnt = 0;
	autorun_devices(part);
}
static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MAJOR_NR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	ITERATE_MDDEV(mddev,tmp) {
		struct gendisk *disk = mddev->gendisk;
		if (!disk)
			continue;
		export_array(mddev);
		del_gendisk(disk);
		put_disk(disk);
		mddev->gendisk = NULL;
		mddev_put(mddev);
	}
}
module_init(md_init)
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
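/*
 * Example usage (paths assume md is built as the md-mod module, as in
 * the stock Makefile): start_ro can be flipped at runtime with
 *
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * or set at boot with md_mod.start_ro=1, so that newly assembled arrays
 * come up read-only until they are first written to or explicitly made
 * read-write.
 */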
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);