2 md.c : Multiple Devices driver for Linux
3 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 completely rewritten, based on the MD driver code from Marc Zyngier
9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13 - kmod support by: Cyrus Durgin
14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17 - lots of fixes and improvements to the RAID1/RAID5 and generic
18 RAID code (such as request based resynchronization):
20 Neil Brown <neilb@cse.unsw.edu.au>.
22 - persistent bitmap code
23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25 This program is free software; you can redistribute it and/or modify
26 it under the terms of the GNU General Public License as published by
27 the Free Software Foundation; either version 2, or (at your option)
30 You should have received a copy of the GNU General Public License
31 (for example /usr/src/linux/COPYING); if not, write to the Free
32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 #include <linux/module.h>
36 #include <linux/kthread.h>
37 #include <linux/linkage.h>
38 #include <linux/raid/md.h>
39 #include <linux/raid/bitmap.h>
40 #include <linux/sysctl.h>
41 #include <linux/buffer_head.h> /* for invalidate_bdev */
42 #include <linux/suspend.h>
43 #include <linux/poll.h>
44 #include <linux/mutex.h>
45 #include <linux/ctype.h>
47 #include <linux/init.h>
49 #include <linux/file.h>
52 #include <linux/kmod.h>
55 #include <asm/unaligned.h>
57 #define MAJOR_NR MD_MAJOR
60 /* 63 partitions with the alternate major number (mdp) */
61 #define MdpMinorShift 6
64 #define dprintk(x...) ((void)(DEBUG && printk(x)))
68 static void autostart_arrays (int part);
71 static LIST_HEAD(pers_list);
72 static DEFINE_SPINLOCK(pers_lock);
74 static void md_print_devices(void);
76 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
79 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
80 * is 1000 KB/sec, so the extra system load does not show up that much.
81 * Increase it if you want to have more _guaranteed_ speed. Note that
82 * the RAID driver will use the maximum available bandwidth if the IO
83 * subsystem is idle. There is also an 'absolute maximum' reconstruction
84 * speed limit - in case reconstruction slows down your system despite
87 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
88 * or /sys/block/mdX/md/sync_speed_{min,max}
91 static int sysctl_speed_limit_min = 1000;
92 static int sysctl_speed_limit_max = 200000;
93 static inline int speed_min(mddev_t *mddev)
95 return mddev->sync_speed_min ?
96 mddev->sync_speed_min : sysctl_speed_limit_min;
99 static inline int speed_max(mddev_t *mddev)
101 return mddev->sync_speed_max ?
102 mddev->sync_speed_max : sysctl_speed_limit_max;
105 static struct ctl_table_header *raid_table_header;
107 static ctl_table raid_table[] = {
109 .ctl_name = DEV_RAID_SPEED_LIMIT_MIN,
110 .procname = "speed_limit_min",
111 .data = &sysctl_speed_limit_min,
112 .maxlen = sizeof(int),
113 .mode = S_IRUGO|S_IWUSR,
114 .proc_handler = &proc_dointvec,
117 .ctl_name = DEV_RAID_SPEED_LIMIT_MAX,
118 .procname = "speed_limit_max",
119 .data = &sysctl_speed_limit_max,
120 .maxlen = sizeof(int),
121 .mode = S_IRUGO|S_IWUSR,
122 .proc_handler = &proc_dointvec,
127 static ctl_table raid_dir_table[] = {
129 .ctl_name = DEV_RAID,
132 .mode = S_IRUGO|S_IXUGO,
138 static ctl_table raid_root_table[] = {
144 .child = raid_dir_table,
149 static struct block_device_operations md_fops;
151 static int start_readonly;
154 * We have a system wide 'event count' that is incremented
155 * on any 'interesting' event, and readers of /proc/mdstat
156 * can use 'poll' or 'select' to find out when the event
160 * start array, stop array, error, add device, remove device,
161 * start build, activate spare
163 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
164 static atomic_t md_event_count;
165 void md_new_event(mddev_t *mddev)
167 atomic_inc(&md_event_count);
168 wake_up(&md_event_waiters);
169 sysfs_notify(&mddev->kobj, NULL, "sync_action");
171 EXPORT_SYMBOL_GPL(md_new_event);
173 /* Alternate version that can be called from interrupts
174 * when calling sysfs_notify isn't needed.
176 static void md_new_event_inintr(mddev_t *mddev)
178 atomic_inc(&md_event_count);
179 wake_up(&md_event_waiters);
183 * Enables to iterate over all existing md arrays
184 * all_mddevs_lock protects this list.
186 static LIST_HEAD(all_mddevs);
187 static DEFINE_SPINLOCK(all_mddevs_lock);
191 * iterates through all used mddevs in the system.
192 * We take care to grab the all_mddevs_lock whenever navigating
193 * the list, and to always hold a refcount when unlocked.
194 * Any code which breaks out of this loop while own
195 * a reference to the current mddev and must mddev_put it.
197 #define ITERATE_MDDEV(mddev,tmp) \
199 for (({ spin_lock(&all_mddevs_lock); \
200 tmp = all_mddevs.next; \
202 ({ if (tmp != &all_mddevs) \
203 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
204 spin_unlock(&all_mddevs_lock); \
205 if (mddev) mddev_put(mddev); \
206 mddev = list_entry(tmp, mddev_t, all_mddevs); \
207 tmp != &all_mddevs;}); \
208 ({ spin_lock(&all_mddevs_lock); \
213 static int md_fail_request (request_queue_t *q, struct bio *bio)
215 bio_io_error(bio, bio->bi_size);
219 static inline mddev_t *mddev_get(mddev_t *mddev)
221 atomic_inc(&mddev->active);
225 static void mddev_put(mddev_t *mddev)
227 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
229 if (!mddev->raid_disks && list_empty(&mddev->disks)) {
230 list_del(&mddev->all_mddevs);
231 spin_unlock(&all_mddevs_lock);
232 blk_cleanup_queue(mddev->queue);
233 kobject_unregister(&mddev->kobj);
235 spin_unlock(&all_mddevs_lock);
238 static mddev_t * mddev_find(dev_t unit)
240 mddev_t *mddev, *new = NULL;
243 spin_lock(&all_mddevs_lock);
244 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
245 if (mddev->unit == unit) {
247 spin_unlock(&all_mddevs_lock);
253 list_add(&new->all_mddevs, &all_mddevs);
254 spin_unlock(&all_mddevs_lock);
257 spin_unlock(&all_mddevs_lock);
259 new = kzalloc(sizeof(*new), GFP_KERNEL);
264 if (MAJOR(unit) == MD_MAJOR)
265 new->md_minor = MINOR(unit);
267 new->md_minor = MINOR(unit) >> MdpMinorShift;
269 mutex_init(&new->reconfig_mutex);
270 INIT_LIST_HEAD(&new->disks);
271 INIT_LIST_HEAD(&new->all_mddevs);
272 init_timer(&new->safemode_timer);
273 atomic_set(&new->active, 1);
274 spin_lock_init(&new->write_lock);
275 init_waitqueue_head(&new->sb_wait);
277 new->queue = blk_alloc_queue(GFP_KERNEL);
282 set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
284 blk_queue_make_request(new->queue, md_fail_request);
289 static inline int mddev_lock(mddev_t * mddev)
291 return mutex_lock_interruptible(&mddev->reconfig_mutex);
294 static inline int mddev_trylock(mddev_t * mddev)
296 return mutex_trylock(&mddev->reconfig_mutex);
299 static inline void mddev_unlock(mddev_t * mddev)
301 mutex_unlock(&mddev->reconfig_mutex);
303 md_wakeup_thread(mddev->thread);
306 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
309 struct list_head *tmp;
311 ITERATE_RDEV(mddev,rdev,tmp) {
312 if (rdev->desc_nr == nr)
318 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
320 struct list_head *tmp;
323 ITERATE_RDEV(mddev,rdev,tmp) {
324 if (rdev->bdev->bd_dev == dev)
330 static struct mdk_personality *find_pers(int level, char *clevel)
332 struct mdk_personality *pers;
333 list_for_each_entry(pers, &pers_list, list) {
334 if (level != LEVEL_NONE && pers->level == level)
336 if (strcmp(pers->name, clevel)==0)
342 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
344 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
345 return MD_NEW_SIZE_BLOCKS(size);
348 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
352 size = rdev->sb_offset;
355 size &= ~((sector_t)chunk_size/1024 - 1);
359 static int alloc_disk_sb(mdk_rdev_t * rdev)
364 rdev->sb_page = alloc_page(GFP_KERNEL);
365 if (!rdev->sb_page) {
366 printk(KERN_ALERT "md: out of memory.\n");
373 static void free_disk_sb(mdk_rdev_t * rdev)
376 put_page(rdev->sb_page);
378 rdev->sb_page = NULL;
385 static int super_written(struct bio *bio, unsigned int bytes_done, int error)
387 mdk_rdev_t *rdev = bio->bi_private;
388 mddev_t *mddev = rdev->mddev;
392 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
393 md_error(mddev, rdev);
395 if (atomic_dec_and_test(&mddev->pending_writes))
396 wake_up(&mddev->sb_wait);
401 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
403 struct bio *bio2 = bio->bi_private;
404 mdk_rdev_t *rdev = bio2->bi_private;
405 mddev_t *mddev = rdev->mddev;
409 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
410 error == -EOPNOTSUPP) {
412 /* barriers don't appear to be supported :-( */
413 set_bit(BarriersNotsupp, &rdev->flags);
414 mddev->barriers_work = 0;
415 spin_lock_irqsave(&mddev->write_lock, flags);
416 bio2->bi_next = mddev->biolist;
417 mddev->biolist = bio2;
418 spin_unlock_irqrestore(&mddev->write_lock, flags);
419 wake_up(&mddev->sb_wait);
424 bio->bi_private = rdev;
425 return super_written(bio, bytes_done, error);
428 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
429 sector_t sector, int size, struct page *page)
431 /* write first size bytes of page to sector of rdev
432 * Increment mddev->pending_writes before returning
433 * and decrement it on completion, waking up sb_wait
434 * if zero is reached.
435 * If an error occurred, call md_error
437 * As we might need to resubmit the request if BIO_RW_BARRIER
438 * causes ENOTSUPP, we allocate a spare bio...
440 struct bio *bio = bio_alloc(GFP_NOIO, 1);
441 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
443 bio->bi_bdev = rdev->bdev;
444 bio->bi_sector = sector;
445 bio_add_page(bio, page, size, 0);
446 bio->bi_private = rdev;
447 bio->bi_end_io = super_written;
450 atomic_inc(&mddev->pending_writes);
451 if (!test_bit(BarriersNotsupp, &rdev->flags)) {
453 rw |= (1<<BIO_RW_BARRIER);
454 rbio = bio_clone(bio, GFP_NOIO);
455 rbio->bi_private = bio;
456 rbio->bi_end_io = super_written_barrier;
457 submit_bio(rw, rbio);
462 void md_super_wait(mddev_t *mddev)
464 /* wait for all superblock writes that were scheduled to complete.
465 * if any had to be retried (due to BARRIER problems), retry them
469 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
470 if (atomic_read(&mddev->pending_writes)==0)
472 while (mddev->biolist) {
474 spin_lock_irq(&mddev->write_lock);
475 bio = mddev->biolist;
476 mddev->biolist = bio->bi_next ;
478 spin_unlock_irq(&mddev->write_lock);
479 submit_bio(bio->bi_rw, bio);
483 finish_wait(&mddev->sb_wait, &wq);
486 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
491 complete((struct completion*)bio->bi_private);
495 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
496 struct page *page, int rw)
498 struct bio *bio = bio_alloc(GFP_NOIO, 1);
499 struct completion event;
502 rw |= (1 << BIO_RW_SYNC);
505 bio->bi_sector = sector;
506 bio_add_page(bio, page, size, 0);
507 init_completion(&event);
508 bio->bi_private = &event;
509 bio->bi_end_io = bi_complete;
511 wait_for_completion(&event);
513 ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
517 EXPORT_SYMBOL_GPL(sync_page_io);
519 static int read_disk_sb(mdk_rdev_t * rdev, int size)
521 char b[BDEVNAME_SIZE];
522 if (!rdev->sb_page) {
530 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
536 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
537 bdevname(rdev->bdev,b));
541 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
543 if ( (sb1->set_uuid0 == sb2->set_uuid0) &&
544 (sb1->set_uuid1 == sb2->set_uuid1) &&
545 (sb1->set_uuid2 == sb2->set_uuid2) &&
546 (sb1->set_uuid3 == sb2->set_uuid3))
554 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
557 mdp_super_t *tmp1, *tmp2;
559 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
560 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
562 if (!tmp1 || !tmp2) {
564 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
572 * nr_disks is not constant
577 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
588 static unsigned int calc_sb_csum(mdp_super_t * sb)
590 unsigned int disk_csum, csum;
592 disk_csum = sb->sb_csum;
594 csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
595 sb->sb_csum = disk_csum;
601 * Handle superblock details.
602 * We want to be able to handle multiple superblock formats
603 * so we have a common interface to them all, and an array of
604 * different handlers.
605 * We rely on user-space to write the initial superblock, and support
606 * reading and updating of superblocks.
607 * Interface methods are:
608 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
609 * loads and validates a superblock on dev.
610 * if refdev != NULL, compare superblocks on both devices
612 * 0 - dev has a superblock that is compatible with refdev
613 * 1 - dev has a superblock that is compatible and newer than refdev
614 * so dev should be used as the refdev in future
615 * -EINVAL superblock incompatible or invalid
616 * -othererror e.g. -EIO
618 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
619 * Verify that dev is acceptable into mddev.
620 * The first time, mddev->raid_disks will be 0, and data from
621 * dev should be merged in. Subsequent calls check that dev
622 * is new enough. Return 0 or -EINVAL
624 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
625 * Update the superblock for rdev with data in mddev
626 * This does not write to disc.
632 struct module *owner;
633 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
634 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
635 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
639 * load_super for 0.90.0
641 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
643 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
649 * Calculate the position of the superblock,
650 * it's at the end of the disk.
652 * It also happens to be a multiple of 4Kb.
654 sb_offset = calc_dev_sboffset(rdev->bdev);
655 rdev->sb_offset = sb_offset;
657 ret = read_disk_sb(rdev, MD_SB_BYTES);
662 bdevname(rdev->bdev, b);
663 sb = (mdp_super_t*)page_address(rdev->sb_page);
665 if (sb->md_magic != MD_SB_MAGIC) {
666 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
671 if (sb->major_version != 0 ||
672 sb->minor_version < 90 ||
673 sb->minor_version > 91) {
674 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
675 sb->major_version, sb->minor_version,
680 if (sb->raid_disks <= 0)
683 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
684 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
689 rdev->preferred_minor = sb->md_minor;
690 rdev->data_offset = 0;
691 rdev->sb_size = MD_SB_BYTES;
693 if (sb->level == LEVEL_MULTIPATH)
696 rdev->desc_nr = sb->this_disk.number;
702 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
703 if (!uuid_equal(refsb, sb)) {
704 printk(KERN_WARNING "md: %s has different UUID to %s\n",
705 b, bdevname(refdev->bdev,b2));
708 if (!sb_equal(refsb, sb)) {
709 printk(KERN_WARNING "md: %s has same UUID"
710 " but different superblock to %s\n",
711 b, bdevname(refdev->bdev, b2));
715 ev2 = md_event(refsb);
721 rdev->size = calc_dev_size(rdev, sb->chunk_size);
723 if (rdev->size < sb->size && sb->level > 1)
724 /* "this cannot possibly happen" ... */
732 * validate_super for 0.90.0
734 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
737 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
738 __u64 ev1 = md_event(sb);
740 rdev->raid_disk = -1;
742 if (mddev->raid_disks == 0) {
743 mddev->major_version = 0;
744 mddev->minor_version = sb->minor_version;
745 mddev->patch_version = sb->patch_version;
746 mddev->persistent = ! sb->not_persistent;
747 mddev->chunk_size = sb->chunk_size;
748 mddev->ctime = sb->ctime;
749 mddev->utime = sb->utime;
750 mddev->level = sb->level;
751 mddev->clevel[0] = 0;
752 mddev->layout = sb->layout;
753 mddev->raid_disks = sb->raid_disks;
754 mddev->size = sb->size;
756 mddev->bitmap_offset = 0;
757 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
759 if (mddev->minor_version >= 91) {
760 mddev->reshape_position = sb->reshape_position;
761 mddev->delta_disks = sb->delta_disks;
762 mddev->new_level = sb->new_level;
763 mddev->new_layout = sb->new_layout;
764 mddev->new_chunk = sb->new_chunk;
766 mddev->reshape_position = MaxSector;
767 mddev->delta_disks = 0;
768 mddev->new_level = mddev->level;
769 mddev->new_layout = mddev->layout;
770 mddev->new_chunk = mddev->chunk_size;
773 if (sb->state & (1<<MD_SB_CLEAN))
774 mddev->recovery_cp = MaxSector;
776 if (sb->events_hi == sb->cp_events_hi &&
777 sb->events_lo == sb->cp_events_lo) {
778 mddev->recovery_cp = sb->recovery_cp;
780 mddev->recovery_cp = 0;
783 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
784 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
785 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
786 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
788 mddev->max_disks = MD_SB_DISKS;
790 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
791 mddev->bitmap_file == NULL) {
792 if (mddev->level != 1 && mddev->level != 4
793 && mddev->level != 5 && mddev->level != 6
794 && mddev->level != 10) {
795 /* FIXME use a better test */
796 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
799 mddev->bitmap_offset = mddev->default_bitmap_offset;
802 } else if (mddev->pers == NULL) {
803 /* Insist on good event counter while assembling */
805 if (ev1 < mddev->events)
807 } else if (mddev->bitmap) {
808 /* if adding to array with a bitmap, then we can accept an
809 * older device ... but not too old.
811 if (ev1 < mddev->bitmap->events_cleared)
814 if (ev1 < mddev->events)
815 /* just a hot-add of a new device, leave raid_disk at -1 */
819 if (mddev->level != LEVEL_MULTIPATH) {
820 desc = sb->disks + rdev->desc_nr;
822 if (desc->state & (1<<MD_DISK_FAULTY))
823 set_bit(Faulty, &rdev->flags);
824 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
825 desc->raid_disk < mddev->raid_disks */) {
826 set_bit(In_sync, &rdev->flags);
827 rdev->raid_disk = desc->raid_disk;
829 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
830 set_bit(WriteMostly, &rdev->flags);
831 } else /* MULTIPATH are always insync */
832 set_bit(In_sync, &rdev->flags);
837 * sync_super for 0.90.0
839 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
842 struct list_head *tmp;
844 int next_spare = mddev->raid_disks;
847 /* make rdev->sb match mddev data..
850 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
851 * 3/ any empty disks < next_spare become removed
853 * disks[0] gets initialised to REMOVED because
854 * we cannot be sure from other fields if it has
855 * been initialised or not.
858 int active=0, working=0,failed=0,spare=0,nr_disks=0;
860 rdev->sb_size = MD_SB_BYTES;
862 sb = (mdp_super_t*)page_address(rdev->sb_page);
864 memset(sb, 0, sizeof(*sb));
866 sb->md_magic = MD_SB_MAGIC;
867 sb->major_version = mddev->major_version;
868 sb->patch_version = mddev->patch_version;
869 sb->gvalid_words = 0; /* ignored */
870 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
871 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
872 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
873 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
875 sb->ctime = mddev->ctime;
876 sb->level = mddev->level;
877 sb->size = mddev->size;
878 sb->raid_disks = mddev->raid_disks;
879 sb->md_minor = mddev->md_minor;
880 sb->not_persistent = !mddev->persistent;
881 sb->utime = mddev->utime;
883 sb->events_hi = (mddev->events>>32);
884 sb->events_lo = (u32)mddev->events;
886 if (mddev->reshape_position == MaxSector)
887 sb->minor_version = 90;
889 sb->minor_version = 91;
890 sb->reshape_position = mddev->reshape_position;
891 sb->new_level = mddev->new_level;
892 sb->delta_disks = mddev->delta_disks;
893 sb->new_layout = mddev->new_layout;
894 sb->new_chunk = mddev->new_chunk;
896 mddev->minor_version = sb->minor_version;
899 sb->recovery_cp = mddev->recovery_cp;
900 sb->cp_events_hi = (mddev->events>>32);
901 sb->cp_events_lo = (u32)mddev->events;
902 if (mddev->recovery_cp == MaxSector)
903 sb->state = (1<< MD_SB_CLEAN);
907 sb->layout = mddev->layout;
908 sb->chunk_size = mddev->chunk_size;
910 if (mddev->bitmap && mddev->bitmap_file == NULL)
911 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
913 sb->disks[0].state = (1<<MD_DISK_REMOVED);
914 ITERATE_RDEV(mddev,rdev2,tmp) {
917 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
918 && !test_bit(Faulty, &rdev2->flags))
919 desc_nr = rdev2->raid_disk;
921 desc_nr = next_spare++;
922 rdev2->desc_nr = desc_nr;
923 d = &sb->disks[rdev2->desc_nr];
925 d->number = rdev2->desc_nr;
926 d->major = MAJOR(rdev2->bdev->bd_dev);
927 d->minor = MINOR(rdev2->bdev->bd_dev);
928 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
929 && !test_bit(Faulty, &rdev2->flags))
930 d->raid_disk = rdev2->raid_disk;
932 d->raid_disk = rdev2->desc_nr; /* compatibility */
933 if (test_bit(Faulty, &rdev2->flags))
934 d->state = (1<<MD_DISK_FAULTY);
935 else if (test_bit(In_sync, &rdev2->flags)) {
936 d->state = (1<<MD_DISK_ACTIVE);
937 d->state |= (1<<MD_DISK_SYNC);
945 if (test_bit(WriteMostly, &rdev2->flags))
946 d->state |= (1<<MD_DISK_WRITEMOSTLY);
948 /* now set the "removed" and "faulty" bits on any missing devices */
949 for (i=0 ; i < mddev->raid_disks ; i++) {
950 mdp_disk_t *d = &sb->disks[i];
951 if (d->state == 0 && d->number == 0) {
954 d->state = (1<<MD_DISK_REMOVED);
955 d->state |= (1<<MD_DISK_FAULTY);
959 sb->nr_disks = nr_disks;
960 sb->active_disks = active;
961 sb->working_disks = working;
962 sb->failed_disks = failed;
963 sb->spare_disks = spare;
965 sb->this_disk = sb->disks[rdev->desc_nr];
966 sb->sb_csum = calc_sb_csum(sb);
970 * version 1 superblock
973 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
975 unsigned int disk_csum, csum;
976 unsigned long long newcsum;
977 int size = 256 + le32_to_cpu(sb->max_dev)*2;
978 unsigned int *isuper = (unsigned int*)sb;
981 disk_csum = sb->sb_csum;
984 for (i=0; size>=4; size -= 4 )
985 newcsum += le32_to_cpu(*isuper++);
988 newcsum += le16_to_cpu(*(unsigned short*) isuper);
990 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
991 sb->sb_csum = disk_csum;
992 return cpu_to_le32(csum);
995 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
997 struct mdp_superblock_1 *sb;
1000 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1004 * Calculate the position of the superblock.
1005 * It is always aligned to a 4K boundary and
1006 * depeding on minor_version, it can be:
1007 * 0: At least 8K, but less than 12K, from end of device
1008 * 1: At start of device
1009 * 2: 4K from start of device.
1011 switch(minor_version) {
1013 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1015 sb_offset &= ~(sector_t)(4*2-1);
1016 /* convert from sectors to K */
1028 rdev->sb_offset = sb_offset;
1030 /* superblock is rarely larger than 1K, but it can be larger,
1031 * and it is safe to read 4k, so we do that
1033 ret = read_disk_sb(rdev, 4096);
1034 if (ret) return ret;
1037 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1039 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1040 sb->major_version != cpu_to_le32(1) ||
1041 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1042 le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1043 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1046 if (calc_sb_1_csum(sb) != sb->sb_csum) {
1047 printk("md: invalid superblock checksum on %s\n",
1048 bdevname(rdev->bdev,b));
1051 if (le64_to_cpu(sb->data_size) < 10) {
1052 printk("md: data_size too small on %s\n",
1053 bdevname(rdev->bdev,b));
1056 rdev->preferred_minor = 0xffff;
1057 rdev->data_offset = le64_to_cpu(sb->data_offset);
1058 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1060 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1061 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1062 if (rdev->sb_size & bmask)
1063 rdev-> sb_size = (rdev->sb_size | bmask)+1;
1065 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1068 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1074 struct mdp_superblock_1 *refsb =
1075 (struct mdp_superblock_1*)page_address(refdev->sb_page);
1077 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1078 sb->level != refsb->level ||
1079 sb->layout != refsb->layout ||
1080 sb->chunksize != refsb->chunksize) {
1081 printk(KERN_WARNING "md: %s has strangely different"
1082 " superblock to %s\n",
1083 bdevname(rdev->bdev,b),
1084 bdevname(refdev->bdev,b2));
1087 ev1 = le64_to_cpu(sb->events);
1088 ev2 = le64_to_cpu(refsb->events);
1096 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1098 rdev->size = rdev->sb_offset;
1099 if (rdev->size < le64_to_cpu(sb->data_size)/2)
1101 rdev->size = le64_to_cpu(sb->data_size)/2;
1102 if (le32_to_cpu(sb->chunksize))
1103 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1105 if (le32_to_cpu(sb->size) > rdev->size*2)
1110 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1112 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1113 __u64 ev1 = le64_to_cpu(sb->events);
1115 rdev->raid_disk = -1;
1117 if (mddev->raid_disks == 0) {
1118 mddev->major_version = 1;
1119 mddev->patch_version = 0;
1120 mddev->persistent = 1;
1121 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1122 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1123 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1124 mddev->level = le32_to_cpu(sb->level);
1125 mddev->clevel[0] = 0;
1126 mddev->layout = le32_to_cpu(sb->layout);
1127 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1128 mddev->size = le64_to_cpu(sb->size)/2;
1129 mddev->events = ev1;
1130 mddev->bitmap_offset = 0;
1131 mddev->default_bitmap_offset = 1024 >> 9;
1133 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1134 memcpy(mddev->uuid, sb->set_uuid, 16);
1136 mddev->max_disks = (4096-256)/2;
1138 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1139 mddev->bitmap_file == NULL ) {
1140 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1141 && mddev->level != 10) {
1142 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1145 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1147 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1148 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1149 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1150 mddev->new_level = le32_to_cpu(sb->new_level);
1151 mddev->new_layout = le32_to_cpu(sb->new_layout);
1152 mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1154 mddev->reshape_position = MaxSector;
1155 mddev->delta_disks = 0;
1156 mddev->new_level = mddev->level;
1157 mddev->new_layout = mddev->layout;
1158 mddev->new_chunk = mddev->chunk_size;
1161 } else if (mddev->pers == NULL) {
1162 /* Insist of good event counter while assembling */
1164 if (ev1 < mddev->events)
1166 } else if (mddev->bitmap) {
1167 /* If adding to array with a bitmap, then we can accept an
1168 * older device, but not too old.
1170 if (ev1 < mddev->bitmap->events_cleared)
1173 if (ev1 < mddev->events)
1174 /* just a hot-add of a new device, leave raid_disk at -1 */
1177 if (mddev->level != LEVEL_MULTIPATH) {
1179 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1181 case 0xffff: /* spare */
1183 case 0xfffe: /* faulty */
1184 set_bit(Faulty, &rdev->flags);
1187 if ((le32_to_cpu(sb->feature_map) &
1188 MD_FEATURE_RECOVERY_OFFSET))
1189 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1191 set_bit(In_sync, &rdev->flags);
1192 rdev->raid_disk = role;
1195 if (sb->devflags & WriteMostly1)
1196 set_bit(WriteMostly, &rdev->flags);
1197 } else /* MULTIPATH are always insync */
1198 set_bit(In_sync, &rdev->flags);
1203 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1205 struct mdp_superblock_1 *sb;
1206 struct list_head *tmp;
1209 /* make rdev->sb match mddev and rdev data. */
1211 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1213 sb->feature_map = 0;
1215 sb->recovery_offset = cpu_to_le64(0);
1216 memset(sb->pad1, 0, sizeof(sb->pad1));
1217 memset(sb->pad2, 0, sizeof(sb->pad2));
1218 memset(sb->pad3, 0, sizeof(sb->pad3));
1220 sb->utime = cpu_to_le64((__u64)mddev->utime);
1221 sb->events = cpu_to_le64(mddev->events);
1223 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1225 sb->resync_offset = cpu_to_le64(0);
1227 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
1229 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1230 sb->size = cpu_to_le64(mddev->size<<1);
1232 if (mddev->bitmap && mddev->bitmap_file == NULL) {
1233 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1234 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1237 if (rdev->raid_disk >= 0 &&
1238 !test_bit(In_sync, &rdev->flags) &&
1239 rdev->recovery_offset > 0) {
1240 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1241 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1244 if (mddev->reshape_position != MaxSector) {
1245 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1246 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1247 sb->new_layout = cpu_to_le32(mddev->new_layout);
1248 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1249 sb->new_level = cpu_to_le32(mddev->new_level);
1250 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1254 ITERATE_RDEV(mddev,rdev2,tmp)
1255 if (rdev2->desc_nr+1 > max_dev)
1256 max_dev = rdev2->desc_nr+1;
1258 sb->max_dev = cpu_to_le32(max_dev);
1259 for (i=0; i<max_dev;i++)
1260 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1262 ITERATE_RDEV(mddev,rdev2,tmp) {
1264 if (test_bit(Faulty, &rdev2->flags))
1265 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1266 else if (test_bit(In_sync, &rdev2->flags))
1267 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1268 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1269 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1271 sb->dev_roles[i] = cpu_to_le16(0xffff);
1274 sb->sb_csum = calc_sb_1_csum(sb);
1278 static struct super_type super_types[] = {
1281 .owner = THIS_MODULE,
1282 .load_super = super_90_load,
1283 .validate_super = super_90_validate,
1284 .sync_super = super_90_sync,
1288 .owner = THIS_MODULE,
1289 .load_super = super_1_load,
1290 .validate_super = super_1_validate,
1291 .sync_super = super_1_sync,
1295 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
1297 struct list_head *tmp;
1300 ITERATE_RDEV(mddev,rdev,tmp)
1301 if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
1307 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1309 struct list_head *tmp;
1312 ITERATE_RDEV(mddev1,rdev,tmp)
1313 if (match_dev_unit(mddev2, rdev))
1319 static LIST_HEAD(pending_raid_disks);
1321 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1323 mdk_rdev_t *same_pdev;
1324 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1332 /* make sure rdev->size exceeds mddev->size */
1333 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1335 /* Cannot change size, so fail */
1338 mddev->size = rdev->size;
1340 same_pdev = match_dev_unit(mddev, rdev);
1343 "%s: WARNING: %s appears to be on the same physical"
1344 " disk as %s. True\n protection against single-disk"
1345 " failure might be compromised.\n",
1346 mdname(mddev), bdevname(rdev->bdev,b),
1347 bdevname(same_pdev->bdev,b2));
1349 /* Verify rdev->desc_nr is unique.
1350 * If it is -1, assign a free number, else
1351 * check number is not in use
1353 if (rdev->desc_nr < 0) {
1355 if (mddev->pers) choice = mddev->raid_disks;
1356 while (find_rdev_nr(mddev, choice))
1358 rdev->desc_nr = choice;
1360 if (find_rdev_nr(mddev, rdev->desc_nr))
1363 bdevname(rdev->bdev,b);
1364 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
1366 while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
1369 list_add(&rdev->same_set, &mddev->disks);
1370 rdev->mddev = mddev;
1371 printk(KERN_INFO "md: bind<%s>\n", b);
1373 rdev->kobj.parent = &mddev->kobj;
1374 kobject_add(&rdev->kobj);
1376 if (rdev->bdev->bd_part)
1377 ko = &rdev->bdev->bd_part->kobj;
1379 ko = &rdev->bdev->bd_disk->kobj;
1380 sysfs_create_link(&rdev->kobj, ko, "block");
1381 bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
1385 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1387 char b[BDEVNAME_SIZE];
1392 bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1393 list_del_init(&rdev->same_set);
1394 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1396 sysfs_remove_link(&rdev->kobj, "block");
1397 kobject_del(&rdev->kobj);
1401 * prevent the device from being mounted, repartitioned or
1402 * otherwise reused by a RAID array (or any other kernel
1403 * subsystem), by bd_claiming the device.
1405 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1408 struct block_device *bdev;
1409 char b[BDEVNAME_SIZE];
1411 bdev = open_partition_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1413 printk(KERN_ERR "md: could not open %s.\n",
1414 __bdevname(dev, b));
1415 return PTR_ERR(bdev);
1417 err = bd_claim(bdev, rdev);
1419 printk(KERN_ERR "md: could not bd_claim %s.\n",
1421 blkdev_put_partition(bdev);
1428 static void unlock_rdev(mdk_rdev_t *rdev)
1430 struct block_device *bdev = rdev->bdev;
1435 blkdev_put_partition(bdev);
1438 void md_autodetect_dev(dev_t dev);
1440 static void export_rdev(mdk_rdev_t * rdev)
1442 char b[BDEVNAME_SIZE];
1443 printk(KERN_INFO "md: export_rdev(%s)\n",
1444 bdevname(rdev->bdev,b));
1448 list_del_init(&rdev->same_set);
1450 md_autodetect_dev(rdev->bdev->bd_dev);
1453 kobject_put(&rdev->kobj);
1456 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1458 unbind_rdev_from_array(rdev);
1462 static void export_array(mddev_t *mddev)
1464 struct list_head *tmp;
1467 ITERATE_RDEV(mddev,rdev,tmp) {
1472 kick_rdev_from_array(rdev);
1474 if (!list_empty(&mddev->disks))
1476 mddev->raid_disks = 0;
1477 mddev->major_version = 0;
1480 static void print_desc(mdp_disk_t *desc)
1482 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1483 desc->major,desc->minor,desc->raid_disk,desc->state);
1486 static void print_sb(mdp_super_t *sb)
1491 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1492 sb->major_version, sb->minor_version, sb->patch_version,
1493 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1495 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1496 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1497 sb->md_minor, sb->layout, sb->chunk_size);
1498 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
1499 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1500 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1501 sb->failed_disks, sb->spare_disks,
1502 sb->sb_csum, (unsigned long)sb->events_lo);
1505 for (i = 0; i < MD_SB_DISKS; i++) {
1508 desc = sb->disks + i;
1509 if (desc->number || desc->major || desc->minor ||
1510 desc->raid_disk || (desc->state && (desc->state != 4))) {
1511 printk(" D %2d: ", i);
1515 printk(KERN_INFO "md: THIS: ");
1516 print_desc(&sb->this_disk);
1520 static void print_rdev(mdk_rdev_t *rdev)
1522 char b[BDEVNAME_SIZE];
1523 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1524 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1525 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1527 if (rdev->sb_loaded) {
1528 printk(KERN_INFO "md: rdev superblock:\n");
1529 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1531 printk(KERN_INFO "md: no rdev superblock!\n");
1534 static void md_print_devices(void)
1536 struct list_head *tmp, *tmp2;
1539 char b[BDEVNAME_SIZE];
1542 printk("md: **********************************\n");
1543 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
1544 printk("md: **********************************\n");
1545 ITERATE_MDDEV(mddev,tmp) {
1548 bitmap_print_sb(mddev->bitmap);
1550 printk("%s: ", mdname(mddev));
1551 ITERATE_RDEV(mddev,rdev,tmp2)
1552 printk("<%s>", bdevname(rdev->bdev,b));
1555 ITERATE_RDEV(mddev,rdev,tmp2)
1558 printk("md: **********************************\n");
1563 static void sync_sbs(mddev_t * mddev, int nospares)
1565 /* Update each superblock (in-memory image), but
1566 * if we are allowed to, skip spares which already
1567 * have the right event counter, or have one earlier
1568 * (which would mean they aren't being marked as dirty
1569 * with the rest of the array)
1572 struct list_head *tmp;
1574 ITERATE_RDEV(mddev,rdev,tmp) {
1575 if (rdev->sb_events == mddev->events ||
1577 rdev->raid_disk < 0 &&
1578 (rdev->sb_events&1)==0 &&
1579 rdev->sb_events+1 == mddev->events)) {
1580 /* Don't update this superblock */
1581 rdev->sb_loaded = 2;
1583 super_types[mddev->major_version].
1584 sync_super(mddev, rdev);
1585 rdev->sb_loaded = 1;
1590 static void md_update_sb(mddev_t * mddev, int force_change)
1593 struct list_head *tmp;
1599 spin_lock_irq(&mddev->write_lock);
1601 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1602 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1604 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1605 /* just a clean<-> dirty transition, possibly leave spares alone,
1606 * though if events isn't the right even/odd, we will have to do
1612 if (mddev->degraded)
1613 /* If the array is degraded, then skipping spares is both
1614 * dangerous and fairly pointless.
1615 * Dangerous because a device that was removed from the array
1616 * might have a event_count that still looks up-to-date,
1617 * so it can be re-added without a resync.
1618 * Pointless because if there are any spares to skip,
1619 * then a recovery will happen and soon that array won't
1620 * be degraded any more and the spare can go back to sleep then.
1624 sync_req = mddev->in_sync;
1625 mddev->utime = get_seconds();
1627 /* If this is just a dirty<->clean transition, and the array is clean
1628 * and 'events' is odd, we can roll back to the previous clean state */
1630 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1631 && (mddev->events & 1))
1634 /* otherwise we have to go forward and ... */
1636 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1637 /* .. if the array isn't clean, insist on an odd 'events' */
1638 if ((mddev->events&1)==0) {
1643 /* otherwise insist on an even 'events' (for clean states) */
1644 if ((mddev->events&1)) {
1651 if (!mddev->events) {
1653 * oops, this 64-bit counter should never wrap.
1654 * Either we are in around ~1 trillion A.C., assuming
1655 * 1 reboot per second, or we have a bug:
1660 sync_sbs(mddev, nospares);
1663 * do not write anything to disk if using
1664 * nonpersistent superblocks
1666 if (!mddev->persistent) {
1667 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1668 spin_unlock_irq(&mddev->write_lock);
1669 wake_up(&mddev->sb_wait);
1672 spin_unlock_irq(&mddev->write_lock);
1675 "md: updating %s RAID superblock on device (in sync %d)\n",
1676 mdname(mddev),mddev->in_sync);
1678 err = bitmap_update_sb(mddev->bitmap);
1679 ITERATE_RDEV(mddev,rdev,tmp) {
1680 char b[BDEVNAME_SIZE];
1681 dprintk(KERN_INFO "md: ");
1682 if (rdev->sb_loaded != 1)
1683 continue; /* no noise on spare devices */
1684 if (test_bit(Faulty, &rdev->flags))
1685 dprintk("(skipping faulty ");
1687 dprintk("%s ", bdevname(rdev->bdev,b));
1688 if (!test_bit(Faulty, &rdev->flags)) {
1689 md_super_write(mddev,rdev,
1690 rdev->sb_offset<<1, rdev->sb_size,
1692 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1693 bdevname(rdev->bdev,b),
1694 (unsigned long long)rdev->sb_offset);
1695 rdev->sb_events = mddev->events;
1699 if (mddev->level == LEVEL_MULTIPATH)
1700 /* only need to write one superblock... */
1703 md_super_wait(mddev);
1704 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1706 spin_lock_irq(&mddev->write_lock);
1707 if (mddev->in_sync != sync_req ||
1708 test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1709 /* have to write it out again */
1710 spin_unlock_irq(&mddev->write_lock);
1713 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1714 spin_unlock_irq(&mddev->write_lock);
1715 wake_up(&mddev->sb_wait);
1719 /* words written to sysfs files may, or my not, be \n terminated.
1720 * We want to accept with case. For this we use cmd_match.
1722 static int cmd_match(const char *cmd, const char *str)
1724 /* See if cmd, written into a sysfs file, matches
1725 * str. They must either be the same, or cmd can
1726 * have a trailing newline
1728 while (*cmd && *str && *cmd == *str) {
1739 struct rdev_sysfs_entry {
1740 struct attribute attr;
1741 ssize_t (*show)(mdk_rdev_t *, char *);
1742 ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1746 state_show(mdk_rdev_t *rdev, char *page)
1751 if (test_bit(Faulty, &rdev->flags)) {
1752 len+= sprintf(page+len, "%sfaulty",sep);
1755 if (test_bit(In_sync, &rdev->flags)) {
1756 len += sprintf(page+len, "%sin_sync",sep);
1759 if (test_bit(WriteMostly, &rdev->flags)) {
1760 len += sprintf(page+len, "%swrite_mostly",sep);
1763 if (!test_bit(Faulty, &rdev->flags) &&
1764 !test_bit(In_sync, &rdev->flags)) {
1765 len += sprintf(page+len, "%sspare", sep);
1768 return len+sprintf(page+len, "\n");
1772 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1775 * faulty - simulates and error
1776 * remove - disconnects the device
1777 * writemostly - sets write_mostly
1778 * -writemostly - clears write_mostly
1781 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1782 md_error(rdev->mddev, rdev);
1784 } else if (cmd_match(buf, "remove")) {
1785 if (rdev->raid_disk >= 0)
1788 mddev_t *mddev = rdev->mddev;
1789 kick_rdev_from_array(rdev);
1790 md_update_sb(mddev, 1);
1791 md_new_event(mddev);
1794 } else if (cmd_match(buf, "writemostly")) {
1795 set_bit(WriteMostly, &rdev->flags);
1797 } else if (cmd_match(buf, "-writemostly")) {
1798 clear_bit(WriteMostly, &rdev->flags);
1801 return err ? err : len;
1803 static struct rdev_sysfs_entry rdev_state =
1804 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
1807 super_show(mdk_rdev_t *rdev, char *page)
1809 if (rdev->sb_loaded && rdev->sb_size) {
1810 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1811 return rdev->sb_size;
1815 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1818 errors_show(mdk_rdev_t *rdev, char *page)
1820 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1824 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1827 unsigned long n = simple_strtoul(buf, &e, 10);
1828 if (*buf && (*e == 0 || *e == '\n')) {
1829 atomic_set(&rdev->corrected_errors, n);
1834 static struct rdev_sysfs_entry rdev_errors =
1835 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1838 slot_show(mdk_rdev_t *rdev, char *page)
1840 if (rdev->raid_disk < 0)
1841 return sprintf(page, "none\n");
1843 return sprintf(page, "%d\n", rdev->raid_disk);
1847 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1850 int slot = simple_strtoul(buf, &e, 10);
1851 if (strncmp(buf, "none", 4)==0)
1853 else if (e==buf || (*e && *e!= '\n'))
1855 if (rdev->mddev->pers)
1856 /* Cannot set slot in active array (yet) */
1858 if (slot >= rdev->mddev->raid_disks)
1860 rdev->raid_disk = slot;
1861 /* assume it is working */
1863 set_bit(In_sync, &rdev->flags);
1868 static struct rdev_sysfs_entry rdev_slot =
1869 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
1872 offset_show(mdk_rdev_t *rdev, char *page)
1874 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1878 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1881 unsigned long long offset = simple_strtoull(buf, &e, 10);
1882 if (e==buf || (*e && *e != '\n'))
1884 if (rdev->mddev->pers)
1886 rdev->data_offset = offset;
1890 static struct rdev_sysfs_entry rdev_offset =
1891 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
1894 rdev_size_show(mdk_rdev_t *rdev, char *page)
1896 return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1900 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1903 unsigned long long size = simple_strtoull(buf, &e, 10);
1904 if (e==buf || (*e && *e != '\n'))
1906 if (rdev->mddev->pers)
1909 if (size < rdev->mddev->size || rdev->mddev->size == 0)
1910 rdev->mddev->size = size;
1914 static struct rdev_sysfs_entry rdev_size =
1915 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
1917 static struct attribute *rdev_default_attrs[] = {
1927 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1929 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1930 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1934 return entry->show(rdev, page);
1938 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1939 const char *page, size_t length)
1941 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1942 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1946 if (!capable(CAP_SYS_ADMIN))
1948 return entry->store(rdev, page, length);
1951 static void rdev_free(struct kobject *ko)
1953 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1956 static struct sysfs_ops rdev_sysfs_ops = {
1957 .show = rdev_attr_show,
1958 .store = rdev_attr_store,
1960 static struct kobj_type rdev_ktype = {
1961 .release = rdev_free,
1962 .sysfs_ops = &rdev_sysfs_ops,
1963 .default_attrs = rdev_default_attrs,
1967 * Import a device. If 'super_format' >= 0, then sanity check the superblock
1969 * mark the device faulty if:
1971 * - the device is nonexistent (zero size)
1972 * - the device has no valid superblock
1974 * a faulty rdev _never_ has rdev->sb set.
1976 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1978 char b[BDEVNAME_SIZE];
1983 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1985 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1986 return ERR_PTR(-ENOMEM);
1989 if ((err = alloc_disk_sb(rdev)))
1992 err = lock_rdev(rdev, newdev);
1996 rdev->kobj.parent = NULL;
1997 rdev->kobj.ktype = &rdev_ktype;
1998 kobject_init(&rdev->kobj);
2002 rdev->data_offset = 0;
2003 rdev->sb_events = 0;
2004 atomic_set(&rdev->nr_pending, 0);
2005 atomic_set(&rdev->read_errors, 0);
2006 atomic_set(&rdev->corrected_errors, 0);
2008 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2011 "md: %s has zero or unknown size, marking faulty!\n",
2012 bdevname(rdev->bdev,b));
2017 if (super_format >= 0) {
2018 err = super_types[super_format].
2019 load_super(rdev, NULL, super_minor);
2020 if (err == -EINVAL) {
2022 "md: %s has invalid sb, not importing!\n",
2023 bdevname(rdev->bdev,b));
2028 "md: could not read %s's sb, not importing!\n",
2029 bdevname(rdev->bdev,b));
2033 INIT_LIST_HEAD(&rdev->same_set);
2038 if (rdev->sb_page) {
2044 return ERR_PTR(err);
2048 * Check a full RAID array for plausibility
2052 static void analyze_sbs(mddev_t * mddev)
2055 struct list_head *tmp;
2056 mdk_rdev_t *rdev, *freshest;
2057 char b[BDEVNAME_SIZE];
2060 ITERATE_RDEV(mddev,rdev,tmp)
2061 switch (super_types[mddev->major_version].
2062 load_super(rdev, freshest, mddev->minor_version)) {
2070 "md: fatal superblock inconsistency in %s"
2071 " -- removing from array\n",
2072 bdevname(rdev->bdev,b));
2073 kick_rdev_from_array(rdev);
2077 super_types[mddev->major_version].
2078 validate_super(mddev, freshest);
2081 ITERATE_RDEV(mddev,rdev,tmp) {
2082 if (rdev != freshest)
2083 if (super_types[mddev->major_version].
2084 validate_super(mddev, rdev)) {
2085 printk(KERN_WARNING "md: kicking non-fresh %s"
2087 bdevname(rdev->bdev,b));
2088 kick_rdev_from_array(rdev);
2091 if (mddev->level == LEVEL_MULTIPATH) {
2092 rdev->desc_nr = i++;
2093 rdev->raid_disk = rdev->desc_nr;
2094 set_bit(In_sync, &rdev->flags);
2100 if (mddev->recovery_cp != MaxSector &&
2102 printk(KERN_ERR "md: %s: raid array is not clean"
2103 " -- starting background reconstruction\n",
2109 safe_delay_show(mddev_t *mddev, char *page)
2111 int msec = (mddev->safemode_delay*1000)/HZ;
2112 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2115 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2123 /* remove a period, and count digits after it */
2124 if (len >= sizeof(buf))
2126 strlcpy(buf, cbuf, len);
2128 for (i=0; i<len; i++) {
2130 if (isdigit(buf[i])) {
2135 } else if (buf[i] == '.') {
2140 msec = simple_strtoul(buf, &e, 10);
2141 if (e == buf || (*e && *e != '\n'))
2143 msec = (msec * 1000) / scale;
2145 mddev->safemode_delay = 0;
2147 mddev->safemode_delay = (msec*HZ)/1000;
2148 if (mddev->safemode_delay == 0)
2149 mddev->safemode_delay = 1;
2153 static struct md_sysfs_entry md_safe_delay =
2154 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
2157 level_show(mddev_t *mddev, char *page)
2159 struct mdk_personality *p = mddev->pers;
2161 return sprintf(page, "%s\n", p->name);
2162 else if (mddev->clevel[0])
2163 return sprintf(page, "%s\n", mddev->clevel);
2164 else if (mddev->level != LEVEL_NONE)
2165 return sprintf(page, "%d\n", mddev->level);
2171 level_store(mddev_t *mddev, const char *buf, size_t len)
2178 if (len >= sizeof(mddev->clevel))
2180 strncpy(mddev->clevel, buf, len);
2181 if (mddev->clevel[len-1] == '\n')
2183 mddev->clevel[len] = 0;
2184 mddev->level = LEVEL_NONE;
2188 static struct md_sysfs_entry md_level =
2189 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2193 layout_show(mddev_t *mddev, char *page)
2195 /* just a number, not meaningful for all levels */
2196 return sprintf(page, "%d\n", mddev->layout);
2200 layout_store(mddev_t *mddev, const char *buf, size_t len)
2203 unsigned long n = simple_strtoul(buf, &e, 10);
2207 if (!*buf || (*e && *e != '\n'))
2213 static struct md_sysfs_entry md_layout =
2214 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2218 raid_disks_show(mddev_t *mddev, char *page)
2220 if (mddev->raid_disks == 0)
2222 return sprintf(page, "%d\n", mddev->raid_disks);
2225 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2228 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2230 /* can only set raid_disks if array is not yet active */
2233 unsigned long n = simple_strtoul(buf, &e, 10);
2235 if (!*buf || (*e && *e != '\n'))
2239 rv = update_raid_disks(mddev, n);
2241 mddev->raid_disks = n;
2242 return rv ? rv : len;
2244 static struct md_sysfs_entry md_raid_disks =
2245 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2248 chunk_size_show(mddev_t *mddev, char *page)
2250 return sprintf(page, "%d\n", mddev->chunk_size);
2254 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2256 /* can only set chunk_size if array is not yet active */
2258 unsigned long n = simple_strtoul(buf, &e, 10);
2262 if (!*buf || (*e && *e != '\n'))
2265 mddev->chunk_size = n;
2268 static struct md_sysfs_entry md_chunk_size =
2269 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2272 resync_start_show(mddev_t *mddev, char *page)
2274 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2278 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2280 /* can only set chunk_size if array is not yet active */
2282 unsigned long long n = simple_strtoull(buf, &e, 10);
2286 if (!*buf || (*e && *e != '\n'))
2289 mddev->recovery_cp = n;
2292 static struct md_sysfs_entry md_resync_start =
2293 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2296 * The array state can be:
2299 * No devices, no size, no level
2300 * Equivalent to STOP_ARRAY ioctl
2302 * May have some settings, but array is not active
2303 * all IO results in error
2304 * When written, doesn't tear down array, but just stops it
2305 * suspended (not supported yet)
2306 * All IO requests will block. The array can be reconfigured.
2307 * Writing this, if accepted, will block until array is quiessent
2309 * no resync can happen. no superblocks get written.
2310 * write requests fail
2312 * like readonly, but behaves like 'clean' on a write request.
2314 * clean - no pending writes, but otherwise active.
2315 * When written to inactive array, starts without resync
2316 * If a write request arrives then
2317 * if metadata is known, mark 'dirty' and switch to 'active'.
2318 * if not known, block and switch to write-pending
2319 * If written to an active array that has pending writes, then fails.
2321 * fully active: IO and resync can be happening.
2322 * When written to inactive array, starts with resync
2325 * clean, but writes are blocked waiting for 'active' to be written.
2328 * like active, but no writes have been seen for a while (100msec).
2331 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2332 write_pending, active_idle, bad_word};
2333 static char *array_states[] = {
2334 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2335 "write-pending", "active-idle", NULL };
2337 static int match_word(const char *word, char **list)
2340 for (n=0; list[n]; n++)
2341 if (cmd_match(word, list[n]))
2347 array_state_show(mddev_t *mddev, char *page)
2349 enum array_state st = inactive;
2362 else if (mddev->safemode)
2368 if (list_empty(&mddev->disks) &&
2369 mddev->raid_disks == 0 &&
2375 return sprintf(page, "%s\n", array_states[st]);
2378 static int do_md_stop(mddev_t * mddev, int ro);
2379 static int do_md_run(mddev_t * mddev);
2380 static int restart_array(mddev_t *mddev);
2383 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2386 enum array_state st = match_word(buf, array_states);
2391 /* stopping an active array */
2393 if (atomic_read(&mddev->active) > 1)
2395 err = do_md_stop(mddev, 0);
2399 /* stopping an active array */
2401 if (atomic_read(&mddev->active) > 1)
2403 err = do_md_stop(mddev, 2);
2407 break; /* not supported yet */
2410 err = do_md_stop(mddev, 1);
2413 err = do_md_run(mddev);
2417 /* stopping an active array */
2419 err = do_md_stop(mddev, 1);
2421 mddev->ro = 2; /* FIXME mark devices writable */
2424 err = do_md_run(mddev);
2429 restart_array(mddev);
2430 spin_lock_irq(&mddev->write_lock);
2431 if (atomic_read(&mddev->writes_pending) == 0) {
2433 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
2435 spin_unlock_irq(&mddev->write_lock);
2438 mddev->recovery_cp = MaxSector;
2439 err = do_md_run(mddev);
2444 restart_array(mddev);
2445 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2446 wake_up(&mddev->sb_wait);
2450 err = do_md_run(mddev);
2455 /* these cannot be set */
2463 static struct md_sysfs_entry md_array_state =
2464 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
2467 null_show(mddev_t *mddev, char *page)
2473 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2475 /* buf must be %d:%d\n? giving major and minor numbers */
2476 /* The new device is added to the array.
2477 * If the array has a persistent superblock, we read the
2478 * superblock to initialise info and check validity.
2479 * Otherwise, only checking done is that in bind_rdev_to_array,
2480 * which mainly checks size.
2483 int major = simple_strtoul(buf, &e, 10);
2489 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2491 minor = simple_strtoul(e+1, &e, 10);
2492 if (*e && *e != '\n')
2494 dev = MKDEV(major, minor);
2495 if (major != MAJOR(dev) ||
2496 minor != MINOR(dev))
2500 if (mddev->persistent) {
2501 rdev = md_import_device(dev, mddev->major_version,
2502 mddev->minor_version);
2503 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2504 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2505 mdk_rdev_t, same_set);
2506 err = super_types[mddev->major_version]
2507 .load_super(rdev, rdev0, mddev->minor_version);
2512 rdev = md_import_device(dev, -1, -1);
2515 return PTR_ERR(rdev);
2516 err = bind_rdev_to_array(rdev, mddev);
2520 return err ? err : len;
2523 static struct md_sysfs_entry md_new_device =
2524 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
2527 size_show(mddev_t *mddev, char *page)
2529 return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2532 static int update_size(mddev_t *mddev, unsigned long size);
2535 size_store(mddev_t *mddev, const char *buf, size_t len)
2537 /* If array is inactive, we can reduce the component size, but
2538 * not increase it (except from 0).
2539 * If array is active, we can try an on-line resize
2543 unsigned long long size = simple_strtoull(buf, &e, 10);
2544 if (!*buf || *buf == '\n' ||
2549 err = update_size(mddev, size);
2550 md_update_sb(mddev, 1);
2552 if (mddev->size == 0 ||
2558 return err ? err : len;
2561 static struct md_sysfs_entry md_size =
2562 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
2566 * This is either 'none' for arrays with externally managed metadata,
2567 * or N.M for internally known formats
2570 metadata_show(mddev_t *mddev, char *page)
2572 if (mddev->persistent)
2573 return sprintf(page, "%d.%d\n",
2574 mddev->major_version, mddev->minor_version);
2576 return sprintf(page, "none\n");
2580 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2584 if (!list_empty(&mddev->disks))
2587 if (cmd_match(buf, "none")) {
2588 mddev->persistent = 0;
2589 mddev->major_version = 0;
2590 mddev->minor_version = 90;
2593 major = simple_strtoul(buf, &e, 10);
2594 if (e==buf || *e != '.')
2597 minor = simple_strtoul(buf, &e, 10);
2598 if (e==buf || *e != '\n')
2600 if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2601 super_types[major].name == NULL)
2603 mddev->major_version = major;
2604 mddev->minor_version = minor;
2605 mddev->persistent = 1;
2609 static struct md_sysfs_entry md_metadata =
2610 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2613 action_show(mddev_t *mddev, char *page)
2615 char *type = "idle";
2616 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2617 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2618 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2620 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2621 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2623 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2630 return sprintf(page, "%s\n", type);
2634 action_store(mddev_t *mddev, const char *page, size_t len)
2636 if (!mddev->pers || !mddev->pers->sync_request)
2639 if (cmd_match(page, "idle")) {
2640 if (mddev->sync_thread) {
2641 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2642 md_unregister_thread(mddev->sync_thread);
2643 mddev->sync_thread = NULL;
2644 mddev->recovery = 0;
2646 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2647 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2649 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2650 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2651 else if (cmd_match(page, "reshape")) {
2653 if (mddev->pers->start_reshape == NULL)
2655 err = mddev->pers->start_reshape(mddev);
2659 if (cmd_match(page, "check"))
2660 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2661 else if (!cmd_match(page, "repair"))
2663 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2664 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2666 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2667 md_wakeup_thread(mddev->thread);
2672 mismatch_cnt_show(mddev_t *mddev, char *page)
2674 return sprintf(page, "%llu\n",
2675 (unsigned long long) mddev->resync_mismatches);
2678 static struct md_sysfs_entry md_scan_mode =
2679 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
2682 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
2685 sync_min_show(mddev_t *mddev, char *page)
2687 return sprintf(page, "%d (%s)\n", speed_min(mddev),
2688 mddev->sync_speed_min ? "local": "system");
2692 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2696 if (strncmp(buf, "system", 6)==0) {
2697 mddev->sync_speed_min = 0;
2700 min = simple_strtoul(buf, &e, 10);
2701 if (buf == e || (*e && *e != '\n') || min <= 0)
2703 mddev->sync_speed_min = min;
2707 static struct md_sysfs_entry md_sync_min =
2708 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2711 sync_max_show(mddev_t *mddev, char *page)
2713 return sprintf(page, "%d (%s)\n", speed_max(mddev),
2714 mddev->sync_speed_max ? "local": "system");
2718 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2722 if (strncmp(buf, "system", 6)==0) {
2723 mddev->sync_speed_max = 0;
2726 max = simple_strtoul(buf, &e, 10);
2727 if (buf == e || (*e && *e != '\n') || max <= 0)
2729 mddev->sync_speed_max = max;
2733 static struct md_sysfs_entry md_sync_max =
2734 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
2738 sync_speed_show(mddev_t *mddev, char *page)
2740 unsigned long resync, dt, db;
2741 resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
2742 dt = ((jiffies - mddev->resync_mark) / HZ);
2744 db = resync - (mddev->resync_mark_cnt);
2745 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2748 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
2751 sync_completed_show(mddev_t *mddev, char *page)
2753 unsigned long max_blocks, resync;
2755 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2756 max_blocks = mddev->resync_max_sectors;
2758 max_blocks = mddev->size << 1;
2760 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2761 return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2764 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
2767 suspend_lo_show(mddev_t *mddev, char *page)
2769 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2773 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2776 unsigned long long new = simple_strtoull(buf, &e, 10);
2778 if (mddev->pers->quiesce == NULL)
2780 if (buf == e || (*e && *e != '\n'))
2782 if (new >= mddev->suspend_hi ||
2783 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2784 mddev->suspend_lo = new;
2785 mddev->pers->quiesce(mddev, 2);
2790 static struct md_sysfs_entry md_suspend_lo =
2791 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2795 suspend_hi_show(mddev_t *mddev, char *page)
2797 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2801 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2804 unsigned long long new = simple_strtoull(buf, &e, 10);
2806 if (mddev->pers->quiesce == NULL)
2808 if (buf == e || (*e && *e != '\n'))
2810 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2811 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2812 mddev->suspend_hi = new;
2813 mddev->pers->quiesce(mddev, 1);
2814 mddev->pers->quiesce(mddev, 0);
2819 static struct md_sysfs_entry md_suspend_hi =
2820 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2823 static struct attribute *md_default_attrs[] = {
2826 &md_raid_disks.attr,
2827 &md_chunk_size.attr,
2829 &md_resync_start.attr,
2831 &md_new_device.attr,
2832 &md_safe_delay.attr,
2833 &md_array_state.attr,
2837 static struct attribute *md_redundancy_attrs[] = {
2839 &md_mismatches.attr,
2842 &md_sync_speed.attr,
2843 &md_sync_completed.attr,
2844 &md_suspend_lo.attr,
2845 &md_suspend_hi.attr,
2848 static struct attribute_group md_redundancy_group = {
2850 .attrs = md_redundancy_attrs,
2855 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2857 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2858 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2863 rv = mddev_lock(mddev);
2865 rv = entry->show(mddev, page);
2866 mddev_unlock(mddev);
2872 md_attr_store(struct kobject *kobj, struct attribute *attr,
2873 const char *page, size_t length)
2875 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2876 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2881 if (!capable(CAP_SYS_ADMIN))
2883 rv = mddev_lock(mddev);
2885 rv = entry->store(mddev, page, length);
2886 mddev_unlock(mddev);
2891 static void md_free(struct kobject *ko)
2893 mddev_t *mddev = container_of(ko, mddev_t, kobj);
2897 static struct sysfs_ops md_sysfs_ops = {
2898 .show = md_attr_show,
2899 .store = md_attr_store,
2901 static struct kobj_type md_ktype = {
2903 .sysfs_ops = &md_sysfs_ops,
2904 .default_attrs = md_default_attrs,
2909 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2911 static DEFINE_MUTEX(disks_mutex);
2912 mddev_t *mddev = mddev_find(dev);
2913 struct gendisk *disk;
2914 int partitioned = (MAJOR(dev) != MD_MAJOR);
2915 int shift = partitioned ? MdpMinorShift : 0;
2916 int unit = MINOR(dev) >> shift;
2921 mutex_lock(&disks_mutex);
2922 if (mddev->gendisk) {
2923 mutex_unlock(&disks_mutex);
2927 disk = alloc_disk(1 << shift);
2929 mutex_unlock(&disks_mutex);
2933 disk->major = MAJOR(dev);
2934 disk->first_minor = unit << shift;
2936 sprintf(disk->disk_name, "md_d%d", unit);
2938 sprintf(disk->disk_name, "md%d", unit);
2939 disk->fops = &md_fops;
2940 disk->private_data = mddev;
2941 disk->queue = mddev->queue;
2943 mddev->gendisk = disk;
2944 mutex_unlock(&disks_mutex);
2945 mddev->kobj.parent = &disk->kobj;
2946 mddev->kobj.k_name = NULL;
2947 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2948 mddev->kobj.ktype = &md_ktype;
2949 kobject_register(&mddev->kobj);
2953 static void md_safemode_timeout(unsigned long data)
2955 mddev_t *mddev = (mddev_t *) data;
2957 mddev->safemode = 1;
2958 md_wakeup_thread(mddev->thread);
2961 static int start_dirty_degraded;
2963 static int do_md_run(mddev_t * mddev)
2967 struct list_head *tmp;
2969 struct gendisk *disk;
2970 struct mdk_personality *pers;
2971 char b[BDEVNAME_SIZE];
2973 if (list_empty(&mddev->disks))
2974 /* cannot run an array with no devices.. */
2981 * Analyze all RAID superblock(s)
2983 if (!mddev->raid_disks)
2986 chunk_size = mddev->chunk_size;
2989 if (chunk_size > MAX_CHUNK_SIZE) {
2990 printk(KERN_ERR "too big chunk_size: %d > %d\n",
2991 chunk_size, MAX_CHUNK_SIZE);
2995 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
2997 if ( (1 << ffz(~chunk_size)) != chunk_size) {
2998 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3001 if (chunk_size < PAGE_SIZE) {
3002 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3003 chunk_size, PAGE_SIZE);
3007 /* devices must have minimum size of one chunk */
3008 ITERATE_RDEV(mddev,rdev,tmp) {
3009 if (test_bit(Faulty, &rdev->flags))
3011 if (rdev->size < chunk_size / 1024) {
3013 "md: Dev %s smaller than chunk_size:"
3015 bdevname(rdev->bdev,b),
3016 (unsigned long long)rdev->size,
3024 if (mddev->level != LEVEL_NONE)
3025 request_module("md-level-%d", mddev->level);
3026 else if (mddev->clevel[0])
3027 request_module("md-%s", mddev->clevel);
3031 * Drop all container device buffers, from now on
3032 * the only valid external interface is through the md
3034 * Also find largest hardsector size
3036 ITERATE_RDEV(mddev,rdev,tmp) {
3037 if (test_bit(Faulty, &rdev->flags))
3039 sync_blockdev(rdev->bdev);
3040 invalidate_bdev(rdev->bdev, 0);
3043 md_probe(mddev->unit, NULL, NULL);
3044 disk = mddev->gendisk;
3048 spin_lock(&pers_lock);
3049 pers = find_pers(mddev->level, mddev->clevel);
3050 if (!pers || !try_module_get(pers->owner)) {
3051 spin_unlock(&pers_lock);
3052 if (mddev->level != LEVEL_NONE)
3053 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3056 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3061 spin_unlock(&pers_lock);
3062 mddev->level = pers->level;
3063 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3065 if (mddev->reshape_position != MaxSector &&
3066 pers->start_reshape == NULL) {
3067 /* This personality cannot handle reshaping... */
3069 module_put(pers->owner);
3073 mddev->recovery = 0;
3074 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3075 mddev->barriers_work = 1;
3076 mddev->ok_start_degraded = start_dirty_degraded;
3079 mddev->ro = 2; /* read-only, but switch on first write */
3081 err = mddev->pers->run(mddev);
3082 if (!err && mddev->pers->sync_request) {
3083 err = bitmap_create(mddev);
3085 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3086 mdname(mddev), err);
3087 mddev->pers->stop(mddev);
3091 printk(KERN_ERR "md: pers->run() failed ...\n");
3092 module_put(mddev->pers->owner);
3094 bitmap_destroy(mddev);
3097 if (mddev->pers->sync_request)
3098 sysfs_create_group(&mddev->kobj, &md_redundancy_group);
3099 else if (mddev->ro == 2) /* auto-readonly not meaningful */
3102 atomic_set(&mddev->writes_pending,0);
3103 mddev->safemode = 0;
3104 mddev->safemode_timer.function = md_safemode_timeout;
3105 mddev->safemode_timer.data = (unsigned long) mddev;
3106 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3109 ITERATE_RDEV(mddev,rdev,tmp)
3110 if (rdev->raid_disk >= 0) {
3112 sprintf(nm, "rd%d", rdev->raid_disk);
3113 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
3116 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3119 md_update_sb(mddev, 0);
3121 set_capacity(disk, mddev->array_size<<1);
3123 /* If we call blk_queue_make_request here, it will
3124 * re-initialise max_sectors etc which may have been
3125 * refined inside -> run. So just set the bits we need to set.
3126 * Most initialisation happended when we called
3127 * blk_queue_make_request(..., md_fail_request)
3130 mddev->queue->queuedata = mddev;
3131 mddev->queue->make_request_fn = mddev->pers->make_request;
3133 /* If there is a partially-recovered drive we need to
3134 * start recovery here. If we leave it to md_check_recovery,
3135 * it will remove the drives and not do the right thing
3137 if (mddev->degraded && !mddev->sync_thread) {
3138 struct list_head *rtmp;
3140 ITERATE_RDEV(mddev,rdev,rtmp)
3141 if (rdev->raid_disk >= 0 &&
3142 !test_bit(In_sync, &rdev->flags) &&
3143 !test_bit(Faulty, &rdev->flags))
3144 /* complete an interrupted recovery */
3146 if (spares && mddev->pers->sync_request) {
3147 mddev->recovery = 0;
3148 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3149 mddev->sync_thread = md_register_thread(md_do_sync,
3152 if (!mddev->sync_thread) {
3153 printk(KERN_ERR "%s: could not start resync"
3156 /* leave the spares where they are, it shouldn't hurt */
3157 mddev->recovery = 0;
3161 md_wakeup_thread(mddev->thread);
3162 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3165 md_new_event(mddev);
3169 static int restart_array(mddev_t *mddev)
3171 struct gendisk *disk = mddev->gendisk;
3175 * Complain if it has no devices
3178 if (list_empty(&mddev->disks))
3186 mddev->safemode = 0;
3188 set_disk_ro(disk, 0);
3190 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3193 * Kick recovery or resync if necessary
3195 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3196 md_wakeup_thread(mddev->thread);
3197 md_wakeup_thread(mddev->sync_thread);
3206 /* similar to deny_write_access, but accounts for our holding a reference
3207 * to the file ourselves */
3208 static int deny_bitmap_write_access(struct file * file)
3210 struct inode *inode = file->f_mapping->host;
3212 spin_lock(&inode->i_lock);
3213 if (atomic_read(&inode->i_writecount) > 1) {
3214 spin_unlock(&inode->i_lock);
3217 atomic_set(&inode->i_writecount, -1);
3218 spin_unlock(&inode->i_lock);
3223 static void restore_bitmap_write_access(struct file *file)
3225 struct inode *inode = file->f_mapping->host;
3227 spin_lock(&inode->i_lock);
3228 atomic_set(&inode->i_writecount, 1);
3229 spin_unlock(&inode->i_lock);
3233 * 0 - completely stop and dis-assemble array
3234 * 1 - switch to readonly
3235 * 2 - stop but do not disassemble array
3237 static int do_md_stop(mddev_t * mddev, int mode)
3240 struct gendisk *disk = mddev->gendisk;
3243 if (atomic_read(&mddev->active)>2) {
3244 printk("md: %s still in use.\n",mdname(mddev));
3248 if (mddev->sync_thread) {
3249 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3250 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3251 md_unregister_thread(mddev->sync_thread);
3252 mddev->sync_thread = NULL;
3255 del_timer_sync(&mddev->safemode_timer);
3257 invalidate_partition(disk, 0);
3260 case 1: /* readonly */
3266 case 0: /* disassemble */
3268 bitmap_flush(mddev);
3269 md_super_wait(mddev);
3271 set_disk_ro(disk, 0);
3272 blk_queue_make_request(mddev->queue, md_fail_request);
3273 mddev->pers->stop(mddev);
3274 if (mddev->pers->sync_request)
3275 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3277 module_put(mddev->pers->owner);
3282 if (!mddev->in_sync || mddev->flags) {
3283 /* mark array as shutdown cleanly */
3285 md_update_sb(mddev, 1);
3288 set_disk_ro(disk, 1);
3289 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3293 * Free resources if final stop
3297 struct list_head *tmp;
3298 struct gendisk *disk;
3299 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3301 bitmap_destroy(mddev);
3302 if (mddev->bitmap_file) {
3303 restore_bitmap_write_access(mddev->bitmap_file);
3304 fput(mddev->bitmap_file);
3305 mddev->bitmap_file = NULL;
3307 mddev->bitmap_offset = 0;
3309 ITERATE_RDEV(mddev,rdev,tmp)
3310 if (rdev->raid_disk >= 0) {
3312 sprintf(nm, "rd%d", rdev->raid_disk);
3313 sysfs_remove_link(&mddev->kobj, nm);
3316 export_array(mddev);
3318 mddev->array_size = 0;
3320 mddev->raid_disks = 0;
3321 mddev->recovery_cp = 0;
3323 disk = mddev->gendisk;
3325 set_capacity(disk, 0);
3327 } else if (mddev->pers)
3328 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3331 md_new_event(mddev);
3336 static void autorun_array(mddev_t *mddev)
3339 struct list_head *tmp;
3342 if (list_empty(&mddev->disks))
3345 printk(KERN_INFO "md: running: ");
3347 ITERATE_RDEV(mddev,rdev,tmp) {
3348 char b[BDEVNAME_SIZE];
3349 printk("<%s>", bdevname(rdev->bdev,b));
3353 err = do_md_run (mddev);
3355 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3356 do_md_stop (mddev, 0);
3361 * lets try to run arrays based on all disks that have arrived
3362 * until now. (those are in pending_raid_disks)
3364 * the method: pick the first pending disk, collect all disks with
3365 * the same UUID, remove all from the pending list and put them into
3366 * the 'same_array' list. Then order this list based on superblock
3367 * update time (freshest comes first), kick out 'old' disks and
3368 * compare superblocks. If everything's fine then run it.
3370 * If "unit" is allocated, then bump its reference count
3372 static void autorun_devices(int part)
3374 struct list_head *tmp;
3375 mdk_rdev_t *rdev0, *rdev;
3377 char b[BDEVNAME_SIZE];
3379 printk(KERN_INFO "md: autorun ...\n");
3380 while (!list_empty(&pending_raid_disks)) {
3382 LIST_HEAD(candidates);
3383 rdev0 = list_entry(pending_raid_disks.next,
3384 mdk_rdev_t, same_set);
3386 printk(KERN_INFO "md: considering %s ...\n",
3387 bdevname(rdev0->bdev,b));
3388 INIT_LIST_HEAD(&candidates);
3389 ITERATE_RDEV_PENDING(rdev,tmp)
3390 if (super_90_load(rdev, rdev0, 0) >= 0) {
3391 printk(KERN_INFO "md: adding %s ...\n",
3392 bdevname(rdev->bdev,b));
3393 list_move(&rdev->same_set, &candidates);
3396 * now we have a set of devices, with all of them having
3397 * mostly sane superblocks. It's time to allocate the
3400 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
3401 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3402 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3406 dev = MKDEV(mdp_major,
3407 rdev0->preferred_minor << MdpMinorShift);
3409 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3411 md_probe(dev, NULL, NULL);
3412 mddev = mddev_find(dev);
3415 "md: cannot allocate memory for md drive.\n");
3418 if (mddev_lock(mddev))
3419 printk(KERN_WARNING "md: %s locked, cannot run\n",
3421 else if (mddev->raid_disks || mddev->major_version
3422 || !list_empty(&mddev->disks)) {
3424 "md: %s already running, cannot run %s\n",
3425 mdname(mddev), bdevname(rdev0->bdev,b));
3426 mddev_unlock(mddev);
3428 printk(KERN_INFO "md: created %s\n", mdname(mddev));
3429 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3430 list_del_init(&rdev->same_set);
3431 if (bind_rdev_to_array(rdev, mddev))
3434 autorun_array(mddev);
3435 mddev_unlock(mddev);
3437 /* on success, candidates will be empty, on error
3440 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3444 printk(KERN_INFO "md: ... autorun DONE.\n");
3447 static int get_version(void __user * arg)
3451 ver.major = MD_MAJOR_VERSION;
3452 ver.minor = MD_MINOR_VERSION;
3453 ver.patchlevel = MD_PATCHLEVEL_VERSION;
3455 if (copy_to_user(arg, &ver, sizeof(ver)))
3461 static int get_array_info(mddev_t * mddev, void __user * arg)
3463 mdu_array_info_t info;
3464 int nr,working,active,failed,spare;
3466 struct list_head *tmp;
3468 nr=working=active=failed=spare=0;
3469 ITERATE_RDEV(mddev,rdev,tmp) {
3471 if (test_bit(Faulty, &rdev->flags))
3475 if (test_bit(In_sync, &rdev->flags))
3482 info.major_version = mddev->major_version;
3483 info.minor_version = mddev->minor_version;
3484 info.patch_version = MD_PATCHLEVEL_VERSION;
3485 info.ctime = mddev->ctime;
3486 info.level = mddev->level;
3487 info.size = mddev->size;
3488 if (info.size != mddev->size) /* overflow */
3491 info.raid_disks = mddev->raid_disks;
3492 info.md_minor = mddev->md_minor;
3493 info.not_persistent= !mddev->persistent;
3495 info.utime = mddev->utime;
3498 info.state = (1<<MD_SB_CLEAN);
3499 if (mddev->bitmap && mddev->bitmap_offset)
3500 info.state = (1<<MD_SB_BITMAP_PRESENT);
3501 info.active_disks = active;
3502 info.working_disks = working;
3503 info.failed_disks = failed;
3504 info.spare_disks = spare;
3506 info.layout = mddev->layout;
3507 info.chunk_size = mddev->chunk_size;
3509 if (copy_to_user(arg, &info, sizeof(info)))
3515 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3517 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3518 char *ptr, *buf = NULL;
3521 file = kmalloc(sizeof(*file), GFP_KERNEL);
3525 /* bitmap disabled, zero the first byte and copy out */
3526 if (!mddev->bitmap || !mddev->bitmap->file) {
3527 file->pathname[0] = '\0';
3531 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3535 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3539 strcpy(file->pathname, ptr);
3543 if (copy_to_user(arg, file, sizeof(*file)))
3551 static int get_disk_info(mddev_t * mddev, void __user * arg)
3553 mdu_disk_info_t info;
3557 if (copy_from_user(&info, arg, sizeof(info)))
3562 rdev = find_rdev_nr(mddev, nr);
3564 info.major = MAJOR(rdev->bdev->bd_dev);
3565 info.minor = MINOR(rdev->bdev->bd_dev);
3566 info.raid_disk = rdev->raid_disk;
3568 if (test_bit(Faulty, &rdev->flags))
3569 info.state |= (1<<MD_DISK_FAULTY);
3570 else if (test_bit(In_sync, &rdev->flags)) {
3571 info.state |= (1<<MD_DISK_ACTIVE);
3572 info.state |= (1<<MD_DISK_SYNC);
3574 if (test_bit(WriteMostly, &rdev->flags))
3575 info.state |= (1<<MD_DISK_WRITEMOSTLY);
3577 info.major = info.minor = 0;
3578 info.raid_disk = -1;
3579 info.state = (1<<MD_DISK_REMOVED);
3582 if (copy_to_user(arg, &info, sizeof(info)))
3588 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3590 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3592 dev_t dev = MKDEV(info->major,info->minor);
3594 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3597 if (!mddev->raid_disks) {
3599 /* expecting a device which has a superblock */
3600 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3603 "md: md_import_device returned %ld\n",
3605 return PTR_ERR(rdev);
3607 if (!list_empty(&mddev->disks)) {
3608 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3609 mdk_rdev_t, same_set);
3610 int err = super_types[mddev->major_version]
3611 .load_super(rdev, rdev0, mddev->minor_version);
3614 "md: %s has different UUID to %s\n",
3615 bdevname(rdev->bdev,b),
3616 bdevname(rdev0->bdev,b2));
3621 err = bind_rdev_to_array(rdev, mddev);
3628 * add_new_disk can be used once the array is assembled
3629 * to add "hot spares". They must already have a superblock
3634 if (!mddev->pers->hot_add_disk) {
3636 "%s: personality does not support diskops!\n",
3640 if (mddev->persistent)
3641 rdev = md_import_device(dev, mddev->major_version,
3642 mddev->minor_version);
3644 rdev = md_import_device(dev, -1, -1);
3647 "md: md_import_device returned %ld\n",
3649 return PTR_ERR(rdev);
3651 /* set save_raid_disk if appropriate */
3652 if (!mddev->persistent) {
3653 if (info->state & (1<<MD_DISK_SYNC) &&
3654 info->raid_disk < mddev->raid_disks)
3655 rdev->raid_disk = info->raid_disk;
3657 rdev->raid_disk = -1;
3659 super_types[mddev->major_version].
3660 validate_super(mddev, rdev);
3661 rdev->saved_raid_disk = rdev->raid_disk;
3663 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3664 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3665 set_bit(WriteMostly, &rdev->flags);
3667 rdev->raid_disk = -1;
3668 err = bind_rdev_to_array(rdev, mddev);
3669 if (!err && !mddev->pers->hot_remove_disk) {
3670 /* If there is hot_add_disk but no hot_remove_disk
3671 * then added disks for geometry changes,
3672 * and should be added immediately.
3674 super_types[mddev->major_version].
3675 validate_super(mddev, rdev);
3676 err = mddev->pers->hot_add_disk(mddev, rdev);
3678 unbind_rdev_from_array(rdev);
3683 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3684 md_wakeup_thread(mddev->thread);
3688 /* otherwise, add_new_disk is only allowed
3689 * for major_version==0 superblocks
3691 if (mddev->major_version != 0) {
3692 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3697 if (!(info->state & (1<<MD_DISK_FAULTY))) {
3699 rdev = md_import_device (dev, -1, 0);
3702 "md: error, md_import_device() returned %ld\n",
3704 return PTR_ERR(rdev);
3706 rdev->desc_nr = info->number;
3707 if (info->raid_disk < mddev->raid_disks)
3708 rdev->raid_disk = info->raid_disk;
3710 rdev->raid_disk = -1;
3714 if (rdev->raid_disk < mddev->raid_disks)
3715 if (info->state & (1<<MD_DISK_SYNC))
3716 set_bit(In_sync, &rdev->flags);
3718 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3719 set_bit(WriteMostly, &rdev->flags);
3721 if (!mddev->persistent) {
3722 printk(KERN_INFO "md: nonpersistent superblock ...\n");
3723 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3725 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3726 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3728 err = bind_rdev_to_array(rdev, mddev);
3738 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3740 char b[BDEVNAME_SIZE];
3746 rdev = find_rdev(mddev, dev);
3750 if (rdev->raid_disk >= 0)
3753 kick_rdev_from_array(rdev);
3754 md_update_sb(mddev, 1);
3755 md_new_event(mddev);
3759 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3760 bdevname(rdev->bdev,b), mdname(mddev));
3764 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3766 char b[BDEVNAME_SIZE];
3774 if (mddev->major_version != 0) {
3775 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3776 " version-0 superblocks.\n",
3780 if (!mddev->pers->hot_add_disk) {
3782 "%s: personality does not support diskops!\n",
3787 rdev = md_import_device (dev, -1, 0);
3790 "md: error, md_import_device() returned %ld\n",
3795 if (mddev->persistent)
3796 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3799 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3801 size = calc_dev_size(rdev, mddev->chunk_size);
3804 if (test_bit(Faulty, &rdev->flags)) {
3806 "md: can not hot-add faulty %s disk to %s!\n",
3807 bdevname(rdev->bdev,b), mdname(mddev));
3811 clear_bit(In_sync, &rdev->flags);
3813 err = bind_rdev_to_array(rdev, mddev);
3818 * The rest should better be atomic, we can have disk failures
3819 * noticed in interrupt contexts ...
3822 if (rdev->desc_nr == mddev->max_disks) {
3823 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3826 goto abort_unbind_export;
3829 rdev->raid_disk = -1;
3831 md_update_sb(mddev, 1);
3834 * Kick recovery, maybe this spare has to be added to the
3835 * array immediately.
3837 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3838 md_wakeup_thread(mddev->thread);
3839 md_new_event(mddev);
3842 abort_unbind_export:
3843 unbind_rdev_from_array(rdev);
3850 static int set_bitmap_file(mddev_t *mddev, int fd)
3855 if (!mddev->pers->quiesce)
3857 if (mddev->recovery || mddev->sync_thread)
3859 /* we should be able to change the bitmap.. */
3865 return -EEXIST; /* cannot add when bitmap is present */
3866 mddev->bitmap_file = fget(fd);
3868 if (mddev->bitmap_file == NULL) {
3869 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3874 err = deny_bitmap_write_access(mddev->bitmap_file);
3876 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3878 fput(mddev->bitmap_file);
3879 mddev->bitmap_file = NULL;
3882 mddev->bitmap_offset = 0; /* file overrides offset */
3883 } else if (mddev->bitmap == NULL)
3884 return -ENOENT; /* cannot remove what isn't there */
3887 mddev->pers->quiesce(mddev, 1);
3889 err = bitmap_create(mddev);
3890 if (fd < 0 || err) {
3891 bitmap_destroy(mddev);
3892 fd = -1; /* make sure to put the file */
3894 mddev->pers->quiesce(mddev, 0);
3897 if (mddev->bitmap_file) {
3898 restore_bitmap_write_access(mddev->bitmap_file);
3899 fput(mddev->bitmap_file);
3901 mddev->bitmap_file = NULL;
3908 * set_array_info is used two different ways
3909 * The original usage is when creating a new array.
3910 * In this usage, raid_disks is > 0 and it together with
3911 * level, size, not_persistent,layout,chunksize determine the
3912 * shape of the array.
3913 * This will always create an array with a type-0.90.0 superblock.
3914 * The newer usage is when assembling an array.
3915 * In this case raid_disks will be 0, and the major_version field is
3916 * use to determine which style super-blocks are to be found on the devices.
3917 * The minor and patch _version numbers are also kept incase the
3918 * super_block handler wishes to interpret them.
3920 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3923 if (info->raid_disks == 0) {
3924 /* just setting version number for superblock loading */
3925 if (info->major_version < 0 ||
3926 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
3927 super_types[info->major_version].name == NULL) {
3928 /* maybe try to auto-load a module? */
3930 "md: superblock version %d not known\n",
3931 info->major_version);
3934 mddev->major_version = info->major_version;
3935 mddev->minor_version = info->minor_version;
3936 mddev->patch_version = info->patch_version;
3939 mddev->major_version = MD_MAJOR_VERSION;
3940 mddev->minor_version = MD_MINOR_VERSION;
3941 mddev->patch_version = MD_PATCHLEVEL_VERSION;
3942 mddev->ctime = get_seconds();
3944 mddev->level = info->level;
3945 mddev->clevel[0] = 0;
3946 mddev->size = info->size;
3947 mddev->raid_disks = info->raid_disks;
3948 /* don't set md_minor, it is determined by which /dev/md* was
3951 if (info->state & (1<<MD_SB_CLEAN))
3952 mddev->recovery_cp = MaxSector;
3954 mddev->recovery_cp = 0;
3955 mddev->persistent = ! info->not_persistent;
3957 mddev->layout = info->layout;
3958 mddev->chunk_size = info->chunk_size;
3960 mddev->max_disks = MD_SB_DISKS;
3963 set_bit(MD_CHANGE_DEVS, &mddev->flags);
3965 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
3966 mddev->bitmap_offset = 0;
3968 mddev->reshape_position = MaxSector;
3971 * Generate a 128 bit UUID
3973 get_random_bytes(mddev->uuid, 16);
3975 mddev->new_level = mddev->level;
3976 mddev->new_chunk = mddev->chunk_size;
3977 mddev->new_layout = mddev->layout;
3978 mddev->delta_disks = 0;
3983 static int update_size(mddev_t *mddev, unsigned long size)
3987 struct list_head *tmp;
3988 int fit = (size == 0);
3990 if (mddev->pers->resize == NULL)
3992 /* The "size" is the amount of each device that is used.
3993 * This can only make sense for arrays with redundancy.
3994 * linear and raid0 always use whatever space is available
3995 * We can only consider changing the size if no resync
3996 * or reconstruction is happening, and if the new size
3997 * is acceptable. It must fit before the sb_offset or,
3998 * if that is <data_offset, it must fit before the
3999 * size of each device.
4000 * If size is zero, we find the largest size that fits.
4002 if (mddev->sync_thread)
4004 ITERATE_RDEV(mddev,rdev,tmp) {
4006 if (rdev->sb_offset > rdev->data_offset)
4007 avail = (rdev->sb_offset*2) - rdev->data_offset;
4009 avail = get_capacity(rdev->bdev->bd_disk)
4010 - rdev->data_offset;
4011 if (fit && (size == 0 || size > avail/2))
4013 if (avail < ((sector_t)size << 1))
4016 rv = mddev->pers->resize(mddev, (sector_t)size *2);
4018 struct block_device *bdev;
4020 bdev = bdget_disk(mddev->gendisk, 0);
4022 mutex_lock(&bdev->bd_inode->i_mutex);
4023 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4024 mutex_unlock(&bdev->bd_inode->i_mutex);
4031 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4034 /* change the number of raid disks */
4035 if (mddev->pers->check_reshape == NULL)
4037 if (raid_disks <= 0 ||
4038 raid_disks >= mddev->max_disks)
4040 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4042 mddev->delta_disks = raid_disks - mddev->raid_disks;
4044 rv = mddev->pers->check_reshape(mddev);
4050 * update_array_info is used to change the configuration of an
4052 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
4053 * fields in the info are checked against the array.
4054 * Any differences that cannot be handled will cause an error.
4055 * Normally, only one change can be managed at a time.
4057 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4063 /* calculate expected state,ignoring low bits */
4064 if (mddev->bitmap && mddev->bitmap_offset)
4065 state |= (1 << MD_SB_BITMAP_PRESENT);
4067 if (mddev->major_version != info->major_version ||
4068 mddev->minor_version != info->minor_version ||
4069 /* mddev->patch_version != info->patch_version || */
4070 mddev->ctime != info->ctime ||
4071 mddev->level != info->level ||
4072 /* mddev->layout != info->layout || */
4073 !mddev->persistent != info->not_persistent||
4074 mddev->chunk_size != info->chunk_size ||
4075 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4076 ((state^info->state) & 0xfffffe00)
4079 /* Check there is only one change */
4080 if (info->size >= 0 && mddev->size != info->size) cnt++;
4081 if (mddev->raid_disks != info->raid_disks) cnt++;
4082 if (mddev->layout != info->layout) cnt++;
4083 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4084 if (cnt == 0) return 0;
4085 if (cnt > 1) return -EINVAL;
4087 if (mddev->layout != info->layout) {
4089 * we don't need to do anything at the md level, the
4090 * personality will take care of it all.
4092 if (mddev->pers->reconfig == NULL)
4095 return mddev->pers->reconfig(mddev, info->layout, -1);
4097 if (info->size >= 0 && mddev->size != info->size)
4098 rv = update_size(mddev, info->size);
4100 if (mddev->raid_disks != info->raid_disks)
4101 rv = update_raid_disks(mddev, info->raid_disks);
4103 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4104 if (mddev->pers->quiesce == NULL)
4106 if (mddev->recovery || mddev->sync_thread)
4108 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4109 /* add the bitmap */
4112 if (mddev->default_bitmap_offset == 0)
4114 mddev->bitmap_offset = mddev->default_bitmap_offset;
4115 mddev->pers->quiesce(mddev, 1);
4116 rv = bitmap_create(mddev);
4118 bitmap_destroy(mddev);
4119 mddev->pers->quiesce(mddev, 0);
4121 /* remove the bitmap */
4124 if (mddev->bitmap->file)
4126 mddev->pers->quiesce(mddev, 1);
4127 bitmap_destroy(mddev);
4128 mddev->pers->quiesce(mddev, 0);
4129 mddev->bitmap_offset = 0;
4132 md_update_sb(mddev, 1);
4136 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4140 if (mddev->pers == NULL)
4143 rdev = find_rdev(mddev, dev);
4147 md_error(mddev, rdev);
4151 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4153 mddev_t *mddev = bdev->bd_disk->private_data;
4157 geo->cylinders = get_capacity(mddev->gendisk) / 8;
4161 static int md_ioctl(struct inode *inode, struct file *file,
4162 unsigned int cmd, unsigned long arg)
4165 void __user *argp = (void __user *)arg;
4166 mddev_t *mddev = NULL;
4168 if (!capable(CAP_SYS_ADMIN))
4172 * Commands dealing with the RAID driver but not any
4178 err = get_version(argp);
4181 case PRINT_RAID_DEBUG:
4189 autostart_arrays(arg);
4196 * Commands creating/starting a new array:
4199 mddev = inode->i_bdev->bd_disk->private_data;
4206 err = mddev_lock(mddev);
4209 "md: ioctl lock interrupted, reason %d, cmd %d\n",
4216 case SET_ARRAY_INFO:
4218 mdu_array_info_t info;
4220 memset(&info, 0, sizeof(info));
4221 else if (copy_from_user(&info, argp, sizeof(info))) {
4226 err = update_array_info(mddev, &info);
4228 printk(KERN_WARNING "md: couldn't update"
4229 " array info. %d\n", err);
4234 if (!list_empty(&mddev->disks)) {
4236 "md: array %s already has disks!\n",
4241 if (mddev->raid_disks) {
4243 "md: array %s already initialised!\n",
4248 err = set_array_info(mddev, &info);
4250 printk(KERN_WARNING "md: couldn't set"
4251 " array info. %d\n", err);
4261 * Commands querying/configuring an existing array:
4263 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4264 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
4265 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4266 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
4272 * Commands even a read-only array can execute:
4276 case GET_ARRAY_INFO:
4277 err = get_array_info(mddev, argp);
4280 case GET_BITMAP_FILE:
4281 err = get_bitmap_file(mddev, argp);
4285 err = get_disk_info(mddev, argp);
4288 case RESTART_ARRAY_RW:
4289 err = restart_array(mddev);
4293 err = do_md_stop (mddev, 0);
4297 err = do_md_stop (mddev, 1);
4301 * We have a problem here : there is no easy way to give a CHS
4302 * virtual geometry. We currently pretend that we have a 2 heads
4303 * 4 sectors (with a BIG number of cylinders...). This drives
4304 * dosfs just mad... ;-)
4309 * The remaining ioctls are changing the state of the
4310 * superblock, so we do not allow them on read-only arrays.
4311 * However non-MD ioctls (e.g. get-size) will still come through
4312 * here and hit the 'default' below, so only disallow
4313 * 'md' ioctls, and switch to rw mode if started auto-readonly.
4315 if (_IOC_TYPE(cmd) == MD_MAJOR &&
4316 mddev->ro && mddev->pers) {
4317 if (mddev->ro == 2) {
4319 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4320 md_wakeup_thread(mddev->thread);
4332 mdu_disk_info_t info;
4333 if (copy_from_user(&info, argp, sizeof(info)))
4336 err = add_new_disk(mddev, &info);
4340 case HOT_REMOVE_DISK:
4341 err = hot_remove_disk(mddev, new_decode_dev(arg));
4345 err = hot_add_disk(mddev, new_decode_dev(arg));
4348 case SET_DISK_FAULTY:
4349 err = set_disk_faulty(mddev, new_decode_dev(arg));
4353 err = do_md_run (mddev);
4356 case SET_BITMAP_FILE:
4357 err = set_bitmap_file(mddev, (int)arg);
4367 mddev_unlock(mddev);
4377 static int md_open(struct inode *inode, struct file *file)
4380 * Succeed if we can lock the mddev, which confirms that
4381 * it isn't being stopped right now.
4383 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4386 if ((err = mddev_lock(mddev)))
4391 mddev_unlock(mddev);
4393 check_disk_change(inode->i_bdev);
4398 static int md_release(struct inode *inode, struct file * file)
4400 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4409 static int md_media_changed(struct gendisk *disk)
4411 mddev_t *mddev = disk->private_data;
4413 return mddev->changed;
4416 static int md_revalidate(struct gendisk *disk)
4418 mddev_t *mddev = disk->private_data;
4423 static struct block_device_operations md_fops =
4425 .owner = THIS_MODULE,
4427 .release = md_release,
4429 .getgeo = md_getgeo,
4430 .media_changed = md_media_changed,
4431 .revalidate_disk= md_revalidate,
4434 static int md_thread(void * arg)
4436 mdk_thread_t *thread = arg;
4439 * md_thread is a 'system-thread', it's priority should be very
4440 * high. We avoid resource deadlocks individually in each
4441 * raid personality. (RAID5 does preallocation) We also use RR and
4442 * the very same RT priority as kswapd, thus we will never get
4443 * into a priority inversion deadlock.
4445 * we definitely have to have equal or higher priority than
4446 * bdflush, otherwise bdflush will deadlock if there are too
4447 * many dirty RAID5 blocks.
4450 allow_signal(SIGKILL);
4451 while (!kthread_should_stop()) {
4453 /* We need to wait INTERRUPTIBLE so that
4454 * we don't add to the load-average.
4455 * That means we need to be sure no signals are
4458 if (signal_pending(current))
4459 flush_signals(current);
4461 wait_event_interruptible_timeout
4463 test_bit(THREAD_WAKEUP, &thread->flags)
4464 || kthread_should_stop(),
4468 clear_bit(THREAD_WAKEUP, &thread->flags);
4470 thread->run(thread->mddev);
4476 void md_wakeup_thread(mdk_thread_t *thread)
4479 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4480 set_bit(THREAD_WAKEUP, &thread->flags);
4481 wake_up(&thread->wqueue);
4485 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4488 mdk_thread_t *thread;
4490 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4494 init_waitqueue_head(&thread->wqueue);
4497 thread->mddev = mddev;
4498 thread->timeout = MAX_SCHEDULE_TIMEOUT;
4499 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4500 if (IS_ERR(thread->tsk)) {
4507 void md_unregister_thread(mdk_thread_t *thread)
4509 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
4511 kthread_stop(thread->tsk);
4515 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4522 if (!rdev || test_bit(Faulty, &rdev->flags))
4525 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4527 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4528 __builtin_return_address(0),__builtin_return_address(1),
4529 __builtin_return_address(2),__builtin_return_address(3));
4533 if (!mddev->pers->error_handler)
4535 mddev->pers->error_handler(mddev,rdev);
4536 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4537 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4538 md_wakeup_thread(mddev->thread);
4539 md_new_event_inintr(mddev);
4542 /* seq_file implementation /proc/mdstat */
4544 static void status_unused(struct seq_file *seq)
4548 struct list_head *tmp;
4550 seq_printf(seq, "unused devices: ");
4552 ITERATE_RDEV_PENDING(rdev,tmp) {
4553 char b[BDEVNAME_SIZE];
4555 seq_printf(seq, "%s ",
4556 bdevname(rdev->bdev,b));
4559 seq_printf(seq, "<none>");
4561 seq_printf(seq, "\n");
4565 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4567 sector_t max_blocks, resync, res;
4568 unsigned long dt, db, rt;
4570 unsigned int per_milli;
4572 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4574 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4575 max_blocks = mddev->resync_max_sectors >> 1;
4577 max_blocks = mddev->size;
4580 * Should not happen.
4586 /* Pick 'scale' such that (resync>>scale)*1000 will fit
4587 * in a sector_t, and (max_blocks>>scale) will fit in a
4588 * u32, as those are the requirements for sector_div.
4589 * Thus 'scale' must be at least 10
4592 if (sizeof(sector_t) > sizeof(unsigned long)) {
4593 while ( max_blocks/2 > (1ULL<<(scale+32)))
4596 res = (resync>>scale)*1000;
4597 sector_div(res, (u32)((max_blocks>>scale)+1));
4601 int i, x = per_milli/50, y = 20-x;
4602 seq_printf(seq, "[");
4603 for (i = 0; i < x; i++)
4604 seq_printf(seq, "=");
4605 seq_printf(seq, ">");
4606 for (i = 0; i < y; i++)
4607 seq_printf(seq, ".");
4608 seq_printf(seq, "] ");
4610 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4611 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4613 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4614 "resync" : "recovery")),
4615 per_milli/10, per_milli % 10,
4616 (unsigned long long) resync,
4617 (unsigned long long) max_blocks);
4620 * We do not want to overflow, so the order of operands and
4621 * the * 100 / 100 trick are important. We do a +1 to be
4622 * safe against division by zero. We only estimate anyway.
4624 * dt: time from mark until now
4625 * db: blocks written from mark until now
4626 * rt: remaining time
4628 dt = ((jiffies - mddev->resync_mark) / HZ);
4630 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
4631 - mddev->resync_mark_cnt;
4632 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
4634 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4636 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
4639 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4641 struct list_head *tmp;
4651 spin_lock(&all_mddevs_lock);
4652 list_for_each(tmp,&all_mddevs)
4654 mddev = list_entry(tmp, mddev_t, all_mddevs);
4656 spin_unlock(&all_mddevs_lock);
4659 spin_unlock(&all_mddevs_lock);
4661 return (void*)2;/* tail */
4665 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4667 struct list_head *tmp;
4668 mddev_t *next_mddev, *mddev = v;
4674 spin_lock(&all_mddevs_lock);
4676 tmp = all_mddevs.next;
4678 tmp = mddev->all_mddevs.next;
4679 if (tmp != &all_mddevs)
4680 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4682 next_mddev = (void*)2;
4685 spin_unlock(&all_mddevs_lock);
4693 static void md_seq_stop(struct seq_file *seq, void *v)
4697 if (mddev && v != (void*)1 && v != (void*)2)
4701 struct mdstat_info {
4705 static int md_seq_show(struct seq_file *seq, void *v)
4709 struct list_head *tmp2;
4711 struct mdstat_info *mi = seq->private;
4712 struct bitmap *bitmap;
4714 if (v == (void*)1) {
4715 struct mdk_personality *pers;
4716 seq_printf(seq, "Personalities : ");
4717 spin_lock(&pers_lock);
4718 list_for_each_entry(pers, &pers_list, list)
4719 seq_printf(seq, "[%s] ", pers->name);
4721 spin_unlock(&pers_lock);
4722 seq_printf(seq, "\n");
4723 mi->event = atomic_read(&md_event_count);
4726 if (v == (void*)2) {
4731 if (mddev_lock(mddev) < 0)
4734 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4735 seq_printf(seq, "%s : %sactive", mdname(mddev),
4736 mddev->pers ? "" : "in");
4739 seq_printf(seq, " (read-only)");
4741 seq_printf(seq, "(auto-read-only)");
4742 seq_printf(seq, " %s", mddev->pers->name);
4746 ITERATE_RDEV(mddev,rdev,tmp2) {
4747 char b[BDEVNAME_SIZE];
4748 seq_printf(seq, " %s[%d]",
4749 bdevname(rdev->bdev,b), rdev->desc_nr);
4750 if (test_bit(WriteMostly, &rdev->flags))
4751 seq_printf(seq, "(W)");
4752 if (test_bit(Faulty, &rdev->flags)) {
4753 seq_printf(seq, "(F)");
4755 } else if (rdev->raid_disk < 0)
4756 seq_printf(seq, "(S)"); /* spare */
4760 if (!list_empty(&mddev->disks)) {
4762 seq_printf(seq, "\n %llu blocks",
4763 (unsigned long long)mddev->array_size);
4765 seq_printf(seq, "\n %llu blocks",
4766 (unsigned long long)size);
4768 if (mddev->persistent) {
4769 if (mddev->major_version != 0 ||
4770 mddev->minor_version != 90) {
4771 seq_printf(seq," super %d.%d",
4772 mddev->major_version,
4773 mddev->minor_version);
4776 seq_printf(seq, " super non-persistent");
4779 mddev->pers->status (seq, mddev);
4780 seq_printf(seq, "\n ");
4781 if (mddev->pers->sync_request) {
4782 if (mddev->curr_resync > 2) {
4783 status_resync (seq, mddev);
4784 seq_printf(seq, "\n ");
4785 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4786 seq_printf(seq, "\tresync=DELAYED\n ");
4787 else if (mddev->recovery_cp < MaxSector)
4788 seq_printf(seq, "\tresync=PENDING\n ");
4791 seq_printf(seq, "\n ");
4793 if ((bitmap = mddev->bitmap)) {
4794 unsigned long chunk_kb;
4795 unsigned long flags;
4796 spin_lock_irqsave(&bitmap->lock, flags);
4797 chunk_kb = bitmap->chunksize >> 10;
4798 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4800 bitmap->pages - bitmap->missing_pages,
4802 (bitmap->pages - bitmap->missing_pages)
4803 << (PAGE_SHIFT - 10),
4804 chunk_kb ? chunk_kb : bitmap->chunksize,
4805 chunk_kb ? "KB" : "B");
4807 seq_printf(seq, ", file: ");
4808 seq_path(seq, bitmap->file->f_vfsmnt,
4809 bitmap->file->f_dentry," \t\n");
4812 seq_printf(seq, "\n");
4813 spin_unlock_irqrestore(&bitmap->lock, flags);
4816 seq_printf(seq, "\n");
4818 mddev_unlock(mddev);
4823 static struct seq_operations md_seq_ops = {
4824 .start = md_seq_start,
4825 .next = md_seq_next,
4826 .stop = md_seq_stop,
4827 .show = md_seq_show,
4830 static int md_seq_open(struct inode *inode, struct file *file)
4833 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4837 error = seq_open(file, &md_seq_ops);
4841 struct seq_file *p = file->private_data;
4843 mi->event = atomic_read(&md_event_count);
4848 static int md_seq_release(struct inode *inode, struct file *file)
4850 struct seq_file *m = file->private_data;
4851 struct mdstat_info *mi = m->private;
4854 return seq_release(inode, file);
4857 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4859 struct seq_file *m = filp->private_data;
4860 struct mdstat_info *mi = m->private;
4863 poll_wait(filp, &md_event_waiters, wait);
4865 /* always allow read */
4866 mask = POLLIN | POLLRDNORM;
4868 if (mi->event != atomic_read(&md_event_count))
4869 mask |= POLLERR | POLLPRI;
4873 static struct file_operations md_seq_fops = {
4874 .open = md_seq_open,
4876 .llseek = seq_lseek,
4877 .release = md_seq_release,
4878 .poll = mdstat_poll,
4881 int register_md_personality(struct mdk_personality *p)
4883 spin_lock(&pers_lock);
4884 list_add_tail(&p->list, &pers_list);
4885 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
4886 spin_unlock(&pers_lock);
4890 int unregister_md_personality(struct mdk_personality *p)
4892 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
4893 spin_lock(&pers_lock);
4894 list_del_init(&p->list);
4895 spin_unlock(&pers_lock);
4899 static int is_mddev_idle(mddev_t *mddev)
4902 struct list_head *tmp;
4904 unsigned long curr_events;
4907 ITERATE_RDEV(mddev,rdev,tmp) {
4908 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
4909 curr_events = disk_stat_read(disk, sectors[0]) +
4910 disk_stat_read(disk, sectors[1]) -
4911 atomic_read(&disk->sync_io);
4912 /* The difference between curr_events and last_events
4913 * will be affected by any new non-sync IO (making
4914 * curr_events bigger) and any difference in the amount of
4915 * in-flight syncio (making current_events bigger or smaller)
4916 * The amount in-flight is currently limited to
4917 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
4918 * which is at most 4096 sectors.
4919 * These numbers are fairly fragile and should be made
4920 * more robust, probably by enforcing the
4921 * 'window size' that md_do_sync sort-of uses.
4923 * Note: the following is an unsigned comparison.
4925 if ((curr_events - rdev->last_events + 4096) > 8192) {
4926 rdev->last_events = curr_events;
4933 void md_done_sync(mddev_t *mddev, int blocks, int ok)
4935 /* another "blocks" (512byte) blocks have been synced */
4936 atomic_sub(blocks, &mddev->recovery_active);
4937 wake_up(&mddev->recovery_wait);
4939 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4940 md_wakeup_thread(mddev->thread);
4941 // stop recovery, signal do_sync ....
4946 /* md_write_start(mddev, bi)
4947 * If we need to update some array metadata (e.g. 'active' flag
4948 * in superblock) before writing, schedule a superblock update
4949 * and wait for it to complete.
4951 void md_write_start(mddev_t *mddev, struct bio *bi)
4953 if (bio_data_dir(bi) != WRITE)
4956 BUG_ON(mddev->ro == 1);
4957 if (mddev->ro == 2) {
4958 /* need to switch to read/write */
4960 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4961 md_wakeup_thread(mddev->thread);
4963 atomic_inc(&mddev->writes_pending);
4964 if (mddev->in_sync) {
4965 spin_lock_irq(&mddev->write_lock);
4966 if (mddev->in_sync) {
4968 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
4969 md_wakeup_thread(mddev->thread);
4971 spin_unlock_irq(&mddev->write_lock);
4973 wait_event(mddev->sb_wait, mddev->flags==0);
4976 void md_write_end(mddev_t *mddev)
4978 if (atomic_dec_and_test(&mddev->writes_pending)) {
4979 if (mddev->safemode == 2)
4980 md_wakeup_thread(mddev->thread);
4981 else if (mddev->safemode_delay)
4982 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
4986 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
4988 #define SYNC_MARKS 10
4989 #define SYNC_MARK_STEP (3*HZ)
4990 void md_do_sync(mddev_t *mddev)
4993 unsigned int currspeed = 0,
4995 sector_t max_sectors,j, io_sectors;
4996 unsigned long mark[SYNC_MARKS];
4997 sector_t mark_cnt[SYNC_MARKS];
4999 struct list_head *tmp;
5000 sector_t last_check;
5002 struct list_head *rtmp;
5005 /* just incase thread restarts... */
5006 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5008 if (mddev->ro) /* never try to sync a read-only array */
5011 /* we overload curr_resync somewhat here.
5012 * 0 == not engaged in resync at all
5013 * 2 == checking that there is no conflict with another sync
5014 * 1 == like 2, but have yielded to allow conflicting resync to
5016 * other == active in resync - this many blocks
5018 * Before starting a resync we must have set curr_resync to
5019 * 2, and then checked that every "conflicting" array has curr_resync
5020 * less than ours. When we find one that is the same or higher
5021 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
5022 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
5023 * This will mean we have to start checking from the beginning again.
5028 mddev->curr_resync = 2;
5031 if (kthread_should_stop()) {
5032 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5035 ITERATE_MDDEV(mddev2,tmp) {
5036 if (mddev2 == mddev)
5038 if (mddev2->curr_resync &&
5039 match_mddev_units(mddev,mddev2)) {
5041 if (mddev < mddev2 && mddev->curr_resync == 2) {
5042 /* arbitrarily yield */
5043 mddev->curr_resync = 1;
5044 wake_up(&resync_wait);
5046 if (mddev > mddev2 && mddev->curr_resync == 1)
5047 /* no need to wait here, we can wait the next
5048 * time 'round when curr_resync == 2
5051 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5052 if (!kthread_should_stop() &&
5053 mddev2->curr_resync >= mddev->curr_resync) {
5054 printk(KERN_INFO "md: delaying resync of %s"
5055 " until %s has finished resync (they"
5056 " share one or more physical units)\n",
5057 mdname(mddev), mdname(mddev2));
5060 finish_wait(&resync_wait, &wq);
5063 finish_wait(&resync_wait, &wq);
5066 } while (mddev->curr_resync < 2);
5069 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5070 /* resync follows the size requested by the personality,
5071 * which defaults to physical size, but can be virtual size
5073 max_sectors = mddev->resync_max_sectors;
5074 mddev->resync_mismatches = 0;
5075 /* we don't use the checkpoint if there's a bitmap */
5076 if (!mddev->bitmap &&
5077 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5078 j = mddev->recovery_cp;
5079 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5080 max_sectors = mddev->size << 1;
5082 /* recovery follows the physical size of devices */
5083 max_sectors = mddev->size << 1;
5085 ITERATE_RDEV(mddev,rdev,rtmp)
5086 if (rdev->raid_disk >= 0 &&
5087 !test_bit(Faulty, &rdev->flags) &&
5088 !test_bit(In_sync, &rdev->flags) &&
5089 rdev->recovery_offset < j)
5090 j = rdev->recovery_offset;
5093 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
5094 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
5095 " %d KB/sec/disc.\n", speed_min(mddev));
5096 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5097 "(but not more than %d KB/sec) for reconstruction.\n",
5100 is_mddev_idle(mddev); /* this also initializes IO event counters */
5103 for (m = 0; m < SYNC_MARKS; m++) {
5105 mark_cnt[m] = io_sectors;
5108 mddev->resync_mark = mark[last_mark];
5109 mddev->resync_mark_cnt = mark_cnt[last_mark];
5112 * Tune reconstruction:
5114 window = 32*(PAGE_SIZE/512);
5115 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5116 window/2,(unsigned long long) max_sectors/2);
5118 atomic_set(&mddev->recovery_active, 0);
5119 init_waitqueue_head(&mddev->recovery_wait);
5124 "md: resuming recovery of %s from checkpoint.\n",
5126 mddev->curr_resync = j;
5129 while (j < max_sectors) {
5133 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5134 currspeed < speed_min(mddev));
5136 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5140 if (!skipped) { /* actual IO requested */
5141 io_sectors += sectors;
5142 atomic_add(sectors, &mddev->recovery_active);
5146 if (j>1) mddev->curr_resync = j;
5147 mddev->curr_mark_cnt = io_sectors;
5148 if (last_check == 0)
5149 /* this is the earliers that rebuilt will be
5150 * visible in /proc/mdstat
5152 md_new_event(mddev);
5154 if (last_check + window > io_sectors || j == max_sectors)
5157 last_check = io_sectors;
5159 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
5160 test_bit(MD_RECOVERY_ERR, &mddev->recovery))
5164 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5166 int next = (last_mark+1) % SYNC_MARKS;
5168 mddev->resync_mark = mark[next];
5169 mddev->resync_mark_cnt = mark_cnt[next];
5170 mark[next] = jiffies;
5171 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5176 if (kthread_should_stop()) {
5178 * got a signal, exit.
5181 "md: md_do_sync() got signal ... exiting\n");
5182 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5187 * this loop exits only if either when we are slower than
5188 * the 'hard' speed limit, or the system was IO-idle for
5190 * the system might be non-idle CPU-wise, but we only care
5191 * about not overloading the IO subsystem. (things like an
5192 * e2fsck being done on the RAID array should execute fast)
5194 mddev->queue->unplug_fn(mddev->queue);
5197 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5198 /((jiffies-mddev->resync_mark)/HZ +1) +1;
5200 if (currspeed > speed_min(mddev)) {
5201 if ((currspeed > speed_max(mddev)) ||
5202 !is_mddev_idle(mddev)) {
5208 printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
5210 * this also signals 'finished resyncing' to md_stop
5213 mddev->queue->unplug_fn(mddev->queue);
5215 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5217 /* tell personality that we are finished */
5218 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5220 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5221 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
5222 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5223 mddev->curr_resync > 2) {
5224 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5225 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5226 if (mddev->curr_resync >= mddev->recovery_cp) {
5228 "md: checkpointing recovery of %s.\n",
5230 mddev->recovery_cp = mddev->curr_resync;
5233 mddev->recovery_cp = MaxSector;
5235 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5236 mddev->curr_resync = MaxSector;
5237 ITERATE_RDEV(mddev,rdev,rtmp)
5238 if (rdev->raid_disk >= 0 &&
5239 !test_bit(Faulty, &rdev->flags) &&
5240 !test_bit(In_sync, &rdev->flags) &&
5241 rdev->recovery_offset < mddev->curr_resync)
5242 rdev->recovery_offset = mddev->curr_resync;
5247 mddev->curr_resync = 0;
5248 wake_up(&resync_wait);
5249 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5250 md_wakeup_thread(mddev->thread);
5252 EXPORT_SYMBOL_GPL(md_do_sync);
5256 * This routine is regularly called by all per-raid-array threads to
5257 * deal with generic issues like resync and super-block update.
5258 * Raid personalities that don't have a thread (linear/raid0) do not
5259 * need this as they never do any recovery or update the superblock.
5261 * It does not do any resync itself, but rather "forks" off other threads
5262 * to do that as needed.
5263 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
5264 * "->recovery" and create a thread at ->sync_thread.
5265 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
5266 * and wakeups up this thread which will reap the thread and finish up.
5267 * This thread also removes any faulty devices (with nr_pending == 0).
5269 * The overall approach is:
5270 * 1/ if the superblock needs updating, update it.
5271 * 2/ If a recovery thread is running, don't do anything else.
5272 * 3/ If recovery has finished, clean up, possibly marking spares active.
5273 * 4/ If there are any faulty devices, remove them.
5274 * 5/ If array is degraded, try to add spares devices
5275 * 6/ If array has spares or is not in-sync, start a resync thread.
5277 void md_check_recovery(mddev_t *mddev)
5280 struct list_head *rtmp;
5284 bitmap_daemon_work(mddev->bitmap);
5289 if (signal_pending(current)) {
5290 if (mddev->pers->sync_request) {
5291 printk(KERN_INFO "md: %s in immediate safe mode\n",
5293 mddev->safemode = 2;
5295 flush_signals(current);
5300 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
5301 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
5302 (mddev->safemode == 1) ||
5303 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
5304 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
5308 if (mddev_trylock(mddev)) {
5311 spin_lock_irq(&mddev->write_lock);
5312 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
5313 !mddev->in_sync && mddev->recovery_cp == MaxSector) {
5315 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5317 if (mddev->safemode == 1)
5318 mddev->safemode = 0;
5319 spin_unlock_irq(&mddev->write_lock);
5322 md_update_sb(mddev, 0);
5325 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
5326 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
5327 /* resync/recovery still happening */
5328 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5331 if (mddev->sync_thread) {
5332 /* resync has finished, collect result */
5333 md_unregister_thread(mddev->sync_thread);
5334 mddev->sync_thread = NULL;
5335 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5336 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5338 /* activate any spares */
5339 mddev->pers->spare_active(mddev);
5341 md_update_sb(mddev, 1);
5343 /* if array is no-longer degraded, then any saved_raid_disk
5344 * information must be scrapped
5346 if (!mddev->degraded)
5347 ITERATE_RDEV(mddev,rdev,rtmp)
5348 rdev->saved_raid_disk = -1;
5350 mddev->recovery = 0;
5351 /* flag recovery needed just to double check */
5352 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5353 md_new_event(mddev);
5356 /* Clear some bits that don't mean anything, but
5359 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5360 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
5361 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
5362 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
5364 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
5366 /* no recovery is running.
5367 * remove any failed drives, then
5368 * add spares if possible.
5369 * Spare are also removed and re-added, to allow
5370 * the personality to fail the re-add.
5372 ITERATE_RDEV(mddev,rdev,rtmp)
5373 if (rdev->raid_disk >= 0 &&
5374 (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
5375 atomic_read(&rdev->nr_pending)==0) {
5376 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
5378 sprintf(nm,"rd%d", rdev->raid_disk);
5379 sysfs_remove_link(&mddev->kobj, nm);
5380 rdev->raid_disk = -1;
5384 if (mddev->degraded) {
5385 ITERATE_RDEV(mddev,rdev,rtmp)
5386 if (rdev->raid_disk < 0
5387 && !test_bit(Faulty, &rdev->flags)) {
5388 rdev->recovery_offset = 0;
5389 if (mddev->pers->hot_add_disk(mddev,rdev)) {
5391 sprintf(nm, "rd%d", rdev->raid_disk);
5392 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
5394 md_new_event(mddev);
5401 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5402 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5403 } else if (mddev->recovery_cp < MaxSector) {
5404 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5405 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5406 /* nothing to be done ... */
5409 if (mddev->pers->sync_request) {
5410 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5411 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
5412 /* We are adding a device or devices to an array
5413 * which has the bitmap stored on all devices.
5414 * So make sure all bitmap pages get written
5416 bitmap_write_all(mddev->bitmap);
5418 mddev->sync_thread = md_register_thread(md_do_sync,
5421 if (!mddev->sync_thread) {
5422 printk(KERN_ERR "%s: could not start resync"
5425 /* leave the spares where they are, it shouldn't hurt */
5426 mddev->recovery = 0;
5428 md_wakeup_thread(mddev->sync_thread);
5429 md_new_event(mddev);
5432 mddev_unlock(mddev);
5436 static int md_notify_reboot(struct notifier_block *this,
5437 unsigned long code, void *x)
5439 struct list_head *tmp;
5442 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
5444 printk(KERN_INFO "md: stopping all md devices.\n");
5446 ITERATE_MDDEV(mddev,tmp)
5447 if (mddev_trylock(mddev)) {
5448 do_md_stop (mddev, 1);
5449 mddev_unlock(mddev);
5452 * certain more exotic SCSI devices are known to be
5453 * volatile wrt too early system reboots. While the
5454 * right place to handle this issue is the given
5455 * driver, we do want to have a safe RAID driver ...
5462 static struct notifier_block md_notifier = {
5463 .notifier_call = md_notify_reboot,
5465 .priority = INT_MAX, /* before any real devices */
5468 static void md_geninit(void)
5470 struct proc_dir_entry *p;
5472 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
5474 p = create_proc_entry("mdstat", S_IRUGO, NULL);
5476 p->proc_fops = &md_seq_fops;
5479 static int __init md_init(void)
5481 printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
5482 " MD_SB_DISKS=%d\n",
5483 MD_MAJOR_VERSION, MD_MINOR_VERSION,
5484 MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
5485 printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
5488 if (register_blkdev(MAJOR_NR, "md"))
5490 if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
5491 unregister_blkdev(MAJOR_NR, "md");
5494 blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
5495 md_probe, NULL, NULL);
5496 blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
5497 md_probe, NULL, NULL);
5499 register_reboot_notifier(&md_notifier);
5500 raid_table_header = register_sysctl_table(raid_root_table, 1);
5510 * Searches all registered partitions for autorun RAID arrays
5513 static dev_t detected_devices[128];
5516 void md_autodetect_dev(dev_t dev)
5518 if (dev_cnt >= 0 && dev_cnt < 127)
5519 detected_devices[dev_cnt++] = dev;
5523 static void autostart_arrays(int part)
5528 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
5530 for (i = 0; i < dev_cnt; i++) {
5531 dev_t dev = detected_devices[i];
5533 rdev = md_import_device(dev,0, 0);
5537 if (test_bit(Faulty, &rdev->flags)) {
5541 list_add(&rdev->same_set, &pending_raid_disks);
5545 autorun_devices(part);
5550 static __exit void md_exit(void)
5553 struct list_head *tmp;
5555 blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
5556 blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
5558 unregister_blkdev(MAJOR_NR,"md");
5559 unregister_blkdev(mdp_major, "mdp");
5560 unregister_reboot_notifier(&md_notifier);
5561 unregister_sysctl_table(raid_table_header);
5562 remove_proc_entry("mdstat", NULL);
5563 ITERATE_MDDEV(mddev,tmp) {
5564 struct gendisk *disk = mddev->gendisk;
5567 export_array(mddev);
5570 mddev->gendisk = NULL;
5575 module_init(md_init)
5576 module_exit(md_exit)
5578 static int get_ro(char *buffer, struct kernel_param *kp)
5580 return sprintf(buffer, "%d", start_readonly);
5582 static int set_ro(const char *val, struct kernel_param *kp)
5585 int num = simple_strtoul(val, &e, 10);
5586 if (*val && (*e == '\0' || *e == '\n')) {
5587 start_readonly = num;
5593 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
5594 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
5597 EXPORT_SYMBOL(register_md_personality);
5598 EXPORT_SYMBOL(unregister_md_personality);
5599 EXPORT_SYMBOL(md_error);
5600 EXPORT_SYMBOL(md_done_sync);
5601 EXPORT_SYMBOL(md_write_start);
5602 EXPORT_SYMBOL(md_write_end);
5603 EXPORT_SYMBOL(md_register_thread);
5604 EXPORT_SYMBOL(md_unregister_thread);
5605 EXPORT_SYMBOL(md_wakeup_thread);
5606 EXPORT_SYMBOL(md_check_recovery);
5607 MODULE_LICENSE("GPL");
5609 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);