md: resolve external metadata handling deadlock in md_allow_write
/*
   md.c : Multiple Devices driver for Linux
          Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/freezer.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
        return mddev->sync_speed_min ?
                mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
        return mddev->sync_speed_max ?
                mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = &proc_dointvec,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
        {
                .ctl_name       = DEV_RAID,
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
        {
                .ctl_name       = CTL_DEV,
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        { .ctl_name = 0 }
};
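/*
 * Illustrative sketch (not part of the driver): the three chained
 * tables above describe the hierarchy dev/ -> raid/ -> speed_limit_*,
 * and are registered elsewhere in this file via
 * register_sysctl_table(raid_root_table), which yields
 * /proc/sys/dev/raid/speed_limit_{min,max}.  A hypothetical extra knob
 * would just be one more entry in raid_table[]:
 *
 *	{
 *		.ctl_name	= CTL_UNNUMBERED,
 *		.procname	= "my_knob",	(hypothetical name)
 *		.data		= &my_knob,
 *		.maxlen		= sizeof(int),
 *		.mode		= S_IRUGO|S_IWUSR,
 *		.proc_handler	= &proc_dointvec,
 *	},
 */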

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)                                       \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                tmp = all_mddevs.next;                                  \
                mddev = NULL;});                                        \
             ({ if (tmp != &all_mddevs)                                 \
                        mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (mddev) mddev_put(mddev);                            \
                mddev = list_entry(tmp, mddev_t, all_mddevs);           \
                tmp != &all_mddevs;});                                  \
             ({ spin_lock(&all_mddevs_lock);                            \
                tmp = tmp->next;})                                      \
                )
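
/*
 * Illustrative sketch (not part of the driver): a typical traversal.
 * Breaking out of the loop early leaves a reference held on the
 * current mddev, which the caller must drop with mddev_put():
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		if (is_interesting(mddev)) {	(hypothetical predicate)
 *			handle(mddev);		(hypothetical helper)
 *			mddev_put(mddev);
 *			break;
 *		}
 *	}
 */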


static int md_fail_request (struct request_queue *q, struct bio *bio)
{
        bio_io_error(bio);
        return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_put(mddev_t *mddev)
{
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks)) {
                list_del(&mddev->all_mddevs);
                spin_unlock(&all_mddevs_lock);
                blk_cleanup_queue(mddev->queue);
                kobject_put(&mddev->kobj);
        } else
                spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
        mddev_t *mddev, *new = NULL;

 retry:
        spin_lock(&all_mddevs_lock);
        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                if (mddev->unit == unit) {
                        mddev_get(mddev);
                        spin_unlock(&all_mddevs_lock);
                        kfree(new);
                        return mddev;
                }

        if (new) {
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;

        mutex_init(&new->reconfig_mutex);
        INIT_LIST_HEAD(&new->disks);
        INIT_LIST_HEAD(&new->all_mddevs);
        init_timer(&new->safemode_timer);
        atomic_set(&new->active, 1);
        spin_lock_init(&new->write_lock);
        init_waitqueue_head(&new->sb_wait);
        init_waitqueue_head(&new->recovery_wait);
        new->reshape_position = MaxSector;
        new->resync_min = 0;
        new->resync_max = MaxSector;
        new->level = LEVEL_NONE;

        new->queue = blk_alloc_queue(GFP_KERNEL);
        if (!new->queue) {
                kfree(new);
                return NULL;
        }
        /* Can be unlocked because the queue is new: no concurrency */
        queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);

        blk_queue_make_request(new->queue, md_fail_request);

        goto retry;
}
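
/*
 * Illustrative sketch (not part of the driver): mddev_find() either
 * returns an existing array for 'unit' or allocates a fresh one whose
 * queue initially fails all requests (md_fail_request) until a
 * personality is attached.  Either way the caller owns a reference:
 *
 *	mddev_t *mddev = mddev_find(dev);
 *	if (!mddev)
 *		return -ENOMEM;
 *	... use mddev ...
 *	mddev_put(mddev);	(may free it if it was never configured)
 */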

static inline int mddev_lock(mddev_t * mddev)
{
        return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
        return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
        mutex_unlock(&mddev->reconfig_mutex);

        md_wakeup_thread(mddev->thread);
}
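
/*
 * Illustrative sketch (not part of the driver): reconfiguration runs
 * under reconfig_mutex, and mddev_unlock() pokes the per-array thread
 * so it can react to whatever changed:
 *
 *	int err = mddev_lock(mddev);
 *	if (err)
 *		return err;		(interrupted while waiting)
 *	... modify mddev state ...
 *	mddev_unlock(mddev);		(also wakes mddev->thread)
 */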

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
        mdk_rdev_t * rdev;
        struct list_head *tmp;

        rdev_for_each(rdev, tmp, mddev) {
                if (rdev->desc_nr == nr)
                        return rdev;
        }
        return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        rdev_for_each(rdev, tmp, mddev) {
                if (rdev->bdev->bd_dev == dev)
                        return rdev;
        }
        return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
        struct mdk_personality *pers;
        list_for_each_entry(pers, &pers_list, list) {
                if (level != LEVEL_NONE && pers->level == level)
                        return pers;
                if (strcmp(pers->name, clevel)==0)
                        return pers;
        }
        return NULL;
}

static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
        sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
        return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
        sector_t size;

        size = rdev->sb_offset;

        if (chunk_size)
                size &= ~((sector_t)chunk_size/1024 - 1);
        return size;
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page)
                MD_BUG();

        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
                return -EINVAL;
        }

        return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page) {
                put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_offset = 0;
                rdev->size = 0;
        }
}


static void super_written(struct bio *bio, int error)
{
        mdk_rdev_t *rdev = bio->bi_private;
        mddev_t *mddev = rdev->mddev;

        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk("md: super_written gets error=%d, uptodate=%d\n",
                       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
                WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
                md_error(mddev, rdev);
        }

        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
        bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
        struct bio *bio2 = bio->bi_private;
        mdk_rdev_t *rdev = bio2->bi_private;
        mddev_t *mddev = rdev->mddev;

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
            error == -EOPNOTSUPP) {
                unsigned long flags;
                /* barriers don't appear to be supported :-( */
                set_bit(BarriersNotsupp, &rdev->flags);
                mddev->barriers_work = 0;
                spin_lock_irqsave(&mddev->write_lock, flags);
                bio2->bi_next = mddev->biolist;
                mddev->biolist = bio2;
                spin_unlock_irqrestore(&mddev->write_lock, flags);
                wake_up(&mddev->sb_wait);
                bio_put(bio);
        } else {
                bio_put(bio2);
                bio->bi_private = rdev;
                super_written(bio, error);
        }
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
                   sector_t sector, int size, struct page *page)
{
        /* write first size bytes of page to sector of rdev
         * Increment mddev->pending_writes before returning
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
         *
         * As we might need to resubmit the request if BIO_RW_BARRIER
         * causes ENOTSUPP, we allocate a spare bio...
         */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

        bio->bi_bdev = rdev->bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
        bio->bi_rw = rw;

        atomic_inc(&mddev->pending_writes);
        if (!test_bit(BarriersNotsupp, &rdev->flags)) {
                struct bio *rbio;
                rw |= (1<<BIO_RW_BARRIER);
                rbio = bio_clone(bio, GFP_NOIO);
                rbio->bi_private = bio;
                rbio->bi_end_io = super_written_barrier;
                submit_bio(rw, rbio);
        } else
                submit_bio(rw, bio);
}

void md_super_wait(mddev_t *mddev)
{
        /* wait for all superblock writes that were scheduled to complete.
         * if any had to be retried (due to BARRIER problems), retry them
         */
        DEFINE_WAIT(wq);
        for(;;) {
                prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
                if (atomic_read(&mddev->pending_writes)==0)
                        break;
                while (mddev->biolist) {
                        struct bio *bio;
                        spin_lock_irq(&mddev->write_lock);
                        bio = mddev->biolist;
                        mddev->biolist = bio->bi_next ;
                        bio->bi_next = NULL;
                        spin_unlock_irq(&mddev->write_lock);
                        submit_bio(bio->bi_rw, bio);
                }
                schedule();
        }
        finish_wait(&mddev->sb_wait, &wq);
}
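
/*
 * Illustrative sketch (not part of the driver): superblock updates are
 * fired asynchronously with md_super_write() - one bio per device -
 * and then collected with a single md_super_wait(), which also
 * resubmits any write that failed only because barriers turned out to
 * be unsupported:
 *
 *	rdev_for_each(rdev, tmp, mddev)
 *		md_super_write(mddev, rdev, rdev->sb_offset << 1,
 *			       rdev->sb_size, rdev->sb_page);
 *	md_super_wait(mddev);	(all writes done, or retried, here)
 */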

static void bi_complete(struct bio *bio, int error)
{
        complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
                   struct page *page, int rw)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        struct completion event;
        int ret;

        rw |= (1 << BIO_RW_SYNC);

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        init_completion(&event);
        bio->bi_private = &event;
        bio->bi_end_io = bi_complete;
        submit_bio(rw, bio);
        wait_for_completion(&event);

        ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
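
/*
 * Illustrative sketch (not part of the driver): sync_page_io() turns a
 * single-page transfer into a blocking call via a completion;
 * read_disk_sb() below is the canonical user.  It returns 1 on
 * success, 0 on failure:
 *
 *	if (!sync_page_io(rdev->bdev, rdev->sb_offset << 1, MD_SB_BYTES,
 *			  rdev->sb_page, READ))
 *		return -EIO;	(hypothetical error handling)
 */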

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->sb_page) {
                MD_BUG();
                return -EINVAL;
        }
        if (rdev->sb_loaded)
                return 0;


        if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;

fail:
        printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
                bdevname(rdev->bdev,b));
        return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
                (sb1->set_uuid1 == sb2->set_uuid1) &&
                (sb1->set_uuid2 == sb2->set_uuid2) &&
                (sb1->set_uuid3 == sb2->set_uuid3))

                return 1;

        return 0;
}


static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        int ret;
        mdp_super_t *tmp1, *tmp2;

        tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
        tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

        if (!tmp1 || !tmp2) {
                ret = 0;
                printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
                goto abort;
        }

        *tmp1 = *sb1;
        *tmp2 = *sb2;

        /*
         * nr_disks is not constant
         */
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;

        if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
                ret = 0;
        else
                ret = 1;

abort:
        kfree(tmp1);
        kfree(tmp2);
        return ret;
}


static u32 md_csum_fold(u32 csum)
{
        csum = (csum & 0xffff) + (csum >> 16);
        return (csum & 0xffff) + (csum >> 16);
}
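
/*
 * Worked example (not part of the driver): folding 0x12345678 gives
 *	(0x5678 + 0x1234) = 0x68ac	(first fold)
 *	(0x68ac + 0x0000) = 0x68ac	(second fold, no carry left)
 * Two rounds suffice because the first fold can carry at most one bit
 * past bit 15, and the second fold absorbs that carry.
 */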

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
        u64 newcsum = 0;
        u32 *sb32 = (u32*)sb;
        int i;
        unsigned int disk_csum, csum;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;

        for (i = 0; i < MD_SB_BYTES/4 ; i++)
                newcsum += sb32[i];
        csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
        /* This used to use csum_partial, which was wrong for several
         * reasons including that different results are returned on
         * different architectures.  It isn't critical that we get exactly
         * the same return value as before (we always csum_fold before
         * testing, and that removes any differences).  However as we
         * know that csum_partial always returned a 16bit value on
         * alphas, do a fold to maximise conformity to previous behaviour.
         */
        sb->sb_csum = md_csum_fold(disk_csum);
#else
        sb->sb_csum = disk_csum;
#endif
        return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
        char                *name;
        struct module       *owner;
        int                 (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
                                          int minor_version);
        int                 (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
        void                (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
        unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
                                                unsigned long long size);
};

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;
        sector_t sb_offset;

        /*
         * Calculate the position of the superblock,
         * it's at the end of the disk.
         *
         * It also happens to be a multiple of 4Kb.
         */
        sb_offset = calc_dev_sboffset(rdev->bdev);
        rdev->sb_offset = sb_offset;

        ret = read_disk_sb(rdev, MD_SB_BYTES);
        if (ret) return ret;

        ret = -EINVAL;

        bdevname(rdev->bdev, b);
        sb = (mdp_super_t*)page_address(rdev->sb_page);

        if (sb->md_magic != MD_SB_MAGIC) {
                printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
                       b);
                goto abort;
        }

        if (sb->major_version != 0 ||
            sb->minor_version < 90 ||
            sb->minor_version > 91) {
                printk(KERN_WARNING "Bad version number %d.%d on %s\n",
                        sb->major_version, sb->minor_version,
                        b);
                goto abort;
        }

        if (sb->raid_disks <= 0)
                goto abort;

        if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
        }

        rdev->preferred_minor = sb->md_minor;
        rdev->data_offset = 0;
        rdev->sb_size = MD_SB_BYTES;

        if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
                if (sb->level != 1 && sb->level != 4
                    && sb->level != 5 && sb->level != 6
                    && sb->level != 10) {
                        /* FIXME use a better test */
                        printk(KERN_WARNING
                               "md: bitmaps not supported for this level.\n");
                        goto abort;
                }
        }

        if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = sb->this_disk.number;

        if (!refdev) {
                ret = 1;
        } else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
                if (!uuid_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
                if (!sb_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has same UUID"
                               " but different superblock to %s\n",
                               b, bdevname(refdev->bdev, b2));
                        goto abort;
                }
                ev1 = md_event(sb);
                ev2 = md_event(refsb);
                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        rdev->size = calc_dev_size(rdev, sb->chunk_size);

        if (rdev->size < sb->size && sb->level > 1)
                /* "this cannot possibly happen" ... */
                ret = -EINVAL;

 abort:
        return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_disk_t *desc;
        mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
        __u64 ev1 = md_event(sb);

        rdev->raid_disk = -1;
        clear_bit(Faulty, &rdev->flags);
        clear_bit(In_sync, &rdev->flags);
        clear_bit(WriteMostly, &rdev->flags);
        clear_bit(BarriersNotsupp, &rdev->flags);

        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
                mddev->patch_version = sb->patch_version;
                mddev->external = 0;
                mddev->chunk_size = sb->chunk_size;
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
                mddev->level = sb->level;
                mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
                mddev->size = sb->size;
                mddev->events = ev1;
                mddev->bitmap_offset = 0;
                mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

                if (mddev->minor_version >= 91) {
                        mddev->reshape_position = sb->reshape_position;
                        mddev->delta_disks = sb->delta_disks;
                        mddev->new_level = sb->new_level;
                        mddev->new_layout = sb->new_layout;
                        mddev->new_chunk = sb->new_chunk;
                } else {
                        mddev->reshape_position = MaxSector;
                        mddev->delta_disks = 0;
                        mddev->new_level = mddev->level;
                        mddev->new_layout = mddev->layout;
                        mddev->new_chunk = mddev->chunk_size;
                }

                if (sb->state & (1<<MD_SB_CLEAN))
                        mddev->recovery_cp = MaxSector;
                else {
                        if (sb->events_hi == sb->cp_events_hi &&
                                sb->events_lo == sb->cp_events_lo) {
                                mddev->recovery_cp = sb->recovery_cp;
                        } else
                                mddev->recovery_cp = 0;
                }

                memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
                memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
                memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
                memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

                mddev->max_disks = MD_SB_DISKS;

                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
                    mddev->bitmap_file == NULL)
                        mddev->bitmap_offset = mddev->default_bitmap_offset;

        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        } else if (mddev->bitmap) {
                /* if adding to array with a bitmap, then we can accept an
                 * older device ... but not too old.
                 */
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
        } else {
                if (ev1 < mddev->events)
                        /* just a hot-add of a new device, leave raid_disk at -1 */
                        return 0;
        }

        if (mddev->level != LEVEL_MULTIPATH) {
                desc = sb->disks + rdev->desc_nr;

                if (desc->state & (1<<MD_DISK_FAULTY))
                        set_bit(Faulty, &rdev->flags);
                else if (desc->state & (1<<MD_DISK_SYNC) /* &&
                            desc->raid_disk < mddev->raid_disks */) {
                        set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = desc->raid_disk;
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                set_bit(In_sync, &rdev->flags);
        return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_super_t *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int next_spare = mddev->raid_disks;


        /* make rdev->sb match mddev data..
         *
         * 1/ zero out disks
         * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
         * 3/ any empty disks < next_spare become removed
         *
         * disks[0] gets initialised to REMOVED because
         * we cannot be sure from other fields if it has
         * been initialised or not.
         */
        int i;
        int active=0, working=0,failed=0,spare=0,nr_disks=0;

        rdev->sb_size = MD_SB_BYTES;

        sb = (mdp_super_t*)page_address(rdev->sb_page);

        memset(sb, 0, sizeof(*sb));

        sb->md_magic = MD_SB_MAGIC;
        sb->major_version = mddev->major_version;
        sb->patch_version = mddev->patch_version;
        sb->gvalid_words  = 0; /* ignored */
        memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
        memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
        memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
        memcpy(&sb->set_uuid3, mddev->uuid+12,4);

        sb->ctime = mddev->ctime;
        sb->level = mddev->level;
        sb->size  = mddev->size;
        sb->raid_disks = mddev->raid_disks;
        sb->md_minor = mddev->md_minor;
        sb->not_persistent = 0;
        sb->utime = mddev->utime;
        sb->state = 0;
        sb->events_hi = (mddev->events>>32);
        sb->events_lo = (u32)mddev->events;

        if (mddev->reshape_position == MaxSector)
                sb->minor_version = 90;
        else {
                sb->minor_version = 91;
                sb->reshape_position = mddev->reshape_position;
                sb->new_level = mddev->new_level;
                sb->delta_disks = mddev->delta_disks;
                sb->new_layout = mddev->new_layout;
                sb->new_chunk = mddev->new_chunk;
        }
        mddev->minor_version = sb->minor_version;
        if (mddev->in_sync)
        {
                sb->recovery_cp = mddev->recovery_cp;
                sb->cp_events_hi = (mddev->events>>32);
                sb->cp_events_lo = (u32)mddev->events;
                if (mddev->recovery_cp == MaxSector)
                        sb->state = (1<< MD_SB_CLEAN);
        } else
                sb->recovery_cp = 0;

        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_size;

        if (mddev->bitmap && mddev->bitmap_file == NULL)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);

        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        rdev_for_each(rdev2, tmp, mddev) {
                mdp_disk_t *d;
                int desc_nr;
                if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
                    && !test_bit(Faulty, &rdev2->flags))
                        desc_nr = rdev2->raid_disk;
                else
                        desc_nr = next_spare++;
                rdev2->desc_nr = desc_nr;
                d = &sb->disks[rdev2->desc_nr];
                nr_disks++;
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
                if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
                    && !test_bit(Faulty, &rdev2->flags))
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
                if (test_bit(Faulty, &rdev2->flags))
                        d->state = (1<<MD_DISK_FAULTY);
                else if (test_bit(In_sync, &rdev2->flags)) {
                        d->state = (1<<MD_DISK_ACTIVE);
                        d->state |= (1<<MD_DISK_SYNC);
                        active++;
                        working++;
                } else {
                        d->state = 0;
                        spare++;
                        working++;
                }
                if (test_bit(WriteMostly, &rdev2->flags))
                        d->state |= (1<<MD_DISK_WRITEMOSTLY);
        }
        /* now set the "removed" and "faulty" bits on any missing devices */
        for (i=0 ; i < mddev->raid_disks ; i++) {
                mdp_disk_t *d = &sb->disks[i];
                if (d->state == 0 && d->number == 0) {
                        d->number = i;
                        d->raid_disk = i;
                        d->state = (1<<MD_DISK_REMOVED);
                        d->state |= (1<<MD_DISK_FAULTY);
                        failed++;
                }
        }
        sb->nr_disks = nr_disks;
        sb->active_disks = active;
        sb->working_disks = working;
        sb->failed_disks = failed;
        sb->spare_disks = spare;

        sb->this_disk = sb->disks[rdev->desc_nr];
        sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, unsigned long long size)
{
        if (size && size < rdev->mddev->size)
                return 0; /* component must fit device */
        size *= 2; /* convert to sectors */
        if (rdev->mddev->bitmap_offset)
                return 0; /* can't move bitmap */
        rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
        if (!size || size > rdev->sb_offset*2)
                size = rdev->sb_offset*2;
        md_super_write(rdev->mddev, rdev, rdev->sb_offset << 1, rdev->sb_size,
                       rdev->sb_page);
        md_super_wait(rdev->mddev);
        return size/2; /* kB for sysfs */
}
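
/*
 * Worked example of the unit dance above (not part of the driver):
 * sysfs talks in kB, the block layer in 512-byte sectors, and
 * calc_dev_sboffset() returns 1K blocks.  A request for 1000000 kB
 * becomes 2000000 sectors ("size *= 2"); if sb_offset is only
 * 976562 (kB), size is clamped to sb_offset*2 = 1953124 sectors, and
 * "size/2" hands 976562 kB back to sysfs.
 */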


/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
        __le32 disk_csum;
        u32 csum;
        unsigned long long newcsum;
        int size = 256 + le32_to_cpu(sb->max_dev)*2;
        __le32 *isuper = (__le32*)sb;
        int i;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        newcsum = 0;
        for (i=0; size>=4; size -= 4 )
                newcsum += le32_to_cpu(*isuper++);

        if (size == 2)
                newcsum += le16_to_cpu(*(__le16*) isuper);

        csum = (newcsum & 0xffffffff) + (newcsum >> 32);
        sb->sb_csum = disk_csum;
        return cpu_to_le32(csum);
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        struct mdp_superblock_1 *sb;
        int ret;
        sector_t sb_offset;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        int bmask;

        /*
         * Calculate the position of the superblock.
         * It is always aligned to a 4K boundary and
         * depending on minor_version, it can be:
         * 0: At least 8K, but less than 12K, from end of device
         * 1: At start of device
         * 2: 4K from start of device.
         */
        switch(minor_version) {
        case 0:
                sb_offset = rdev->bdev->bd_inode->i_size >> 9;
                sb_offset -= 8*2;
                sb_offset &= ~(sector_t)(4*2-1);
                /* convert from sectors to K */
                sb_offset /= 2;
                break;
        case 1:
                sb_offset = 0;
                break;
        case 2:
                sb_offset = 4;
                break;
        default:
                return -EINVAL;
        }
        rdev->sb_offset = sb_offset;

        /* superblock is rarely larger than 1K, but it can be larger,
         * and it is safe to read 4k, so we do that
         */
        ret = read_disk_sb(rdev, 4096);
        if (ret) return ret;


        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
            sb->major_version != cpu_to_le32(1) ||
            le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
            le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
            (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
                return -EINVAL;

        if (calc_sb_1_csum(sb) != sb->sb_csum) {
                printk("md: invalid superblock checksum on %s\n",
                        bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        if (le64_to_cpu(sb->data_size) < 10) {
                printk("md: data_size too small on %s\n",
                       bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
                if (sb->level != cpu_to_le32(1) &&
                    sb->level != cpu_to_le32(4) &&
                    sb->level != cpu_to_le32(5) &&
                    sb->level != cpu_to_le32(6) &&
                    sb->level != cpu_to_le32(10)) {
                        printk(KERN_WARNING
                               "md: bitmaps not supported for this level.\n");
                        return -EINVAL;
                }
        }

        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);
        atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

        rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
        bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
        if (rdev->sb_size & bmask)
                rdev->sb_size = (rdev->sb_size | bmask) + 1;

        if (minor_version
            && rdev->data_offset < sb_offset + (rdev->sb_size/512))
                return -EINVAL;

        if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = le32_to_cpu(sb->dev_number);

        if (!refdev) {
                ret = 1;
        } else {
                __u64 ev1, ev2;
                struct mdp_superblock_1 *refsb =
                        (struct mdp_superblock_1*)page_address(refdev->sb_page);

                if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
                    sb->level != refsb->level ||
                    sb->layout != refsb->layout ||
                    sb->chunksize != refsb->chunksize) {
                        printk(KERN_WARNING "md: %s has strangely different"
                                " superblock to %s\n",
                                bdevname(rdev->bdev,b),
                                bdevname(refdev->bdev,b2));
                        return -EINVAL;
                }
                ev1 = le64_to_cpu(sb->events);
                ev2 = le64_to_cpu(refsb->events);

                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        if (minor_version)
                rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
        else
                rdev->size = rdev->sb_offset;
        if (rdev->size < le64_to_cpu(sb->data_size)/2)
                return -EINVAL;
        rdev->size = le64_to_cpu(sb->data_size)/2;
        if (le32_to_cpu(sb->chunksize))
                rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

        if (le64_to_cpu(sb->size) > rdev->size*2)
                return -EINVAL;
        return ret;
}
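
/*
 * Worked example of the placement rules above (not part of the
 * driver): for a 1000000-sector device and minor_version 0,
 * i_size>>9 = 1000000 sectors; subtracting 16 sectors gives 999984,
 * which is already a multiple of 8 sectors (4K-aligned), so the
 * superblock sits between 8K and 12K from the end; dividing by 2
 * stores 499992 in rdev->sb_offset, which is in 1K units.  For
 * minor_version 1 the superblock is simply at offset 0, and for
 * minor_version 2 at 4K.
 */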

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
        __u64 ev1 = le64_to_cpu(sb->events);

        rdev->raid_disk = -1;
        clear_bit(Faulty, &rdev->flags);
        clear_bit(In_sync, &rdev->flags);
        clear_bit(WriteMostly, &rdev->flags);
        clear_bit(BarriersNotsupp, &rdev->flags);

        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
                mddev->patch_version = 0;
                mddev->external = 0;
                mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
                mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
                mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
                mddev->level = le32_to_cpu(sb->level);
                mddev->clevel[0] = 0;
                mddev->layout = le32_to_cpu(sb->layout);
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                mddev->size = le64_to_cpu(sb->size)/2;
                mddev->events = ev1;
                mddev->bitmap_offset = 0;
                mddev->default_bitmap_offset = 1024 >> 9;

                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);

                mddev->max_disks =  (4096-256)/2;

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
                    mddev->bitmap_file == NULL )
                        mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                        mddev->reshape_position = le64_to_cpu(sb->reshape_position);
                        mddev->delta_disks = le32_to_cpu(sb->delta_disks);
                        mddev->new_level = le32_to_cpu(sb->new_level);
                        mddev->new_layout = le32_to_cpu(sb->new_layout);
                        mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
                } else {
                        mddev->reshape_position = MaxSector;
                        mddev->delta_disks = 0;
                        mddev->new_level = mddev->level;
                        mddev->new_layout = mddev->layout;
                        mddev->new_chunk = mddev->chunk_size;
                }

        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        } else if (mddev->bitmap) {
                /* If adding to array with a bitmap, then we can accept an
                 * older device, but not too old.
                 */
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
        } else {
                if (ev1 < mddev->events)
                        /* just a hot-add of a new device, leave raid_disk at -1 */
                        return 0;
        }
        if (mddev->level != LEVEL_MULTIPATH) {
                int role;
                role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
                switch(role) {
                case 0xffff: /* spare */
                        break;
                case 0xfffe: /* faulty */
                        set_bit(Faulty, &rdev->flags);
                        break;
                default:
                        if ((le32_to_cpu(sb->feature_map) &
                             MD_FEATURE_RECOVERY_OFFSET))
                                rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
                        else
                                set_bit(In_sync, &rdev->flags);
                        rdev->raid_disk = role;
                        break;
                }
                if (sb->devflags & WriteMostly1)
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                set_bit(In_sync, &rdev->flags);

        return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int max_dev, i;
        /* make rdev->sb match mddev and rdev data. */

        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        sb->feature_map = 0;
        sb->pad0 = 0;
        sb->recovery_offset = cpu_to_le64(0);
        memset(sb->pad1, 0, sizeof(sb->pad1));
        memset(sb->pad2, 0, sizeof(sb->pad2));
        memset(sb->pad3, 0, sizeof(sb->pad3));

        sb->utime = cpu_to_le64((__u64)mddev->utime);
        sb->events = cpu_to_le64(mddev->events);
        if (mddev->in_sync)
                sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
        else
                sb->resync_offset = cpu_to_le64(0);

        sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

        sb->raid_disks = cpu_to_le32(mddev->raid_disks);
        sb->size = cpu_to_le64(mddev->size<<1);

        if (mddev->bitmap && mddev->bitmap_file == NULL) {
                sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
                sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
        }

        if (rdev->raid_disk >= 0 &&
            !test_bit(In_sync, &rdev->flags) &&
            rdev->recovery_offset > 0) {
                sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
                sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
        }

        if (mddev->reshape_position != MaxSector) {
                sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
                sb->reshape_position = cpu_to_le64(mddev->reshape_position);
                sb->new_layout = cpu_to_le32(mddev->new_layout);
                sb->delta_disks = cpu_to_le32(mddev->delta_disks);
                sb->new_level = cpu_to_le32(mddev->new_level);
                sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
        }

        max_dev = 0;
        rdev_for_each(rdev2, tmp, mddev)
                if (rdev2->desc_nr+1 > max_dev)
                        max_dev = rdev2->desc_nr+1;

        if (max_dev > le32_to_cpu(sb->max_dev))
                sb->max_dev = cpu_to_le32(max_dev);
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);

        rdev_for_each(rdev2, tmp, mddev) {
                i = rdev2->desc_nr;
                if (test_bit(Faulty, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
                else if (test_bit(In_sync, &rdev2->flags))
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else
                        sb->dev_roles[i] = cpu_to_le16(0xffff);
        }

        sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, unsigned long long size)
{
        struct mdp_superblock_1 *sb;
        unsigned long long max_size;
        if (size && size < rdev->mddev->size)
                return 0; /* component must fit device */
        size *= 2; /* convert to sectors */
        if (rdev->sb_offset < rdev->data_offset/2) {
                /* minor versions 1 and 2; superblock before data */
                max_size = (rdev->bdev->bd_inode->i_size >> 9);
                max_size -= rdev->data_offset;
                if (!size || size > max_size)
                        size = max_size;
        } else if (rdev->mddev->bitmap_offset) {
                /* minor version 0 with bitmap we can't move */
                return 0;
        } else {
                /* minor version 0; superblock after data */
                sector_t sb_offset;
                sb_offset = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
                sb_offset &= ~(sector_t)(4*2 - 1);
                max_size = rdev->size*2 + sb_offset - rdev->sb_offset*2;
                if (!size || size > max_size)
                        size = max_size;
                rdev->sb_offset = sb_offset/2;
        }
        sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
        sb->data_size = cpu_to_le64(size);
        sb->super_offset = rdev->sb_offset*2;
        sb->sb_csum = calc_sb_1_csum(sb);
        md_super_write(rdev->mddev, rdev, rdev->sb_offset << 1, rdev->sb_size,
                       rdev->sb_page);
        md_super_wait(rdev->mddev);
        return size/2; /* kB for sysfs */
}

static struct super_type super_types[] = {
        [0] = {
                .name   = "0.90.0",
                .owner  = THIS_MODULE,
                .load_super         = super_90_load,
                .validate_super     = super_90_validate,
                .sync_super         = super_90_sync,
                .rdev_size_change   = super_90_rdev_size_change,
        },
        [1] = {
                .name   = "md-1",
                .owner  = THIS_MODULE,
                .load_super         = super_1_load,
                .validate_super     = super_1_validate,
                .sync_super         = super_1_sync,
                .rdev_size_change   = super_1_rdev_size_change,
        },
};
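
/*
 * Illustrative sketch (not part of the driver): callers never invoke a
 * format handler directly; they index super_types[] by the on-disk
 * major version and dispatch through the ops, e.g. (assuming 'version'
 * has already been validated as 0 or 1):
 *
 *	err = super_types[version].load_super(rdev, refdev, minor_version);
 *	if (err >= 0)
 *		err = super_types[version].validate_super(mddev, rdev);
 *
 * load_super returns 1 when rdev should become the new refdev, 0 when
 * it is merely compatible, and a negative errno on failure.
 */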

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
        struct list_head *tmp, *tmp2;
        mdk_rdev_t *rdev, *rdev2;

        rdev_for_each(rdev, tmp, mddev1)
                rdev_for_each(rdev2, tmp2, mddev2)
                        if (rdev->bdev->bd_contains ==
                            rdev2->bdev->bd_contains)
                                return 1;

        return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
        char b[BDEVNAME_SIZE];
        struct kobject *ko;
        char *s;
        int err;

        if (rdev->mddev) {
                MD_BUG();
                return -EINVAL;
        }

        /* prevent duplicates */
        if (find_rdev(mddev, rdev->bdev->bd_dev))
                return -EEXIST;

        /* make sure rdev->size exceeds mddev->size */
        if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
                if (mddev->pers) {
                        /* Cannot change size, so fail
                         * If mddev->level <= 0, then we don't care
                         * about aligning sizes (e.g. linear)
                         */
                        if (mddev->level > 0)
                                return -ENOSPC;
                } else
                        mddev->size = rdev->size;
        }

        /* Verify rdev->desc_nr is unique.
         * If it is -1, assign a free number, else
         * check number is not in use
         */
        if (rdev->desc_nr < 0) {
                int choice = 0;
                if (mddev->pers) choice = mddev->raid_disks;
                while (find_rdev_nr(mddev, choice))
                        choice++;
                rdev->desc_nr = choice;
        } else {
                if (find_rdev_nr(mddev, rdev->desc_nr))
                        return -EBUSY;
        }
        bdevname(rdev->bdev,b);
        while ( (s=strchr(b, '/')) != NULL)
                *s = '!';

        rdev->mddev = mddev;
        printk(KERN_INFO "md: bind<%s>\n", b);

        if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
                goto fail;

        if (rdev->bdev->bd_part)
                ko = &rdev->bdev->bd_part->dev.kobj;
        else
                ko = &rdev->bdev->bd_disk->dev.kobj;
        if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
                kobject_del(&rdev->kobj);
                goto fail;
        }
        list_add(&rdev->same_set, &mddev->disks);
        bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
        return 0;

 fail:
        printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
               b, mdname(mddev));
        return err;
}

1498 static void md_delayed_delete(struct work_struct *ws)
1499 {
1500         mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1501         kobject_del(&rdev->kobj);
1502         kobject_put(&rdev->kobj);
1503 }
1504
1505 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1506 {
1507         char b[BDEVNAME_SIZE];
1508         if (!rdev->mddev) {
1509                 MD_BUG();
1510                 return;
1511         }
1512         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1513         list_del_init(&rdev->same_set);
1514         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1515         rdev->mddev = NULL;
1516         sysfs_remove_link(&rdev->kobj, "block");
1517
1518         /* We need to delay this, otherwise we can deadlock when
1519          * writing 'remove' to "dev/state"
1520          */
1521         INIT_WORK(&rdev->del_work, md_delayed_delete);
1522         kobject_get(&rdev->kobj);
1523         schedule_work(&rdev->del_work);
1524 }
1525
1526 /*
1527  * prevent the device from being mounted, repartitioned or
1528  * otherwise reused by a RAID array (or any other kernel
1529  * subsystem), by bd_claiming the device.
1530  */
1531 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
1532 {
1533         int err = 0;
1534         struct block_device *bdev;
1535         char b[BDEVNAME_SIZE];
1536
1537         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1538         if (IS_ERR(bdev)) {
1539                 printk(KERN_ERR "md: could not open %s.\n",
1540                         __bdevname(dev, b));
1541                 return PTR_ERR(bdev);
1542         }
1543         err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
1544         if (err) {
1545                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1546                         bdevname(bdev, b));
1547                 blkdev_put(bdev);
1548                 return err;
1549         }
1550         if (!shared)
1551                 set_bit(AllReserved, &rdev->flags);
1552         rdev->bdev = bdev;
1553         return err;
1554 }
1555
1556 static void unlock_rdev(mdk_rdev_t *rdev)
1557 {
1558         struct block_device *bdev = rdev->bdev;
1559         rdev->bdev = NULL;
1560         if (!bdev)
1561                 MD_BUG();
1562         bd_release(bdev);
1563         blkdev_put(bdev);
1564 }
1565
1566 void md_autodetect_dev(dev_t dev);
1567
1568 static void export_rdev(mdk_rdev_t * rdev)
1569 {
1570         char b[BDEVNAME_SIZE];
1571         printk(KERN_INFO "md: export_rdev(%s)\n",
1572                 bdevname(rdev->bdev,b));
1573         if (rdev->mddev)
1574                 MD_BUG();
1575         free_disk_sb(rdev);
1576         list_del_init(&rdev->same_set);
1577 #ifndef MODULE
1578         if (test_bit(AutoDetected, &rdev->flags))
1579                 md_autodetect_dev(rdev->bdev->bd_dev);
1580 #endif
1581         unlock_rdev(rdev);
1582         kobject_put(&rdev->kobj);
1583 }
1584
1585 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1586 {
1587         unbind_rdev_from_array(rdev);
1588         export_rdev(rdev);
1589 }
1590
1591 static void export_array(mddev_t *mddev)
1592 {
1593         struct list_head *tmp;
1594         mdk_rdev_t *rdev;
1595
1596         rdev_for_each(rdev, tmp, mddev) {
1597                 if (!rdev->mddev) {
1598                         MD_BUG();
1599                         continue;
1600                 }
1601                 kick_rdev_from_array(rdev);
1602         }
1603         if (!list_empty(&mddev->disks))
1604                 MD_BUG();
1605         mddev->raid_disks = 0;
1606         mddev->major_version = 0;
1607 }
1608
1609 static void print_desc(mdp_disk_t *desc)
1610 {
1611         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1612                 desc->major,desc->minor,desc->raid_disk,desc->state);
1613 }
1614
1615 static void print_sb(mdp_super_t *sb)
1616 {
1617         int i;
1618
1619         printk(KERN_INFO 
1620                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1621                 sb->major_version, sb->minor_version, sb->patch_version,
1622                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1623                 sb->ctime);
1624         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1625                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1626                 sb->md_minor, sb->layout, sb->chunk_size);
1627         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1628                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1629                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1630                 sb->failed_disks, sb->spare_disks,
1631                 sb->sb_csum, (unsigned long)sb->events_lo);
1632
1633         printk(KERN_INFO);
1634         for (i = 0; i < MD_SB_DISKS; i++) {
1635                 mdp_disk_t *desc;
1636
1637                 desc = sb->disks + i;
1638                 if (desc->number || desc->major || desc->minor ||
1639                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1640                         printk("     D %2d: ", i);
1641                         print_desc(desc);
1642                 }
1643         }
1644         printk(KERN_INFO "md:     THIS: ");
1645         print_desc(&sb->this_disk);
1646
1647 }
1648
1649 static void print_rdev(mdk_rdev_t *rdev)
1650 {
1651         char b[BDEVNAME_SIZE];
1652         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1653                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1654                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1655                 rdev->desc_nr);
1656         if (rdev->sb_loaded) {
1657                 printk(KERN_INFO "md: rdev superblock:\n");
1658                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1659         } else
1660                 printk(KERN_INFO "md: no rdev superblock!\n");
1661 }
1662
1663 static void md_print_devices(void)
1664 {
1665         struct list_head *tmp, *tmp2;
1666         mdk_rdev_t *rdev;
1667         mddev_t *mddev;
1668         char b[BDEVNAME_SIZE];
1669
1670         printk("\n");
1671         printk("md:     **********************************\n");
1672         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1673         printk("md:     **********************************\n");
1674         for_each_mddev(mddev, tmp) {
1675
1676                 if (mddev->bitmap)
1677                         bitmap_print_sb(mddev->bitmap);
1678                 else
1679                         printk("%s: ", mdname(mddev));
1680                 rdev_for_each(rdev, tmp2, mddev)
1681                         printk("<%s>", bdevname(rdev->bdev,b));
1682                 printk("\n");
1683
1684                 rdev_for_each(rdev, tmp2, mddev)
1685                         print_rdev(rdev);
1686         }
1687         printk("md:     **********************************\n");
1688         printk("\n");
1689 }
1690
1691
1692 static void sync_sbs(mddev_t * mddev, int nospares)
1693 {
1694         /* Update each superblock (in-memory image), but
1695          * if we are allowed to, skip spares which already
1696          * have the right event counter, or have one earlier
1697          * (which would mean they aren't being marked as dirty
1698          * with the rest of the array)
1699          */
1700         mdk_rdev_t *rdev;
1701         struct list_head *tmp;
1702
1703         rdev_for_each(rdev, tmp, mddev) {
1704                 if (rdev->sb_events == mddev->events ||
1705                     (nospares &&
1706                      rdev->raid_disk < 0 &&
1707                      (rdev->sb_events&1)==0 &&
1708                      rdev->sb_events+1 == mddev->events)) {
1709                         /* Don't update this superblock */
1710                         rdev->sb_loaded = 2;
1711                 } else {
1712                         super_types[mddev->major_version].
1713                                 sync_super(mddev, rdev);
1714                         rdev->sb_loaded = 1;
1715                 }
1716         }
1717 }
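/*
 * Editor's annotation -- the skip rule above in numbers: with nospares
 * set and mddev->events == 5, a spare whose sb_events == 4 is skipped,
 * since 4 is even (written while clean) and 4 + 1 == 5 means only a
 * clean->dirty transition has happened since its superblock was synced.
 */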
1718
1719 static void md_update_sb(mddev_t * mddev, int force_change)
1720 {
1721         struct list_head *tmp;
1722         mdk_rdev_t *rdev;
1723         int sync_req;
1724         int nospares = 0;
1725
1726         if (mddev->external)
1727                 return;
1728 repeat:
1729         spin_lock_irq(&mddev->write_lock);
1730
1731         set_bit(MD_CHANGE_PENDING, &mddev->flags);
1732         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1733                 force_change = 1;
1734         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1735                 /* just a clean <-> dirty transition; possibly leave spares
1736                  * alone, though if 'events' isn't the right even/odd, we
1737                  * will have to do the spares after all
1738                  */
1739                 nospares = 1;
1740         if (force_change)
1741                 nospares = 0;
1742         if (mddev->degraded)
1743                 /* If the array is degraded, then skipping spares is both
1744                  * dangerous and fairly pointless.
1745                  * Dangerous because a device that was removed from the array
1746                  * might have an event_count that still looks up-to-date,
1747                  * so it can be re-added without a resync.
1748                  * Pointless because if there are any spares to skip,
1749                  * then a recovery will happen and soon that array won't
1750                  * be degraded any more and the spare can go back to sleep then.
1751                  */
1752                 nospares = 0;
1753
1754         sync_req = mddev->in_sync;
1755         mddev->utime = get_seconds();
1756
1757         /* If this is just a dirty<->clean transition, and the array is clean
1758          * and 'events' is odd, we can roll back to the previous clean state */
1759         if (nospares
1760             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1761             && (mddev->events & 1)
1762             && mddev->events != 1)
1763                 mddev->events--;
1764         else {
1765                 /* otherwise we have to go forward and ... */
1766                 mddev->events ++;
1767                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1768                         /* .. if the array isn't clean, insist on an odd 'events' */
1769                         if ((mddev->events&1)==0) {
1770                                 mddev->events++;
1771                                 nospares = 0;
1772                         }
1773                 } else {
1774                         /* otherwise insist on an even 'events' (for clean states) */
1775                         if ((mddev->events&1)) {
1776                                 mddev->events++;
1777                                 nospares = 0;
1778                         }
1779                 }
1780         }
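        /*
         * Editor's annotation: even 'events' values mean clean, odd mean
         * dirty.  An array clean at events == 4 goes dirty (4 -> 5); on
         * the next dirty->clean transition with nospares set, 5 is rolled
         * back to 4 instead of advancing to 6, so spare superblocks
         * recorded at 4 stay current and need not be rewritten.
         */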
1781
1782         if (!mddev->events) {
1783                 /*
1784                  * oops, this 64-bit counter should never wrap.
1785                  * Either we are somewhere around ~1 trillion A.C., assuming
1786                  * 1 reboot per second, or we have a bug:
1787                  */
1788                 MD_BUG();
1789                 mddev->events --;
1790         }
1791
1792         /*
1793          * do not write anything to disk if using
1794          * nonpersistent superblocks
1795          */
1796         if (!mddev->persistent) {
1797                 if (!mddev->external)
1798                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1799
1800                 spin_unlock_irq(&mddev->write_lock);
1801                 wake_up(&mddev->sb_wait);
1802                 return;
1803         }
1804         sync_sbs(mddev, nospares);
1805         spin_unlock_irq(&mddev->write_lock);
1806
1807         dprintk(KERN_INFO 
1808                 "md: updating %s RAID superblock on device (in sync %d)\n",
1809                 mdname(mddev),mddev->in_sync);
1810
1811         bitmap_update_sb(mddev->bitmap);
1812         rdev_for_each(rdev, tmp, mddev) {
1813                 char b[BDEVNAME_SIZE];
1814                 dprintk(KERN_INFO "md: ");
1815                 if (rdev->sb_loaded != 1)
1816                         continue; /* no noise on spare devices */
1817                 if (test_bit(Faulty, &rdev->flags))
1818                         dprintk("(skipping faulty ");
1819
1820                 dprintk("%s ", bdevname(rdev->bdev,b));
1821                 if (!test_bit(Faulty, &rdev->flags)) {
1822                         md_super_write(mddev,rdev,
1823                                        rdev->sb_offset<<1, rdev->sb_size,
1824                                        rdev->sb_page);
1825                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1826                                 bdevname(rdev->bdev,b),
1827                                 (unsigned long long)rdev->sb_offset);
1828                         rdev->sb_events = mddev->events;
1829
1830                 } else
1831                         dprintk(")\n");
1832                 if (mddev->level == LEVEL_MULTIPATH)
1833                         /* only need to write one superblock... */
1834                         break;
1835         }
1836         md_super_wait(mddev);
1837         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1838
1839         spin_lock_irq(&mddev->write_lock);
1840         if (mddev->in_sync != sync_req ||
1841             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1842                 /* have to write it out again */
1843                 spin_unlock_irq(&mddev->write_lock);
1844                 goto repeat;
1845         }
1846         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1847         spin_unlock_irq(&mddev->write_lock);
1848         wake_up(&mddev->sb_wait);
1849
1850 }
1851
1852 /* Words written to sysfs files may, or may not, be \n terminated.
1853  * We want to accept either case. For this we use cmd_match.
1854  */
1855 static int cmd_match(const char *cmd, const char *str)
1856 {
1857         /* See if cmd, written into a sysfs file, matches
1858          * str.  They must either be the same, or cmd can
1859          * have a trailing newline
1860          */
1861         while (*cmd && *str && *cmd == *str) {
1862                 cmd++;
1863                 str++;
1864         }
1865         if (*cmd == '\n')
1866                 cmd++;
1867         if (*str || *cmd)
1868                 return 0;
1869         return 1;
1870 }
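/*
 * Editor's annotation, illustrative values only:
 *
 *	cmd_match("faulty\n", "faulty") == 1
 *	cmd_match("faulty",   "faulty") == 1
 *	cmd_match("fault",    "faulty") == 0	(str not fully matched)
 *	cmd_match("faultyX",  "faulty") == 0	(trailing garbage in cmd)
 */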
1871
1872 struct rdev_sysfs_entry {
1873         struct attribute attr;
1874         ssize_t (*show)(mdk_rdev_t *, char *);
1875         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1876 };
1877
1878 static ssize_t
1879 state_show(mdk_rdev_t *rdev, char *page)
1880 {
1881         char *sep = "";
1882         size_t len = 0;
1883
1884         if (test_bit(Faulty, &rdev->flags)) {
1885                 len+= sprintf(page+len, "%sfaulty",sep);
1886                 sep = ",";
1887         }
1888         if (test_bit(In_sync, &rdev->flags)) {
1889                 len += sprintf(page+len, "%sin_sync",sep);
1890                 sep = ",";
1891         }
1892         if (test_bit(WriteMostly, &rdev->flags)) {
1893                 len += sprintf(page+len, "%swrite_mostly",sep);
1894                 sep = ",";
1895         }
1896         if (test_bit(Blocked, &rdev->flags)) {
1897                 len += sprintf(page+len, "%sblocked", sep);
1898                 sep = ",";
1899         }
1900         if (!test_bit(Faulty, &rdev->flags) &&
1901             !test_bit(In_sync, &rdev->flags)) {
1902                 len += sprintf(page+len, "%sspare", sep);
1903                 sep = ",";
1904         }
1905         return len+sprintf(page+len, "\n");
1906 }
1907
1908 static ssize_t
1909 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1910 {
1911         /* can write
1912          *  faulty  - simulates an error
1913          *  remove  - disconnects the device
1914          *  writemostly - sets write_mostly
1915          *  -writemostly - clears write_mostly
1916          *  blocked - sets the Blocked flag
1917          *  -blocked - clears the Blocked flag
1918          */
1919         int err = -EINVAL;
1920         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1921                 md_error(rdev->mddev, rdev);
1922                 err = 0;
1923         } else if (cmd_match(buf, "remove")) {
1924                 if (rdev->raid_disk >= 0)
1925                         err = -EBUSY;
1926                 else {
1927                         mddev_t *mddev = rdev->mddev;
1928                         kick_rdev_from_array(rdev);
1929                         if (mddev->pers)
1930                                 md_update_sb(mddev, 1);
1931                         md_new_event(mddev);
1932                         err = 0;
1933                 }
1934         } else if (cmd_match(buf, "writemostly")) {
1935                 set_bit(WriteMostly, &rdev->flags);
1936                 err = 0;
1937         } else if (cmd_match(buf, "-writemostly")) {
1938                 clear_bit(WriteMostly, &rdev->flags);
1939                 err = 0;
1940         } else if (cmd_match(buf, "blocked")) {
1941                 set_bit(Blocked, &rdev->flags);
1942                 err = 0;
1943         } else if (cmd_match(buf, "-blocked")) {
1944                 clear_bit(Blocked, &rdev->flags);
1945                 wake_up(&rdev->blocked_wait);
1946                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
1947                 md_wakeup_thread(rdev->mddev->thread);
1948
1949                 err = 0;
1950         }
1951         if (!err)
1952                 sysfs_notify(&rdev->kobj, NULL, "state");
1953         return err ? err : len;
1954 }
1955 static struct rdev_sysfs_entry rdev_state =
1956 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
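/*
 * Editor's annotation: reading "state" gives a comma-separated list such
 * as "in_sync\n" or "faulty,blocked\n"; writing one of the words listed
 * in state_store() above flips the corresponding flag.
 */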
1957
1958 static ssize_t
1959 errors_show(mdk_rdev_t *rdev, char *page)
1960 {
1961         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1962 }
1963
1964 static ssize_t
1965 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1966 {
1967         char *e;
1968         unsigned long n = simple_strtoul(buf, &e, 10);
1969         if (*buf && (*e == 0 || *e == '\n')) {
1970                 atomic_set(&rdev->corrected_errors, n);
1971                 return len;
1972         }
1973         return -EINVAL;
1974 }
1975 static struct rdev_sysfs_entry rdev_errors =
1976 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1977
1978 static ssize_t
1979 slot_show(mdk_rdev_t *rdev, char *page)
1980 {
1981         if (rdev->raid_disk < 0)
1982                 return sprintf(page, "none\n");
1983         else
1984                 return sprintf(page, "%d\n", rdev->raid_disk);
1985 }
1986
1987 static ssize_t
1988 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1989 {
1990         char *e;
1991         int err;
1992         char nm[20];
1993         int slot = simple_strtoul(buf, &e, 10);
1994         if (strncmp(buf, "none", 4)==0)
1995                 slot = -1;
1996         else if (e==buf || (*e && *e!= '\n'))
1997                 return -EINVAL;
1998         if (rdev->mddev->pers && slot == -1) {
1999                 /* Setting 'slot' on an active array requires also
2000                  * updating the 'rd%d' link, and communicating
2001                  * with the personality via ->hot_*_disk.
2002                  * For now we only support removing
2003                  * failed/spare devices.  This normally happens automatically,
2004                  * but not when the metadata is externally managed.
2005                  */
2006                 if (rdev->raid_disk == -1)
2007                         return -EEXIST;
2008                 /* personality does all needed checks */
2009                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2010                         return -EINVAL;
2011                 err = rdev->mddev->pers->
2012                         hot_remove_disk(rdev->mddev, rdev->raid_disk);
2013                 if (err)
2014                         return err;
2015                 sprintf(nm, "rd%d", rdev->raid_disk);
2016                 sysfs_remove_link(&rdev->mddev->kobj, nm);
2017                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2018                 md_wakeup_thread(rdev->mddev->thread);
2019         } else if (rdev->mddev->pers) {
2020                 mdk_rdev_t *rdev2;
2021                 struct list_head *tmp;
2022                 /* Activating a spare .. or possibly reactivating
2023                  * if we ever get bitmaps working here.
2024                  */
2025
2026                 if (rdev->raid_disk != -1)
2027                         return -EBUSY;
2028
2029                 if (rdev->mddev->pers->hot_add_disk == NULL)
2030                         return -EINVAL;
2031
2032                 rdev_for_each(rdev2, tmp, rdev->mddev)
2033                         if (rdev2->raid_disk == slot)
2034                                 return -EEXIST;
2035
2036                 rdev->raid_disk = slot;
2037                 if (test_bit(In_sync, &rdev->flags))
2038                         rdev->saved_raid_disk = slot;
2039                 else
2040                         rdev->saved_raid_disk = -1;
2041                 err = rdev->mddev->pers->
2042                         hot_add_disk(rdev->mddev, rdev);
2043                 if (err) {
2044                         rdev->raid_disk = -1;
2045                         return err;
2046                 } else
2047                         sysfs_notify(&rdev->kobj, NULL, "state");
2048                 sprintf(nm, "rd%d", rdev->raid_disk);
2049                 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
2050                         printk(KERN_WARNING
2051                                "md: cannot register "
2052                                "%s for %s\n",
2053                                nm, mdname(rdev->mddev));
2054
2055                 /* don't wakeup anyone, leave that to userspace. */
2056         } else {
2057                 if (slot >= rdev->mddev->raid_disks)
2058                         return -ENOSPC;
2059                 rdev->raid_disk = slot;
2060                 /* assume it is working */
2061                 clear_bit(Faulty, &rdev->flags);
2062                 clear_bit(WriteMostly, &rdev->flags);
2063                 set_bit(In_sync, &rdev->flags);
2064                 sysfs_notify(&rdev->kobj, NULL, "state");
2065         }
2066         return len;
2067 }
2068
2069
2070 static struct rdev_sysfs_entry rdev_slot =
2071 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2072
2073 static ssize_t
2074 offset_show(mdk_rdev_t *rdev, char *page)
2075 {
2076         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2077 }
2078
2079 static ssize_t
2080 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2081 {
2082         char *e;
2083         unsigned long long offset = simple_strtoull(buf, &e, 10);
2084         if (e==buf || (*e && *e != '\n'))
2085                 return -EINVAL;
2086         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2087                 return -EBUSY;
2088         if (rdev->size && rdev->mddev->external)
2089                 /* Must set offset before size, so overlap checks
2090                  * can be sane */
2091                 return -EBUSY;
2092         rdev->data_offset = offset;
2093         return len;
2094 }
2095
2096 static struct rdev_sysfs_entry rdev_offset =
2097 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2098
2099 static ssize_t
2100 rdev_size_show(mdk_rdev_t *rdev, char *page)
2101 {
2102         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
2103 }
2104
2105 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2106 {
2107         /* check if two start/length pairs overlap */
2108         if (s1+l1 <= s2)
2109                 return 0;
2110         if (s2+l2 <= s1)
2111                 return 0;
2112         return 1;
2113 }
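/*
 * Editor's annotation, illustrative values only -- the extents are
 * half-open, so regions that merely touch do not overlap:
 *
 *	overlaps(0, 100, 100, 50) == 0
 *	overlaps(0, 100,  99, 50) == 1
 */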
2114
2115 static ssize_t
2116 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2117 {
2118         char *e;
2119         unsigned long long size = simple_strtoull(buf, &e, 10);
2120         unsigned long long oldsize = rdev->size;
2121         mddev_t *my_mddev = rdev->mddev;
2122
2123         if (e==buf || (*e && *e != '\n'))
2124                 return -EINVAL;
2125         if (my_mddev->pers && rdev->raid_disk >= 0) {
2126                 if (rdev->mddev->persistent) {
2127                         size = super_types[rdev->mddev->major_version].
2128                                 rdev_size_change(rdev, size);
2129                         if (!size)
2130                                 return -EBUSY;
2131                 } else if (!size) {
2132                         size = (rdev->bdev->bd_inode->i_size >> 10);
2133                         size -= rdev->data_offset/2;
2134                 }
2135                 if (size < rdev->mddev->size)
2136                         return -EINVAL; /* component must fit device */
2137         }
2138
2139         rdev->size = size;
2140         if (size > oldsize && rdev->mddev->external) {
2141                 /* need to check that all other rdevs with the same ->bdev
2142                  * do not overlap.  We need to unlock the mddev to avoid
2143                  * a deadlock.  We have already changed rdev->size, and if
2144                  * we have to change it back, we will have the lock again.
2145                  */
2146                 mddev_t *mddev;
2147                 int overlap = 0;
2148                 struct list_head *tmp, *tmp2;
2149
2150                 mddev_unlock(my_mddev);
2151                 for_each_mddev(mddev, tmp) {
2152                         mdk_rdev_t *rdev2;
2153
2154                         mddev_lock(mddev);
2155                         rdev_for_each(rdev2, tmp2, mddev)
2156                                 if (test_bit(AllReserved, &rdev2->flags) ||
2157                                     (rdev->bdev == rdev2->bdev &&
2158                                      rdev != rdev2 &&
2159                                      overlaps(rdev->data_offset, rdev->size,
2160                                             rdev2->data_offset, rdev2->size))) {
2161                                         overlap = 1;
2162                                         break;
2163                                 }
2164                         mddev_unlock(mddev);
2165                         if (overlap) {
2166                                 mddev_put(mddev);
2167                                 break;
2168                         }
2169                 }
2170                 mddev_lock(my_mddev);
2171                 if (overlap) {
2172                         /* Someone else could have slipped in a size
2173                          * change here, but doing so is just silly.
2174                          * We put oldsize back because we *know* it is
2175                          * safe, and trust userspace not to race with
2176                          * itself
2177                          */
2178                         rdev->size = oldsize;
2179                         return -EBUSY;
2180                 }
2181         }
2182         if (size < my_mddev->size || my_mddev->size == 0)
2183                 my_mddev->size = size;
2184         return len;
2185 }
2186
2187 static struct rdev_sysfs_entry rdev_size =
2188 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
2189
2190 static struct attribute *rdev_default_attrs[] = {
2191         &rdev_state.attr,
2192         &rdev_errors.attr,
2193         &rdev_slot.attr,
2194         &rdev_offset.attr,
2195         &rdev_size.attr,
2196         NULL,
2197 };
2198 static ssize_t
2199 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2200 {
2201         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2202         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2203         mddev_t *mddev = rdev->mddev;
2204         ssize_t rv;
2205
2206         if (!entry->show)
2207                 return -EIO;
2208
2209         rv = mddev ? mddev_lock(mddev) : -EBUSY;
2210         if (!rv) {
2211                 if (rdev->mddev == NULL)
2212                         rv = -EBUSY;
2213                 else
2214                         rv = entry->show(rdev, page);
2215                 mddev_unlock(mddev);
2216         }
2217         return rv;
2218 }
2219
2220 static ssize_t
2221 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2222               const char *page, size_t length)
2223 {
2224         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2225         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2226         ssize_t rv;
2227         mddev_t *mddev = rdev->mddev;
2228
2229         if (!entry->store)
2230                 return -EIO;
2231         if (!capable(CAP_SYS_ADMIN))
2232                 return -EACCES;
2233         rv = mddev ? mddev_lock(mddev): -EBUSY;
2234         if (!rv) {
2235                 if (rdev->mddev == NULL)
2236                         rv = -EBUSY;
2237                 else
2238                         rv = entry->store(rdev, page, length);
2239                 mddev_unlock(mddev);
2240         }
2241         return rv;
2242 }
2243
2244 static void rdev_free(struct kobject *ko)
2245 {
2246         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
2247         kfree(rdev);
2248 }
2249 static struct sysfs_ops rdev_sysfs_ops = {
2250         .show           = rdev_attr_show,
2251         .store          = rdev_attr_store,
2252 };
2253 static struct kobj_type rdev_ktype = {
2254         .release        = rdev_free,
2255         .sysfs_ops      = &rdev_sysfs_ops,
2256         .default_attrs  = rdev_default_attrs,
2257 };
2258
2259 /*
2260  * Import a device. If 'super_format' >= 0, then sanity check the superblock
2261  *
2262  * mark the device faulty if:
2263  *
2264  *   - the device is nonexistent (zero size)
2265  *   - the device has no valid superblock
2266  *
2267  * a faulty rdev _never_ has rdev->sb set.
2268  */
2269 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
2270 {
2271         char b[BDEVNAME_SIZE];
2272         int err;
2273         mdk_rdev_t *rdev;
2274         sector_t size;
2275
2276         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
2277         if (!rdev) {
2278                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
2279                 return ERR_PTR(-ENOMEM);
2280         }
2281
2282         if ((err = alloc_disk_sb(rdev)))
2283                 goto abort_free;
2284
2285         err = lock_rdev(rdev, newdev, super_format == -2);
2286         if (err)
2287                 goto abort_free;
2288
2289         kobject_init(&rdev->kobj, &rdev_ktype);
2290
2291         rdev->desc_nr = -1;
2292         rdev->saved_raid_disk = -1;
2293         rdev->raid_disk = -1;
2294         rdev->flags = 0;
2295         rdev->data_offset = 0;
2296         rdev->sb_events = 0;
2297         atomic_set(&rdev->nr_pending, 0);
2298         atomic_set(&rdev->read_errors, 0);
2299         atomic_set(&rdev->corrected_errors, 0);
2300
2301         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2302         if (!size) {
2303                 printk(KERN_WARNING 
2304                         "md: %s has zero or unknown size, marking faulty!\n",
2305                         bdevname(rdev->bdev,b));
2306                 err = -EINVAL;
2307                 goto abort_free;
2308         }
2309
2310         if (super_format >= 0) {
2311                 err = super_types[super_format].
2312                         load_super(rdev, NULL, super_minor);
2313                 if (err == -EINVAL) {
2314                         printk(KERN_WARNING
2315                                 "md: %s does not have a valid v%d.%d "
2316                                "superblock, not importing!\n",
2317                                 bdevname(rdev->bdev,b),
2318                                super_format, super_minor);
2319                         goto abort_free;
2320                 }
2321                 if (err < 0) {
2322                         printk(KERN_WARNING 
2323                                 "md: could not read %s's sb, not importing!\n",
2324                                 bdevname(rdev->bdev,b));
2325                         goto abort_free;
2326                 }
2327         }
2328
2329         INIT_LIST_HEAD(&rdev->same_set);
2330         init_waitqueue_head(&rdev->blocked_wait);
2331
2332         return rdev;
2333
2334 abort_free:
2335         if (rdev->sb_page) {
2336                 if (rdev->bdev)
2337                         unlock_rdev(rdev);
2338                 free_disk_sb(rdev);
2339         }
2340         kfree(rdev);
2341         return ERR_PTR(err);
2342 }
2343
2344 /*
2345  * Check a full RAID array for plausibility
2346  */
2347
2348
2349 static void analyze_sbs(mddev_t * mddev)
2350 {
2351         int i;
2352         struct list_head *tmp;
2353         mdk_rdev_t *rdev, *freshest;
2354         char b[BDEVNAME_SIZE];
2355
2356         freshest = NULL;
2357         rdev_for_each(rdev, tmp, mddev)
2358                 switch (super_types[mddev->major_version].
2359                         load_super(rdev, freshest, mddev->minor_version)) {
2360                 case 1:
2361                         freshest = rdev;
2362                         break;
2363                 case 0:
2364                         break;
2365                 default:
2366                         printk(KERN_ERR
2367                                 "md: fatal superblock inconsistency in %s"
2368                                 " -- removing from array\n", 
2369                                 bdevname(rdev->bdev,b));
2370                         kick_rdev_from_array(rdev);
2371                 }
2372
2373
2374         super_types[mddev->major_version].
2375                 validate_super(mddev, freshest);
2376
2377         i = 0;
2378         rdev_for_each(rdev, tmp, mddev) {
2379                 if (rdev != freshest)
2380                         if (super_types[mddev->major_version].
2381                             validate_super(mddev, rdev)) {
2382                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2383                                         " from array!\n",
2384                                         bdevname(rdev->bdev,b));
2385                                 kick_rdev_from_array(rdev);
2386                                 continue;
2387                         }
2388                 if (mddev->level == LEVEL_MULTIPATH) {
2389                         rdev->desc_nr = i++;
2390                         rdev->raid_disk = rdev->desc_nr;
2391                         set_bit(In_sync, &rdev->flags);
2392                 } else if (rdev->raid_disk >= mddev->raid_disks) {
2393                         rdev->raid_disk = -1;
2394                         clear_bit(In_sync, &rdev->flags);
2395                 }
2396         }
2397
2398
2399
2400         if (mddev->recovery_cp != MaxSector &&
2401             mddev->level >= 1)
2402                 printk(KERN_ERR "md: %s: raid array is not clean"
2403                        " -- starting background reconstruction\n",
2404                        mdname(mddev));
2405
2406 }
2407
2408 static ssize_t
2409 safe_delay_show(mddev_t *mddev, char *page)
2410 {
2411         int msec = (mddev->safemode_delay*1000)/HZ;
2412         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2413 }
2414 static ssize_t
2415 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2416 {
2417         int scale=1;
2418         int dot=0;
2419         int i;
2420         unsigned long msec;
2421         char buf[30];
2422         char *e;
2423         /* remove a period, and count digits after it */
2424         if (len >= sizeof(buf))
2425                 return -EINVAL;
2426         strlcpy(buf, cbuf, sizeof(buf));
2427         buf[len] = 0;
2428         for (i=0; i<len; i++) {
2429                 if (dot) {
2430                         if (isdigit(buf[i])) {
2431                                 buf[i-1] = buf[i];
2432                                 scale *= 10;
2433                         }
2434                         buf[i] = 0;
2435                 } else if (buf[i] == '.') {
2436                         dot=1;
2437                         buf[i] = 0;
2438                 }
2439         }
2440         msec = simple_strtoul(buf, &e, 10);
2441         if (e == buf || (*e && *e != '\n'))
2442                 return -EINVAL;
2443         msec = (msec * 1000) / scale;
2444         if (msec == 0)
2445                 mddev->safemode_delay = 0;
2446         else {
2447                 mddev->safemode_delay = (msec*HZ)/1000;
2448                 if (mddev->safemode_delay == 0)
2449                         mddev->safemode_delay = 1;
2450         }
2451         return len;
2452 }
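/*
 * Editor's annotation, worked example: writing "0.2" leaves buf == "02"
 * with scale == 10 (one digit after the '.'), so msec = (2 * 1000) / 10
 * == 200 and safemode_delay becomes 200*HZ/1000 jiffies.
 */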
2453 static struct md_sysfs_entry md_safe_delay =
2454 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
2455
2456 static ssize_t
2457 level_show(mddev_t *mddev, char *page)
2458 {
2459         struct mdk_personality *p = mddev->pers;
2460         if (p)
2461                 return sprintf(page, "%s\n", p->name);
2462         else if (mddev->clevel[0])
2463                 return sprintf(page, "%s\n", mddev->clevel);
2464         else if (mddev->level != LEVEL_NONE)
2465                 return sprintf(page, "%d\n", mddev->level);
2466         else
2467                 return 0;
2468 }
2469
2470 static ssize_t
2471 level_store(mddev_t *mddev, const char *buf, size_t len)
2472 {
2473         ssize_t rv = len;
2474         if (mddev->pers)
2475                 return -EBUSY;
2476         if (len == 0)
2477                 return 0;
2478         if (len >= sizeof(mddev->clevel))
2479                 return -ENOSPC;
2480         strncpy(mddev->clevel, buf, len);
2481         if (mddev->clevel[len-1] == '\n')
2482                 len--;
2483         mddev->clevel[len] = 0;
2484         mddev->level = LEVEL_NONE;
2485         return rv;
2486 }
2487
2488 static struct md_sysfs_entry md_level =
2489 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2490
2491
2492 static ssize_t
2493 layout_show(mddev_t *mddev, char *page)
2494 {
2495         /* just a number, not meaningful for all levels */
2496         if (mddev->reshape_position != MaxSector &&
2497             mddev->layout != mddev->new_layout)
2498                 return sprintf(page, "%d (%d)\n",
2499                                mddev->new_layout, mddev->layout);
2500         return sprintf(page, "%d\n", mddev->layout);
2501 }
2502
2503 static ssize_t
2504 layout_store(mddev_t *mddev, const char *buf, size_t len)
2505 {
2506         char *e;
2507         unsigned long n = simple_strtoul(buf, &e, 10);
2508
2509         if (!*buf || (*e && *e != '\n'))
2510                 return -EINVAL;
2511
2512         if (mddev->pers)
2513                 return -EBUSY;
2514         if (mddev->reshape_position != MaxSector)
2515                 mddev->new_layout = n;
2516         else
2517                 mddev->layout = n;
2518         return len;
2519 }
2520 static struct md_sysfs_entry md_layout =
2521 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2522
2523
2524 static ssize_t
2525 raid_disks_show(mddev_t *mddev, char *page)
2526 {
2527         if (mddev->raid_disks == 0)
2528                 return 0;
2529         if (mddev->reshape_position != MaxSector &&
2530             mddev->delta_disks != 0)
2531                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2532                                mddev->raid_disks - mddev->delta_disks);
2533         return sprintf(page, "%d\n", mddev->raid_disks);
2534 }
2535
2536 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2537
2538 static ssize_t
2539 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2540 {
2541         char *e;
2542         int rv = 0;
2543         unsigned long n = simple_strtoul(buf, &e, 10);
2544
2545         if (!*buf || (*e && *e != '\n'))
2546                 return -EINVAL;
2547
2548         if (mddev->pers)
2549                 rv = update_raid_disks(mddev, n);
2550         else if (mddev->reshape_position != MaxSector) {
2551                 int olddisks = mddev->raid_disks - mddev->delta_disks;
2552                 mddev->delta_disks = n - olddisks;
2553                 mddev->raid_disks = n;
2554         } else
2555                 mddev->raid_disks = n;
2556         return rv ? rv : len;
2557 }
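/*
 * Editor's annotation, worked example: on an inactive array mid-reshape
 * with raid_disks == 6 and delta_disks == 2 (a 4 -> 6 grow), writing "7"
 * recomputes delta_disks = 7 - 4 = 3 and sets raid_disks = 7.
 */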
2558 static struct md_sysfs_entry md_raid_disks =
2559 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2560
2561 static ssize_t
2562 chunk_size_show(mddev_t *mddev, char *page)
2563 {
2564         if (mddev->reshape_position != MaxSector &&
2565             mddev->chunk_size != mddev->new_chunk)
2566                 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2567                                mddev->chunk_size);
2568         return sprintf(page, "%d\n", mddev->chunk_size);
2569 }
2570
2571 static ssize_t
2572 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2573 {
2574         /* can only set chunk_size if array is not yet active */
2575         char *e;
2576         unsigned long n = simple_strtoul(buf, &e, 10);
2577
2578         if (!*buf || (*e && *e != '\n'))
2579                 return -EINVAL;
2580
2581         if (mddev->pers)
2582                 return -EBUSY;
2583         else if (mddev->reshape_position != MaxSector)
2584                 mddev->new_chunk = n;
2585         else
2586                 mddev->chunk_size = n;
2587         return len;
2588 }
2589 static struct md_sysfs_entry md_chunk_size =
2590 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2591
2592 static ssize_t
2593 resync_start_show(mddev_t *mddev, char *page)
2594 {
2595         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2596 }
2597
2598 static ssize_t
2599 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2600 {
2601         char *e;
2602         unsigned long long n = simple_strtoull(buf, &e, 10);
2603
2604         if (mddev->pers)
2605                 return -EBUSY;
2606         if (!*buf || (*e && *e != '\n'))
2607                 return -EINVAL;
2608
2609         mddev->recovery_cp = n;
2610         return len;
2611 }
2612 static struct md_sysfs_entry md_resync_start =
2613 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2614
2615 /*
2616  * The array state can be:
2617  *
2618  * clear
2619  *     No devices, no size, no level
2620  *     Equivalent to STOP_ARRAY ioctl
2621  * inactive
2622  *     May have some settings, but array is not active
2623  *        all IO results in error
2624  *     When written, doesn't tear down array, but just stops it
2625  * suspended (not supported yet)
2626  *     All IO requests will block. The array can be reconfigured.
2627  *     Writing this, if accepted, will block until the array is quiescent
2628  * readonly
2629  *     no resync can happen.  no superblocks get written.
2630  *     write requests fail
2631  * read-auto
2632  *     like readonly, but behaves like 'clean' on a write request.
2633  *
2634  * clean - no pending writes, but otherwise active.
2635  *     When written to inactive array, starts without resync
2636  *     If a write request arrives then
2637  *       if metadata is known, mark 'dirty' and switch to 'active'.
2638  *       if not known, block and switch to write-pending
2639  *     If written to an active array that has pending writes, then fails.
2640  * active
2641  *     fully active: IO and resync can be happening.
2642  *     When written to inactive array, starts with resync
2643  *
2644  * write-pending
2645  *     clean, but writes are blocked waiting for 'active' to be written.
2646  *
2647  * active-idle
2648  *     like active, but no writes have been seen for a while (100msec).
2649  *
2650  */
2651 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2652                    write_pending, active_idle, bad_word};
2653 static char *array_states[] = {
2654         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2655         "write-pending", "active-idle", NULL };
2656
2657 static int match_word(const char *word, char **list)
2658 {
2659         int n;
2660         for (n=0; list[n]; n++)
2661                 if (cmd_match(word, list[n]))
2662                         break;
2663         return n;
2664 }
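/*
 * Editor's annotation: match_word() returns the index of the first match,
 * or the index of the terminating NULL when nothing matches -- which for
 * array_states is exactly bad_word:
 *
 *	match_word("clean\n", array_states) == clean
 *	match_word("bogus",   array_states) == bad_word
 */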
2665
2666 static ssize_t
2667 array_state_show(mddev_t *mddev, char *page)
2668 {
2669         enum array_state st = inactive;
2670
2671         if (mddev->pers)
2672                 switch(mddev->ro) {
2673                 case 1:
2674                         st = readonly;
2675                         break;
2676                 case 2:
2677                         st = read_auto;
2678                         break;
2679                 case 0:
2680                         if (mddev->in_sync)
2681                                 st = clean;
2682                         else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
2683                                 st = write_pending;
2684                         else if (mddev->safemode)
2685                                 st = active_idle;
2686                         else
2687                                 st = active;
2688                 }
2689         else {
2690                 if (list_empty(&mddev->disks) &&
2691                     mddev->raid_disks == 0 &&
2692                     mddev->size == 0)
2693                         st = clear;
2694                 else
2695                         st = inactive;
2696         }
2697         return sprintf(page, "%s\n", array_states[st]);
2698 }
2699
2700 static int do_md_stop(mddev_t * mddev, int ro);
2701 static int do_md_run(mddev_t * mddev);
2702 static int restart_array(mddev_t *mddev);
2703
2704 static ssize_t
2705 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2706 {
2707         int err = -EINVAL;
2708         enum array_state st = match_word(buf, array_states);
2709         switch(st) {
2710         case bad_word:
2711                 break;
2712         case clear:
2713                 /* stopping an active array */
2714                 if (atomic_read(&mddev->active) > 1)
2715                         return -EBUSY;
2716                 err = do_md_stop(mddev, 0);
2717                 break;
2718         case inactive:
2719                 /* stopping an active array */
2720                 if (mddev->pers) {
2721                         if (atomic_read(&mddev->active) > 1)
2722                                 return -EBUSY;
2723                         err = do_md_stop(mddev, 2);
2724                 } else
2725                         err = 0; /* already inactive */
2726                 break;
2727         case suspended:
2728                 break; /* not supported yet */
2729         case readonly:
2730                 if (mddev->pers)
2731                         err = do_md_stop(mddev, 1);
2732                 else {
2733                         mddev->ro = 1;
2734                         set_disk_ro(mddev->gendisk, 1);
2735                         err = do_md_run(mddev);
2736                 }
2737                 break;
2738         case read_auto:
2739                 if (mddev->pers) {
2740                         if (mddev->ro != 1)
2741                                 err = do_md_stop(mddev, 1);
2742                         else
2743                                 err = restart_array(mddev);
2744                         if (err == 0) {
2745                                 mddev->ro = 2;
2746                                 set_disk_ro(mddev->gendisk, 0);
2747                         }
2748                 } else {
2749                         mddev->ro = 2;
2750                         err = do_md_run(mddev);
2751                 }
2752                 break;
2753         case clean:
2754                 if (mddev->pers) {
2755                         restart_array(mddev);
2756                         spin_lock_irq(&mddev->write_lock);
2757                         if (atomic_read(&mddev->writes_pending) == 0) {
2758                                 if (mddev->in_sync == 0) {
2759                                         mddev->in_sync = 1;
2760                                         if (mddev->safemode == 1)
2761                                                 mddev->safemode = 0;
2762                                         if (mddev->persistent)
2763                                                 set_bit(MD_CHANGE_CLEAN,
2764                                                         &mddev->flags);
2765                                 }
2766                                 err = 0;
2767                         } else
2768                                 err = -EBUSY;
2769                         spin_unlock_irq(&mddev->write_lock);
2770                 } else {
2771                         mddev->ro = 0;
2772                         mddev->recovery_cp = MaxSector;
2773                         err = do_md_run(mddev);
2774                 }
2775                 break;
2776         case active:
2777                 if (mddev->pers) {
2778                         restart_array(mddev);
2779                         if (mddev->external)
2780                                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2781                         wake_up(&mddev->sb_wait);
2782                         err = 0;
2783                 } else {
2784                         mddev->ro = 0;
2785                         set_disk_ro(mddev->gendisk, 0);
2786                         err = do_md_run(mddev);
2787                 }
2788                 break;
2789         case write_pending:
2790         case active_idle:
2791                 /* these cannot be set */
2792                 break;
2793         }
2794         if (err)
2795                 return err;
2796         else {
2797                 sysfs_notify(&mddev->kobj, NULL, "array_state");
2798                 return len;
2799         }
2800 }
2801 static struct md_sysfs_entry md_array_state =
2802 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
2803
2804 static ssize_t
2805 null_show(mddev_t *mddev, char *page)
2806 {
2807         return -EINVAL;
2808 }
2809
2810 static ssize_t
2811 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2812 {
2813         /* buf must be %d:%d, optionally \n terminated, giving major and minor numbers */
2814         /* The new device is added to the array.
2815          * If the array has a persistent superblock, we read the
2816          * superblock to initialise info and check validity.
2817          * Otherwise, the only checking done is that in bind_rdev_to_array,
2818          * which mainly checks size.
2819          */
2820         char *e;
2821         int major = simple_strtoul(buf, &e, 10);
2822         int minor;
2823         dev_t dev;
2824         mdk_rdev_t *rdev;
2825         int err;
2826
2827         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2828                 return -EINVAL;
2829         minor = simple_strtoul(e+1, &e, 10);
2830         if (*e && *e != '\n')
2831                 return -EINVAL;
2832         dev = MKDEV(major, minor);
2833         if (major != MAJOR(dev) ||
2834             minor != MINOR(dev))
2835                 return -EOVERFLOW;
2836
2837
2838         if (mddev->persistent) {
2839                 rdev = md_import_device(dev, mddev->major_version,
2840                                         mddev->minor_version);
2841                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2842                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2843                                                        mdk_rdev_t, same_set);
2844                         err = super_types[mddev->major_version]
2845                                 .load_super(rdev, rdev0, mddev->minor_version);
2846                         if (err < 0)
2847                                 goto out;
2848                 }
2849         } else if (mddev->external)
2850                 rdev = md_import_device(dev, -2, -1);
2851         else
2852                 rdev = md_import_device(dev, -1, -1);
2853
2854         if (IS_ERR(rdev))
2855                 return PTR_ERR(rdev);
2856         err = bind_rdev_to_array(rdev, mddev);
2857  out:
2858         if (err)
2859                 export_rdev(rdev);
2860         return err ? err : len;
2861 }
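/*
 * Editor's annotation: writing "8:16" here yields dev == MKDEV(8, 16)
 * (typically /dev/sdb); on a persistent array the imported device's
 * superblock is cross-checked against an existing member via load_super()
 * before bind_rdev_to_array() links it in.
 */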
2862
2863 static struct md_sysfs_entry md_new_device =
2864 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
2865
2866 static ssize_t
2867 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2868 {
2869         char *end;
2870         unsigned long chunk, end_chunk;
2871
2872         if (!mddev->bitmap)
2873                 goto out;
2874         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2875         while (*buf) {
2876                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2877                 if (buf == end) break;
2878                 if (*end == '-') { /* range */
2879                         buf = end + 1;
2880                         end_chunk = simple_strtoul(buf, &end, 0);
2881                         if (buf == end) break;
2882                 }
2883                 if (*end && !isspace(*end)) break;
2884                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2885                 buf = end;
2886                 while (isspace(*buf)) buf++;
2887         }
2888         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2889 out:
2890         return len;
2891 }
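/*
 * Editor's annotation: writing "10 100-200" marks chunk 10 and chunks
 * 100 through 200 dirty in the write-intent bitmap, so those regions
 * will be resynced; bitmap_unplug() then flushes the bits to disk.
 */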
2892
2893 static struct md_sysfs_entry md_bitmap =
2894 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
2895
2896 static ssize_t
2897 size_show(mddev_t *mddev, char *page)
2898 {
2899         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2900 }
2901
2902 static int update_size(mddev_t *mddev, unsigned long size);
2903
2904 static ssize_t
2905 size_store(mddev_t *mddev, const char *buf, size_t len)
2906 {
2907         /* If array is inactive, we can reduce the component size, but
2908          * not increase it (except from 0).
2909          * If array is active, we can try an on-line resize
2910          */
2911         char *e;
2912         int err = 0;
2913         unsigned long long size = simple_strtoull(buf, &e, 10);
2914         if (!*buf || *buf == '\n' ||
2915             (*e && *e != '\n'))
2916                 return -EINVAL;
2917
2918         if (mddev->pers) {
2919                 err = update_size(mddev, size);
2920                 md_update_sb(mddev, 1);
2921         } else {
2922                 if (mddev->size == 0 ||
2923                     mddev->size > size)
2924                         mddev->size = size;
2925                 else
2926                         err = -ENOSPC;
2927         }
2928         return err ? err : len;
2929 }
2930
2931 static struct md_sysfs_entry md_size =
2932 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
2933
2934
2935 /* Metadata version.
2936  * This is one of
2937  *   'none' for arrays with no metadata (good luck...)
2938  *   'external' for arrays with externally managed metadata,
2939  * or N.M for internally known formats
2940  */
2941 static ssize_t
2942 metadata_show(mddev_t *mddev, char *page)
2943 {
2944         if (mddev->persistent)
2945                 return sprintf(page, "%d.%d\n",
2946                                mddev->major_version, mddev->minor_version);
2947         else if (mddev->external)
2948                 return sprintf(page, "external:%s\n", mddev->metadata_type);
2949         else
2950                 return sprintf(page, "none\n");
2951 }
2952
2953 static ssize_t
2954 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2955 {
2956         int major, minor;
2957         char *e;
2958         if (!list_empty(&mddev->disks))
2959                 return -EBUSY;
2960
2961         if (cmd_match(buf, "none")) {
2962                 mddev->persistent = 0;
2963                 mddev->external = 0;
2964                 mddev->major_version = 0;
2965                 mddev->minor_version = 90;
2966                 return len;
2967         }
2968         if (strncmp(buf, "external:", 9) == 0) {
2969                 size_t namelen = len-9;
2970                 if (namelen >= sizeof(mddev->metadata_type))
2971                         namelen = sizeof(mddev->metadata_type)-1;
2972                 strncpy(mddev->metadata_type, buf+9, namelen);
2973                 mddev->metadata_type[namelen] = 0;
2974                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
2975                         mddev->metadata_type[--namelen] = 0;
2976                 mddev->persistent = 0;
2977                 mddev->external = 1;
2978                 mddev->major_version = 0;
2979                 mddev->minor_version = 90;
2980                 return len;
2981         }
2982         major = simple_strtoul(buf, &e, 10);
2983         if (e==buf || *e != '.')
2984                 return -EINVAL;
2985         buf = e+1;
2986         minor = simple_strtoul(buf, &e, 10);
2987         if (e==buf || (*e && *e != '\n') )
2988                 return -EINVAL;
2989         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2990                 return -ENOENT;
2991         mddev->major_version = major;
2992         mddev->minor_version = minor;
2993         mddev->persistent = 1;
2994         mddev->external = 0;
2995         return len;
2996 }
2997
2998 static struct md_sysfs_entry md_metadata =
2999 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
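
/*
 * Illustrative usage (only accepted while the array has no disks;
 * "imsm" is just an example of an external format name):
 *   echo 0.90 > /sys/block/md0/md/metadata_version
 *   echo external:imsm > /sys/block/md0/md/metadata_version
 *   echo none > /sys/block/md0/md/metadata_version
 */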
3000
3001 static ssize_t
3002 action_show(mddev_t *mddev, char *page)
3003 {
3004         char *type = "idle";
3005         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3006             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
3007                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3008                         type = "reshape";
3009                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3010                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
3011                                 type = "resync";
3012                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
3013                                 type = "check";
3014                         else
3015                                 type = "repair";
3016                 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
3017                         type = "recover";
3018         }
3019         return sprintf(page, "%s\n", type);
3020 }
3021
3022 static ssize_t
3023 action_store(mddev_t *mddev, const char *page, size_t len)
3024 {
3025         if (!mddev->pers || !mddev->pers->sync_request)
3026                 return -EINVAL;
3027
3028         if (cmd_match(page, "idle")) {
3029                 if (mddev->sync_thread) {
3030                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3031                         md_unregister_thread(mddev->sync_thread);
3032                         mddev->sync_thread = NULL;
3033                         mddev->recovery = 0;
3034                 }
3035         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3036                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
3037                 return -EBUSY;
3038         else if (cmd_match(page, "resync"))
3039                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3040         else if (cmd_match(page, "recover")) {
3041                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3042                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3043         } else if (cmd_match(page, "reshape")) {
3044                 int err;
3045                 if (mddev->pers->start_reshape == NULL)
3046                         return -EINVAL;
3047                 err = mddev->pers->start_reshape(mddev);
3048                 if (err)
3049                         return err;
3050                 sysfs_notify(&mddev->kobj, NULL, "degraded");
3051         } else {
3052                 if (cmd_match(page, "check"))
3053                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3054                 else if (!cmd_match(page, "repair"))
3055                         return -EINVAL;
3056                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3057                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3058         }
3059         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3060         md_wakeup_thread(mddev->thread);
3061         sysfs_notify(&mddev->kobj, NULL, "sync_action");
3062         return len;
3063 }
3064
3065 static ssize_t
3066 mismatch_cnt_show(mddev_t *mddev, char *page)
3067 {
3068         return sprintf(page, "%llu\n",
3069                        (unsigned long long) mddev->resync_mismatches);
3070 }
3071
3072 static struct md_sysfs_entry md_scan_mode =
3073 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
3074
3075
3076 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
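
/*
 * Illustrative usage (md0 is an example): start a read-only
 * consistency check, then read back how many mismatches it found;
 * "repair", "resync", "recover", "reshape" and "idle" select the
 * other actions handled by action_store() above:
 *   echo check > /sys/block/md0/md/sync_action
 *   cat /sys/block/md0/md/mismatch_cnt
 */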
3077
3078 static ssize_t
3079 sync_min_show(mddev_t *mddev, char *page)
3080 {
3081         return sprintf(page, "%d (%s)\n", speed_min(mddev),
3082                        mddev->sync_speed_min ? "local": "system");
3083 }
3084
3085 static ssize_t
3086 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
3087 {
3088         int min;
3089         char *e;
3090         if (strncmp(buf, "system", 6)==0) {
3091                 mddev->sync_speed_min = 0;
3092                 return len;
3093         }
3094         min = simple_strtoul(buf, &e, 10);
3095         if (buf == e || (*e && *e != '\n') || min <= 0)
3096                 return -EINVAL;
3097         mddev->sync_speed_min = min;
3098         return len;
3099 }
3100
3101 static struct md_sysfs_entry md_sync_min =
3102 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
3103
3104 static ssize_t
3105 sync_max_show(mddev_t *mddev, char *page)
3106 {
3107         return sprintf(page, "%d (%s)\n", speed_max(mddev),
3108                        mddev->sync_speed_max ? "local": "system");
3109 }
3110
3111 static ssize_t
3112 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
3113 {
3114         int max;
3115         char *e;
3116         if (strncmp(buf, "system", 6)==0) {
3117                 mddev->sync_speed_max = 0;
3118                 return len;
3119         }
3120         max = simple_strtoul(buf, &e, 10);
3121         if (buf == e || (*e && *e != '\n') || max <= 0)
3122                 return -EINVAL;
3123         mddev->sync_speed_max = max;
3124         return len;
3125 }
3126
3127 static struct md_sysfs_entry md_sync_max =
3128 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
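
/*
 * Illustrative usage (the rate is an example, in KB/sec): these set
 * per-array overrides of the global speed_limit_{min,max} sysctls;
 * writing "system" reverts to the sysctl value:
 *   echo 50000 > /sys/block/md0/md/sync_speed_max
 *   echo system > /sys/block/md0/md/sync_speed_min
 */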
3129
3130 static ssize_t
3131 degraded_show(mddev_t *mddev, char *page)
3132 {
3133         return sprintf(page, "%d\n", mddev->degraded);
3134 }
3135 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
3136
3137 static ssize_t
3138 sync_force_parallel_show(mddev_t *mddev, char *page)
3139 {
3140         return sprintf(page, "%d\n", mddev->parallel_resync);
3141 }
3142
3143 static ssize_t
3144 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len)
3145 {
3146         long n;
3147
3148         if (strict_strtol(buf, 10, &n))
3149                 return -EINVAL;
3150
3151         if (n != 0 && n != 1)
3152                 return -EINVAL;
3153
3154         mddev->parallel_resync = n;
3155
3156         if (mddev->sync_thread)
3157                 wake_up(&resync_wait);
3158
3159         return len;
3160 }
3161
3162 /* force parallel resync, even with shared block devices */
3163 static struct md_sysfs_entry md_sync_force_parallel =
3164 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
3165        sync_force_parallel_show, sync_force_parallel_store);
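
/*
 * Illustrative usage: only "0" and "1" are accepted; "1" allows
 * resync of arrays that share a physical device to proceed in
 * parallel instead of waiting for each other:
 *   echo 1 > /sys/block/md0/md/sync_force_parallel
 */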
3166
3167 static ssize_t
3168 sync_speed_show(mddev_t *mddev, char *page)
3169 {
3170         unsigned long resync, dt, db;
3171         resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
3172         dt = ((jiffies - mddev->resync_mark) / HZ);
3173         if (!dt) dt++;
3174         db = resync - (mddev->resync_mark_cnt);
3175         return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
3176 }
3177
3178 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
3179
3180 static ssize_t
3181 sync_completed_show(mddev_t *mddev, char *page)
3182 {
3183         unsigned long max_blocks, resync;
3184
3185         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3186                 max_blocks = mddev->resync_max_sectors;
3187         else
3188                 max_blocks = mddev->size << 1;
3189
3190         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
3191         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
3192 }
3193
3194 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3195
3196 static ssize_t
3197 min_sync_show(mddev_t *mddev, char *page)
3198 {
3199         return sprintf(page, "%llu\n",
3200                        (unsigned long long)mddev->resync_min);
3201 }
3202 static ssize_t
3203 min_sync_store(mddev_t *mddev, const char *buf, size_t len)
3204 {
3205         unsigned long long min;
3206         if (strict_strtoull(buf, 10, &min))
3207                 return -EINVAL;
3208         if (min > mddev->resync_max)
3209                 return -EINVAL;
3210         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3211                 return -EBUSY;
3212
3213         /* Must be a multiple of chunk_size */
3214         if (mddev->chunk_size) {
3215                 if (min & (sector_t)((mddev->chunk_size>>9)-1))
3216                         return -EINVAL;
3217         }
3218         mddev->resync_min = min;
3219
3220         return len;
3221 }
3222
3223 static struct md_sysfs_entry md_min_sync =
3224 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
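
/*
 * Illustrative usage (the offset is an example, in sectors, and must
 * be a multiple of the chunk size): start the next resync/check from
 * a given point instead of sector 0:
 *   echo 2097152 > /sys/block/md0/md/sync_min
 */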
3225
3226 static ssize_t
3227 max_sync_show(mddev_t *mddev, char *page)
3228 {
3229         if (mddev->resync_max == MaxSector)
3230                 return sprintf(page, "max\n");
3231         else
3232                 return sprintf(page, "%llu\n",
3233                                (unsigned long long)mddev->resync_max);
3234 }
3235 static ssize_t
3236 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3237 {
3238         if (strncmp(buf, "max", 3) == 0)
3239                 mddev->resync_max = MaxSector;
3240         else {
3241                 unsigned long long max;
3242                 if (strict_strtoull(buf, 10, &max))
3243                         return -EINVAL;
3244                 if (max < mddev->resync_min)
3245                         return -EINVAL;
3246                 if (max < mddev->resync_max &&
3247                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3248                         return -EBUSY;
3249
3250                 /* Must be a multiple of chunk_size */
3251                 if (mddev->chunk_size) {
3252                         if (max & (sector_t)((mddev->chunk_size>>9)-1))
3253                                 return -EINVAL;
3254                 }
3255                 mddev->resync_max = max;
3256         }
3257         wake_up(&mddev->recovery_wait);
3258         return len;
3259 }
3260
3261 static struct md_sysfs_entry md_max_sync =
3262 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
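
/*
 * Illustrative usage (the sector count is an example): bound how far
 * a resync may proceed, or lift the bound again:
 *   echo 4194304 > /sys/block/md0/md/sync_max
 *   echo max > /sys/block/md0/md/sync_max
 */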
3263
3264 static ssize_t
3265 suspend_lo_show(mddev_t *mddev, char *page)
3266 {
3267         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3268 }
3269
3270 static ssize_t
3271 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3272 {
3273         char *e;
3274         unsigned long long new = simple_strtoull(buf, &e, 10);
3275
3276         if (mddev->pers->quiesce == NULL)
3277                 return -EINVAL;
3278         if (buf == e || (*e && *e != '\n'))
3279                 return -EINVAL;
3280         if (new >= mddev->suspend_hi ||
3281             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3282                 mddev->suspend_lo = new;
3283                 mddev->pers->quiesce(mddev, 2);
3284                 return len;
3285         } else
3286                 return -EINVAL;
3287 }
3288 static struct md_sysfs_entry md_suspend_lo =
3289 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3290
3291
3292 static ssize_t
3293 suspend_hi_show(mddev_t *mddev, char *page)
3294 {
3295         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3296 }
3297
3298 static ssize_t
3299 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3300 {
3301         char *e;
3302         unsigned long long new = simple_strtoull(buf, &e, 10);
3303
3304         if (mddev->pers->quiesce == NULL)
3305                 return -EINVAL;
3306         if (buf == e || (*e && *e != '\n'))
3307                 return -EINVAL;
3308         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3309             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3310                 mddev->suspend_hi = new;
3311                 mddev->pers->quiesce(mddev, 1);
3312                 mddev->pers->quiesce(mddev, 0);
3313                 return len;
3314         } else
3315                 return -EINVAL;
3316 }
3317 static struct md_sysfs_entry md_suspend_hi =
3318 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
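
/*
 * Illustrative usage (sector values are examples): suspend_lo and
 * suspend_hi delimit a region, in sectors, over which application
 * I/O is held off via ->quiesce() while it is manipulated:
 *   echo 2048 > /sys/block/md0/md/suspend_hi
 *   echo 1024 > /sys/block/md0/md/suspend_lo
 */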
3319
3320 static ssize_t
3321 reshape_position_show(mddev_t *mddev, char *page)
3322 {
3323         if (mddev->reshape_position != MaxSector)
3324                 return sprintf(page, "%llu\n",
3325                                (unsigned long long)mddev->reshape_position);
3326         strcpy(page, "none\n");
3327         return 5;
3328 }
3329
3330 static ssize_t
3331 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3332 {
3333         char *e;
3334         unsigned long long new = simple_strtoull(buf, &e, 10);
3335         if (mddev->pers)
3336                 return -EBUSY;
3337         if (buf == e || (*e && *e != '\n'))
3338                 return -EINVAL;
3339         mddev->reshape_position = new;
3340         mddev->delta_disks = 0;
3341         mddev->new_level = mddev->level;
3342         mddev->new_layout = mddev->layout;
3343         mddev->new_chunk = mddev->chunk_size;
3344         return len;
3345 }
3346
3347 static struct md_sysfs_entry md_reshape_position =
3348 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3349        reshape_position_store);
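
/*
 * Illustrative usage (the sector value is an example; only accepted
 * while the array is not running): record where an interrupted
 * reshape should resume; the store also resets delta_disks and the
 * new_* geometry fields to the current values, as above:
 *   echo 4194304 > /sys/block/md0/md/reshape_position
 */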
3350
3351
3352 static struct attribute *md_default_attrs[] = {
3353         &md_level.attr,
3354         &md_layout.attr,
3355         &md_raid_disks.attr,
3356         &md_chunk_size.attr,
3357         &md_size.attr,
3358         &md_resync_start.attr,
3359         &md_metadata.attr,
3360         &md_new_device.attr,
3361         &md_safe_delay.attr,
3362         &md_array_state.attr,
3363         &md_reshape_position.attr,
3364         NULL,
3365 };
3366
3367 static struct attribute *md_redundancy_attrs[] = {
3368         &md_scan_mode.attr,
3369         &md_mismatches.attr,
3370         &md_sync_min.attr,
3371         &md_sync_max.attr,
3372         &md_sync_speed.attr,
3373         &md_sync_force_parallel.attr,
3374         &md_sync_completed.attr,
3375         &md_min_sync.attr,
3376         &md_max_sync.attr,
3377         &md_suspend_lo.attr,
3378         &md_suspend_hi.attr,
3379         &md_bitmap.attr,
3380         &md_degraded.attr,
3381         NULL,
3382 };
3383 static struct attribute_group md_redundancy_group = {
3384         .name = NULL,
3385         .attrs = md_redundancy_attrs,
3386 };
3387
3388
3389 static ssize_t
3390 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3391 {
3392         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3393         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3394         ssize_t rv;
3395
3396         if (!entry->show)
3397                 return -EIO;
3398         rv = mddev_lock(mddev);
3399         if (!rv) {
3400                 rv = entry->show(mddev, page);
3401                 mddev_unlock(mddev);
3402         }
3403         return rv;
3404 }
3405
3406 static ssize_t
3407 md_attr_store(struct kobject *kobj, struct attribute *attr,
3408               const char *page, size_t length)
3409 {
3410         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3411         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3412         ssize_t rv;
3413
3414         if (!entry->store)
3415                 return -EIO;
3416         if (!capable(CAP_SYS_ADMIN))
3417                 return -EACCES;
3418         rv = mddev_lock(mddev);
3419         if (!rv) {
3420                 rv = entry->store(mddev, page, length);
3421                 mddev_unlock(mddev);
3422         }
3423         return rv;
3424 }
3425
3426 static void md_free(struct kobject *ko)
3427 {
3428         mddev_t *mddev = container_of(ko, mddev_t, kobj);
3429         kfree(mddev);
3430 }
3431
3432 static struct sysfs_ops md_sysfs_ops = {
3433         .show   = md_attr_show,
3434         .store  = md_attr_store,
3435 };
3436 static struct kobj_type md_ktype = {
3437         .release        = md_free,
3438         .sysfs_ops      = &md_sysfs_ops,
3439         .default_attrs  = md_default_attrs,
3440 };
3441
3442 int mdp_major = 0;
3443
3444 static struct kobject *md_probe(dev_t dev, int *part, void *data)
3445 {
3446         static DEFINE_MUTEX(disks_mutex);
3447         mddev_t *mddev = mddev_find(dev);
3448         struct gendisk *disk;
3449         int partitioned = (MAJOR(dev) != MD_MAJOR);
3450         int shift = partitioned ? MdpMinorShift : 0;
3451         int unit = MINOR(dev) >> shift;
3452         int error;
3453
3454         if (!mddev)
3455                 return NULL;
3456
3457         mutex_lock(&disks_mutex);
3458         if (mddev->gendisk) {
3459                 mutex_unlock(&disks_mutex);
3460                 mddev_put(mddev);
3461                 return NULL;
3462         }
3463         disk = alloc_disk(1 << shift);
3464         if (!disk) {
3465                 mutex_unlock(&disks_mutex);
3466                 mddev_put(mddev);
3467                 return NULL;
3468         }
3469         disk->major = MAJOR(dev);
3470         disk->first_minor = unit << shift;
3471         if (partitioned)
3472                 sprintf(disk->disk_name, "md_d%d", unit);
3473         else
3474                 sprintf(disk->disk_name, "md%d", unit);
3475         disk->fops = &md_fops;
3476         disk->private_data = mddev;
3477         disk->queue = mddev->queue;
3478         add_disk(disk);
3479         mddev->gendisk = disk;
3480         error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
3481                                      "%s", "md");
3482         mutex_unlock(&disks_mutex);
3483         if (error)
3484                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3485                        disk->disk_name);
3486         else
3487                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3488         return NULL;
3489 }
3490
3491 static void md_safemode_timeout(unsigned long data)
3492 {
3493         mddev_t *mddev = (mddev_t *) data;
3494
3495         if (!atomic_read(&mddev->writes_pending)) {
3496                 mddev->safemode = 1;
3497                 if (mddev->external)
3498                         sysfs_notify(&mddev->kobj, NULL, "array_state");
3499         }
3500         md_wakeup_thread(mddev->thread);
3501 }
3502
3503 static int start_dirty_degraded;
3504
3505 static int do_md_run(mddev_t * mddev)
3506 {
3507         int err;
3508         int chunk_size;
3509         struct list_head *tmp;
3510         mdk_rdev_t *rdev;
3511         struct gendisk *disk;
3512         struct mdk_personality *pers;
3513         char b[BDEVNAME_SIZE];
3514
3515         if (list_empty(&mddev->disks))
3516                 /* cannot run an array with no devices... */
3517                 return -EINVAL;
3518
3519         if (mddev->pers)
3520                 return -EBUSY;
3521
3522         /*
3523          * Analyze all RAID superblock(s)
3524          */
3525         if (!mddev->raid_disks) {
3526                 if (!mddev->persistent)
3527                         return -EINVAL;
3528                 analyze_sbs(mddev);
3529         }
3530
3531         chunk_size = mddev->chunk_size;
3532
3533         if (chunk_size) {
3534                 if (chunk_size > MAX_CHUNK_SIZE) {
3535                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
3536                                 chunk_size, MAX_CHUNK_SIZE);
3537                         return -EINVAL;
3538                 }
3539                 /*
3540                  * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
3541                  */
3542                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3543                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3544                         return -EINVAL;
3545                 }
3546                 if (chunk_size < PAGE_SIZE) {
3547                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3548                                 chunk_size, PAGE_SIZE);
3549                         return -EINVAL;
3550                 }
3551
3552                 /* devices must have a minimum size of one chunk */
3553                 rdev_for_each(rdev, tmp, mddev) {
3554                         if (test_bit(Faulty, &rdev->flags))
3555                                 continue;
3556                         if (rdev->size < chunk_size / 1024) {
3557                                 printk(KERN_WARNING
3558                                         "md: Dev %s smaller than chunk_size:"
3559                                         " %lluk < %dk\n",
3560                                         bdevname(rdev->bdev,b),
3561                                         (unsigned long long)rdev->size,
3562                                         chunk_size / 1024);
3563                                 return -EINVAL;
3564                         }
3565                 }
3566         }
3567
3568 #ifdef CONFIG_KMOD
3569         if (mddev->level != LEVEL_NONE)
3570                 request_module("md-level-%d", mddev->level);
3571         else if (mddev->clevel[0])
3572                 request_module("md-%s", mddev->clevel);
3573 #endif
3574
3575         /*
3576          * Drop all container device buffers, from now on
3577          * the only valid external interface is through the md
3578          * device.
3579          */
3580         rdev_for_each(rdev, tmp, mddev) {
3581                 if (test_bit(Faulty, &rdev->flags))
3582                         continue;
3583                 sync_blockdev(rdev->bdev);
3584                 invalidate_bdev(rdev->bdev);
3585
3586                 /* perform some consistency tests on the device.
3587                  * We don't want the data to overlap the metadata;
3588                  * internal bitmap issues are handled elsewhere.
3589                  */
3590                 if (rdev->data_offset < rdev->sb_offset) {
3591                         if (mddev->size &&
3592                             rdev->data_offset + mddev->size*2
3593                             > rdev->sb_offset*2) {
3594                                 printk("md: %s: data overlaps metadata\n",
3595                                        mdname(mddev));
3596                                 return -EINVAL;
3597                         }
3598                 } else {
3599                         if (rdev->sb_offset*2 + rdev->sb_size/512
3600                             > rdev->data_offset) {
3601                                 printk("md: %s: metadata overlaps data\n",
3602                                        mdname(mddev));
3603                                 return -EINVAL;
3604                         }
3605                 }
3606                 sysfs_notify(&rdev->kobj, NULL, "state");
3607         }
3608
3609         md_probe(mddev->unit, NULL, NULL);
3610         disk = mddev->gendisk;
3611         if (!disk)
3612                 return -ENOMEM;
3613
3614         spin_lock(&pers_lock);
3615         pers = find_pers(mddev->level, mddev->clevel);
3616         if (!pers || !try_module_get(pers->owner)) {
3617                 spin_unlock(&pers_lock);
3618                 if (mddev->level != LEVEL_NONE)
3619                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3620                                mddev->level);
3621                 else
3622                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3623                                mddev->clevel);
3624                 return -EINVAL;
3625         }
3626         mddev->pers = pers;
3627         spin_unlock(&pers_lock);
3628         mddev->level = pers->level;
3629         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3630
3631         if (mddev->reshape_position != MaxSector &&
3632             pers->start_reshape == NULL) {
3633                 /* This personality cannot handle reshaping... */
3634                 mddev->pers = NULL;
3635                 module_put(pers->owner);
3636                 return -EINVAL;
3637         }
3638
3639         if (pers->sync_request) {
3640                 /* Warn if this is a potentially silly
3641                  * configuration.
3642                  */
3643                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3644                 mdk_rdev_t *rdev2;
3645                 struct list_head *tmp2;
3646                 int warned = 0;
3647                 rdev_for_each(rdev, tmp, mddev) {
3648                         rdev_for_each(rdev2, tmp2, mddev) {
3649                                 if (rdev < rdev2 &&
3650                                     rdev->bdev->bd_contains ==
3651                                     rdev2->bdev->bd_contains) {
3652                                         printk(KERN_WARNING
3653                                                "%s: WARNING: %s appears to be"
3654                                                " on the same physical disk as"
3655                                                " %s.\n",
3656                                                mdname(mddev),
3657                                                bdevname(rdev->bdev,b),
3658                                                bdevname(rdev2->bdev,b2));
3659                                         warned = 1;
3660                                 }
3661                         }
3662                 }
3663                 if (warned)
3664                         printk(KERN_WARNING
3665                                "True protection against single-disk"
3666                                " failure might be compromised.\n");
3667         }
3668
3669         mddev->recovery = 0;
3670         mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3671         mddev->barriers_work = 1;
3672         mddev->ok_start_degraded = start_dirty_degraded;
3673
3674         if (start_readonly)
3675                 mddev->ro = 2; /* read-only, but switch on first write */
3676
3677         err = mddev->pers->run(mddev);
3678         if (!err && mddev->pers->sync_request) {
3679                 err = bitmap_create(mddev);
3680                 if (err) {
3681                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3682                                mdname(mddev), err);
3683                         mddev->pers->stop(mddev);
3684                 }
3685         }
3686         if (err) {
3687                 printk(KERN_ERR "md: pers->run() failed ...\n");
3688                 module_put(mddev->pers->owner);
3689                 mddev->pers = NULL;
3690                 bitmap_destroy(mddev);
3691                 return err;
3692         }
3693         if (mddev->pers->sync_request) {
3694                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3695                         printk(KERN_WARNING
3696                                "md: cannot register extra attributes for %s\n",
3697                                mdname(mddev));
3698         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
3699                 mddev->ro = 0;
3700
3701         atomic_set(&mddev->writes_pending,0);
3702         mddev->safemode = 0;
3703         mddev->safemode_timer.function = md_safemode_timeout;
3704         mddev->safemode_timer.data = (unsigned long) mddev;
3705         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3706         mddev->in_sync = 1;
3707
3708         rdev_for_each(rdev, tmp, mddev)
3709                 if (rdev->raid_disk >= 0) {
3710                         char nm[20];
3711                         sprintf(nm, "rd%d", rdev->raid_disk);
3712                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3713                                 printk("md: cannot register %s for %s\n",
3714                                        nm, mdname(mddev));
3715                 }
3716         
3717         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3718         
3719         if (mddev->flags)
3720                 md_update_sb(mddev, 0);
3721
3722         set_capacity(disk, mddev->array_size<<1);
3723
3724         /* If we call blk_queue_make_request here, it will
3725          * re-initialise max_sectors etc which may have been
3726          * refined inside ->run().  So just set the bits we need to set.
3727          * Most initialisation happened when we called
3728          * blk_queue_make_request(..., md_fail_request)
3729          * earlier.
3730          */
3731         mddev->queue->queuedata = mddev;
3732         mddev->queue->make_request_fn = mddev->pers->make_request;
3733
3734         /* If there is a partially-recovered drive we need to
3735          * start recovery here.  If we leave it to md_check_recovery,
3736          * it will remove the drives and not do the right thing
3737          */
3738         if (mddev->degraded && !mddev->sync_thread) {
3739                 struct list_head *rtmp;
3740                 int spares = 0;
3741                 rdev_for_each(rdev, rtmp, mddev)
3742                         if (rdev->raid_disk >= 0 &&
3743                             !test_bit(In_sync, &rdev->flags) &&
3744                             !test_bit(Faulty, &rdev->flags))
3745                                 /* complete an interrupted recovery */
3746                                 spares++;
3747                 if (spares && mddev->pers->sync_request) {
3748                         mddev->recovery = 0;
3749                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3750                         mddev->sync_thread = md_register_thread(md_do_sync,
3751                                                                 mddev,
3752                                                                 "%s_resync");
3753                         if (!mddev->sync_thread) {
3754                                 printk(KERN_ERR "%s: could not start resync"
3755                                        " thread...\n",
3756                                        mdname(mddev));
3757                                 /* leave the spares where they are, it shouldn't hurt */
3758                                 mddev->recovery = 0;
3759                         }
3760                 }
3761         }
3762         md_wakeup_thread(mddev->thread);
3763         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3764
3765         mddev->changed = 1;
3766         md_new_event(mddev);
3767         sysfs_notify(&mddev->kobj, NULL, "array_state");
3768         sysfs_notify(&mddev->kobj, NULL, "sync_action");
3769         sysfs_notify(&mddev->kobj, NULL, "degraded");
3770         kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
3771         return 0;
3772 }
3773
3774 static int restart_array(mddev_t *mddev)
3775 {
3776         struct gendisk *disk = mddev->gendisk;
3777         int err;
3778
3779         /*
3780          * Complain if it has no devices
3781          */
3782         err = -ENXIO;
3783         if (list_empty(&mddev->disks))
3784                 goto out;
3785
3786         if (mddev->pers) {
3787                 err = -EBUSY;
3788                 if (!mddev->ro)
3789                         goto out;
3790
3791                 mddev->safemode = 0;
3792                 mddev->ro = 0;
3793                 set_disk_ro(disk, 0);
3794
3795                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3796                         mdname(mddev));
3797                 /*
3798                  * Kick recovery or resync if necessary
3799                  */
3800                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3801                 md_wakeup_thread(mddev->thread);
3802                 md_wakeup_thread(mddev->sync_thread);
3803                 err = 0;
3804                 sysfs_notify(&mddev->kobj, NULL, "array_state");
3805
3806         } else
3807                 err = -EINVAL;
3808
3809 out:
3810         return err;
3811 }
3812
3813 /* similar to deny_write_access, but accounts for our holding a reference
3814  * to the file ourselves */
3815 static int deny_bitmap_write_access(struct file * file)
3816 {
3817         struct inode *inode = file->f_mapping->host;
3818
3819         spin_lock(&inode->i_lock);
3820         if (atomic_read(&inode->i_writecount) > 1) {
3821                 spin_unlock(&inode->i_lock);
3822                 return -ETXTBSY;
3823         }
3824         atomic_set(&inode->i_writecount, -1);
3825         spin_unlock(&inode->i_lock);
3826
3827         return 0;
3828 }
3829
3830 static void restore_bitmap_write_access(struct file *file)
3831 {
3832         struct inode *inode = file->f_mapping->host;
3833
3834         spin_lock(&inode->i_lock);
3835         atomic_set(&inode->i_writecount, 1);
3836         spin_unlock(&inode->i_lock);
3837 }
3838
3839 /* mode:
3840  *   0 - completely stop and disassemble array
3841  *   1 - switch to readonly
3842  *   2 - stop but do not disassemble array
3843  */
3844 static int do_md_stop(mddev_t * mddev, int mode)
3845 {
3846         int err = 0;
3847         struct gendisk *disk = mddev->gendisk;
3848
3849         if (mddev->pers) {
3850                 if (atomic_read(&mddev->active)>2) {
3851                         printk("md: %s still in use.\n",mdname(mddev));
3852                         return -EBUSY;
3853                 }
3854
3855                 if (mddev->sync_thread) {
3856                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3857                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3858                         md_unregister_thread(mddev->sync_thread);
3859                         mddev->sync_thread = NULL;
3860                 }
3861
3862                 del_timer_sync(&mddev->safemode_timer);
3863
3864                 invalidate_partition(disk, 0);
3865
3866                 switch(mode) {
3867                 case 1: /* readonly */
3868                         err  = -ENXIO;
3869                         if (mddev->ro==1)
3870                                 goto out;
3871                         mddev->ro = 1;
3872                         break;
3873                 case 0: /* disassemble */
3874                 case 2: /* stop */
3875                         bitmap_flush(mddev);
3876                         md_super_wait(mddev);
3877                         if (mddev->ro)
3878                                 set_disk_ro(disk, 0);
3879                         blk_queue_make_request(mddev->queue, md_fail_request);
3880                         mddev->pers->stop(mddev);
3881                         mddev->queue->merge_bvec_fn = NULL;
3882                         mddev->queue->unplug_fn = NULL;
3883                         mddev->queue->backing_dev_info.congested_fn = NULL;
3884                         if (mddev->pers->sync_request)
3885                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3886
3887                         module_put(mddev->pers->owner);
3888                         mddev->pers = NULL;
3889                         /* tell userspace to handle 'inactive' */
3890                         sysfs_notify(&mddev->kobj, NULL, "array_state");
3891
3892                         set_capacity(disk, 0);
3893                         mddev->changed = 1;
3894
3895                         if (mddev->ro)
3896                                 mddev->ro = 0;
3897                 }
3898                 if (!mddev->in_sync || mddev->flags) {
3899                         /* mark array as shutdown cleanly */
3900                         mddev->in_sync = 1;
3901                         md_update_sb(mddev, 1);
3902                 }
3903                 if (mode == 1)
3904                         set_disk_ro(disk, 1);
3905                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3906         }
3907
3908         /*
3909          * Free resources if final stop
3910          */
3911         if (mode == 0) {
3912                 mdk_rdev_t *rdev;
3913                 struct list_head *tmp;
3914
3915                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3916
3917                 bitmap_destroy(mddev);
3918                 if (mddev->bitmap_file) {
3919                         restore_bitmap_write_access(mddev->bitmap_file);
3920                         fput(mddev->bitmap_file);
3921                         mddev->bitmap_file = NULL;
3922                 }
3923                 mddev->bitmap_offset = 0;
3924
3925                 rdev_for_each(rdev, tmp, mddev)
3926                         if (rdev->raid_disk >= 0) {
3927                                 char nm[20];
3928                                 sprintf(nm, "rd%d", rdev->raid_disk);
3929                                 sysfs_remove_link(&mddev->kobj, nm);
3930                         }
3931
3932                 /* make sure all md_delayed_delete calls have finished */
3933                 flush_scheduled_work();
3934
3935                 export_array(mddev);
3936
3937                 mddev->array_size = 0;
3938                 mddev->size = 0;
3939                 mddev->raid_disks = 0;
3940                 mddev->recovery_cp = 0;
3941                 mddev->resync_min = 0;
3942                 mddev->resync_max = MaxSector;
3943                 mddev->reshape_position = MaxSector;
3944                 mddev->external = 0;
3945                 mddev->persistent = 0;
3946                 mddev->level = LEVEL_NONE;
3947                 mddev->clevel[0] = 0;
3948                 mddev->flags = 0;
3949                 mddev->ro = 0;
3950                 mddev->metadata_type[0] = 0;
3951                 mddev->chunk_size = 0;
3952                 mddev->ctime = mddev->utime = 0;
3953                 mddev->layout = 0;
3954                 mddev->max_disks = 0;
3955                 mddev->events = 0;
3956                 mddev->delta_disks = 0;
3957                 mddev->new_level = LEVEL_NONE;
3958                 mddev->new_layout = 0;
3959                 mddev->new_chunk = 0;
3960                 mddev->curr_resync = 0;
3961                 mddev->resync_mismatches = 0;
3962                 mddev->suspend_lo = mddev->suspend_hi = 0;
3963                 mddev->sync_speed_min = mddev->sync_speed_max = 0;
3964                 mddev->recovery = 0;
3965                 mddev->in_sync = 0;
3966                 mddev->changed = 0;
3967                 mddev->degraded = 0;
3968                 mddev->barriers_work = 0;
3969                 mddev->safemode = 0;
3970
3971         } else if (mddev->pers)
3972                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3973                         mdname(mddev));
3974         err = 0;
3975         md_new_event(mddev);
3976         sysfs_notify(&mddev->kobj, NULL, "array_state");
3977 out:
3978         return err;
3979 }
3980
3981 #ifndef MODULE
3982 static void autorun_array(mddev_t *mddev)
3983 {
3984         mdk_rdev_t *rdev;
3985         struct list_head *tmp;
3986         int err;
3987
3988         if (list_empty(&mddev->disks))
3989                 return;
3990
3991         printk(KERN_INFO "md: running: ");
3992
3993         rdev_for_each(rdev, tmp, mddev) {
3994                 char b[BDEVNAME_SIZE];
3995                 printk("<%s>", bdevname(rdev->bdev,b));
3996         }
3997         printk("\n");
3998
3999         err = do_md_run (mddev);
4000         if (err) {
4001                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4002                 do_md_stop (mddev, 0);
4003         }
4004 }
4005
4006 /*
4007  * let's try to run arrays based on all disks that have arrived
4008  * until now. (those are in pending_raid_disks)
4009  *
4010  * the method: pick the first pending disk, collect all disks with
4011  * the same UUID, remove all from the pending list and put them into
4012  * the 'same_array' list. Then order this list based on superblock
4013  * update time (freshest comes first), kick out 'old' disks and
4014  * compare superblocks. If everything's fine then run it.
4015  *
4016  * If "unit" is allocated, then bump its reference count
4017  */
4018 static void autorun_devices(int part)
4019 {
4020         struct list_head *tmp;
4021         mdk_rdev_t *rdev0, *rdev;
4022         mddev_t *mddev;
4023         char b[BDEVNAME_SIZE];
4024
4025         printk(KERN_INFO "md: autorun ...\n");
4026         while (!list_empty(&pending_raid_disks)) {
4027                 int unit;
4028                 dev_t dev;
4029                 LIST_HEAD(candidates);
4030                 rdev0 = list_entry(pending_raid_disks.next,
4031                                          mdk_rdev_t, same_set);
4032
4033                 printk(KERN_INFO "md: considering %s ...\n",
4034                         bdevname(rdev0->bdev,b));
4035                 INIT_LIST_HEAD(&candidates);
4036                 rdev_for_each_list(rdev, tmp, pending_raid_disks)
4037                         if (super_90_load(rdev, rdev0, 0) >= 0) {
4038                                 printk(KERN_INFO "md:  adding %s ...\n",
4039                                         bdevname(rdev->bdev,b));
4040                                 list_move(&rdev->same_set, &candidates);
4041                         }
4042                 /*
4043                  * now we have a set of devices, with all of them having
4044                  * mostly sane superblocks. It's time to allocate the
4045                  * mddev.
4046                  */
4047                 if (part) {
4048                         dev = MKDEV(mdp_major,
4049                                     rdev0->preferred_minor << MdpMinorShift);
4050                         unit = MINOR(dev) >> MdpMinorShift;
4051                 } else {
4052                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
4053                         unit = MINOR(dev);
4054                 }
4055                 if (rdev0->preferred_minor != unit) {
4056                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
4057                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
4058                         break;
4059                 }
4060
4061                 md_probe(dev, NULL, NULL);
4062                 mddev = mddev_find(dev);
4063                 if (!mddev || !mddev->gendisk) {
4064                         if (mddev)
4065                                 mddev_put(mddev);
4066                         printk(KERN_ERR
4067                                 "md: cannot allocate memory for md drive.\n");
4068                         break;
4069                 }
4070                 if (mddev_lock(mddev)) 
4071                         printk(KERN_WARNING "md: %s locked, cannot run\n",
4072                                mdname(mddev));
4073                 else if (mddev->raid_disks || mddev->major_version
4074                          || !list_empty(&mddev->disks)) {
4075                         printk(KERN_WARNING 
4076                                 "md: %s already running, cannot run %s\n",
4077                                 mdname(mddev), bdevname(rdev0->bdev,b));
4078                         mddev_unlock(mddev);
4079                 } else {
4080                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
4081                         mddev->persistent = 1;
4082                         rdev_for_each_list(rdev, tmp, candidates) {
4083                                 list_del_init(&rdev->same_set);
4084                                 if (bind_rdev_to_array(rdev, mddev))
4085                                         export_rdev(rdev);
4086                         }
4087                         autorun_array(mddev);
4088                         mddev_unlock(mddev);
4089                 }
4090                 /* on success, candidates will be empty; on error
4091                  * it won't be...
4092                  */
4093                 rdev_for_each_list(rdev, tmp, candidates)
4094                         export_rdev(rdev);
4095                 mddev_put(mddev);
4096         }
4097         printk(KERN_INFO "md: ... autorun DONE.\n");
4098 }
4099 #endif /* !MODULE */
4100
4101 static int get_version(void __user * arg)
4102 {
4103         mdu_version_t ver;
4104
4105         ver.major = MD_MAJOR_VERSION;
4106         ver.minor = MD_MINOR_VERSION;
4107         ver.patchlevel = MD_PATCHLEVEL_VERSION;
4108
4109         if (copy_to_user(arg, &ver, sizeof(ver)))
4110                 return -EFAULT;
4111
4112         return 0;
4113 }
4114
4115 static int get_array_info(mddev_t * mddev, void __user * arg)
4116 {
4117         mdu_array_info_t info;
4118         int nr,working,active,failed,spare;
4119         mdk_rdev_t *rdev;
4120         struct list_head *tmp;
4121
4122         nr=working=active=failed=spare=0;
4123         rdev_for_each(rdev, tmp, mddev) {
4124                 nr++;
4125                 if (test_bit(Faulty, &rdev->flags))
4126                         failed++;
4127                 else {
4128                         working++;
4129                         if (test_bit(In_sync, &rdev->flags))
4130                                 active++;       
4131                         else
4132                                 spare++;
4133                 }
4134         }
4135
4136         info.major_version = mddev->major_version;
4137         info.minor_version = mddev->minor_version;
4138         info.patch_version = MD_PATCHLEVEL_VERSION;
4139         info.ctime         = mddev->ctime;
4140         info.level         = mddev->level;
4141         info.size          = mddev->size;
4142         if (info.size != mddev->size) /* overflow */
4143                 info.size = -1;
4144         info.nr_disks      = nr;
4145         info.raid_disks    = mddev->raid_disks;
4146         info.md_minor      = mddev->md_minor;
4147         info.not_persistent= !mddev->persistent;
4148
4149         info.utime         = mddev->utime;
4150         info.state         = 0;
4151         if (mddev->in_sync)
4152                 info.state = (1<<MD_SB_CLEAN);
4153         if (mddev->bitmap && mddev->bitmap_offset)
4154                 info.state |= (1<<MD_SB_BITMAP_PRESENT); /* don't clobber MD_SB_CLEAN */
4155         info.active_disks  = active;
4156         info.working_disks = working;
4157         info.failed_disks  = failed;
4158         info.spare_disks   = spare;
4159
4160         info.layout        = mddev->layout;
4161         info.chunk_size    = mddev->chunk_size;
4162
4163         if (copy_to_user(arg, &info, sizeof(info)))
4164                 return -EFAULT;
4165
4166         return 0;
4167 }
4168
4169 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
4170 {
4171         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
4172         char *ptr, *buf = NULL;
4173         int err = -ENOMEM;
4174
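	/* If md_allow_write() reports that it could not mark the array
	 * writable (external metadata), allocate with GFP_NOIO so the
	 * allocation cannot recurse into writeback and deadlock against
	 * the userspace metadata handler.
	 */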
4175         if (md_allow_write(mddev))
4176                 file = kmalloc(sizeof(*file), GFP_NOIO);
4177         else
4178                 file = kmalloc(sizeof(*file), GFP_KERNEL);
4179
4180         if (!file)
4181                 goto out;
4182
4183         /* bitmap disabled, zero the first byte and copy out */
4184         if (!mddev->bitmap || !mddev->bitmap->file) {
4185                 file->pathname[0] = '\0';
4186                 goto copy_out;
4187         }
4188
4189         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
4190         if (!buf)
4191                 goto out;
4192
4193         ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname));
4194         if (IS_ERR(ptr))
4195                 goto out;
4196
4197         strcpy(file->pathname, ptr);
4198
4199 copy_out:
4200         err = 0;
4201         if (copy_to_user(arg, file, sizeof(*file)))
4202                 err = -EFAULT;
4203 out:
4204         kfree(buf);
4205         kfree(file);
4206         return err;
4207 }
4208
4209 static int get_disk_info(mddev_t * mddev, void __user * arg)
4210 {
4211         mdu_disk_info_t info;
4212         unsigned int nr;
4213         mdk_rdev_t *rdev;
4214
4215         if (copy_from_user(&info, arg, sizeof(info)))
4216                 return -EFAULT;
4217
4218         nr = info.number;
4219
4220         rdev = find_rdev_nr(mddev, nr);
4221         if (rdev) {
4222                 info.major = MAJOR(rdev->bdev->bd_dev);
4223                 info.minor = MINOR(rdev->bdev->bd_dev);
4224                 info.raid_disk = rdev->raid_disk;
4225                 info.state = 0;
4226                 if (test_bit(Faulty, &rdev->flags))
4227                         info.state |= (1<<MD_DISK_FAULTY);
4228                 else if (test_bit(In_sync, &rdev->flags)) {
4229                         info.state |= (1<<MD_DISK_ACTIVE);
4230                         info.state |= (1<<MD_DISK_SYNC);
4231                 }
4232                 if (test_bit(WriteMostly, &rdev->flags))
4233                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
4234         } else {
4235                 info.major = info.minor = 0;
4236                 info.raid_disk = -1;
4237                 info.state = (1<<MD_DISK_REMOVED);
4238         }
4239
4240         if (copy_to_user(arg, &info, sizeof(info)))
4241                 return -EFAULT;
4242
4243         return 0;
4244 }
4245
4246 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
4247 {
4248         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
4249         mdk_rdev_t *rdev;
4250         dev_t dev = MKDEV(info->major,info->minor);
4251
4252         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
4253                 return -EOVERFLOW;
4254
4255         if (!mddev->raid_disks) {
4256                 int err;
4257                 /* expecting a device which has a superblock */
4258                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4259                 if (IS_ERR(rdev)) {
4260                         printk(KERN_WARNING 
4261                                 "md: md_import_device returned %ld\n",
4262                                 PTR_ERR(rdev));
4263                         return PTR_ERR(rdev);
4264                 }
4265                 if (!list_empty(&mddev->disks)) {
4266                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4267                                                         mdk_rdev_t, same_set);
4268                         int err = super_types[mddev->major_version]
4269                                 .load_super(rdev, rdev0, mddev->minor_version);
4270                         if (err < 0) {
4271                                 printk(KERN_WARNING 
4272                                         "md: %s has different UUID to %s\n",
4273                                         bdevname(rdev->bdev,b), 
4274                                         bdevname(rdev0->bdev,b2));
4275                                 export_rdev(rdev);
4276                                 return -EINVAL;
4277                         }
4278                 }
4279                 err = bind_rdev_to_array(rdev, mddev);
4280                 if (err)
4281                         export_rdev(rdev);
4282                 return err;
4283         }
4284
4285         /*
4286          * add_new_disk can be used once the array is assembled
4287          * to add "hot spares".  They must already have a superblock
4288          * written
4289          */
4290         if (mddev->pers) {
4291                 int err;
4292                 if (!mddev->pers->hot_add_disk) {
4293                         printk(KERN_WARNING 
4294                                 "%s: personality does not support diskops!\n",
4295                                mdname(mddev));
4296                         return -EINVAL;
4297                 }
4298                 if (mddev->persistent)
4299                         rdev = md_import_device(dev, mddev->major_version,
4300                                                 mddev->minor_version);
4301                 else
4302                         rdev = md_import_device(dev, -1, -1);
4303                 if (IS_ERR(rdev)) {
4304                         printk(KERN_WARNING 
4305                                 "md: md_import_device returned %ld\n",
4306                                 PTR_ERR(rdev));
4307                         return PTR_ERR(rdev);
4308                 }
4309                 /* set save_raid_disk if appropriate */
4310                 if (!mddev->persistent) {
4311                         if (info->state & (1<<MD_DISK_SYNC)  &&
4312                             info->raid_disk < mddev->raid_disks)
4313                                 rdev->raid_disk = info->raid_disk;
4314                         else
4315                                 rdev->raid_disk = -1;
4316                 } else
4317                         super_types[mddev->major_version].
4318                                 validate_super(mddev, rdev);
4319                 rdev->saved_raid_disk = rdev->raid_disk;
4320
4321                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4322                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4323                         set_bit(WriteMostly, &rdev->flags);
4324
4325                 rdev->raid_disk = -1;
4326                 err = bind_rdev_to_array(rdev, mddev);
4327                 if (!err && !mddev->pers->hot_remove_disk) {
4328                         /* If there is hot_add_disk but no hot_remove_disk
4329                          * then added disks are for geometry changes,
4330                          * and should be added immediately.
4331                          */
4332                         super_types[mddev->major_version].
4333                                 validate_super(mddev, rdev);
4334                         err = mddev->pers->hot_add_disk(mddev, rdev);
4335                         if (err)
4336                                 unbind_rdev_from_array(rdev);
4337                 }
4338                 if (err)
4339                         export_rdev(rdev);
4340                 else
4341                         sysfs_notify(&rdev->kobj, NULL, "state");
4342
4343                 md_update_sb(mddev, 1);
4344                 if (mddev->degraded)
4345                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4346                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4347                 md_wakeup_thread(mddev->thread);
4348                 return err;
4349         }
4350
4351         /* otherwise, add_new_disk is only allowed
4352          * for major_version==0 superblocks
4353          */
4354         if (mddev->major_version != 0) {
4355                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4356                        mdname(mddev));
4357                 return -EINVAL;
4358         }
4359
4360         if (!(info->state & (1<<MD_DISK_FAULTY))) {
4361                 int err;
4362                 rdev = md_import_device(dev, -1, 0);
4363                 if (IS_ERR(rdev)) {
4364                         printk(KERN_WARNING 
4365                                 "md: error, md_import_device() returned %ld\n",
4366                                 PTR_ERR(rdev));
4367                         return PTR_ERR(rdev);
4368                 }
4369                 rdev->desc_nr = info->number;
4370                 if (info->raid_disk < mddev->raid_disks)
4371                         rdev->raid_disk = info->raid_disk;
4372                 else
4373                         rdev->raid_disk = -1;
4374
4375                 if (rdev->raid_disk < mddev->raid_disks)
4376                         if (info->state & (1<<MD_DISK_SYNC))
4377                                 set_bit(In_sync, &rdev->flags);
4378
4379                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4380                         set_bit(WriteMostly, &rdev->flags);
4381
4382                 if (!mddev->persistent) {
4383                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
4384                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
4385                 } else 
4386                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
4387                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
4388
4389                 err = bind_rdev_to_array(rdev, mddev);
4390                 if (err) {
4391                         export_rdev(rdev);
4392                         return err;
4393                 }
4394         }
4395
4396         return 0;
4397 }
4398
4399 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
4400 {
4401         char b[BDEVNAME_SIZE];
4402         mdk_rdev_t *rdev;
4403
4404         rdev = find_rdev(mddev, dev);
4405         if (!rdev)
4406                 return -ENXIO;
4407
4408         if (rdev->raid_disk >= 0)
4409                 goto busy;
4410
4411         kick_rdev_from_array(rdev);
4412         md_update_sb(mddev, 1);
4413         md_new_event(mddev);
4414
4415         return 0;
4416 busy:
4417         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
4418                 bdevname(rdev->bdev,b), mdname(mddev));
4419         return -EBUSY;
4420 }
4421
4422 static int hot_add_disk(mddev_t * mddev, dev_t dev)
4423 {
4424         char b[BDEVNAME_SIZE];
4425         int err;
4426         unsigned int size;
4427         mdk_rdev_t *rdev;
4428
4429         if (!mddev->pers)
4430                 return -ENODEV;
4431
4432         if (mddev->major_version != 0) {
4433                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
4434                         " version-0 superblocks.\n",
4435                         mdname(mddev));
4436                 return -EINVAL;
4437         }
4438         if (!mddev->pers->hot_add_disk) {
4439                 printk(KERN_WARNING 
4440                         "%s: personality does not support diskops!\n",
4441                         mdname(mddev));
4442                 return -EINVAL;
4443         }
4444
4445         rdev = md_import_device(dev, -1, 0);
4446         if (IS_ERR(rdev)) {
4447                 printk(KERN_WARNING 
4448                         "md: error, md_import_device() returned %ld\n",
4449                         PTR_ERR(rdev));
4450                 return -EINVAL;
4451         }
4452
4453         if (mddev->persistent)
4454                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
4455         else
4456                 rdev->sb_offset =
4457                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
4458
4459         size = calc_dev_size(rdev, mddev->chunk_size);
4460         rdev->size = size;
4461
4462         if (test_bit(Faulty, &rdev->flags)) {
4463                 printk(KERN_WARNING
4464                         "md: cannot hot-add faulty %s disk to %s!\n",
4465                         bdevname(rdev->bdev,b), mdname(mddev));
4466                 err = -EINVAL;
4467                 goto abort_export;
4468         }
4469         clear_bit(In_sync, &rdev->flags);
4470         rdev->desc_nr = -1;
4471         rdev->saved_raid_disk = -1;
4472         err = bind_rdev_to_array(rdev, mddev);
4473         if (err)
4474                 goto abort_export;
4475
4476         /*
4477          * The rest should better be atomic, we can have disk failures
4478          * noticed in interrupt contexts ...
4479          */
4480
4481         if (rdev->desc_nr == mddev->max_disks) {
4482                 printk(KERN_WARNING "%s: cannot hot-add to full array!\n",
4483                         mdname(mddev));
4484                 err = -EBUSY;
4485                 goto abort_unbind_export;
4486         }
4487
4488         rdev->raid_disk = -1;
4489
4490         md_update_sb(mddev, 1);
4491
4492         /*
4493          * Kick recovery, maybe this spare has to be added to the
4494          * array immediately.
4495          */
4496         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4497         md_wakeup_thread(mddev->thread);
4498         md_new_event(mddev);
4499         return 0;
4500
4501 abort_unbind_export:
4502         unbind_rdev_from_array(rdev);
4503
4504 abort_export:
4505         export_rdev(rdev);
4506         return err;
4507 }
4508
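/* Attach (fd >= 0) or remove (fd < 0) a file-backed write-intent bitmap.
 * Reached from the SET_BITMAP_FILE ioctl with the mddev locked; a running
 * array is quiesced around the bitmap_create()/bitmap_destroy() calls.
 */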
4509 static int set_bitmap_file(mddev_t *mddev, int fd)
4510 {
4511         int err;
4512
4513         if (mddev->pers) {
4514                 if (!mddev->pers->quiesce)
4515                         return -EBUSY;
4516                 if (mddev->recovery || mddev->sync_thread)
4517                         return -EBUSY;
4518                 /* we should be able to change the bitmap.. */
4519         }
4520
4521
4522         if (fd >= 0) {
4523                 if (mddev->bitmap)
4524                         return -EEXIST; /* cannot add when bitmap is present */
4525                 mddev->bitmap_file = fget(fd);
4526
4527                 if (mddev->bitmap_file == NULL) {
4528                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
4529                                mdname(mddev));
4530                         return -EBADF;
4531                 }
4532
4533                 err = deny_bitmap_write_access(mddev->bitmap_file);
4534                 if (err) {
4535                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
4536                                mdname(mddev));
4537                         fput(mddev->bitmap_file);
4538                         mddev->bitmap_file = NULL;
4539                         return err;
4540                 }
4541                 mddev->bitmap_offset = 0; /* file overrides offset */
4542         } else if (mddev->bitmap == NULL)
4543                 return -ENOENT; /* cannot remove what isn't there */
4544         err = 0;
4545         if (mddev->pers) {
4546                 mddev->pers->quiesce(mddev, 1);
4547                 if (fd >= 0)
4548                         err = bitmap_create(mddev);
4549                 if (fd < 0 || err) {
4550                         bitmap_destroy(mddev);
4551                         fd = -1; /* make sure to put the file */
4552                 }
4553                 mddev->pers->quiesce(mddev, 0);
4554         }
4555         if (fd < 0) {
4556                 if (mddev->bitmap_file) {
4557                         restore_bitmap_write_access(mddev->bitmap_file);
4558                         fput(mddev->bitmap_file);
4559                 }
4560                 mddev->bitmap_file = NULL;
4561         }
4562
4563         return err;
4564 }
4565
4566 /*
4567  * set_array_info is used in two different ways.
4568  * The original usage is when creating a new array.
4569  * In this usage, raid_disks is > 0 and it, together with
4570  *  level, size, not_persistent, layout and chunksize, determines the
4571  *  shape of the array.
4572  *  This will always create an array with a type-0.90.0 superblock.
4573  * The newer usage is when assembling an array.
4574  *  In this case raid_disks will be 0, and the major_version field is
4575  *  used to determine which style of superblock is to be found on the devices.
4576  *  The minor and patch _version numbers are also kept in case the
4577  *  super_block handler wishes to interpret them.
4578  */
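/* Illustrative userspace sketch (not part of the driver) of the creation
 * usage described above; 'md_fd' is an assumed open fd on an unused md
 * device node:
 *
 *      mdu_array_info_t ainfo = { 0 };
 *      ainfo.level = 1;
 *      ainfo.raid_disks = 2;           raid_disks > 0 selects creation
 *      ainfo.chunk_size = 65536;       in bytes
 *      ioctl(md_fd, SET_ARRAY_INFO, &ainfo);
 *      ... then ADD_NEW_DISK for each member, then RUN_ARRAY ...
 */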
4579 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4580 {
4581
4582         if (info->raid_disks == 0) {
4583                 /* just setting version number for superblock loading */
4584                 if (info->major_version < 0 ||
4585                     info->major_version >= ARRAY_SIZE(super_types) ||
4586                     super_types[info->major_version].name == NULL) {
4587                         /* maybe try to auto-load a module? */
4588                         printk(KERN_INFO 
4589                                 "md: superblock version %d not known\n",
4590                                 info->major_version);
4591                         return -EINVAL;
4592                 }
4593                 mddev->major_version = info->major_version;
4594                 mddev->minor_version = info->minor_version;
4595                 mddev->patch_version = info->patch_version;
4596                 mddev->persistent = !info->not_persistent;
4597                 return 0;
4598         }
4599         mddev->major_version = MD_MAJOR_VERSION;
4600         mddev->minor_version = MD_MINOR_VERSION;
4601         mddev->patch_version = MD_PATCHLEVEL_VERSION;
4602         mddev->ctime         = get_seconds();
4603
4604         mddev->level         = info->level;
4605         mddev->clevel[0]     = 0;
4606         mddev->size          = info->size;
4607         mddev->raid_disks    = info->raid_disks;
4608         /* don't set md_minor, it is determined by which /dev/md* was
4609          * opened
4610          */
4611         if (info->state & (1<<MD_SB_CLEAN))
4612                 mddev->recovery_cp = MaxSector;
4613         else
4614                 mddev->recovery_cp = 0;
4615         mddev->persistent    = !info->not_persistent;
4616         mddev->external      = 0;
4617
4618         mddev->layout        = info->layout;
4619         mddev->chunk_size    = info->chunk_size;
4620
4621         mddev->max_disks     = MD_SB_DISKS;
4622
4623         if (mddev->persistent)
4624                 mddev->flags         = 0;
4625         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4626
4627         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4628         mddev->bitmap_offset = 0;
4629
4630         mddev->reshape_position = MaxSector;
4631
4632         /*
4633          * Generate a 128 bit UUID
4634          */
4635         get_random_bytes(mddev->uuid, 16);
4636
4637         mddev->new_level = mddev->level;
4638         mddev->new_chunk = mddev->chunk_size;
4639         mddev->new_layout = mddev->layout;
4640         mddev->delta_disks = 0;
4641
4642         return 0;
4643 }
4644
4645 static int update_size(mddev_t *mddev, unsigned long size)
4646 {
4647         mdk_rdev_t * rdev;
4648         int rv;
4649         struct list_head *tmp;
4650         int fit = (size == 0);
4651
4652         if (mddev->pers->resize == NULL)
4653                 return -EINVAL;
4654         /* The "size" is the amount of each device that is used.
4655          * This can only make sense for arrays with redundancy.
4656          * linear and raid0 always use whatever space is available.
4657          * We can only consider changing the size if no resync
4658          * or reconstruction is happening, and if the new size
4659          * is acceptable. It must fit below the sb_offset or,
4660          * if that is < data_offset, it must fit within the
4661          * size of each device.
4662          * If size is zero, we find the largest size that fits.
4663          */
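        /* Worked example: with members whose rdev->size is 1048576 and
         * 2097152 (1K blocks), a "size" of 0 settles on 1048576 KB below,
         * i.e. the largest value that still fits on every member.
         */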
4664         if (mddev->sync_thread)
4665                 return -EBUSY;
4666         rdev_for_each(rdev, tmp, mddev) {
4667                 sector_t avail;
4668                 avail = rdev->size * 2;
4669
4670                 if (fit && (size == 0 || size > avail/2))
4671                         size = avail/2;
4672                 if (avail < ((sector_t)size << 1))
4673                         return -ENOSPC;
4674         }
4675         rv = mddev->pers->resize(mddev, (sector_t)size * 2);
4676         if (!rv) {
4677                 struct block_device *bdev;
4678
4679                 bdev = bdget_disk(mddev->gendisk, 0);
4680                 if (bdev) {
4681                         mutex_lock(&bdev->bd_inode->i_mutex);
4682                         i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4683                         mutex_unlock(&bdev->bd_inode->i_mutex);
4684                         bdput(bdev);
4685                 }
4686         }
4687         return rv;
4688 }
4689
4690 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4691 {
4692         int rv;
4693         /* change the number of raid disks */
4694         if (mddev->pers->check_reshape == NULL)
4695                 return -EINVAL;
4696         if (raid_disks <= 0 ||
4697             raid_disks >= mddev->max_disks)
4698                 return -EINVAL;
4699         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4700                 return -EBUSY;
4701         mddev->delta_disks = raid_disks - mddev->raid_disks;
4702
4703         rv = mddev->pers->check_reshape(mddev);
4704         return rv;
4705 }
4706
4707
4708 /*
4709  * update_array_info is used to change the configuration of an
4710  * on-line array.
4711  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
4712  * fields in the info are checked against the array.
4713  * Any differences that cannot be handled will cause an error.
4714  * Normally, only one change can be managed at a time.
4715  */
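/* Illustrative sketch (not part of the driver): adding an internal bitmap
 * to a running array is one such single change, roughly
 *
 *      mdu_array_info_t ainfo;
 *      ioctl(md_fd, GET_ARRAY_INFO, &ainfo);
 *      ainfo.state |= (1 << MD_SB_BITMAP_PRESENT);
 *      ioctl(md_fd, SET_ARRAY_INFO, &ainfo);
 *
 * which lands in the bitmap branch near the end of this function.
 */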
4716 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4717 {
4718         int rv = 0;
4719         int cnt = 0;
4720         int state = 0;
4721
4722         /* calculate expected state, ignoring low bits */
4723         if (mddev->bitmap && mddev->bitmap_offset)
4724                 state |= (1 << MD_SB_BITMAP_PRESENT);
4725
4726         if (mddev->major_version != info->major_version ||
4727             mddev->minor_version != info->minor_version ||
4728 /*          mddev->patch_version != info->patch_version || */
4729             mddev->ctime         != info->ctime         ||
4730             mddev->level         != info->level         ||
4731 /*          mddev->layout        != info->layout        || */
4732             !mddev->persistent   != info->not_persistent||
4733             mddev->chunk_size    != info->chunk_size    ||
4734             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4735             ((state^info->state) & 0xfffffe00)
4736                 )
4737                 return -EINVAL;
4738         /* Check there is only one change */
4739         if (info->size >= 0 && mddev->size != info->size) cnt++;
4740         if (mddev->raid_disks != info->raid_disks) cnt++;
4741         if (mddev->layout != info->layout) cnt++;
4742         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4743         if (cnt == 0) return 0;
4744         if (cnt > 1) return -EINVAL;
4745
4746         if (mddev->layout != info->layout) {
4747                 /* Change layout
4748                  * we don't need to do anything at the md level, the
4749                  * personality will take care of it all.
4750                  */
4751                 if (mddev->pers->reconfig == NULL)
4752                         return -EINVAL;
4753                 else
4754                         return mddev->pers->reconfig(mddev, info->layout, -1);
4755         }
4756         if (info->size >= 0 && mddev->size != info->size)
4757                 rv = update_size(mddev, info->size);
4758
4759         if (mddev->raid_disks    != info->raid_disks)
4760                 rv = update_raid_disks(mddev, info->raid_disks);
4761
4762         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4763                 if (mddev->pers->quiesce == NULL)
4764                         return -EINVAL;
4765                 if (mddev->recovery || mddev->sync_thread)
4766                         return -EBUSY;
4767                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4768                         /* add the bitmap */
4769                         if (mddev->bitmap)
4770                                 return -EEXIST;
4771                         if (mddev->default_bitmap_offset == 0)
4772                                 return -EINVAL;
4773                         mddev->bitmap_offset = mddev->default_bitmap_offset;
4774                         mddev->pers->quiesce(mddev, 1);
4775                         rv = bitmap_create(mddev);
4776                         if (rv)
4777                                 bitmap_destroy(mddev);
4778                         mddev->pers->quiesce(mddev, 0);
4779                 } else {
4780                         /* remove the bitmap */
4781                         if (!mddev->bitmap)
4782                                 return -ENOENT;
4783                         if (mddev->bitmap->file)
4784                                 return -EINVAL;
4785                         mddev->pers->quiesce(mddev, 1);
4786                         bitmap_destroy(mddev);
4787                         mddev->pers->quiesce(mddev, 0);
4788                         mddev->bitmap_offset = 0;
4789                 }
4790         }
4791         md_update_sb(mddev, 1);
4792         return rv;
4793 }
4794
4795 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4796 {
4797         mdk_rdev_t *rdev;
4798
4799         if (mddev->pers == NULL)
4800                 return -ENODEV;
4801
4802         rdev = find_rdev(mddev, dev);
4803         if (!rdev)
4804                 return -ENODEV;
4805
4806         md_error(mddev, rdev);
4807         return 0;
4808 }
4809
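/* Fake a CHS geometry for HDIO_GETGEO and friends: with 2 heads and
 * 4 sectors per track, cylinders = capacity / (2 * 4); e.g. a 1 TiB
 * array (2147483648 sectors) reports 268435456 cylinders.
 */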
4810 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4811 {
4812         mddev_t *mddev = bdev->bd_disk->private_data;
4813
4814         geo->heads = 2;
4815         geo->sectors = 4;
4816         geo->cylinders = get_capacity(mddev->gendisk) / 8;
4817         return 0;
4818 }
4819
4820 static int md_ioctl(struct inode *inode, struct file *file,
4821                         unsigned int cmd, unsigned long arg)
4822 {
4823         int err = 0;
4824         void __user *argp = (void __user *)arg;
4825         mddev_t *mddev = NULL;
4826
4827         if (!capable(CAP_SYS_ADMIN))
4828                 return -EACCES;
4829
4830         /*
4831          * Commands dealing with the RAID driver but not any
4832          * particular array:
4833          */
4834         switch (cmd)
4835         {
4836                 case RAID_VERSION:
4837                         err = get_version(argp);
4838                         goto done;
4839
4840                 case PRINT_RAID_DEBUG:
4841                         err = 0;
4842                         md_print_devices();
4843                         goto done;
4844
4845 #ifndef MODULE
4846                 case RAID_AUTORUN:
4847                         err = 0;
4848                         autostart_arrays(arg);
4849                         goto done;
4850 #endif
4851                 default:;
4852         }
4853
4854         /*
4855          * Commands creating/starting a new array:
4856          */
4857
4858         mddev = inode->i_bdev->bd_disk->private_data;
4859
4860         if (!mddev) {
4861                 BUG();
4862                 goto abort;
4863         }
4864
4865         err = mddev_lock(mddev);
4866         if (err) {
4867                 printk(KERN_INFO 
4868                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
4869                         err, cmd);
4870                 goto abort;
4871         }
4872
4873         switch (cmd)
4874         {
4875                 case SET_ARRAY_INFO:
4876                         {
4877                                 mdu_array_info_t info;
4878                                 if (!arg)
4879                                         memset(&info, 0, sizeof(info));
4880                                 else if (copy_from_user(&info, argp, sizeof(info))) {
4881                                         err = -EFAULT;
4882                                         goto abort_unlock;
4883                                 }
4884                                 if (mddev->pers) {
4885                                         err = update_array_info(mddev, &info);
4886                                         if (err) {
4887                                                 printk(KERN_WARNING "md: couldn't update"
4888                                                        " array info. %d\n", err);
4889                                                 goto abort_unlock;
4890                                         }
4891                                         goto done_unlock;
4892                                 }
4893                                 if (!list_empty(&mddev->disks)) {
4894                                         printk(KERN_WARNING
4895                                                "md: array %s already has disks!\n",
4896                                                mdname(mddev));
4897                                         err = -EBUSY;
4898                                         goto abort_unlock;
4899                                 }
4900                                 if (mddev->raid_disks) {
4901                                         printk(KERN_WARNING
4902                                                "md: array %s already initialised!\n",
4903                                                mdname(mddev));
4904                                         err = -EBUSY;
4905                                         goto abort_unlock;
4906                                 }
4907                                 err = set_array_info(mddev, &info);
4908                                 if (err) {
4909                                         printk(KERN_WARNING "md: couldn't set"
4910                                                " array info. %d\n", err);
4911                                         goto abort_unlock;
4912                                 }
4913                         }
4914                         goto done_unlock;
4915
4916                 default:;
4917         }
4918
4919         /*
4920          * Commands querying/configuring an existing array:
4921          */
4922         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4923          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4924         if ((!mddev->raid_disks && !mddev->external)
4925             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4926             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4927             && cmd != GET_BITMAP_FILE) {
4928                 err = -ENODEV;
4929                 goto abort_unlock;
4930         }
4931
4932         /*
4933          * Commands even a read-only array can execute:
4934          */
4935         switch (cmd)
4936         {
4937                 case GET_ARRAY_INFO:
4938                         err = get_array_info(mddev, argp);
4939                         goto done_unlock;
4940
4941                 case GET_BITMAP_FILE:
4942                         err = get_bitmap_file(mddev, argp);
4943                         goto done_unlock;
4944
4945                 case GET_DISK_INFO:
4946                         err = get_disk_info(mddev, argp);
4947                         goto done_unlock;
4948
4949                 case RESTART_ARRAY_RW:
4950                         err = restart_array(mddev);
4951                         goto done_unlock;
4952
4953                 case STOP_ARRAY:
4954                         err = do_md_stop(mddev, 0);
4955                         goto done_unlock;
4956
4957                 case STOP_ARRAY_RO:
4958                         err = do_md_stop(mddev, 1);
4959                         goto done_unlock;
4960
4961         /*
4962          * We have a problem here : there is no easy way to give a CHS
4963          * virtual geometry. We currently pretend that we have a 2 heads
4964          * 4 sectors (with a BIG number of cylinders...). This drives
4965          * dosfs just mad... ;-)
4966          */
4967         }
4968
4969         /*
4970          * The remaining ioctls are changing the state of the
4971          * superblock, so we do not allow them on read-only arrays.
4972          * However non-MD ioctls (e.g. get-size) will still come through
4973          * here and hit the 'default' below, so only disallow
4974          * 'md' ioctls, and switch to rw mode if started auto-readonly.
4975          */
4976         if (_IOC_TYPE(cmd) == MD_MAJOR &&
4977             mddev->ro && mddev->pers) {
4978                 if (mddev->ro == 2) {
4979                         mddev->ro = 0;
4980                         sysfs_notify(&mddev->kobj, NULL, "array_state");
4981                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4982                         md_wakeup_thread(mddev->thread);
4983
4984                 } else {
4985                         err = -EROFS;
4986                         goto abort_unlock;
4987                 }
4988         }
4989
4990         switch (cmd)
4991         {
4992                 case ADD_NEW_DISK:
4993                 {
4994                         mdu_disk_info_t info;
4995                         if (copy_from_user(&info, argp, sizeof(info)))
4996                                 err = -EFAULT;
4997                         else
4998                                 err = add_new_disk(mddev, &info);
4999                         goto done_unlock;
5000                 }
5001
5002                 case HOT_REMOVE_DISK:
5003                         err = hot_remove_disk(mddev, new_decode_dev(arg));
5004                         goto done_unlock;
5005
5006                 case HOT_ADD_DISK:
5007                         err = hot_add_disk(mddev, new_decode_dev(arg));
5008                         goto done_unlock;
5009
5010                 case SET_DISK_FAULTY:
5011                         err = set_disk_faulty(mddev, new_decode_dev(arg));
5012                         goto done_unlock;
5013
5014                 case RUN_ARRAY:
5015                         err = do_md_run(mddev);
5016                         goto done_unlock;
5017
5018                 case SET_BITMAP_FILE:
5019                         err = set_bitmap_file(mddev, (int)arg);
5020                         goto done_unlock;
5021
5022                 default:
5023                         err = -EINVAL;
5024                         goto abort_unlock;
5025         }
5026
5027 done_unlock:
5028 abort_unlock:
5029         mddev_unlock(mddev);
5030
5031         return err;
5032 done:
5033         if (err)
5034                 MD_BUG();
5035 abort:
5036         return err;
5037 }
5038
5039 static int md_open(struct inode *inode, struct file *file)
5040 {
5041         /*
5042          * Succeed if we can lock the mddev, which confirms that
5043          * it isn't being stopped right now.
5044          */
5045         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
5046         int err;
5047
5048         if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
5049                 goto out;
5050
5051         err = 0;
5052         mddev_get(mddev);
5053         mddev_unlock(mddev);
5054
5055         check_disk_change(inode->i_bdev);
5056  out:
5057         return err;
5058 }
5059
5060 static int md_release(struct inode *inode, struct file * file)
5061 {
5062         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
5063
5064         BUG_ON(!mddev);
5065         mddev_put(mddev);
5066
5067         return 0;
5068 }
5069
5070 static int md_media_changed(struct gendisk *disk)
5071 {
5072         mddev_t *mddev = disk->private_data;
5073
5074         return mddev->changed;
5075 }
5076
5077 static int md_revalidate(struct gendisk *disk)
5078 {
5079         mddev_t *mddev = disk->private_data;
5080
5081         mddev->changed = 0;
5082         return 0;
5083 }
5084 static struct block_device_operations md_fops =
5085 {
5086         .owner          = THIS_MODULE,
5087         .open           = md_open,
5088         .release        = md_release,
5089         .ioctl          = md_ioctl,
5090         .getgeo         = md_getgeo,
5091         .media_changed  = md_media_changed,
5092         .revalidate_disk= md_revalidate,
5093 };
5094
5095 static int md_thread(void * arg)
5096 {
5097         mdk_thread_t *thread = arg;
5098
5099         /*
5100          * md_thread is a 'system-thread', its priority should be very
5101          * high. We avoid resource deadlocks individually in each
5102          * raid personality. (RAID5 does preallocation) We also use RR and
5103          * the very same RT priority as kswapd, thus we will never get
5104          * into a priority inversion deadlock.
5105          *
5106          * we definitely have to have equal or higher priority than
5107          * bdflush, otherwise bdflush will deadlock if there are too
5108          * many dirty RAID5 blocks.
5109          */
5110
5111         allow_signal(SIGKILL);
5112         while (!kthread_should_stop()) {
5113
5114                 /* We need to wait INTERRUPTIBLE so that
5115                  * we don't add to the load-average.
5116                  * That means we need to be sure no signals are
5117                  * pending
5118                  */
5119                 if (signal_pending(current))
5120                         flush_signals(current);
5121
5122                 wait_event_interruptible_timeout
5123                         (thread->wqueue,
5124                          test_bit(THREAD_WAKEUP, &thread->flags)
5125                          || kthread_should_stop(),
5126                          thread->timeout);
5127
5128                 clear_bit(THREAD_WAKEUP, &thread->flags);
5129
5130                 thread->run(thread->mddev);
5131         }
5132
5133         return 0;
5134 }
5135
5136 void md_wakeup_thread(mdk_thread_t *thread)
5137 {
5138         if (thread) {
5139                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
5140                 set_bit(THREAD_WAKEUP, &thread->flags);
5141                 wake_up(&thread->wqueue);
5142         }
5143 }
5144
5145 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
5146                                  const char *name)
5147 {
5148         mdk_thread_t *thread;
5149
5150         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
5151         if (!thread)
5152                 return NULL;
5153
5154         init_waitqueue_head(&thread->wqueue);
5155
5156         thread->run = run;
5157         thread->mddev = mddev;
5158         thread->timeout = MAX_SCHEDULE_TIMEOUT;
5159         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
5160         if (IS_ERR(thread->tsk)) {
5161                 kfree(thread);
5162                 return NULL;
5163         }
5164         return thread;
5165 }
5166
5167 void md_unregister_thread(mdk_thread_t *thread)
5168 {
5169         dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
5170
5171         kthread_stop(thread->tsk);
5172         kfree(thread);
5173 }
5174
5175 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
5176 {
5177         if (!mddev) {
5178                 MD_BUG();
5179                 return;
5180         }
5181
5182         if (!rdev || test_bit(Faulty, &rdev->flags))
5183                 return;
5184
5185         if (mddev->external)
5186                 set_bit(Blocked, &rdev->flags);
5187 /*
5188         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
5189                 mdname(mddev),
5190                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
5191                 __builtin_return_address(0),__builtin_return_address(1),
5192                 __builtin_return_address(2),__builtin_return_address(3));
5193 */
5194         if (!mddev->pers)
5195                 return;
5196         if (!mddev->pers->error_handler)
5197                 return;
5198         mddev->pers->error_handler(mddev,rdev);
5199         if (mddev->degraded)
5200                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5201         set_bit(StateChanged, &rdev->flags);
5202         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5203         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5204         md_wakeup_thread(mddev->thread);
5205         md_new_event_inintr(mddev);
5206 }
5207
5208 /* seq_file implementation /proc/mdstat */
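/* Illustrative /proc/mdstat output (shape only; the per-array detail on
 * the second line comes from the personality's ->status method):
 *
 *   Personalities : [raid1] [raid5]
 *   md0 : active raid1 sdb1[1] sda1[0]
 *         1048512 blocks [2/2] [UU]
 *
 *   unused devices: <none>
 */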
5209
5210 static void status_unused(struct seq_file *seq)
5211 {
5212         int i = 0;
5213         mdk_rdev_t *rdev;
5214         struct list_head *tmp;
5215
5216         seq_printf(seq, "unused devices: ");
5217
5218         rdev_for_each_list(rdev, tmp, pending_raid_disks) {
5219                 char b[BDEVNAME_SIZE];
5220                 i++;
5221                 seq_printf(seq, "%s ",
5222                               bdevname(rdev->bdev,b));
5223         }
5224         if (!i)
5225                 seq_printf(seq, "<none>");
5226
5227         seq_printf(seq, "\n");
5228 }
5229
5230
5231 static void status_resync(struct seq_file *seq, mddev_t * mddev)
5232 {
5233         sector_t max_blocks, resync, res;
5234         unsigned long dt, db, rt;
5235         int scale;
5236         unsigned int per_milli;
5237
5238         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
5239
5240         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5241                 max_blocks = mddev->resync_max_sectors >> 1;
5242         else
5243                 max_blocks = mddev->size;
5244
5245         /*
5246          * Should not happen.
5247          */
5248         if (!max_blocks) {
5249                 MD_BUG();
5250                 return;
5251         }
5252         /* Pick 'scale' such that (resync>>scale)*1000 will fit
5253          * in a sector_t, and (max_blocks>>scale) will fit in a
5254          * u32, as those are the requirements for sector_div.
5255          * Thus 'scale' must be at least 10
5256          */
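        /* 1000 < 2^10, so once scale >= 10 the product
         * (resync >> scale) * 1000 is strictly less than resync and
         * therefore cannot overflow sector_t.
         */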
5257         scale = 10;
5258         if (sizeof(sector_t) > sizeof(unsigned long)) {
5259                 while (max_blocks/2 > (1ULL<<(scale+32)))
5260                         scale++;
5261         }
5262         res = (resync>>scale)*1000;
5263         sector_div(res, (u32)((max_blocks>>scale)+1));
5264
5265         per_milli = res;
5266         {
5267                 int i, x = per_milli/50, y = 20-x;
5268                 seq_printf(seq, "[");
5269                 for (i = 0; i < x; i++)
5270                         seq_printf(seq, "=");
5271                 seq_printf(seq, ">");
5272                 for (i = 0; i < y; i++)
5273                         seq_printf(seq, ".");
5274                 seq_printf(seq, "] ");
5275         }
5276         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
5277                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
5278                     "reshape" :
5279                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
5280                      "check" :
5281                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
5282                       "resync" : "recovery"))),
5283                    per_milli/10, per_milli % 10,
5284                    (unsigned long long) resync,
5285                    (unsigned long long) max_blocks);
5286
5287         /*
5288          * We do not want to overflow, so the order of operands and
5289          * the * 100 / 100 trick are important. We do a +1 to be
5290          * safe against division by zero. We only estimate anyway.
5291          *
5292          * dt: time from mark until now
5293          * db: blocks written from mark until now
5294          * rt: remaining time
5295          */
5296         dt = ((jiffies - mddev->resync_mark) / HZ);
5297         if (!dt) dt++;
5298         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
5299                 - mddev->resync_mark_cnt;
5300         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
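        /* Algebraically rt ~= dt * (max_blocks - resync) / (db/2):
         * remaining 1K blocks over the recent KB/sec rate.  The /100
         * split keeps the intermediates within an unsigned long, and
         * the +1 avoids a division by zero.
         */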
5301
5302         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
5303
5304         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
5305 }
5306
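/* The iterator walks all_mddevs with two sentinel positions: (void *)1
 * stands for the "Personalities" header and (void *)2 for the trailing
 * "unused devices" line.  Any other cursor value is a real mddev, on
 * which a reference is held (mddev_get) while it is being shown.
 */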
5307 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
5308 {
5309         struct list_head *tmp;
5310         loff_t l = *pos;
5311         mddev_t *mddev;
5312
5313         if (l >= 0x10000)
5314                 return NULL;
5315         if (!l--)
5316                 /* header */
5317                 return (void*)1;
5318
5319         spin_lock(&all_mddevs_lock);
5320         list_for_each(tmp,&all_mddevs)
5321                 if (!l--) {
5322                         mddev = list_entry(tmp, mddev_t, all_mddevs);
5323                         mddev_get(mddev);
5324                         spin_unlock(&all_mddevs_lock);
5325                         return mddev;
5326                 }
5327         spin_unlock(&all_mddevs_lock);
5328         if (!l--)
5329                 return (void*)2;/* tail */
5330         return NULL;
5331 }
5332
5333 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5334 {
5335         struct list_head *tmp;
5336         mddev_t *next_mddev, *mddev = v;
5337         
5338         ++*pos;
5339         if (v == (void*)2)
5340                 return NULL;
5341
5342         spin_lock(&all_mddevs_lock);
5343         if (v == (void*)1)
5344                 tmp = all_mddevs.next;
5345         else
5346                 tmp = mddev->all_mddevs.next;
5347         if (tmp != &all_mddevs)
5348                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
5349         else {
5350                 next_mddev = (void*)2;
5351                 *pos = 0x10000;
5352         }               
5353         spin_unlock(&all_mddevs_lock);
5354
5355         if (v != (void*)1)
5356                 mddev_put(mddev);
5357         return next_mddev;
5358
5359 }
5360
5361 static void md_seq_stop(struct seq_file *seq, void *v)
5362 {
5363         mddev_t *mddev = v;
5364
5365         if (mddev && v != (void*)1 && v != (void*)2)
5366                 mddev_put(mddev);
5367 }
5368
5369 struct mdstat_info {
5370         int event;
5371 };
5372
5373 static int md_seq_show(struct seq_file *seq, void *v)
5374 {
5375         mddev_t *mddev = v;
5376         sector_t size;
5377         struct list_head *tmp2;
5378         mdk_rdev_t *rdev;
5379         struct mdstat_info *mi = seq->private;
5380         struct bitmap *bitmap;
5381
5382         if (v == (void*)1) {
5383                 struct mdk_personality *pers;
5384                 seq_printf(seq, "Personalities : ");
5385                 spin_lock(&pers_lock);
5386                 list_for_each_entry(pers, &pers_list, list)
5387                         seq_printf(seq, "[%s] ", pers->name);
5388
5389                 spin_unlock(&pers_lock);
5390                 seq_printf(seq, "\n");
5391                 mi->event = atomic_read(&md_event_count);
5392                 return 0;
5393         }
5394         if (v == (void*)2) {
5395                 status_unused(seq);
5396                 return 0;
5397         }
5398
5399         if (mddev_lock(mddev) < 0)
5400                 return -EINTR;
5401
5402         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
5403                 seq_printf(seq, "%s : %sactive", mdname(mddev),
5404                                                 mddev->pers ? "" : "in");
5405                 if (mddev->pers) {
5406                         if (mddev->ro==1)
5407                                 seq_printf(seq, " (read-only)");
5408                         if (mddev->ro==2)
5409                                 seq_printf(seq, " (auto-read-only)");
5410                         seq_printf(seq, " %s", mddev->pers->name);
5411                 }
5412
5413                 size = 0;
5414                 rdev_for_each(rdev, tmp2, mddev) {
5415                         char b[BDEVNAME_SIZE];
5416                         seq_printf(seq, " %s[%d]",
5417                                 bdevname(rdev->bdev,b), rdev->desc_nr);
5418                         if (test_bit(WriteMostly, &rdev->flags))
5419                                 seq_printf(seq, "(W)");
5420                         if (test_bit(Faulty, &rdev->flags)) {
5421                                 seq_printf(seq, "(F)");
5422                                 continue;
5423                         } else if (rdev->raid_disk < 0)
5424                                 seq_printf(seq, "(S)"); /* spare */
5425                         size += rdev->size;
5426                 }
5427
5428                 if (!list_empty(&mddev->disks)) {
5429                         if (mddev->pers)
5430                                 seq_printf(seq, "\n      %llu blocks",
5431                                         (unsigned long long)mddev->array_size);
5432                         else
5433                                 seq_printf(seq, "\n      %llu blocks",
5434                                         (unsigned long long)size);
5435                 }
5436                 if (mddev->persistent) {
5437                         if (mddev->major_version != 0 ||
5438                             mddev->minor_version != 90) {
5439                                 seq_printf(seq," super %d.%d",
5440                                            mddev->major_version,
5441                                            mddev->minor_version);
5442                         }
5443                 } else if (mddev->external)
5444                         seq_printf(seq, " super external:%s",
5445                                    mddev->metadata_type);
5446                 else
5447                         seq_printf(seq, " super non-persistent");
5448
5449                 if (mddev->pers) {
5450                         mddev->pers->status(seq, mddev);
5451                         seq_printf(seq, "\n      ");
5452                         if (mddev->pers->sync_request) {
5453                                 if (mddev->curr_resync > 2) {
5454                                         status_resync(seq, mddev);
5455                                         seq_printf(seq, "\n      ");
5456                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
5457                                         seq_printf(seq, "\tresync=DELAYED\n      ");
5458                                 else if (mddev->recovery_cp < MaxSector)
5459                                         seq_printf(seq, "\tresync=PENDING\n      ");
5460                         }
5461                 } else
5462                         seq_printf(seq, "\n       ");
5463
5464                 if ((bitmap = mddev->bitmap)) {
5465                         unsigned long chunk_kb;
5466                         unsigned long flags;
5467                         spin_lock_irqsave(&bitmap->lock, flags);
5468                         chunk_kb = bitmap->chunksize >> 10;
5469                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
5470                                 "%lu%s chunk",
5471                                 bitmap->pages - bitmap->missing_pages,
5472                                 bitmap->pages,
5473                                 (bitmap->pages - bitmap->missing_pages)
5474                                         << (PAGE_SHIFT - 10),
5475                                 chunk_kb ? chunk_kb : bitmap->chunksize,
5476                                 chunk_kb ? "KB" : "B");
5477                         if (bitmap->file) {
5478                                 seq_printf(seq, ", file: ");
5479                                 seq_path(seq, &bitmap->file->f_path, " \t\n");
5480                         }
5481
5482                         seq_printf(seq, "\n");
5483                         spin_unlock_irqrestore(&bitmap->lock, flags);
5484                 }
5485
5486                 seq_printf(seq, "\n");
5487         }
5488         mddev_unlock(mddev);
5489         
5490         return 0;
5491 }
5492
5493 static struct seq_operations md_seq_ops = {
5494         .start  = md_seq_start,
5495         .next   = md_seq_next,
5496         .stop   = md_seq_stop,
5497         .show   = md_seq_show,
5498 };
5499
5500 static int md_seq_open(struct inode *inode, struct file *file)
5501 {
5502         int error;
5503         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
5504         if (mi == NULL)
5505                 return -ENOMEM;
5506
5507         error = seq_open(file, &md_seq_ops);
5508         if (error)
5509                 kfree(mi);
5510         else {
5511                 struct seq_file *p = file->private_data;
5512                 p->private = mi;
5513                 mi->event = atomic_read(&md_event_count);
5514         }
5515         return error;
5516 }
5517
5518 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
5519 {
5520         struct seq_file *m = filp->private_data;
5521         struct mdstat_info *mi = m->private;
5522         int mask;
5523
5524         poll_wait(filp, &md_event_waiters, wait);
5525
5526         /* always allow read */
5527         mask = POLLIN | POLLRDNORM;
5528
5529         if (mi->event != atomic_read(&md_event_count))
5530                 mask |= POLLERR | POLLPRI;
5531         return mask;
5532 }
5533
5534 static const struct file_operations md_seq_fops = {
5535         .owner          = THIS_MODULE,
5536         .open           = md_seq_open,
5537         .read           = seq_read,
5538         .llseek         = seq_lseek,
5539         .release        = seq_release_private,
5540         .poll           = mdstat_poll,
5541 };
5542
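/* Personality modules register themselves at init time; illustratively
 * (raid_x is a placeholder, not a real module):
 *
 *      static int __init raid_x_init(void)
 *      {
 *              return register_md_personality(&raid_x_personality);
 *      }
 *
 * with the matching unregister_md_personality() call on module exit.
 */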
5543 int register_md_personality(struct mdk_personality *p)
5544 {
5545         spin_lock(&pers_lock);
5546         list_add_tail(&p->list, &pers_list);
5547         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
5548         spin_unlock(&pers_lock);
5549         return 0;
5550 }
5551
5552 int unregister_md_personality(struct mdk_personality *p)
5553 {
5554         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
5555         spin_lock(&pers_lock);
5556         list_del_init(&p->list);
5557         spin_unlock(&pers_lock);
5558         return 0;
5559 }
5560
5561 static int is_mddev_idle(mddev_t *mddev)
5562 {
5563         mdk_rdev_t * rdev;
5564         struct list_head *tmp;
5565         int idle;
5566         long curr_events;
5567
5568         idle = 1;
5569         rdev_for_each(rdev, tmp, mddev) {
5570                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
5571                 curr_events = disk_stat_read(disk, sectors[0]) + 
5572                                 disk_stat_read(disk, sectors[1]) - 
5573                                 atomic_read(&disk->sync_io);
5574                 /* sync IO will cause sync_io to increase before the disk_stats
5575                  * as sync_io is counted when a request starts, and
5576                  * disk_stats is counted when it completes.
5577                  * So resync activity will cause curr_events to be smaller than
5578                  * when there was no such activity.
5579                  * non-sync IO will cause disk_stat to increase without
5580                  * increasing sync_io so curr_events will (eventually)
5581                  * be larger than it was before.  Once it becomes
5582                  * substantially larger, the test below will cause
5583                  * the array to appear non-idle, and resync will slow
5584                  * down.
5585                  * If there is a lot of outstanding resync activity when
5586                  * we set last_event to curr_events, then all that activity
5587                  * completing might cause the array to appear non-idle
5588                  * and resync will be slowed down even though there might
5589                  * not have been non-resync activity.  This will only
5590                  * happen once though.  'last_events' will soon reflect
5591                  * the state where there is little or no outstanding
5592                  * resync requests, and further resync activity will
5593                  * always make curr_events less than last_events.
5594                  *
5595                  */
5596                 if (curr_events - rdev->last_events > 4096) {
5597                         rdev->last_events = curr_events;
5598                         idle = 0;
5599                 }
5600         }
5601         return idle;
5602 }
5603
5604 void md_done_sync(mddev_t *mddev, int blocks, int ok)
5605 {
5606         /* another "blocks" (512byte) blocks have been synced */
5607         atomic_sub(blocks, &mddev->recovery_active);
5608         wake_up(&mddev->recovery_wait);
5609         if (!ok) {
5610                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5611                 md_wakeup_thread(mddev->thread);
5612                 /* stop recovery, signal do_sync */
5613         }
5614 }
5615
5616
5617 /* md_write_start(mddev, bi)
5618  * If we need to update some array metadata (e.g. 'active' flag
5619  * in superblock) before writing, schedule a superblock update
5620  * and wait for it to complete.
5621  */
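/* Illustrative pairing in a personality's request path (a sketch modelled
 * on how raid1 handles writes; names and placement are assumptions):
 *
 *      md_write_start(mddev, bio);     may block for a sb update
 *      ... issue the write to the member devices ...
 *      md_write_end(mddev);            from the completion path
 */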
5622 void md_write_start(mddev_t *mddev, struct bio *bi)
5623 {
5624         int did_change = 0;
5625         if (bio_data_dir(bi) != WRITE)
5626                 return;
5627
5628         BUG_ON(mddev->ro == 1);
5629         if (mddev->ro == 2) {
5630                 /* need to switch to read/write */
5631                 mddev->ro = 0;
5632                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5633                 md_wakeup_thread(mddev->thread);
5634                 md_wakeup_thread(mddev->sync_thread);
5635                 did_change = 1;
5636         }
5637         atomic_inc(&mddev->writes_pending);
5638         if (mddev->safemode == 1)
5639                 mddev->safemode = 0;
5640         if (mddev->in_sync) {
5641                 spin_lock_irq(&mddev->write_lock);
5642                 if (mddev->in_sync) {
5643                         mddev->in_sync = 0;
5644                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5645                         md_wakeup_thread(mddev->thread);
5646                         did_change = 1;
5647                 }
5648                 spin_unlock_irq(&mddev->write_lock);
5649         }
5650         if (did_change)
5651                 sysfs_notify(&mddev->kobj, NULL, "array_state");
5652         wait_event(mddev->sb_wait,
5653                    !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
5654                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5655 }
5656
5657 void md_write_end(mddev_t *mddev)
5658 {
5659         if (atomic_dec_and_test(&mddev->writes_pending)) {
5660                 if (mddev->safemode == 2)
5661                         md_wakeup_thread(mddev->thread);
5662                 else if (mddev->safemode_delay)
5663                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5664         }
5665 }
5666
5667 /* md_allow_write(mddev)
5668  * Calling this ensures that the array is marked 'active' so that writes
5669  * may proceed without blocking.  It is important to call this before
5670  * attempting a GFP_KERNEL allocation while holding the mddev lock.
5671  * Must be called with mddev_lock held.
5672  *
5673  * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock
5674  * is dropped, so return -EAGAIN after notifying userspace.
5675  */
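/* Illustrative caller pattern (not from this file); the caller holds
 * the mddev lock and decides what -EAGAIN means for it:
 *
 *      err = md_allow_write(mddev);
 *      if (err)
 *              return err;
 *      buf = kmalloc(len, GFP_KERNEL);  now safe from an sb deadlock
 */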
5676 int md_allow_write(mddev_t *mddev)
5677 {
5678         if (!mddev->pers)
5679                 return 0;
5680         if (mddev->ro)
5681                 return 0;
5682         if (!mddev->pers->sync_request)
5683                 return 0;
5684
5685         spin_lock_irq(&mddev->write_lock);
5686         if (mddev->in_sync) {
5687                 mddev->in_sync = 0;
5688                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5689                 if (mddev->safemode_delay &&
5690                     mddev->safemode == 0)
5691                         mddev->safemode = 1;
5692                 spin_unlock_irq(&mddev->write_lock);
5693                 md_update_sb(mddev, 0);
5694                 sysfs_notify(&mddev->kobj, NULL, "array_state");
5695         } else
5696                 spin_unlock_irq(&mddev->write_lock);
5697
5698         if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
5699                 return -EAGAIN;
5700         else
5701                 return 0;
5702 }
5703 EXPORT_SYMBOL_GPL(md_allow_write);
5704
5705 #define SYNC_MARKS      10
5706 #define SYNC_MARK_STEP  (3*HZ)
5707 void md_do_sync(mddev_t *mddev)
5708 {
5709         mddev_t *mddev2;
5710         unsigned int currspeed = 0,
5711                  window;
5712         sector_t max_sectors,j, io_sectors;
5713         unsigned long mark[SYNC_MARKS];
5714         sector_t mark_cnt[SYNC_MARKS];
5715         int last_mark,m;
5716         struct list_head *tmp;
5717         sector_t last_check;
5718         int skipped = 0;
5719         struct list_head *rtmp;
5720         mdk_rdev_t *rdev;
5721         char *desc;
5722
5723         /* just in case the thread restarts... */
5724         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5725                 return;
5726         if (mddev->ro) /* never try to sync a read-only array */
5727                 return;
5728
5729         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5730                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5731                         desc = "data-check";
5732                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5733                         desc = "requested-resync";
5734                 else
5735                         desc = "resync";
5736         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5737                 desc = "reshape";
5738         else
5739                 desc = "recovery";
5740
5741         /* we overload curr_resync somewhat here.
5742          * 0 == not engaged in resync at all
5743          * 2 == checking that there is no conflict with another sync
5744          * 1 == like 2, but have yielded to allow conflicting resync to
5745          *              commence
5746          * other == active in resync - this many blocks
5747          *
5748          * Before starting a resync we must have set curr_resync to
5749          * 2, and then checked that every "conflicting" array has curr_resync
5750          * less than ours.  When we find one that is the same or higher
5751          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
5752          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
5753          * This will mean we have to start checking from the beginning again.
5754          *
5755          */
5756
5757         do {
5758                 mddev->curr_resync = 2;
5759
5760         try_again:
5761                 if (kthread_should_stop()) {
5762                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5763                         goto skip;
5764                 }
5765                 for_each_mddev(mddev2, tmp) {
5766                         if (mddev2 == mddev)
5767                                 continue;
5768                         if (!mddev->parallel_resync
5769                         &&  mddev2->curr_resync
5770                         &&  match_mddev_units(mddev, mddev2)) {
5771                                 DEFINE_WAIT(wq);
5772                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
5773                                         /* arbitrarily yield */
5774                                         mddev->curr_resync = 1;
5775                                         wake_up(&resync_wait);
5776                                 }
5777                                 if (mddev > mddev2 && mddev->curr_resync == 1)
5778                                         /* no need to wait here, we can wait the next
5779                                          * time 'round when curr_resync == 2
5780                                          */
5781                                         continue;
5782                                 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5783                                 if (!kthread_should_stop() &&
5784                                     mddev2->curr_resync >= mddev->curr_resync) {
5785                                         printk(KERN_INFO "md: delaying %s of %s"
5786                                                " until %s has finished (they"
5787                                                " share one or more physical units)\n",
5788                                                desc, mdname(mddev), mdname(mddev2));
5789                                         mddev_put(mddev2);
5790                                         schedule();
5791                                         finish_wait(&resync_wait, &wq);
5792                                         goto try_again;
5793                                 }
5794                                 finish_wait(&resync_wait, &wq);
5795                         }
5796                 }
5797         } while (mddev->curr_resync < 2);
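             /* We completed a full scan without finding a conflicting
              * array that should run first, so this resync may proceed.
              */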
5798
5799         j = 0;
5800         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5801                 /* resync follows the size requested by the personality,
5802                  * which defaults to physical size, but can be virtual size
5803                  */
5804                 max_sectors = mddev->resync_max_sectors;
5805                 mddev->resync_mismatches = 0;
5806                 /* we don't use the checkpoint if there's a bitmap */
5807                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5808                         j = mddev->resync_min;
5809                 else if (!mddev->bitmap)
5810                         j = mddev->recovery_cp;
5811
5812         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5813                 max_sectors = mddev->size << 1;
5814         else {
5815                 /* recovery follows the physical size of devices */
5816                 max_sectors = mddev->size << 1;
5817                 j = MaxSector;
5818                 rdev_for_each(rdev, rtmp, mddev)
5819                         if (rdev->raid_disk >= 0 &&
5820                             !test_bit(Faulty, &rdev->flags) &&
5821                             !test_bit(In_sync, &rdev->flags) &&
5822                             rdev->recovery_offset < j)
5823                                 j = rdev->recovery_offset;
5824         }
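             /* 'j' is now the sector to start from and 'max_sectors' where
              * to stop: resync/check covers the personality's full range
              * (resuming from resync_min or the recovery_cp checkpoint),
              * while recovery resumes from the smallest recovery_offset of
              * any not-yet-in-sync device.
              */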
5825
5826         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
5827         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
5828                 " %d KB/sec/disk.\n", speed_min(mddev));
5829         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5830                "(but not more than %d KB/sec) for %s.\n",
5831                speed_max(mddev), desc);
5832
5833         is_mddev_idle(mddev); /* this also initializes IO event counters */
5834
5835         io_sectors = 0;
5836         for (m = 0; m < SYNC_MARKS; m++) {
5837                 mark[m] = jiffies;
5838                 mark_cnt[m] = io_sectors;
5839         }
5840         last_mark = 0;
5841         mddev->resync_mark = mark[last_mark];
5842         mddev->resync_mark_cnt = mark_cnt[last_mark];
5843
5844         /*
5845          * Tune reconstruction:
5846          */
5847         window = 32*(PAGE_SIZE/512);
5848         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5849                 window/2,(unsigned long long) max_sectors/2);
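             /* e.g. with 4K pages, window = 32*(4096/512) = 256 sectors;
              * the counts above are halved to report KB and 1K blocks, so
              * this prints as a 128k window.
              */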
5850
5851         atomic_set(&mddev->recovery_active, 0);
5852         last_check = 0;
5853
5854         if (j>2) {
5855                 printk(KERN_INFO 
5856                        "md: resuming %s of %s from checkpoint.\n",
5857                        desc, mdname(mddev));
5858                 mddev->curr_resync = j;
5859         }
5860
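             /* Main loop: ask the personality for the next chunk via
              * sync_request() (hinting it to go faster while below
              * speed_min), account any real IO, and throttle at the
              * bottom to stay under speed_max unless the array is idle.
              */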
5861         while (j < max_sectors) {
5862                 sector_t sectors;
5863
5864                 skipped = 0;
5865                 if (j >= mddev->resync_max) {
5866                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5867                         wait_event(mddev->recovery_wait,
5868                                    mddev->resync_max > j
5869                                    || kthread_should_stop());
5870                 }
5871                 if (kthread_should_stop())
5872                         goto interrupted;
5873                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5874                                                   currspeed < speed_min(mddev));
5875                 if (sectors == 0) {
5876                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5877                         goto out;
5878                 }
5879
5880                 if (!skipped) { /* actual IO requested */
5881                         io_sectors += sectors;
5882                         atomic_add(sectors, &mddev->recovery_active);
5883                 }
5884
5885                 j += sectors;
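                     /* values 1 and 2 of curr_resync are reserved flags
                      * (see above), so only real progress is published.
                      */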
5886                 if (j>1) mddev->curr_resync = j;
5887                 mddev->curr_mark_cnt = io_sectors;
5888                 if (last_check == 0)
5889                         /* this is the earliest that the rebuild will be
5890                          * visible in /proc/mdstat
5891                          */
5892                         md_new_event(mddev);
5893
5894                 if (last_check + window > io_sectors || j == max_sectors)
5895                         continue;
5896
5897                 last_check = io_sectors;
5898
5899                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5900                         break;
5901
5902         repeat:
5903                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5904                         /* step marks */
5905                         int next = (last_mark+1) % SYNC_MARKS;
5906
5907                         mddev->resync_mark = mark[next];
5908                         mddev->resync_mark_cnt = mark_cnt[next];
5909                         mark[next] = jiffies;
5910                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5911                         last_mark = next;
5912                 }
5913
5914
5915                 if (kthread_should_stop())
5916                         goto interrupted;
5917
5918
5919                 /*
5920                  * this loop exits only when we are slower than
5921                  * the 'hard' speed limit, or the system was IO-idle for
5922                  * a jiffy.
5923                  * the system might be non-idle CPU-wise, but we only care
5924                  * about not overloading the IO subsystem. (things like an
5925                  * e2fsck being done on the RAID array should execute fast)
5926                  */
5927                 blk_unplug(mddev->queue);
5928                 cond_resched();
5929
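                 /* currspeed is KB/sec since the last mark: dividing
                  * sectors by 2 converts 512-byte sectors to KB, and the
                  * '+1's avoid a division by zero and a bogus zero rate
                  * early on.
                  */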
5930                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5931                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
5932
5933                 if (currspeed > speed_min(mddev)) {
5934                         if ((currspeed > speed_max(mddev)) ||
5935                                         !is_mddev_idle(mddev)) {
5936                                 msleep(500);
5937                                 goto repeat;
5938                         }
5939                 }
5940         }
5941         printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
5942         /*
5943          * this also signals 'finished resyncing' to md_stop
5944          */
5945  out:
5946         blk_unplug(mddev->queue);
5947
5948         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5949
5950         /* tell personality that we are finished */
5951         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5952
5953         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5954             mddev->curr_resync > 2) {
5955                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5956                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5957                                 if (mddev->curr_resync >= mddev->recovery_cp) {
5958                                         printk(KERN_INFO
5959                                                "md: checkpointing %s of %s.\n",
5960                                                desc, mdname(mddev));
5961                                         mddev->recovery_cp = mddev->curr_resync;
5962                                 }
5963                         } else
5964                                 mddev->recovery_cp = MaxSector;
5965                 } else {
5966                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5967                                 mddev->curr_resync = MaxSector;
5968                         rdev_for_each(rdev, rtmp, mddev)
5969                                 if (rdev->raid_disk >= 0 &&
5970                                     !test_bit(Faulty, &rdev->flags) &&
5971                                     !test_bit(In_sync, &rdev->flags) &&
5972                                     rdev->recovery_offset < mddev->curr_resync)
5973                                         rdev->recovery_offset = mddev->curr_resync;
5974                 }
5975         }
5976         set_bit(MD_CHANGE_DEVS, &mddev->flags);
5977
5978  skip:
5979         mddev->curr_resync = 0;
5980         mddev->resync_min = 0;
5981         mddev->resync_max = MaxSector;
5982         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
5983         wake_up(&resync_wait);
5984         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5985         md_wakeup_thread(mddev->thread);
5986         return;
5987
5988  interrupted:
5989         /*
5990          * the kthread was asked to stop: mark the recovery as interrupted and exit.
5991          */
5992         printk(KERN_INFO
5993                "md: md_do_sync() got signal ... exiting\n");
5994         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5995         goto out;
5996
5997 }
5998 EXPORT_SYMBOL_GPL(md_do_sync);
5999
6000
6001 static int remove_and_add_spares(mddev_t *mddev)
6002 {
6003         mdk_rdev_t *rdev;
6004         struct list_head *rtmp;
6005         int spares = 0;
6006
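             /* First pass: detach any failed or never-synced device with
              * no pending IO, removing its "rd%d" sysfs link when the
              * personality agrees to drop it.
              */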
6007         rdev_for_each(rdev, rtmp, mddev)
6008                 if (rdev->raid_disk >= 0 &&
6009                     !test_bit(Blocked, &rdev->flags) &&
6010                     (test_bit(Faulty, &rdev->flags) ||
6011                      ! test_bit(In_sync, &rdev->flags)) &&
6012                     atomic_read(&rdev->nr_pending)==0) {
6013                         if (mddev->pers->hot_remove_disk(
6014                                     mddev, rdev->raid_disk)==0) {
6015                                 char nm[20];
6016                                 sprintf(nm,"rd%d", rdev->raid_disk);
6017                                 sysfs_remove_link(&mddev->kobj, nm);
6018                                 rdev->raid_disk = -1;
6019                         }
6020                 }
6021
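             /* Second pass (degraded arrays only): count devices still
              * rebuilding and hot-add each unused, non-faulty device as a
              * spare, creating its "rd%d" sysfs link.
              */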
6022         if (mddev->degraded) {
6023                 rdev_for_each(rdev, rtmp, mddev) {
6024                         if (rdev->raid_disk >= 0 &&
6025                             !test_bit(In_sync, &rdev->flags))
6026                                 spares++;
6027                         if (rdev->raid_disk < 0
6028                             && !test_bit(Faulty, &rdev->flags)) {
6029                                 rdev->recovery_offset = 0;
6030                                 if (mddev->pers->
6031                                     hot_add_disk(mddev, rdev) == 0) {
6032                                         char nm[20];
6033                                         sprintf(nm, "rd%d", rdev->raid_disk);
6034                                         if (sysfs_create_link(&mddev->kobj,
6035                                                               &rdev->kobj, nm))
6036                                                 printk(KERN_WARNING
6037                                                        "md: cannot register "
6038                                                        "%s for %s\n",
6039                                                        nm, mdname(mddev));
6040                                         spares++;
6041                                         md_new_event(mddev);
6042                                 } else
6043                                         break;
6044                         }
6045                 }
6046         }
6047         return spares;
6048 }
6049 /*
6050  * This routine is regularly called by all per-raid-array threads to
6051  * deal with generic issues like resync and super-block update.
6052  * Raid personalities that don't have a thread (linear/raid0) do not
6053  * need this as they never do any recovery or update the superblock.
6054  *
6055  * It does not do any resync itself, but rather "forks" off other threads
6056  * to do that as needed.
6057  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
6058  * "->recovery" and create a thread at ->sync_thread.
6059  * When the thread finishes it sets MD_RECOVERY_DONE
6060  * and wakes up this thread, which will reap the thread and finish up.
6061  * This thread also removes any faulty devices (with nr_pending == 0).
6062  *
6063  * The overall approach is:
6064  *  1/ if the superblock needs updating, update it.
6065  *  2/ If a recovery thread is running, don't do anything else.
6066  *  3/ If recovery has finished, clean up, possibly marking spares active.
6067  *  4/ If there are any faulty devices, remove them.
6068  *  5/ If array is degraded, try to add spare devices
6069  *  6/ If array has spares or is not in-sync, start a resync thread.
6070  */
6071 void md_check_recovery(mddev_t *mddev)
6072 {
6073         mdk_rdev_t *rdev;
6074         struct list_head *rtmp;
6075
6076
6077         if (mddev->bitmap)
6078                 bitmap_daemon_work(mddev->bitmap);
6079
6080         if (mddev->ro)
6081                 return;
6082
6083         if (signal_pending(current)) {
6084                 if (mddev->pers->sync_request && !mddev->external) {
6085                         printk(KERN_INFO "md: %s in immediate safe mode\n",
6086                                mdname(mddev));
6087                         mddev->safemode = 2;
6088                 }
6089                 flush_signals(current);
6090         }
6091
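             /* Fast path out: nothing to do unless the superblock is
              * dirty (internally-managed metadata only), recovery was
              * requested or has just finished, or a safemode transition
              * to 'clean' is pending.
              */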
6092         if ( ! (
6093                 (mddev->flags && !mddev->external) ||
6094                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
6095                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
6096                 (mddev->external == 0 && mddev->safemode == 1) ||
6097                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
6098                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
6099                 ))
6100                 return;
6101
6102         if (mddev_trylock(mddev)) {
6103                 int spares = 0;
6104
6105                 if (!mddev->external) {
6106                         int did_change = 0;
6107                         spin_lock_irq(&mddev->write_lock);
6108                         if (mddev->safemode &&
6109                             !atomic_read(&mddev->writes_pending) &&
6110                             !mddev->in_sync &&
6111                             mddev->recovery_cp == MaxSector) {
6112                                 mddev->in_sync = 1;
6113                                 did_change = 1;
6114                                 if (mddev->persistent)
6115                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6116                         }
6117                         if (mddev->safemode == 1)
6118                                 mddev->safemode = 0;
6119                         spin_unlock_irq(&mddev->write_lock);
6120                         if (did_change)
6121                                 sysfs_notify(&mddev->kobj, NULL, "array_state");
6122                 }
6123
6124                 if (mddev->flags)
6125                         md_update_sb(mddev, 0);
6126
6127                 rdev_for_each(rdev, rtmp, mddev)
6128                         if (test_and_clear_bit(StateChanged, &rdev->flags))
6129                                 sysfs_notify(&rdev->kobj, NULL, "state");
6130
6131
6132                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
6133                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
6134                         /* resync/recovery still happening */
6135                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6136                         goto unlock;
6137                 }
6138                 if (mddev->sync_thread) {
6139                         /* resync has finished, collect result */
6140                         md_unregister_thread(mddev->sync_thread);
6141                         mddev->sync_thread = NULL;
6142                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
6143                                 /* success...*/
6144                                 /* activate any spares */
6145                                 if (mddev->pers->spare_active(mddev))
6146                                         sysfs_notify(&mddev->kobj, NULL,
6147                                                      "degraded");
6148                         }
6149                         md_update_sb(mddev, 1);
6150
6151                         /* if array is no longer degraded, then any saved_raid_disk
6152                          * information must be scrapped
6153                          */
6154                         if (!mddev->degraded)
6155                                 rdev_for_each(rdev, rtmp, mddev)
6156                                         rdev->saved_raid_disk = -1;
6157
6158                         mddev->recovery = 0;
6159                         /* flag recovery needed just to double check */
6160                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6161                         sysfs_notify(&mddev->kobj, NULL, "sync_action");
6162                         md_new_event(mddev);
6163                         goto unlock;
6164                 }
6165                 /* Set RUNNING before clearing NEEDED to avoid
6166                  * any transients in the value of "sync_action".
6167                  */
6168                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6169                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6170                 /* Clear some bits that don't mean anything, but
6171                  * might be left set
6172                  */
6173                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
6174                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
6175
6176                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
6177                         goto unlock;
6178                 /* no recovery is running.
6179                  * remove any failed drives, then
6180                  * add spares if possible.
6181                  * Spares are also removed and re-added, to allow
6182                  * the personality to fail the re-add.
6183                  */
6184
6185                 if (mddev->reshape_position != MaxSector) {
6186                         if (mddev->pers->check_reshape(mddev) != 0)
6187                                 /* Cannot proceed */
6188                                 goto unlock;
6189                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
6190                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6191                 } else if ((spares = remove_and_add_spares(mddev))) {
6192                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6193                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6194                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6195                 } else if (mddev->recovery_cp < MaxSector) {
6196                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6197                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6198                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
6199                         /* nothing to be done ... */
6200                         goto unlock;
6201
6202                 if (mddev->pers->sync_request) {
6203                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
6204                                 /* We are adding a device or devices to an array
6205                                  * which has the bitmap stored on all devices.
6206                                  * So make sure all bitmap pages get written
6207                                  */
6208                                 bitmap_write_all(mddev->bitmap);
6209                         }
6210                         mddev->sync_thread = md_register_thread(md_do_sync,
6211                                                                 mddev,
6212                                                                 "%s_resync");
6213                         if (!mddev->sync_thread) {
6214                                 printk(KERN_ERR "%s: could not start resync"
6215                                         " thread...\n", 
6216                                         mdname(mddev));
6217                                 /* leave the spares where they are, it shouldn't hurt */
6218                                 mddev->recovery = 0;
6219                         } else
6220                                 md_wakeup_thread(mddev->sync_thread);
6221                         sysfs_notify(&mddev->kobj, NULL, "sync_action");
6222                         md_new_event(mddev);
6223                 }
6224         unlock:
6225                 if (!mddev->sync_thread) {
6226                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
6227                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
6228                                                &mddev->recovery))
6229                                 sysfs_notify(&mddev->kobj, NULL, "sync_action");
6230                 }
6231                 mddev_unlock(mddev);
6232         }
6233 }
6234
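/* Presumably called by a personality while it holds a reference to a
 * Blocked device: poke user-space via sysfs, wait up to five seconds
 * for the Blocked flag to clear, then drop the pending reference.
 */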
6235 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
6236 {
6237         sysfs_notify(&rdev->kobj, NULL, "state");
6238         wait_event_timeout(rdev->blocked_wait,
6239                            !test_bit(Blocked, &rdev->flags),
6240                            msecs_to_jiffies(5000));
6241         rdev_dec_pending(rdev, mddev);
6242 }
6243 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
6244
6245 static int md_notify_reboot(struct notifier_block *this,
6246                             unsigned long code, void *x)
6247 {
6248         struct list_head *tmp;
6249         mddev_t *mddev;
6250
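             /* On shutdown, halt or power-off, stop (mode 1, which should
              * switch the array to read-only) every array we can lock, so
              * superblocks are marked clean before the machine goes down.
              */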
6251         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
6252
6253                 printk(KERN_INFO "md: stopping all md devices.\n");
6254
6255                 for_each_mddev(mddev, tmp)
6256                         if (mddev_trylock(mddev)) {
6257                                 do_md_stop(mddev, 1);
6258                                 mddev_unlock(mddev);
6259                         }
6260                 /*
6261                  * certain more exotic SCSI devices are known to be
6262                  * volatile with respect to too-early system reboots. While
6263                  * the right place to handle this issue is the individual
6264                  * driver, we do want to have a safe RAID driver ...
6265                  */
6266                 mdelay(1000*1);
6267         }
6268         return NOTIFY_DONE;
6269 }
6270
6271 static struct notifier_block md_notifier = {
6272         .notifier_call  = md_notify_reboot,
6273         .next           = NULL,
6274         .priority       = INT_MAX, /* before any real devices */
6275 };
6276
6277 static void md_geninit(void)
6278 {
6279         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
6280
6281         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
6282 }
6283
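/* Register the 'md' and partitionable 'mdp' block majors, the probe
 * regions that create device nodes on first access, the reboot notifier
 * and the raid sysctl table, then create /proc/mdstat via md_geninit().
 */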
6284 static int __init md_init(void)
6285 {
6286         if (register_blkdev(MAJOR_NR, "md"))
6287                 return -1;
6288         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
6289                 unregister_blkdev(MAJOR_NR, "md");
6290                 return -1;
6291         }
6292         blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
6293                             md_probe, NULL, NULL);
6294         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
6295                             md_probe, NULL, NULL);
6296
6297         register_reboot_notifier(&md_notifier);
6298         raid_table_header = register_sysctl_table(raid_root_table);
6299
6300         md_geninit();
6301         return 0;
6302 }
6303
6304
6305 #ifndef MODULE
6306
6307 /*
6308  * Searches all registered partitions for autorun RAID arrays
6309  * at boot time.
6310  */
6311
6312 static LIST_HEAD(all_detected_devices);
6313 struct detected_devices_node {
6314         struct list_head list;
6315         dev_t dev;
6316 };
6317
6318 void md_autodetect_dev(dev_t dev)
6319 {
6320         struct detected_devices_node *node_detected_dev;
6321
6322         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
6323         if (node_detected_dev) {
6324                 node_detected_dev->dev = dev;
6325                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
6326         } else {
6327                 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
6328                         ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
6329         }
6330 }
6331
6332
6333 static void autostart_arrays(int part)
6334 {
6335         mdk_rdev_t *rdev;
6336         struct detected_devices_node *node_detected_dev;
6337         dev_t dev;
6338         int i_scanned, i_passed;
6339
6340         i_scanned = 0;
6341         i_passed = 0;
6342
6343         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
6344
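             /* Drain the list built by md_autodetect_dev(): each device is
              * imported expecting a 0.90 superblock (the only format the
              * kernel autodetects here) and queued for autorun_devices().
              */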
6345         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
6346                 i_scanned++;
6347                 node_detected_dev = list_entry(all_detected_devices.next,
6348                                         struct detected_devices_node, list);
6349                 list_del(&node_detected_dev->list);
6350                 dev = node_detected_dev->dev;
6351                 kfree(node_detected_dev);
6352                 rdev = md_import_device(dev,0, 90);
6353                 if (IS_ERR(rdev))
6354                         continue;
6355
6356                 if (test_bit(Faulty, &rdev->flags)) {
6357                         MD_BUG();
6358                         continue;
6359                 }
6360                 set_bit(AutoDetected, &rdev->flags);
6361                 list_add(&rdev->same_set, &pending_raid_disks);
6362                 i_passed++;
6363         }
6364
6365         printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
6366                                                 i_scanned, i_passed);
6367
6368         autorun_devices(part);
6369 }
6370
6371 #endif /* !MODULE */
6372
6373 static __exit void md_exit(void)
6374 {
6375         mddev_t *mddev;
6376         struct list_head *tmp;
6377
6378         blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
6379         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
6380
6381         unregister_blkdev(MAJOR_NR,"md");
6382         unregister_blkdev(mdp_major, "mdp");
6383         unregister_reboot_notifier(&md_notifier);
6384         unregister_sysctl_table(raid_table_header);
6385         remove_proc_entry("mdstat", NULL);
6386         for_each_mddev(mddev, tmp) {
6387                 struct gendisk *disk = mddev->gendisk;
6388                 if (!disk)
6389                         continue;
6390                 export_array(mddev);
6391                 del_gendisk(disk);
6392                 put_disk(disk);
6393                 mddev->gendisk = NULL;
6394                 mddev_put(mddev);
6395         }
6396 }
6397
6398 subsys_initcall(md_init);
6399 module_exit(md_exit)
6400
6401 static int get_ro(char *buffer, struct kernel_param *kp)
6402 {
6403         return sprintf(buffer, "%d", start_readonly);
6404 }
6405 static int set_ro(const char *val, struct kernel_param *kp)
6406 {
6407         char *e;
6408         int num = simple_strtoul(val, &e, 10);
6409         if (*val && (*e == '\0' || *e == '\n')) {
6410                 start_readonly = num;
6411                 return 0;
6412         }
6413         return -EINVAL;
6414 }
6415
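/* 'start_ro' makes newly assembled arrays come up read-only until the
 * first write; e.g., assuming the usual module sysfs layout:
 *
 *     echo 1 > /sys/module/md_mod/parameters/start_ro
 */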
6416 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
6417 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
6418
6419
6420 EXPORT_SYMBOL(register_md_personality);
6421 EXPORT_SYMBOL(unregister_md_personality);
6422 EXPORT_SYMBOL(md_error);
6423 EXPORT_SYMBOL(md_done_sync);
6424 EXPORT_SYMBOL(md_write_start);
6425 EXPORT_SYMBOL(md_write_end);
6426 EXPORT_SYMBOL(md_register_thread);
6427 EXPORT_SYMBOL(md_unregister_thread);
6428 EXPORT_SYMBOL(md_wakeup_thread);
6429 EXPORT_SYMBOL(md_check_recovery);
6430 MODULE_LICENSE("GPL");
6431 MODULE_ALIAS("md");
6432 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);