drivers/md/md.c
1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/kthread.h>
38 #include <linux/linkage.h>
39 #include <linux/raid/md.h>
40 #include <linux/raid/bitmap.h>
41 #include <linux/sysctl.h>
42 #include <linux/buffer_head.h> /* for invalidate_bdev */
43 #include <linux/poll.h>
44 #include <linux/mutex.h>
45 #include <linux/ctype.h>
46 #include <linux/freezer.h>
47
48 #include <linux/init.h>
49
50 #include <linux/file.h>
51
52 #ifdef CONFIG_KMOD
53 #include <linux/kmod.h>
54 #endif
55
56 #include <asm/unaligned.h>
57
58 #define MAJOR_NR MD_MAJOR
59 #define MD_DRIVER
60
61 /* 63 partitions with the alternate major number (mdp) */
62 #define MdpMinorShift 6
63
64 #define DEBUG 0
65 #define dprintk(x...) ((void)(DEBUG && printk(x)))
66
67
68 #ifndef MODULE
69 static void autostart_arrays (int part);
70 #endif
71
72 static LIST_HEAD(pers_list);
73 static DEFINE_SPINLOCK(pers_lock);
74
75 static void md_print_devices(void);
76
77 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
78
79 /*
80  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
81  * is 1000 KB/sec, so the extra system load does not show up that much.
82  * Increase it if you want to have more _guaranteed_ speed. Note that
83  * the RAID driver will use the maximum available bandwidth if the IO
84  * subsystem is idle. There is also an 'absolute maximum' reconstruction
85  * speed limit - in case reconstruction slows down your system despite
86  * idle IO detection.
87  *
88  * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
89  * or via /sys/block/mdX/md/sync_speed_{min,max}.
90  */
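/*
 * Tuning example (illustrative only): raise the guaranteed resync floor
 * to roughly 50 MB/sec, either system-wide or for a single array:
 *
 *	echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *	echo 50000 > /sys/block/md0/md/sync_speed_min
 *
 * speed_min()/speed_max() below use the per-array value when it is
 * non-zero and fall back to the sysctl default otherwise.
 */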
91
92 static int sysctl_speed_limit_min = 1000;
93 static int sysctl_speed_limit_max = 200000;
94 static inline int speed_min(mddev_t *mddev)
95 {
96         return mddev->sync_speed_min ?
97                 mddev->sync_speed_min : sysctl_speed_limit_min;
98 }
99
100 static inline int speed_max(mddev_t *mddev)
101 {
102         return mddev->sync_speed_max ?
103                 mddev->sync_speed_max : sysctl_speed_limit_max;
104 }
105
106 static struct ctl_table_header *raid_table_header;
107
108 static ctl_table raid_table[] = {
109         {
110                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
111                 .procname       = "speed_limit_min",
112                 .data           = &sysctl_speed_limit_min,
113                 .maxlen         = sizeof(int),
114                 .mode           = S_IRUGO|S_IWUSR,
115                 .proc_handler   = &proc_dointvec,
116         },
117         {
118                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
119                 .procname       = "speed_limit_max",
120                 .data           = &sysctl_speed_limit_max,
121                 .maxlen         = sizeof(int),
122                 .mode           = S_IRUGO|S_IWUSR,
123                 .proc_handler   = &proc_dointvec,
124         },
125         { .ctl_name = 0 }
126 };
127
128 static ctl_table raid_dir_table[] = {
129         {
130                 .ctl_name       = DEV_RAID,
131                 .procname       = "raid",
132                 .maxlen         = 0,
133                 .mode           = S_IRUGO|S_IXUGO,
134                 .child          = raid_table,
135         },
136         { .ctl_name = 0 }
137 };
138
139 static ctl_table raid_root_table[] = {
140         {
141                 .ctl_name       = CTL_DEV,
142                 .procname       = "dev",
143                 .maxlen         = 0,
144                 .mode           = 0555,
145                 .child          = raid_dir_table,
146         },
147         { .ctl_name = 0 }
148 };
149
150 static struct block_device_operations md_fops;
151
152 static int start_readonly;
153
154 /*
155  * We have a system wide 'event count' that is incremented
156  * on any 'interesting' event, and readers of /proc/mdstat
157  * can use 'poll' or 'select' to find out when the event
158  * count increases.
159  *
160  * Events are:
161  *  start array, stop array, error, add device, remove device,
162  *  start build, activate spare
163  */
164 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
165 static atomic_t md_event_count;
166 void md_new_event(mddev_t *mddev)
167 {
168         atomic_inc(&md_event_count);
169         wake_up(&md_event_waiters);
170         sysfs_notify(&mddev->kobj, NULL, "sync_action");
171 }
172 EXPORT_SYMBOL_GPL(md_new_event);
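/*
 * Userspace sketch (illustrative; the exact poll flags are defined by
 * the mdstat seq_file code further down): a monitor opens /proc/mdstat,
 * reads it once, then sleeps in poll() or select() until md_new_event()
 * bumps md_event_count, after which it re-reads the file from offset 0.
 *
 *	char buf[4096];
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *	read(fd, buf, sizeof(buf));
 *	poll(&pfd, 1, -1);
 *	lseek(fd, 0, SEEK_SET);
 */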
173
174 /* Alternate version that can be called from interrupt context,
175  * where calling sysfs_notify isn't needed.
176  */
177 static void md_new_event_inintr(mddev_t *mddev)
178 {
179         atomic_inc(&md_event_count);
180         wake_up(&md_event_waiters);
181 }
182
183 /*
184  * Enables iteration over all existing md arrays;
185  * all_mddevs_lock protects this list.
186  */
187 static LIST_HEAD(all_mddevs);
188 static DEFINE_SPINLOCK(all_mddevs_lock);
189
190
191 /*
192  * iterates through all used mddevs in the system.
193  * We take care to grab the all_mddevs_lock whenever navigating
194  * the list, and to always hold a refcount when unlocked.
195  * Any code which breaks out of this loop still owns
196  * a reference to the current mddev and must mddev_put it.
197  */
198 #define ITERATE_MDDEV(mddev,tmp)                                        \
199                                                                         \
200         for (({ spin_lock(&all_mddevs_lock);                            \
201                 tmp = all_mddevs.next;                                  \
202                 mddev = NULL;});                                        \
203              ({ if (tmp != &all_mddevs)                                 \
204                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
205                 spin_unlock(&all_mddevs_lock);                          \
206                 if (mddev) mddev_put(mddev);                            \
207                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
208                 tmp != &all_mddevs;});                                  \
209              ({ spin_lock(&all_mddevs_lock);                            \
210                 tmp = tmp->next;})                                      \
211                 )
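/*
 * Usage sketch (illustrative only; see md_print_devices() below for a
 * real caller):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev,tmp)
 *		printk("%s\n", mdname(mddev));
 *
 * The lock is dropped while the loop body runs, so the body always sees
 * a referenced mddev; code that breaks out early keeps that reference
 * and must drop it with mddev_put().
 */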
212
213
214 static int md_fail_request (request_queue_t *q, struct bio *bio)
215 {
216         bio_io_error(bio, bio->bi_size);
217         return 0;
218 }
219
220 static inline mddev_t *mddev_get(mddev_t *mddev)
221 {
222         atomic_inc(&mddev->active);
223         return mddev;
224 }
225
226 static void mddev_put(mddev_t *mddev)
227 {
228         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
229                 return;
230         if (!mddev->raid_disks && list_empty(&mddev->disks)) {
231                 list_del(&mddev->all_mddevs);
232                 spin_unlock(&all_mddevs_lock);
233                 blk_cleanup_queue(mddev->queue);
234                 kobject_unregister(&mddev->kobj);
235         } else
236                 spin_unlock(&all_mddevs_lock);
237 }
238
239 static mddev_t * mddev_find(dev_t unit)
240 {
241         mddev_t *mddev, *new = NULL;
242
243  retry:
244         spin_lock(&all_mddevs_lock);
245         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
246                 if (mddev->unit == unit) {
247                         mddev_get(mddev);
248                         spin_unlock(&all_mddevs_lock);
249                         kfree(new);
250                         return mddev;
251                 }
252
253         if (new) {
254                 list_add(&new->all_mddevs, &all_mddevs);
255                 spin_unlock(&all_mddevs_lock);
256                 return new;
257         }
258         spin_unlock(&all_mddevs_lock);
259
260         new = kzalloc(sizeof(*new), GFP_KERNEL);
261         if (!new)
262                 return NULL;
263
264         new->unit = unit;
265         if (MAJOR(unit) == MD_MAJOR)
266                 new->md_minor = MINOR(unit);
267         else
268                 new->md_minor = MINOR(unit) >> MdpMinorShift;
269
270         mutex_init(&new->reconfig_mutex);
271         INIT_LIST_HEAD(&new->disks);
272         INIT_LIST_HEAD(&new->all_mddevs);
273         init_timer(&new->safemode_timer);
274         atomic_set(&new->active, 1);
275         spin_lock_init(&new->write_lock);
276         init_waitqueue_head(&new->sb_wait);
277
278         new->queue = blk_alloc_queue(GFP_KERNEL);
279         if (!new->queue) {
280                 kfree(new);
281                 return NULL;
282         }
283         set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
284
285         blk_queue_make_request(new->queue, md_fail_request);
286
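        /* The new mddev is fully set up; retake the lock and either
         * insert it, or (if this unit appeared while we were
         * allocating) free it and return the existing one instead.
         */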
287         goto retry;
288 }
289
290 static inline int mddev_lock(mddev_t * mddev)
291 {
292         return mutex_lock_interruptible(&mddev->reconfig_mutex);
293 }
294
295 static inline int mddev_trylock(mddev_t * mddev)
296 {
297         return mutex_trylock(&mddev->reconfig_mutex);
298 }
299
300 static inline void mddev_unlock(mddev_t * mddev)
301 {
302         mutex_unlock(&mddev->reconfig_mutex);
303
304         md_wakeup_thread(mddev->thread);
305 }
306
307 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
308 {
309         mdk_rdev_t * rdev;
310         struct list_head *tmp;
311
312         ITERATE_RDEV(mddev,rdev,tmp) {
313                 if (rdev->desc_nr == nr)
314                         return rdev;
315         }
316         return NULL;
317 }
318
319 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
320 {
321         struct list_head *tmp;
322         mdk_rdev_t *rdev;
323
324         ITERATE_RDEV(mddev,rdev,tmp) {
325                 if (rdev->bdev->bd_dev == dev)
326                         return rdev;
327         }
328         return NULL;
329 }
330
331 static struct mdk_personality *find_pers(int level, char *clevel)
332 {
333         struct mdk_personality *pers;
334         list_for_each_entry(pers, &pers_list, list) {
335                 if (level != LEVEL_NONE && pers->level == level)
336                         return pers;
337                 if (strcmp(pers->name, clevel)==0)
338                         return pers;
339         }
340         return NULL;
341 }
342
343 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
344 {
345         sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
346         return MD_NEW_SIZE_BLOCKS(size);
347 }
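/*
 * Worked example (illustrative, assuming the usual 64K reservation in
 * MD_NEW_SIZE_BLOCKS()): sizes are in 1K blocks, so a 1000100-block
 * device gives (1000100 & ~63) - 64 = 1000064 - 64 = 1000000, i.e. the
 * 0.90 superblock starts 100K below the end of that device.
 */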
348
349 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
350 {
351         sector_t size;
352
353         size = rdev->sb_offset;
354
355         if (chunk_size)
356                 size &= ~((sector_t)chunk_size/1024 - 1);
357         return size;
358 }
359
360 static int alloc_disk_sb(mdk_rdev_t * rdev)
361 {
362         if (rdev->sb_page)
363                 MD_BUG();
364
365         rdev->sb_page = alloc_page(GFP_KERNEL);
366         if (!rdev->sb_page) {
367                 printk(KERN_ALERT "md: out of memory.\n");
368                 return -EINVAL;
369         }
370
371         return 0;
372 }
373
374 static void free_disk_sb(mdk_rdev_t * rdev)
375 {
376         if (rdev->sb_page) {
377                 put_page(rdev->sb_page);
378                 rdev->sb_loaded = 0;
379                 rdev->sb_page = NULL;
380                 rdev->sb_offset = 0;
381                 rdev->size = 0;
382         }
383 }
384
385
386 static int super_written(struct bio *bio, unsigned int bytes_done, int error)
387 {
388         mdk_rdev_t *rdev = bio->bi_private;
389         mddev_t *mddev = rdev->mddev;
390         if (bio->bi_size)
391                 return 1;
392
393         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
394                 printk("md: super_written gets error=%d, uptodate=%d\n",
395                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
396                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
397                 md_error(mddev, rdev);
398         }
399
400         if (atomic_dec_and_test(&mddev->pending_writes))
401                 wake_up(&mddev->sb_wait);
402         bio_put(bio);
403         return 0;
404 }
405
406 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
407 {
408         struct bio *bio2 = bio->bi_private;
409         mdk_rdev_t *rdev = bio2->bi_private;
410         mddev_t *mddev = rdev->mddev;
411         if (bio->bi_size)
412                 return 1;
413
414         if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
415             error == -EOPNOTSUPP) {
416                 unsigned long flags;
417                 /* barriers don't appear to be supported :-( */
418                 set_bit(BarriersNotsupp, &rdev->flags);
419                 mddev->barriers_work = 0;
420                 spin_lock_irqsave(&mddev->write_lock, flags);
421                 bio2->bi_next = mddev->biolist;
422                 mddev->biolist = bio2;
423                 spin_unlock_irqrestore(&mddev->write_lock, flags);
424                 wake_up(&mddev->sb_wait);
425                 bio_put(bio);
426                 return 0;
427         }
428         bio_put(bio2);
429         bio->bi_private = rdev;
430         return super_written(bio, bytes_done, error);
431 }
432
433 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
434                    sector_t sector, int size, struct page *page)
435 {
436         /* write first size bytes of page to sector of rdev
437          * Increment mddev->pending_writes before returning
438          * and decrement it on completion, waking up sb_wait
439          * if zero is reached.
440          * If an error occurred, call md_error
441          *
442          * As we might need to resubmit the request if BIO_RW_BARRIER
443          * causes -EOPNOTSUPP, we allocate a spare bio...
444          */
445         struct bio *bio = bio_alloc(GFP_NOIO, 1);
446         int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
447
448         bio->bi_bdev = rdev->bdev;
449         bio->bi_sector = sector;
450         bio_add_page(bio, page, size, 0);
451         bio->bi_private = rdev;
452         bio->bi_end_io = super_written;
453         bio->bi_rw = rw;
454
455         atomic_inc(&mddev->pending_writes);
456         if (!test_bit(BarriersNotsupp, &rdev->flags)) {
457                 struct bio *rbio;
458                 rw |= (1<<BIO_RW_BARRIER);
459                 rbio = bio_clone(bio, GFP_NOIO);
460                 rbio->bi_private = bio;
461                 rbio->bi_end_io = super_written_barrier;
462                 submit_bio(rw, rbio);
463         } else
464                 submit_bio(rw, bio);
465 }
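/*
 * Callers typically queue one md_super_write() per device and then
 * block in md_super_wait() (below) until pending_writes reaches zero;
 * a rough sketch of the pattern used by the superblock update path:
 *
 *	ITERATE_RDEV(mddev,rdev,tmp)
 *		md_super_write(mddev, rdev, rdev->sb_offset<<1,
 *			       rdev->sb_size, rdev->sb_page);
 *	md_super_wait(mddev);
 *
 * (Illustrative only; the real code lives in md_update_sb().)
 */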
466
467 void md_super_wait(mddev_t *mddev)
468 {
469         /* Wait for all scheduled superblock writes to complete.
470          * if any had to be retried (due to BARRIER problems), retry them
471          */
472         DEFINE_WAIT(wq);
473         for(;;) {
474                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
475                 if (atomic_read(&mddev->pending_writes)==0)
476                         break;
477                 while (mddev->biolist) {
478                         struct bio *bio;
479                         spin_lock_irq(&mddev->write_lock);
480                         bio = mddev->biolist;
481                         mddev->biolist = bio->bi_next ;
482                         bio->bi_next = NULL;
483                         spin_unlock_irq(&mddev->write_lock);
484                         submit_bio(bio->bi_rw, bio);
485                 }
486                 schedule();
487         }
488         finish_wait(&mddev->sb_wait, &wq);
489 }
490
491 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
492 {
493         if (bio->bi_size)
494                 return 1;
495
496         complete((struct completion*)bio->bi_private);
497         return 0;
498 }
499
500 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
501                    struct page *page, int rw)
502 {
503         struct bio *bio = bio_alloc(GFP_NOIO, 1);
504         struct completion event;
505         int ret;
506
507         rw |= (1 << BIO_RW_SYNC);
508
509         bio->bi_bdev = bdev;
510         bio->bi_sector = sector;
511         bio_add_page(bio, page, size, 0);
512         init_completion(&event);
513         bio->bi_private = &event;
514         bio->bi_end_io = bi_complete;
515         submit_bio(rw, bio);
516         wait_for_completion(&event);
517
518         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
519         bio_put(bio);
520         return ret;
521 }
522 EXPORT_SYMBOL_GPL(sync_page_io);
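/*
 * Example (illustrative): read_disk_sb() below uses this to pull a
 * superblock into rdev->sb_page; "sector" is in 512-byte units and
 * "size" in bytes, so the 0.90 loader effectively does
 *
 *	sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES,
 *		     rdev->sb_page, READ);
 */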
523
524 static int read_disk_sb(mdk_rdev_t * rdev, int size)
525 {
526         char b[BDEVNAME_SIZE];
527         if (!rdev->sb_page) {
528                 MD_BUG();
529                 return -EINVAL;
530         }
531         if (rdev->sb_loaded)
532                 return 0;
533
534
535         if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
536                 goto fail;
537         rdev->sb_loaded = 1;
538         return 0;
539
540 fail:
541         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
542                 bdevname(rdev->bdev,b));
543         return -EINVAL;
544 }
545
546 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
547 {
548         if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
549                 (sb1->set_uuid1 == sb2->set_uuid1) &&
550                 (sb1->set_uuid2 == sb2->set_uuid2) &&
551                 (sb1->set_uuid3 == sb2->set_uuid3))
552
553                 return 1;
554
555         return 0;
556 }
557
558
559 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
560 {
561         int ret;
562         mdp_super_t *tmp1, *tmp2;
563
564         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
565         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
566
567         if (!tmp1 || !tmp2) {
568                 ret = 0;
569                 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
570                 goto abort;
571         }
572
573         *tmp1 = *sb1;
574         *tmp2 = *sb2;
575
576         /*
577          * nr_disks is not constant
578          */
579         tmp1->nr_disks = 0;
580         tmp2->nr_disks = 0;
581
582         if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
583                 ret = 0;
584         else
585                 ret = 1;
586
587 abort:
588         kfree(tmp1);
589         kfree(tmp2);
590         return ret;
591 }
592
593 static unsigned int calc_sb_csum(mdp_super_t * sb)
594 {
595         unsigned int disk_csum, csum;
596
597         disk_csum = sb->sb_csum;
598         sb->sb_csum = 0;
599         csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
600         sb->sb_csum = disk_csum;
601         return csum;
602 }
603
604
605 /*
606  * Handle superblock details.
607  * We want to be able to handle multiple superblock formats
608  * so we have a common interface to them all, and an array of
609  * different handlers.
610  * We rely on user-space to write the initial superblock, and support
611  * reading and updating of superblocks.
612  * Interface methods are:
613  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
614  *      loads and validates a superblock on dev.
615  *      if refdev != NULL, compare superblocks on both devices
616  *    Return:
617  *      0 - dev has a superblock that is compatible with refdev
618  *      1 - dev has a superblock that is compatible and newer than refdev
619  *          so dev should be used as the refdev in future
620  *     -EINVAL superblock incompatible or invalid
621  *     -othererror e.g. -EIO
622  *
623  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
624  *      Verify that dev is acceptable into mddev.
625  *       The first time, mddev->raid_disks will be 0, and data from
626  *       dev should be merged in.  Subsequent calls check that dev
627  *       is new enough.  Return 0 or -EINVAL
628  *
629  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
630  *     Update the superblock for rdev with data in mddev
631  *     This does not write to disc.
632  *
633  */
634
635 struct super_type  {
636         char            *name;
637         struct module   *owner;
638         int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
639         int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
640         void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
641 };
642
643 /*
644  * load_super for 0.90.0 
645  */
646 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
647 {
648         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
649         mdp_super_t *sb;
650         int ret;
651         sector_t sb_offset;
652
653         /*
654          * Calculate the position of the superblock;
655          * it's at the end of the disk.
656          *
657          * It also happens to be a multiple of 4Kb.
658          */
659         sb_offset = calc_dev_sboffset(rdev->bdev);
660         rdev->sb_offset = sb_offset;
661
662         ret = read_disk_sb(rdev, MD_SB_BYTES);
663         if (ret) return ret;
664
665         ret = -EINVAL;
666
667         bdevname(rdev->bdev, b);
668         sb = (mdp_super_t*)page_address(rdev->sb_page);
669
670         if (sb->md_magic != MD_SB_MAGIC) {
671                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
672                        b);
673                 goto abort;
674         }
675
676         if (sb->major_version != 0 ||
677             sb->minor_version < 90 ||
678             sb->minor_version > 91) {
679                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
680                         sb->major_version, sb->minor_version,
681                         b);
682                 goto abort;
683         }
684
685         if (sb->raid_disks <= 0)
686                 goto abort;
687
688         if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
689                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
690                         b);
691                 goto abort;
692         }
693
694         rdev->preferred_minor = sb->md_minor;
695         rdev->data_offset = 0;
696         rdev->sb_size = MD_SB_BYTES;
697
698         if (sb->level == LEVEL_MULTIPATH)
699                 rdev->desc_nr = -1;
700         else
701                 rdev->desc_nr = sb->this_disk.number;
702
703         if (refdev == 0)
704                 ret = 1;
705         else {
706                 __u64 ev1, ev2;
707                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
708                 if (!uuid_equal(refsb, sb)) {
709                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
710                                 b, bdevname(refdev->bdev,b2));
711                         goto abort;
712                 }
713                 if (!sb_equal(refsb, sb)) {
714                         printk(KERN_WARNING "md: %s has same UUID"
715                                " but different superblock to %s\n",
716                                b, bdevname(refdev->bdev, b2));
717                         goto abort;
718                 }
719                 ev1 = md_event(sb);
720                 ev2 = md_event(refsb);
721                 if (ev1 > ev2)
722                         ret = 1;
723                 else 
724                         ret = 0;
725         }
726         rdev->size = calc_dev_size(rdev, sb->chunk_size);
727
728         if (rdev->size < sb->size && sb->level > 1)
729                 /* "this cannot possibly happen" ... */
730                 ret = -EINVAL;
731
732  abort:
733         return ret;
734 }
735
736 /*
737  * validate_super for 0.90.0
738  */
739 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
740 {
741         mdp_disk_t *desc;
742         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
743         __u64 ev1 = md_event(sb);
744
745         rdev->raid_disk = -1;
746         rdev->flags = 0;
747         if (mddev->raid_disks == 0) {
748                 mddev->major_version = 0;
749                 mddev->minor_version = sb->minor_version;
750                 mddev->patch_version = sb->patch_version;
751                 mddev->persistent = ! sb->not_persistent;
752                 mddev->chunk_size = sb->chunk_size;
753                 mddev->ctime = sb->ctime;
754                 mddev->utime = sb->utime;
755                 mddev->level = sb->level;
756                 mddev->clevel[0] = 0;
757                 mddev->layout = sb->layout;
758                 mddev->raid_disks = sb->raid_disks;
759                 mddev->size = sb->size;
760                 mddev->events = ev1;
761                 mddev->bitmap_offset = 0;
762                 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
763
764                 if (mddev->minor_version >= 91) {
765                         mddev->reshape_position = sb->reshape_position;
766                         mddev->delta_disks = sb->delta_disks;
767                         mddev->new_level = sb->new_level;
768                         mddev->new_layout = sb->new_layout;
769                         mddev->new_chunk = sb->new_chunk;
770                 } else {
771                         mddev->reshape_position = MaxSector;
772                         mddev->delta_disks = 0;
773                         mddev->new_level = mddev->level;
774                         mddev->new_layout = mddev->layout;
775                         mddev->new_chunk = mddev->chunk_size;
776                 }
777
778                 if (sb->state & (1<<MD_SB_CLEAN))
779                         mddev->recovery_cp = MaxSector;
780                 else {
781                         if (sb->events_hi == sb->cp_events_hi && 
782                                 sb->events_lo == sb->cp_events_lo) {
783                                 mddev->recovery_cp = sb->recovery_cp;
784                         } else
785                                 mddev->recovery_cp = 0;
786                 }
787
788                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
789                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
790                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
791                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
792
793                 mddev->max_disks = MD_SB_DISKS;
794
795                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
796                     mddev->bitmap_file == NULL) {
797                         if (mddev->level != 1 && mddev->level != 4
798                             && mddev->level != 5 && mddev->level != 6
799                             && mddev->level != 10) {
800                                 /* FIXME use a better test */
801                                 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
802                                 return -EINVAL;
803                         }
804                         mddev->bitmap_offset = mddev->default_bitmap_offset;
805                 }
806
807         } else if (mddev->pers == NULL) {
808                 /* Insist on a good event counter while assembling */
809                 ++ev1;
810                 if (ev1 < mddev->events) 
811                         return -EINVAL;
812         } else if (mddev->bitmap) {
813                 /* if adding to array with a bitmap, then we can accept an
814                  * older device ... but not too old.
815                  */
816                 if (ev1 < mddev->bitmap->events_cleared)
817                         return 0;
818         } else {
819                 if (ev1 < mddev->events)
820                         /* just a hot-add of a new device, leave raid_disk at -1 */
821                         return 0;
822         }
823
824         if (mddev->level != LEVEL_MULTIPATH) {
825                 desc = sb->disks + rdev->desc_nr;
826
827                 if (desc->state & (1<<MD_DISK_FAULTY))
828                         set_bit(Faulty, &rdev->flags);
829                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
830                             desc->raid_disk < mddev->raid_disks */) {
831                         set_bit(In_sync, &rdev->flags);
832                         rdev->raid_disk = desc->raid_disk;
833                 }
834                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
835                         set_bit(WriteMostly, &rdev->flags);
836         } else /* MULTIPATH are always insync */
837                 set_bit(In_sync, &rdev->flags);
838         return 0;
839 }
840
841 /*
842  * sync_super for 0.90.0
843  */
844 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
845 {
846         mdp_super_t *sb;
847         struct list_head *tmp;
848         mdk_rdev_t *rdev2;
849         int next_spare = mddev->raid_disks;
850
851
852         /* make rdev->sb match mddev data..
853          *
854          * 1/ zero out disks
855          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
856          * 3/ any empty disks < next_spare become removed
857          *
858          * disks[0] gets initialised to REMOVED because
859          * we cannot be sure from other fields if it has
860          * been initialised or not.
861          */
862         int i;
863         int active=0, working=0,failed=0,spare=0,nr_disks=0;
864
865         rdev->sb_size = MD_SB_BYTES;
866
867         sb = (mdp_super_t*)page_address(rdev->sb_page);
868
869         memset(sb, 0, sizeof(*sb));
870
871         sb->md_magic = MD_SB_MAGIC;
872         sb->major_version = mddev->major_version;
873         sb->patch_version = mddev->patch_version;
874         sb->gvalid_words  = 0; /* ignored */
875         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
876         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
877         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
878         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
879
880         sb->ctime = mddev->ctime;
881         sb->level = mddev->level;
882         sb->size  = mddev->size;
883         sb->raid_disks = mddev->raid_disks;
884         sb->md_minor = mddev->md_minor;
885         sb->not_persistent = !mddev->persistent;
886         sb->utime = mddev->utime;
887         sb->state = 0;
888         sb->events_hi = (mddev->events>>32);
889         sb->events_lo = (u32)mddev->events;
890
891         if (mddev->reshape_position == MaxSector)
892                 sb->minor_version = 90;
893         else {
894                 sb->minor_version = 91;
895                 sb->reshape_position = mddev->reshape_position;
896                 sb->new_level = mddev->new_level;
897                 sb->delta_disks = mddev->delta_disks;
898                 sb->new_layout = mddev->new_layout;
899                 sb->new_chunk = mddev->new_chunk;
900         }
901         mddev->minor_version = sb->minor_version;
902         if (mddev->in_sync)
903         {
904                 sb->recovery_cp = mddev->recovery_cp;
905                 sb->cp_events_hi = (mddev->events>>32);
906                 sb->cp_events_lo = (u32)mddev->events;
907                 if (mddev->recovery_cp == MaxSector)
908                         sb->state = (1<< MD_SB_CLEAN);
909         } else
910                 sb->recovery_cp = 0;
911
912         sb->layout = mddev->layout;
913         sb->chunk_size = mddev->chunk_size;
914
915         if (mddev->bitmap && mddev->bitmap_file == NULL)
916                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
917
918         sb->disks[0].state = (1<<MD_DISK_REMOVED);
919         ITERATE_RDEV(mddev,rdev2,tmp) {
920                 mdp_disk_t *d;
921                 int desc_nr;
922                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
923                     && !test_bit(Faulty, &rdev2->flags))
924                         desc_nr = rdev2->raid_disk;
925                 else
926                         desc_nr = next_spare++;
927                 rdev2->desc_nr = desc_nr;
928                 d = &sb->disks[rdev2->desc_nr];
929                 nr_disks++;
930                 d->number = rdev2->desc_nr;
931                 d->major = MAJOR(rdev2->bdev->bd_dev);
932                 d->minor = MINOR(rdev2->bdev->bd_dev);
933                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
934                     && !test_bit(Faulty, &rdev2->flags))
935                         d->raid_disk = rdev2->raid_disk;
936                 else
937                         d->raid_disk = rdev2->desc_nr; /* compatibility */
938                 if (test_bit(Faulty, &rdev2->flags))
939                         d->state = (1<<MD_DISK_FAULTY);
940                 else if (test_bit(In_sync, &rdev2->flags)) {
941                         d->state = (1<<MD_DISK_ACTIVE);
942                         d->state |= (1<<MD_DISK_SYNC);
943                         active++;
944                         working++;
945                 } else {
946                         d->state = 0;
947                         spare++;
948                         working++;
949                 }
950                 if (test_bit(WriteMostly, &rdev2->flags))
951                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
952         }
953         /* now set the "removed" and "faulty" bits on any missing devices */
954         for (i=0 ; i < mddev->raid_disks ; i++) {
955                 mdp_disk_t *d = &sb->disks[i];
956                 if (d->state == 0 && d->number == 0) {
957                         d->number = i;
958                         d->raid_disk = i;
959                         d->state = (1<<MD_DISK_REMOVED);
960                         d->state |= (1<<MD_DISK_FAULTY);
961                         failed++;
962                 }
963         }
964         sb->nr_disks = nr_disks;
965         sb->active_disks = active;
966         sb->working_disks = working;
967         sb->failed_disks = failed;
968         sb->spare_disks = spare;
969
970         sb->this_disk = sb->disks[rdev->desc_nr];
971         sb->sb_csum = calc_sb_csum(sb);
972 }
973
974 /*
975  * version 1 superblock
976  */
977
978 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
979 {
980         __le32 disk_csum;
981         u32 csum;
982         unsigned long long newcsum;
983         int size = 256 + le32_to_cpu(sb->max_dev)*2;
984         __le32 *isuper = (__le32*)sb;
985         int i;
986
987         disk_csum = sb->sb_csum;
988         sb->sb_csum = 0;
989         newcsum = 0;
990         for (i=0; size>=4; size -= 4 )
991                 newcsum += le32_to_cpu(*isuper++);
992
993         if (size == 2)
994                 newcsum += le16_to_cpu(*(__le16*) isuper);
995
996         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
997         sb->sb_csum = disk_csum;
998         return cpu_to_le32(csum);
999 }
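/*
 * Note: the version-1 superblock is a 256-byte fixed header followed by
 * one 16-bit role entry per device, hence the "256 + max_dev*2" size
 * above.  The sum is accumulated in 64 bits over little-endian 32-bit
 * words (plus a trailing 16-bit word when max_dev is odd), and the
 * carry is folded back into the low 32 bits before returning.
 */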
1000
1001 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1002 {
1003         struct mdp_superblock_1 *sb;
1004         int ret;
1005         sector_t sb_offset;
1006         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1007         int bmask;
1008
1009         /*
1010          * Calculate the position of the superblock.
1011          * It is always aligned to a 4K boundary and
1012          * depending on minor_version, it can be:
1013          * 0: At least 8K, but less than 12K, from end of device
1014          * 1: At start of device
1015          * 2: 4K from start of device.
1016          */
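        /*
         * Worked example (illustrative): for minor_version 0 on a
         * 1000100 KB device (2000200 sectors), 2000200 - 16 = 2000184,
         * which is already 4K-aligned, so sb_offset = 2000184/2 =
         * 1000092 KB and the superblock sits 8K below the end.
         */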
1017         switch(minor_version) {
1018         case 0:
1019                 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1020                 sb_offset -= 8*2;
1021                 sb_offset &= ~(sector_t)(4*2-1);
1022                 /* convert from sectors to K */
1023                 sb_offset /= 2;
1024                 break;
1025         case 1:
1026                 sb_offset = 0;
1027                 break;
1028         case 2:
1029                 sb_offset = 4;
1030                 break;
1031         default:
1032                 return -EINVAL;
1033         }
1034         rdev->sb_offset = sb_offset;
1035
1036         /* The superblock is rarely larger than 1K, but it can be,
1037          * and it is safe to read 4K, so we do that.
1038          */
1039         ret = read_disk_sb(rdev, 4096);
1040         if (ret) return ret;
1041
1042
1043         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1044
1045         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1046             sb->major_version != cpu_to_le32(1) ||
1047             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1048             le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1049             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1050                 return -EINVAL;
1051
1052         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1053                 printk("md: invalid superblock checksum on %s\n",
1054                         bdevname(rdev->bdev,b));
1055                 return -EINVAL;
1056         }
1057         if (le64_to_cpu(sb->data_size) < 10) {
1058                 printk("md: data_size too small on %s\n",
1059                        bdevname(rdev->bdev,b));
1060                 return -EINVAL;
1061         }
1062         rdev->preferred_minor = 0xffff;
1063         rdev->data_offset = le64_to_cpu(sb->data_offset);
1064         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1065
1066         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1067         bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1068         if (rdev->sb_size & bmask)
1069                 rdev-> sb_size = (rdev->sb_size | bmask)+1;
1070
1071         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1072                 rdev->desc_nr = -1;
1073         else
1074                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1075
1076         if (refdev == 0)
1077                 ret = 1;
1078         else {
1079                 __u64 ev1, ev2;
1080                 struct mdp_superblock_1 *refsb = 
1081                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
1082
1083                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1084                     sb->level != refsb->level ||
1085                     sb->layout != refsb->layout ||
1086                     sb->chunksize != refsb->chunksize) {
1087                         printk(KERN_WARNING "md: %s has strangely different"
1088                                 " superblock to %s\n",
1089                                 bdevname(rdev->bdev,b),
1090                                 bdevname(refdev->bdev,b2));
1091                         return -EINVAL;
1092                 }
1093                 ev1 = le64_to_cpu(sb->events);
1094                 ev2 = le64_to_cpu(refsb->events);
1095
1096                 if (ev1 > ev2)
1097                         ret = 1;
1098                 else
1099                         ret = 0;
1100         }
1101         if (minor_version) 
1102                 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1103         else
1104                 rdev->size = rdev->sb_offset;
1105         if (rdev->size < le64_to_cpu(sb->data_size)/2)
1106                 return -EINVAL;
1107         rdev->size = le64_to_cpu(sb->data_size)/2;
1108         if (le32_to_cpu(sb->chunksize))
1109                 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1110
1111         if (le64_to_cpu(sb->size) > rdev->size*2)
1112                 return -EINVAL;
1113         return ret;
1114 }
1115
1116 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1117 {
1118         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1119         __u64 ev1 = le64_to_cpu(sb->events);
1120
1121         rdev->raid_disk = -1;
1122         rdev->flags = 0;
1123         if (mddev->raid_disks == 0) {
1124                 mddev->major_version = 1;
1125                 mddev->patch_version = 0;
1126                 mddev->persistent = 1;
1127                 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1128                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1129                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1130                 mddev->level = le32_to_cpu(sb->level);
1131                 mddev->clevel[0] = 0;
1132                 mddev->layout = le32_to_cpu(sb->layout);
1133                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1134                 mddev->size = le64_to_cpu(sb->size)/2;
1135                 mddev->events = ev1;
1136                 mddev->bitmap_offset = 0;
1137                 mddev->default_bitmap_offset = 1024 >> 9;
1138                 
1139                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1140                 memcpy(mddev->uuid, sb->set_uuid, 16);
1141
1142                 mddev->max_disks =  (4096-256)/2;
1143
1144                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1145                     mddev->bitmap_file == NULL ) {
1146                         if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1147                             && mddev->level != 10) {
1148                                 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1149                                 return -EINVAL;
1150                         }
1151                         mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1152                 }
1153                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1154                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1155                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1156                         mddev->new_level = le32_to_cpu(sb->new_level);
1157                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1158                         mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1159                 } else {
1160                         mddev->reshape_position = MaxSector;
1161                         mddev->delta_disks = 0;
1162                         mddev->new_level = mddev->level;
1163                         mddev->new_layout = mddev->layout;
1164                         mddev->new_chunk = mddev->chunk_size;
1165                 }
1166
1167         } else if (mddev->pers == NULL) {
1168                 /* Insist on a good event counter while assembling */
1169                 ++ev1;
1170                 if (ev1 < mddev->events)
1171                         return -EINVAL;
1172         } else if (mddev->bitmap) {
1173                 /* If adding to array with a bitmap, then we can accept an
1174                  * older device, but not too old.
1175                  */
1176                 if (ev1 < mddev->bitmap->events_cleared)
1177                         return 0;
1178         } else {
1179                 if (ev1 < mddev->events)
1180                         /* just a hot-add of a new device, leave raid_disk at -1 */
1181                         return 0;
1182         }
1183         if (mddev->level != LEVEL_MULTIPATH) {
1184                 int role;
1185                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1186                 switch(role) {
1187                 case 0xffff: /* spare */
1188                         break;
1189                 case 0xfffe: /* faulty */
1190                         set_bit(Faulty, &rdev->flags);
1191                         break;
1192                 default:
1193                         if ((le32_to_cpu(sb->feature_map) &
1194                              MD_FEATURE_RECOVERY_OFFSET))
1195                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1196                         else
1197                                 set_bit(In_sync, &rdev->flags);
1198                         rdev->raid_disk = role;
1199                         break;
1200                 }
1201                 if (sb->devflags & WriteMostly1)
1202                         set_bit(WriteMostly, &rdev->flags);
1203         } else /* MULTIPATH are always insync */
1204                 set_bit(In_sync, &rdev->flags);
1205
1206         return 0;
1207 }
1208
1209 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1210 {
1211         struct mdp_superblock_1 *sb;
1212         struct list_head *tmp;
1213         mdk_rdev_t *rdev2;
1214         int max_dev, i;
1215         /* make rdev->sb match mddev and rdev data. */
1216
1217         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1218
1219         sb->feature_map = 0;
1220         sb->pad0 = 0;
1221         sb->recovery_offset = cpu_to_le64(0);
1222         memset(sb->pad1, 0, sizeof(sb->pad1));
1223         memset(sb->pad2, 0, sizeof(sb->pad2));
1224         memset(sb->pad3, 0, sizeof(sb->pad3));
1225
1226         sb->utime = cpu_to_le64((__u64)mddev->utime);
1227         sb->events = cpu_to_le64(mddev->events);
1228         if (mddev->in_sync)
1229                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1230         else
1231                 sb->resync_offset = cpu_to_le64(0);
1232
1233         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1234
1235         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1236         sb->size = cpu_to_le64(mddev->size<<1);
1237
1238         if (mddev->bitmap && mddev->bitmap_file == NULL) {
1239                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1240                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1241         }
1242
1243         if (rdev->raid_disk >= 0 &&
1244             !test_bit(In_sync, &rdev->flags) &&
1245             rdev->recovery_offset > 0) {
1246                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1247                 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1248         }
1249
1250         if (mddev->reshape_position != MaxSector) {
1251                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1252                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1253                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1254                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1255                 sb->new_level = cpu_to_le32(mddev->new_level);
1256                 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1257         }
1258
1259         max_dev = 0;
1260         ITERATE_RDEV(mddev,rdev2,tmp)
1261                 if (rdev2->desc_nr+1 > max_dev)
1262                         max_dev = rdev2->desc_nr+1;
1263         
1264         sb->max_dev = cpu_to_le32(max_dev);
1265         for (i=0; i<max_dev;i++)
1266                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1267         
1268         ITERATE_RDEV(mddev,rdev2,tmp) {
1269                 i = rdev2->desc_nr;
1270                 if (test_bit(Faulty, &rdev2->flags))
1271                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1272                 else if (test_bit(In_sync, &rdev2->flags))
1273                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1274                 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1275                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1276                 else
1277                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1278         }
1279
1280         sb->sb_csum = calc_sb_1_csum(sb);
1281 }
1282
1283
1284 static struct super_type super_types[] = {
1285         [0] = {
1286                 .name   = "0.90.0",
1287                 .owner  = THIS_MODULE,
1288                 .load_super     = super_90_load,
1289                 .validate_super = super_90_validate,
1290                 .sync_super     = super_90_sync,
1291         },
1292         [1] = {
1293                 .name   = "md-1",
1294                 .owner  = THIS_MODULE,
1295                 .load_super     = super_1_load,
1296                 .validate_super = super_1_validate,
1297                 .sync_super     = super_1_sync,
1298         },
1299 };
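/*
 * Dispatch sketch (illustrative): the index into super_types[] matches
 * mddev->major_version, so refreshing the in-memory superblock image of
 * one rdev looks roughly like
 *
 *	super_types[mddev->major_version].sync_super(mddev, rdev);
 *	rdev->sb_loaded = 1;
 *
 * as done by sync_sbs() further down.
 */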
1300
1301 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1302 {
1303         struct list_head *tmp, *tmp2;
1304         mdk_rdev_t *rdev, *rdev2;
1305
1306         ITERATE_RDEV(mddev1,rdev,tmp)
1307                 ITERATE_RDEV(mddev2, rdev2, tmp2)
1308                         if (rdev->bdev->bd_contains ==
1309                             rdev2->bdev->bd_contains)
1310                                 return 1;
1311
1312         return 0;
1313 }
1314
1315 static LIST_HEAD(pending_raid_disks);
1316
1317 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1318 {
1319         char b[BDEVNAME_SIZE];
1320         struct kobject *ko;
1321         char *s;
1322         int err;
1323
1324         if (rdev->mddev) {
1325                 MD_BUG();
1326                 return -EINVAL;
1327         }
1328         /* make sure rdev->size exceeds mddev->size */
1329         if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1330                 if (mddev->pers)
1331                         /* Cannot change size, so fail */
1332                         return -ENOSPC;
1333                 else
1334                         mddev->size = rdev->size;
1335         }
1336
1337         /* Verify rdev->desc_nr is unique.
1338          * If it is -1, assign a free number, else
1339          * check number is not in use
1340          */
1341         if (rdev->desc_nr < 0) {
1342                 int choice = 0;
1343                 if (mddev->pers) choice = mddev->raid_disks;
1344                 while (find_rdev_nr(mddev, choice))
1345                         choice++;
1346                 rdev->desc_nr = choice;
1347         } else {
1348                 if (find_rdev_nr(mddev, rdev->desc_nr))
1349                         return -EBUSY;
1350         }
1351         bdevname(rdev->bdev,b);
1352         if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
1353                 return -ENOMEM;
1354         while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
1355                 *s = '!';
1356                         
1357         rdev->mddev = mddev;
1358         printk(KERN_INFO "md: bind<%s>\n", b);
1359
1360         rdev->kobj.parent = &mddev->kobj;
1361         if ((err = kobject_add(&rdev->kobj)))
1362                 goto fail;
1363
1364         if (rdev->bdev->bd_part)
1365                 ko = &rdev->bdev->bd_part->kobj;
1366         else
1367                 ko = &rdev->bdev->bd_disk->kobj;
1368         if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1369                 kobject_del(&rdev->kobj);
1370                 goto fail;
1371         }
1372         list_add(&rdev->same_set, &mddev->disks);
1373         bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
1374         return 0;
1375
1376  fail:
1377         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1378                b, mdname(mddev));
1379         return err;
1380 }
1381
1382 static void delayed_delete(struct work_struct *ws)
1383 {
1384         mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1385         kobject_del(&rdev->kobj);
1386 }
1387
1388 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1389 {
1390         char b[BDEVNAME_SIZE];
1391         if (!rdev->mddev) {
1392                 MD_BUG();
1393                 return;
1394         }
1395         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1396         list_del_init(&rdev->same_set);
1397         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1398         rdev->mddev = NULL;
1399         sysfs_remove_link(&rdev->kobj, "block");
1400
1401         /* We need to delay this, otherwise we can deadlock when
1402          * writing 'remove' to "dev/state"
1403          */
1404         INIT_WORK(&rdev->del_work, delayed_delete);
1405         schedule_work(&rdev->del_work);
1406 }
1407
1408 /*
1409  * prevent the device from being mounted, repartitioned or
1410  * otherwise reused by a RAID array (or any other kernel
1411  * subsystem), by bd_claiming the device.
1412  */
1413 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1414 {
1415         int err = 0;
1416         struct block_device *bdev;
1417         char b[BDEVNAME_SIZE];
1418
1419         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1420         if (IS_ERR(bdev)) {
1421                 printk(KERN_ERR "md: could not open %s.\n",
1422                         __bdevname(dev, b));
1423                 return PTR_ERR(bdev);
1424         }
1425         err = bd_claim(bdev, rdev);
1426         if (err) {
1427                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1428                         bdevname(bdev, b));
1429                 blkdev_put(bdev);
1430                 return err;
1431         }
1432         rdev->bdev = bdev;
1433         return err;
1434 }
1435
1436 static void unlock_rdev(mdk_rdev_t *rdev)
1437 {
1438         struct block_device *bdev = rdev->bdev;
1439         rdev->bdev = NULL;
1440         if (!bdev)
1441                 MD_BUG();
1442         bd_release(bdev);
1443         blkdev_put(bdev);
1444 }
1445
1446 void md_autodetect_dev(dev_t dev);
1447
1448 static void export_rdev(mdk_rdev_t * rdev)
1449 {
1450         char b[BDEVNAME_SIZE];
1451         printk(KERN_INFO "md: export_rdev(%s)\n",
1452                 bdevname(rdev->bdev,b));
1453         if (rdev->mddev)
1454                 MD_BUG();
1455         free_disk_sb(rdev);
1456         list_del_init(&rdev->same_set);
1457 #ifndef MODULE
1458         md_autodetect_dev(rdev->bdev->bd_dev);
1459 #endif
1460         unlock_rdev(rdev);
1461         kobject_put(&rdev->kobj);
1462 }
1463
1464 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1465 {
1466         unbind_rdev_from_array(rdev);
1467         export_rdev(rdev);
1468 }
1469
1470 static void export_array(mddev_t *mddev)
1471 {
1472         struct list_head *tmp;
1473         mdk_rdev_t *rdev;
1474
1475         ITERATE_RDEV(mddev,rdev,tmp) {
1476                 if (!rdev->mddev) {
1477                         MD_BUG();
1478                         continue;
1479                 }
1480                 kick_rdev_from_array(rdev);
1481         }
1482         if (!list_empty(&mddev->disks))
1483                 MD_BUG();
1484         mddev->raid_disks = 0;
1485         mddev->major_version = 0;
1486 }
1487
1488 static void print_desc(mdp_disk_t *desc)
1489 {
1490         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1491                 desc->major,desc->minor,desc->raid_disk,desc->state);
1492 }
1493
1494 static void print_sb(mdp_super_t *sb)
1495 {
1496         int i;
1497
1498         printk(KERN_INFO 
1499                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1500                 sb->major_version, sb->minor_version, sb->patch_version,
1501                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1502                 sb->ctime);
1503         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1504                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1505                 sb->md_minor, sb->layout, sb->chunk_size);
1506         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1507                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1508                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1509                 sb->failed_disks, sb->spare_disks,
1510                 sb->sb_csum, (unsigned long)sb->events_lo);
1511
1512         printk(KERN_INFO);
1513         for (i = 0; i < MD_SB_DISKS; i++) {
1514                 mdp_disk_t *desc;
1515
1516                 desc = sb->disks + i;
1517                 if (desc->number || desc->major || desc->minor ||
1518                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1519                         printk("     D %2d: ", i);
1520                         print_desc(desc);
1521                 }
1522         }
1523         printk(KERN_INFO "md:     THIS: ");
1524         print_desc(&sb->this_disk);
1525
1526 }
1527
1528 static void print_rdev(mdk_rdev_t *rdev)
1529 {
1530         char b[BDEVNAME_SIZE];
1531         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1532                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1533                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1534                 rdev->desc_nr);
1535         if (rdev->sb_loaded) {
1536                 printk(KERN_INFO "md: rdev superblock:\n");
1537                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1538         } else
1539                 printk(KERN_INFO "md: no rdev superblock!\n");
1540 }
1541
1542 static void md_print_devices(void)
1543 {
1544         struct list_head *tmp, *tmp2;
1545         mdk_rdev_t *rdev;
1546         mddev_t *mddev;
1547         char b[BDEVNAME_SIZE];
1548
1549         printk("\n");
1550         printk("md:     **********************************\n");
1551         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1552         printk("md:     **********************************\n");
1553         ITERATE_MDDEV(mddev,tmp) {
1554
1555                 if (mddev->bitmap)
1556                         bitmap_print_sb(mddev->bitmap);
1557                 else
1558                         printk("%s: ", mdname(mddev));
1559                 ITERATE_RDEV(mddev,rdev,tmp2)
1560                         printk("<%s>", bdevname(rdev->bdev,b));
1561                 printk("\n");
1562
1563                 ITERATE_RDEV(mddev,rdev,tmp2)
1564                         print_rdev(rdev);
1565         }
1566         printk("md:     **********************************\n");
1567         printk("\n");
1568 }
1569
1570
1571 static void sync_sbs(mddev_t * mddev, int nospares)
1572 {
1573         /* Update each superblock (in-memory image), but
1574          * if we are allowed to, skip spares which already
1575          * have the right event counter, or have one earlier
1576          * (which would mean they aren't being marked as dirty
1577          * with the rest of the array)
1578          */
1579         mdk_rdev_t *rdev;
1580         struct list_head *tmp;
1581
1582         ITERATE_RDEV(mddev,rdev,tmp) {
1583                 if (rdev->sb_events == mddev->events ||
1584                     (nospares &&
1585                      rdev->raid_disk < 0 &&
1586                      (rdev->sb_events&1)==0 &&
1587                      rdev->sb_events+1 == mddev->events)) {
1588                         /* Don't update this superblock */
1589                         rdev->sb_loaded = 2;
1590                 } else {
1591                         super_types[mddev->major_version].
1592                                 sync_super(mddev, rdev);
1593                         rdev->sb_loaded = 1;
1594                 }
1595         }
1596 }
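
/* Worked example of the skip rule above (illustrative): suppose
 * mddev->events is 11 and a spare (raid_disk < 0) still records
 * sb_events == 10.  10 is even (a clean state) and 10 + 1 == 11, so with
 * 'nospares' set the spare's superblock is left alone (sb_loaded = 2).
 * A spare left at sb_events == 9 would be rewritten instead.
 */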
1597
1598 static void md_update_sb(mddev_t * mddev, int force_change)
1599 {
1600         int err;
1601         struct list_head *tmp;
1602         mdk_rdev_t *rdev;
1603         int sync_req;
1604         int nospares = 0;
1605
1606 repeat:
1607         spin_lock_irq(&mddev->write_lock);
1608
1609         set_bit(MD_CHANGE_PENDING, &mddev->flags);
1610         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1611                 force_change = 1;
1612         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1613                 /* just a clean <-> dirty transition; possibly leave spares alone,
1614                  * though if the event count doesn't have the right even/odd parity,
1615                  * we will have to update the spares after all
1616                  */
1617                 nospares = 1;
1618         if (force_change)
1619                 nospares = 0;
1620         if (mddev->degraded)
1621                 /* If the array is degraded, then skipping spares is both
1622                  * dangerous and fairly pointless.
1623                  * Dangerous because a device that was removed from the array
1624                  * might have an event count that still looks up-to-date,
1625                  * so it can be re-added without a resync.
1626                  * Pointless because if there are any spares to skip,
1627                  * then a recovery will happen and soon that array won't
1628                  * be degraded any more and the spare can go back to sleep then.
1629                  */
1630                 nospares = 0;
1631
1632         sync_req = mddev->in_sync;
1633         mddev->utime = get_seconds();
1634
1635         /* If this is just a dirty<->clean transition, and the array is clean
1636          * and 'events' is odd, we can roll back to the previous clean state */
1637         if (nospares
1638             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1639             && (mddev->events & 1)
1640             && mddev->events != 1)
1641                 mddev->events--;
1642         else {
1643                 /* otherwise we have to go forward and ... */
1644                 mddev->events ++;
1645                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1646                         /* .. if the array isn't clean, insist on an odd 'events' */
1647                         if ((mddev->events&1)==0) {
1648                                 mddev->events++;
1649                                 nospares = 0;
1650                         }
1651                 } else {
1652                         /* otherwise insist on an even 'events' (for clean states) */
1653                         if ((mddev->events&1)) {
1654                                 mddev->events++;
1655                                 nospares = 0;
1656                         }
1657                 }
1658         }
1659
1660         if (!mddev->events) {
1661                 /*
1662                  * oops, this 64-bit counter should never wrap.
1663                  * Either we are somewhere around the year 1 trillion A.D., assuming
1664                  * 1 reboot per second, or we have a bug:
1665                  */
1666                 MD_BUG();
1667                 mddev->events --;
1668         }
1669         sync_sbs(mddev, nospares);
1670
1671         /*
1672          * do not write anything to disk if using
1673          * nonpersistent superblocks
1674          */
1675         if (!mddev->persistent) {
1676                 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1677                 spin_unlock_irq(&mddev->write_lock);
1678                 wake_up(&mddev->sb_wait);
1679                 return;
1680         }
1681         spin_unlock_irq(&mddev->write_lock);
1682
1683         dprintk(KERN_INFO 
1684                 "md: updating %s RAID superblock on device (in sync %d)\n",
1685                 mdname(mddev),mddev->in_sync);
1686
1687         err = bitmap_update_sb(mddev->bitmap);
1688         ITERATE_RDEV(mddev,rdev,tmp) {
1689                 char b[BDEVNAME_SIZE];
1690                 dprintk(KERN_INFO "md: ");
1691                 if (rdev->sb_loaded != 1)
1692                         continue; /* no noise on spare devices */
1693                 if (test_bit(Faulty, &rdev->flags))
1694                         dprintk("(skipping faulty ");
1695
1696                 dprintk("%s ", bdevname(rdev->bdev,b));
1697                 if (!test_bit(Faulty, &rdev->flags)) {
1698                         md_super_write(mddev,rdev,
1699                                        rdev->sb_offset<<1, rdev->sb_size,
1700                                        rdev->sb_page);
1701                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1702                                 bdevname(rdev->bdev,b),
1703                                 (unsigned long long)rdev->sb_offset);
1704                         rdev->sb_events = mddev->events;
1705
1706                 } else
1707                         dprintk(")\n");
1708                 if (mddev->level == LEVEL_MULTIPATH)
1709                         /* only need to write one superblock... */
1710                         break;
1711         }
1712         md_super_wait(mddev);
1713         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1714
1715         spin_lock_irq(&mddev->write_lock);
1716         if (mddev->in_sync != sync_req ||
1717             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1718                 /* have to write it out again */
1719                 spin_unlock_irq(&mddev->write_lock);
1720                 goto repeat;
1721         }
1722         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1723         spin_unlock_irq(&mddev->write_lock);
1724         wake_up(&mddev->sb_wait);
1725
1726 }
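
/* The even/odd convention traced through the code above (illustrative):
 * a clean array sits at an even count, say 12.  A write marks it dirty,
 * bumping events to the odd 13; going clean again would normally move to
 * 14, but if nothing else changed ('nospares') the counter is instead
 * rolled back to 12 so that spare superblocks need no update at all.
 */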
1727
1728 /* words written to sysfs files may, or may not, be \n terminated.
1729  * We want to accept either case. For this we use cmd_match.
1730  */
1731 static int cmd_match(const char *cmd, const char *str)
1732 {
1733         /* See if cmd, written into a sysfs file, matches
1734          * str.  They must either be the same, or cmd can
1735          * have a trailing newline
1736          */
1737         while (*cmd && *str && *cmd == *str) {
1738                 cmd++;
1739                 str++;
1740         }
1741         if (*cmd == '\n')
1742                 cmd++;
1743         if (*str || *cmd)
1744                 return 0;
1745         return 1;
1746 }
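
/* Examples of the matching rule (illustrative only):
 *
 *   cmd_match("faulty\n", "faulty") -> 1   (trailing newline accepted)
 *   cmd_match("faulty",   "faulty") -> 1   (exact match)
 *   cmd_match("fault",    "faulty") -> 0   (mere prefixes do not match)
 */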
1747
1748 struct rdev_sysfs_entry {
1749         struct attribute attr;
1750         ssize_t (*show)(mdk_rdev_t *, char *);
1751         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1752 };
1753
1754 static ssize_t
1755 state_show(mdk_rdev_t *rdev, char *page)
1756 {
1757         char *sep = "";
1758         int len=0;
1759
1760         if (test_bit(Faulty, &rdev->flags)) {
1761                 len+= sprintf(page+len, "%sfaulty",sep);
1762                 sep = ",";
1763         }
1764         if (test_bit(In_sync, &rdev->flags)) {
1765                 len += sprintf(page+len, "%sin_sync",sep);
1766                 sep = ",";
1767         }
1768         if (test_bit(WriteMostly, &rdev->flags)) {
1769                 len += sprintf(page+len, "%swrite_mostly",sep);
1770                 sep = ",";
1771         }
1772         if (!test_bit(Faulty, &rdev->flags) &&
1773             !test_bit(In_sync, &rdev->flags)) {
1774                 len += sprintf(page+len, "%sspare", sep);
1775                 sep = ",";
1776         }
1777         return len+sprintf(page+len, "\n");
1778 }
1779
1780 static ssize_t
1781 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1782 {
1783         /* can write
1784          *  faulty  - simulates an error
1785          *  remove  - disconnects the device
1786          *  writemostly - sets write_mostly
1787          *  -writemostly - clears write_mostly
1788          */
1789         int err = -EINVAL;
1790         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1791                 md_error(rdev->mddev, rdev);
1792                 err = 0;
1793         } else if (cmd_match(buf, "remove")) {
1794                 if (rdev->raid_disk >= 0)
1795                         err = -EBUSY;
1796                 else {
1797                         mddev_t *mddev = rdev->mddev;
1798                         kick_rdev_from_array(rdev);
1799                         if (mddev->pers)
1800                                 md_update_sb(mddev, 1);
1801                         md_new_event(mddev);
1802                         err = 0;
1803                 }
1804         } else if (cmd_match(buf, "writemostly")) {
1805                 set_bit(WriteMostly, &rdev->flags);
1806                 err = 0;
1807         } else if (cmd_match(buf, "-writemostly")) {
1808                 clear_bit(WriteMostly, &rdev->flags);
1809                 err = 0;
1810         }
1811         return err ? err : len;
1812 }
1813 static struct rdev_sysfs_entry rdev_state =
1814 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
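
/* Typical usage from user space (illustrative path; the component
 * directory is named after the device, e.g. "dev-sda1"):
 *
 *   echo faulty > /sys/block/md0/md/dev-sda1/state
 *   echo remove > /sys/block/md0/md/dev-sda1/state
 *
 * The first simulates a failure via md_error(); the second detaches the
 * component, which is only allowed once it no longer occupies a raid slot.
 */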
1815
1816 static ssize_t
1817 super_show(mdk_rdev_t *rdev, char *page)
1818 {
1819         if (rdev->sb_loaded && rdev->sb_size) {
1820                 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1821                 return rdev->sb_size;
1822         } else
1823                 return 0;
1824 }
1825 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1826
1827 static ssize_t
1828 errors_show(mdk_rdev_t *rdev, char *page)
1829 {
1830         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1831 }
1832
1833 static ssize_t
1834 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1835 {
1836         char *e;
1837         unsigned long n = simple_strtoul(buf, &e, 10);
1838         if (*buf && (*e == 0 || *e == '\n')) {
1839                 atomic_set(&rdev->corrected_errors, n);
1840                 return len;
1841         }
1842         return -EINVAL;
1843 }
1844 static struct rdev_sysfs_entry rdev_errors =
1845 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1846
1847 static ssize_t
1848 slot_show(mdk_rdev_t *rdev, char *page)
1849 {
1850         if (rdev->raid_disk < 0)
1851                 return sprintf(page, "none\n");
1852         else
1853                 return sprintf(page, "%d\n", rdev->raid_disk);
1854 }
1855
1856 static ssize_t
1857 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1858 {
1859         char *e;
1860         int slot = simple_strtoul(buf, &e, 10);
1861         if (strncmp(buf, "none", 4)==0)
1862                 slot = -1;
1863         else if (e==buf || (*e && *e!= '\n'))
1864                 return -EINVAL;
1865         if (rdev->mddev->pers)
1866                 /* Cannot set slot in active array (yet) */
1867                 return -EBUSY;
1868         if (slot >= rdev->mddev->raid_disks)
1869                 return -ENOSPC;
1870         rdev->raid_disk = slot;
1871         /* assume it is working */
1872         rdev->flags = 0;
1873         set_bit(In_sync, &rdev->flags);
1874         return len;
1875 }
1876
1877
1878 static struct rdev_sysfs_entry rdev_slot =
1879 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
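
/* Illustrative: on an inactive array, "echo 3 > .../slot" claims raid
 * slot 3 for this component and assumes it is in sync, while "echo none"
 * records it as holding no slot.  Active arrays reject slot changes.
 */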
1880
1881 static ssize_t
1882 offset_show(mdk_rdev_t *rdev, char *page)
1883 {
1884         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1885 }
1886
1887 static ssize_t
1888 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1889 {
1890         char *e;
1891         unsigned long long offset = simple_strtoull(buf, &e, 10);
1892         if (e==buf || (*e && *e != '\n'))
1893                 return -EINVAL;
1894         if (rdev->mddev->pers)
1895                 return -EBUSY;
1896         rdev->data_offset = offset;
1897         return len;
1898 }
1899
1900 static struct rdev_sysfs_entry rdev_offset =
1901 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
1902
1903 static ssize_t
1904 rdev_size_show(mdk_rdev_t *rdev, char *page)
1905 {
1906         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1907 }
1908
1909 static ssize_t
1910 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1911 {
1912         char *e;
1913         unsigned long long size = simple_strtoull(buf, &e, 10);
1914         if (e==buf || (*e && *e != '\n'))
1915                 return -EINVAL;
1916         if (rdev->mddev->pers)
1917                 return -EBUSY;
1918         rdev->size = size;
1919         if (size < rdev->mddev->size || rdev->mddev->size == 0)
1920                 rdev->mddev->size = size;
1921         return len;
1922 }
1923
1924 static struct rdev_sysfs_entry rdev_size =
1925 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
1926
1927 static struct attribute *rdev_default_attrs[] = {
1928         &rdev_state.attr,
1929         &rdev_super.attr,
1930         &rdev_errors.attr,
1931         &rdev_slot.attr,
1932         &rdev_offset.attr,
1933         &rdev_size.attr,
1934         NULL,
1935 };
1936 static ssize_t
1937 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1938 {
1939         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1940         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1941
1942         if (!entry->show)
1943                 return -EIO;
1944         return entry->show(rdev, page);
1945 }
1946
1947 static ssize_t
1948 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1949               const char *page, size_t length)
1950 {
1951         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1952         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1953
1954         if (!entry->store)
1955                 return -EIO;
1956         if (!capable(CAP_SYS_ADMIN))
1957                 return -EACCES;
1958         return entry->store(rdev, page, length);
1959 }
1960
1961 static void rdev_free(struct kobject *ko)
1962 {
1963         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1964         kfree(rdev);
1965 }
1966 static struct sysfs_ops rdev_sysfs_ops = {
1967         .show           = rdev_attr_show,
1968         .store          = rdev_attr_store,
1969 };
1970 static struct kobj_type rdev_ktype = {
1971         .release        = rdev_free,
1972         .sysfs_ops      = &rdev_sysfs_ops,
1973         .default_attrs  = rdev_default_attrs,
1974 };
1975
1976 /*
1977  * Import a device. If 'super_format' >= 0, then sanity check the superblock
1978  *
1979  * mark the device faulty if:
1980  *
1981  *   - the device is nonexistent (zero size)
1982  *   - the device has no valid superblock
1983  *
1984  * a faulty rdev _never_ has rdev->sb set.
1985  */
1986 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1987 {
1988         char b[BDEVNAME_SIZE];
1989         int err;
1990         mdk_rdev_t *rdev;
1991         sector_t size;
1992
1993         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1994         if (!rdev) {
1995                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1996                 return ERR_PTR(-ENOMEM);
1997         }
1998
1999         if ((err = alloc_disk_sb(rdev)))
2000                 goto abort_free;
2001
2002         err = lock_rdev(rdev, newdev);
2003         if (err)
2004                 goto abort_free;
2005
2006         rdev->kobj.parent = NULL;
2007         rdev->kobj.ktype = &rdev_ktype;
2008         kobject_init(&rdev->kobj);
2009
2010         rdev->desc_nr = -1;
2011         rdev->saved_raid_disk = -1;
2012         rdev->raid_disk = -1;
2013         rdev->flags = 0;
2014         rdev->data_offset = 0;
2015         rdev->sb_events = 0;
2016         atomic_set(&rdev->nr_pending, 0);
2017         atomic_set(&rdev->read_errors, 0);
2018         atomic_set(&rdev->corrected_errors, 0);
2019
2020         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2021         if (!size) {
2022                 printk(KERN_WARNING 
2023                         "md: %s has zero or unknown size, marking faulty!\n",
2024                         bdevname(rdev->bdev,b));
2025                 err = -EINVAL;
2026                 goto abort_free;
2027         }
2028
2029         if (super_format >= 0) {
2030                 err = super_types[super_format].
2031                         load_super(rdev, NULL, super_minor);
2032                 if (err == -EINVAL) {
2033                         printk(KERN_WARNING 
2034                                 "md: %s has invalid sb, not importing!\n",
2035                                 bdevname(rdev->bdev,b));
2036                         goto abort_free;
2037                 }
2038                 if (err < 0) {
2039                         printk(KERN_WARNING 
2040                                 "md: could not read %s's sb, not importing!\n",
2041                                 bdevname(rdev->bdev,b));
2042                         goto abort_free;
2043                 }
2044         }
2045         INIT_LIST_HEAD(&rdev->same_set);
2046
2047         return rdev;
2048
2049 abort_free:
2050         if (rdev->sb_page) {
2051                 if (rdev->bdev)
2052                         unlock_rdev(rdev);
2053                 free_disk_sb(rdev);
2054         }
2055         kfree(rdev);
2056         return ERR_PTR(err);
2057 }
2058
2059 /*
2060  * Check a full RAID array for plausibility
2061  */
2062
2063
2064 static void analyze_sbs(mddev_t * mddev)
2065 {
2066         int i;
2067         struct list_head *tmp;
2068         mdk_rdev_t *rdev, *freshest;
2069         char b[BDEVNAME_SIZE];
2070
2071         freshest = NULL;
2072         ITERATE_RDEV(mddev,rdev,tmp)
2073                 switch (super_types[mddev->major_version].
2074                         load_super(rdev, freshest, mddev->minor_version)) {
2075                 case 1:
2076                         freshest = rdev;
2077                         break;
2078                 case 0:
2079                         break;
2080                 default:
2081                         printk(KERN_ERR
2082                                 "md: fatal superblock inconsistency in %s"
2083                                 " -- removing from array\n", 
2084                                 bdevname(rdev->bdev,b));
2085                         kick_rdev_from_array(rdev);
2086                 }
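
        /* load_super() convention relied on above (sketch): 1 means this
         * superblock is newer than the freshest seen so far, 0 means it is
         * consistent but not newer, and anything else marks a fatal
         * inconsistency that gets the device kicked from the array.
         */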
2087
2088
2089         super_types[mddev->major_version].
2090                 validate_super(mddev, freshest);
2091
2092         i = 0;
2093         ITERATE_RDEV(mddev,rdev,tmp) {
2094                 if (rdev != freshest)
2095                         if (super_types[mddev->major_version].
2096                             validate_super(mddev, rdev)) {
2097                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2098                                         " from array!\n",
2099                                         bdevname(rdev->bdev,b));
2100                                 kick_rdev_from_array(rdev);
2101                                 continue;
2102                         }
2103                 if (mddev->level == LEVEL_MULTIPATH) {
2104                         rdev->desc_nr = i++;
2105                         rdev->raid_disk = rdev->desc_nr;
2106                         set_bit(In_sync, &rdev->flags);
2107                 }
2108         }
2109
2110
2111
2112         if (mddev->recovery_cp != MaxSector &&
2113             mddev->level >= 1)
2114                 printk(KERN_ERR "md: %s: raid array is not clean"
2115                        " -- starting background reconstruction\n",
2116                        mdname(mddev));
2117
2118 }
2119
2120 static ssize_t
2121 safe_delay_show(mddev_t *mddev, char *page)
2122 {
2123         int msec = (mddev->safemode_delay*1000)/HZ;
2124         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2125 }
2126 static ssize_t
2127 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2128 {
2129         int scale=1;
2130         int dot=0;
2131         int i;
2132         unsigned long msec;
2133         char buf[30];
2134         char *e;
2135         /* remove a period, and count digits after it */
2136         if (len >= sizeof(buf))
2137                 return -EINVAL;
2138         memcpy(buf, cbuf, len); /* not strlcpy(): it would drop the last byte */
2139         buf[len] = 0;
2140         for (i=0; i<len; i++) {
2141                 if (dot) {
2142                         if (isdigit(buf[i])) {
2143                                 buf[i-1] = buf[i];
2144                                 scale *= 10;
2145                         }
2146                         buf[i] = 0;
2147                 } else if (buf[i] == '.') {
2148                         dot=1;
2149                         buf[i] = 0;
2150                 }
2151         }
2152         msec = simple_strtoul(buf, &e, 10);
2153         if (e == buf || (*e && *e != '\n'))
2154                 return -EINVAL;
2155         msec = (msec * 1000) / scale;
2156         if (msec == 0)
2157                 mddev->safemode_delay = 0;
2158         else {
2159                 mddev->safemode_delay = (msec*HZ)/1000;
2160                 if (mddev->safemode_delay == 0)
2161                         mddev->safemode_delay = 1;
2162         }
2163         return len;
2164 }
2165 static struct md_sysfs_entry md_safe_delay =
2166 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
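
/* Worked example of the decimal handling above (illustrative): writing
 * "0.25" leaves msec = 25 with scale = 100, so msec becomes
 * (25 * 1000) / 100 = 250, and safemode_delay ends up as (250 * HZ) / 1000
 * jiffies -- a quarter of a second.
 */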
2167
2168 static ssize_t
2169 level_show(mddev_t *mddev, char *page)
2170 {
2171         struct mdk_personality *p = mddev->pers;
2172         if (p)
2173                 return sprintf(page, "%s\n", p->name);
2174         else if (mddev->clevel[0])
2175                 return sprintf(page, "%s\n", mddev->clevel);
2176         else if (mddev->level != LEVEL_NONE)
2177                 return sprintf(page, "%d\n", mddev->level);
2178         else
2179                 return 0;
2180 }
2181
2182 static ssize_t
2183 level_store(mddev_t *mddev, const char *buf, size_t len)
2184 {
2185         int rv = len;
2186         if (mddev->pers)
2187                 return -EBUSY;
2188         if (len == 0)
2189                 return 0;
2190         if (len >= sizeof(mddev->clevel))
2191                 return -ENOSPC;
2192         strncpy(mddev->clevel, buf, len);
2193         if (mddev->clevel[len-1] == '\n')
2194                 len--;
2195         mddev->clevel[len] = 0;
2196         mddev->level = LEVEL_NONE;
2197         return rv;
2198 }
2199
2200 static struct md_sysfs_entry md_level =
2201 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2202
2203
2204 static ssize_t
2205 layout_show(mddev_t *mddev, char *page)
2206 {
2207         /* just a number, not meaningful for all levels */
2208         return sprintf(page, "%d\n", mddev->layout);
2209 }
2210
2211 static ssize_t
2212 layout_store(mddev_t *mddev, const char *buf, size_t len)
2213 {
2214         char *e;
2215         unsigned long n = simple_strtoul(buf, &e, 10);
2216         if (mddev->pers)
2217                 return -EBUSY;
2218
2219         if (!*buf || (*e && *e != '\n'))
2220                 return -EINVAL;
2221
2222         mddev->layout = n;
2223         return len;
2224 }
2225 static struct md_sysfs_entry md_layout =
2226 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2227
2228
2229 static ssize_t
2230 raid_disks_show(mddev_t *mddev, char *page)
2231 {
2232         if (mddev->raid_disks == 0)
2233                 return 0;
2234         return sprintf(page, "%d\n", mddev->raid_disks);
2235 }
2236
2237 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2238
2239 static ssize_t
2240 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2241 {
2242         char *e;
2243         int rv = 0;
2244         unsigned long n = simple_strtoul(buf, &e, 10);
2245
2246         if (!*buf || (*e && *e != '\n'))
2247                 return -EINVAL;
2248
2249         if (mddev->pers)
2250                 rv = update_raid_disks(mddev, n);
2251         else
2252                 mddev->raid_disks = n;
2253         return rv ? rv : len;
2254 }
2255 static struct md_sysfs_entry md_raid_disks =
2256 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2257
2258 static ssize_t
2259 chunk_size_show(mddev_t *mddev, char *page)
2260 {
2261         return sprintf(page, "%d\n", mddev->chunk_size);
2262 }
2263
2264 static ssize_t
2265 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2266 {
2267         /* can only set chunk_size if array is not yet active */
2268         char *e;
2269         unsigned long n = simple_strtoul(buf, &e, 10);
2270
2271         if (mddev->pers)
2272                 return -EBUSY;
2273         if (!*buf || (*e && *e != '\n'))
2274                 return -EINVAL;
2275
2276         mddev->chunk_size = n;
2277         return len;
2278 }
2279 static struct md_sysfs_entry md_chunk_size =
2280 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2281
2282 static ssize_t
2283 resync_start_show(mddev_t *mddev, char *page)
2284 {
2285         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2286 }
2287
2288 static ssize_t
2289 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2290 {
2291         /* can only set resync_start if array is not yet active */
2292         char *e;
2293         unsigned long long n = simple_strtoull(buf, &e, 10);
2294
2295         if (mddev->pers)
2296                 return -EBUSY;
2297         if (!*buf || (*e && *e != '\n'))
2298                 return -EINVAL;
2299
2300         mddev->recovery_cp = n;
2301         return len;
2302 }
2303 static struct md_sysfs_entry md_resync_start =
2304 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2305
2306 /*
2307  * The array state can be:
2308  *
2309  * clear
2310  *     No devices, no size, no level
2311  *     Equivalent to STOP_ARRAY ioctl
2312  * inactive
2313  *     May have some settings, but array is not active
2314  *        all IO results in error
2315  *     When written, doesn't tear down array, but just stops it
2316  * suspended (not supported yet)
2317  *     All IO requests will block. The array can be reconfigured.
2318  *     Writing this, if accepted, will block until array is quiescent
2319  * readonly
2320  *     no resync can happen.  no superblocks get written.
2321  *     write requests fail
2322  * read-auto
2323  *     like readonly, but behaves like 'clean' on a write request.
2324  *
2325  * clean - no pending writes, but otherwise active.
2326  *     When written to inactive array, starts without resync
2327  *     If a write request arrives then
2328  *       if metadata is known, mark 'dirty' and switch to 'active'.
2329  *       if not known, block and switch to write-pending
2330  *     If written to an active array that has pending writes, then fails.
2331  * active
2332  *     fully active: IO and resync can be happening.
2333  *     When written to inactive array, starts with resync
2334  *
2335  * write-pending
2336  *     clean, but writes are blocked waiting for 'active' to be written.
2337  *
2338  * active-idle
2339  *     like active, but no writes have been seen for a while (100msec).
2340  *
2341  */
2342 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2343                    write_pending, active_idle, bad_word};
2344 static char *array_states[] = {
2345         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2346         "write-pending", "active-idle", NULL };
2347
2348 static int match_word(const char *word, char **list)
2349 {
2350         int n;
2351         for (n=0; list[n]; n++)
2352                 if (cmd_match(word, list[n]))
2353                         break;
2354         return n;
2355 }
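
/* Illustrative: match_word("clean\n", array_states) returns the index of
 * "clean", i.e. the enum value 'clean'; an unrecognised word runs off the
 * end of the list and yields the index of the terminating NULL, which is
 * exactly 'bad_word'.
 */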
2356
2357 static ssize_t
2358 array_state_show(mddev_t *mddev, char *page)
2359 {
2360         enum array_state st = inactive;
2361
2362         if (mddev->pers)
2363                 switch(mddev->ro) {
2364                 case 1:
2365                         st = readonly;
2366                         break;
2367                 case 2:
2368                         st = read_auto;
2369                         break;
2370                 case 0:
2371                         if (mddev->in_sync)
2372                                 st = clean;
2373                         else if (mddev->safemode)
2374                                 st = active_idle;
2375                         else
2376                                 st = active;
2377                 }
2378         else {
2379                 if (list_empty(&mddev->disks) &&
2380                     mddev->raid_disks == 0 &&
2381                     mddev->size == 0)
2382                         st = clear;
2383                 else
2384                         st = inactive;
2385         }
2386         return sprintf(page, "%s\n", array_states[st]);
2387 }
2388
2389 static int do_md_stop(mddev_t * mddev, int ro);
2390 static int do_md_run(mddev_t * mddev);
2391 static int restart_array(mddev_t *mddev);
2392
2393 static ssize_t
2394 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2395 {
2396         int err = -EINVAL;
2397         enum array_state st = match_word(buf, array_states);
2398         switch(st) {
2399         case bad_word:
2400                 break;
2401         case clear:
2402                 /* stopping an active array */
2403                 if (mddev->pers) {
2404                         if (atomic_read(&mddev->active) > 1)
2405                                 return -EBUSY;
2406                         err = do_md_stop(mddev, 0);
2407                 }
2408                 break;
2409         case inactive:
2410                 /* stopping an active array */
2411                 if (mddev->pers) {
2412                         if (atomic_read(&mddev->active) > 1)
2413                                 return -EBUSY;
2414                         err = do_md_stop(mddev, 2);
2415                 }
2416                 break;
2417         case suspended:
2418                 break; /* not supported yet */
2419         case readonly:
2420                 if (mddev->pers)
2421                         err = do_md_stop(mddev, 1);
2422                 else {
2423                         mddev->ro = 1;
2424                         err = do_md_run(mddev);
2425                 }
2426                 break;
2427         case read_auto:
2428                 /* stopping an active array */
2429                 if (mddev->pers) {
2430                         err = do_md_stop(mddev, 1);
2431                         if (err == 0)
2432                                 mddev->ro = 2; /* FIXME mark devices writable */
2433                 } else {
2434                         mddev->ro = 2;
2435                         err = do_md_run(mddev);
2436                 }
2437                 break;
2438         case clean:
2439                 if (mddev->pers) {
2440                         restart_array(mddev);
2441                         spin_lock_irq(&mddev->write_lock);
2442                         if (atomic_read(&mddev->writes_pending) == 0) {
2443                                 mddev->in_sync = 1;
2444                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
2445                         }
2446                         spin_unlock_irq(&mddev->write_lock);
2447                 } else {
2448                         mddev->ro = 0;
2449                         mddev->recovery_cp = MaxSector;
2450                         err = do_md_run(mddev);
2451                 }
2452                 break;
2453         case active:
2454                 if (mddev->pers) {
2455                         restart_array(mddev);
2456                         clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2457                         wake_up(&mddev->sb_wait);
2458                         err = 0;
2459                 } else {
2460                         mddev->ro = 0;
2461                         err = do_md_run(mddev);
2462                 }
2463                 break;
2464         case write_pending:
2465         case active_idle:
2466                 /* these cannot be set */
2467                 break;
2468         }
2469         if (err)
2470                 return err;
2471         else
2472                 return len;
2473 }
2474 static struct md_sysfs_entry md_array_state =
2475 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
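
/* Example transitions via sysfs (illustrative path):
 *
 *   echo readonly > /sys/block/md0/md/array_state
 *   echo clean    > /sys/block/md0/md/array_state
 *
 * The first stops writes (or starts the array read-only); the second
 * starts an inactive array without a resync.  "write-pending" and
 * "active-idle" are reported states only and can never be written.
 */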
2476
2477 static ssize_t
2478 null_show(mddev_t *mddev, char *page)
2479 {
2480         return -EINVAL;
2481 }
2482
2483 static ssize_t
2484 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2485 {
2486         /* buf must be %d:%d with an optional trailing \n, giving major and minor numbers */
2487         /* The new device is added to the array.
2488          * If the array has a persistent superblock, we read the
2489          * superblock to initialise info and check validity.
2490          * Otherwise, only checking done is that in bind_rdev_to_array,
2491          * which mainly checks size.
2492          */
2493         char *e;
2494         int major = simple_strtoul(buf, &e, 10);
2495         int minor;
2496         dev_t dev;
2497         mdk_rdev_t *rdev;
2498         int err;
2499
2500         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2501                 return -EINVAL;
2502         minor = simple_strtoul(e+1, &e, 10);
2503         if (*e && *e != '\n')
2504                 return -EINVAL;
2505         dev = MKDEV(major, minor);
2506         if (major != MAJOR(dev) ||
2507             minor != MINOR(dev))
2508                 return -EOVERFLOW;
2509
2510
2511         if (mddev->persistent) {
2512                 rdev = md_import_device(dev, mddev->major_version,
2513                                         mddev->minor_version);
2514                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2515                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2516                                                        mdk_rdev_t, same_set);
2517                         err = super_types[mddev->major_version]
2518                                 .load_super(rdev, rdev0, mddev->minor_version);
2519                         if (err < 0)
2520                                 goto out;
2521                 }
2522         } else
2523                 rdev = md_import_device(dev, -1, -1);
2524
2525         if (IS_ERR(rdev))
2526                 return PTR_ERR(rdev);
2527         err = bind_rdev_to_array(rdev, mddev);
2528  out:
2529         if (err)
2530                 export_rdev(rdev);
2531         return err ? err : len;
2532 }
2533
2534 static struct md_sysfs_entry md_new_device =
2535 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
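
/* Illustrative: adding /dev/sdb1 (major 8, minor 17) to an array:
 *
 *   echo 8:17 > /sys/block/md0/md/new_dev
 *
 * For a persistent-superblock array the on-disk metadata is loaded and
 * checked against the first existing member before the device is bound.
 */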
2536
2537 static ssize_t
2538 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2539 {
2540         char *end;
2541         unsigned long chunk, end_chunk;
2542
2543         if (!mddev->bitmap)
2544                 goto out;
2545         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2546         while (*buf) {
2547                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2548                 if (buf == end) break;
2549                 if (*end == '-') { /* range */
2550                         buf = end + 1;
2551                         end_chunk = simple_strtoul(buf, &end, 0);
2552                         if (buf == end) break;
2553                 }
2554                 if (*end && !isspace(*end)) break;
2555                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2556                 buf = end;
2557                 while (isspace(*buf)) buf++;
2558         }
2559         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2560 out:
2561         return len;
2562 }
2563
2564 static struct md_sysfs_entry md_bitmap =
2565 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
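
/* Input format by example (illustrative): writing "10 200-250" marks
 * chunk 10 and the range 200..250 dirty in the write-intent bitmap,
 * forcing those chunks to be resynced.
 */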
2566
2567 static ssize_t
2568 size_show(mddev_t *mddev, char *page)
2569 {
2570         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2571 }
2572
2573 static int update_size(mddev_t *mddev, unsigned long size);
2574
2575 static ssize_t
2576 size_store(mddev_t *mddev, const char *buf, size_t len)
2577 {
2578         /* If array is inactive, we can reduce the component size, but
2579          * not increase it (except from 0).
2580          * If array is active, we can try an on-line resize
2581          */
2582         char *e;
2583         int err = 0;
2584         unsigned long long size = simple_strtoull(buf, &e, 10);
2585         if (!*buf || *buf == '\n' ||
2586             (*e && *e != '\n'))
2587                 return -EINVAL;
2588
2589         if (mddev->pers) {
2590                 err = update_size(mddev, size);
2591                 md_update_sb(mddev, 1);
2592         } else {
2593                 if (mddev->size == 0 ||
2594                     mddev->size > size)
2595                         mddev->size = size;
2596                 else
2597                         err = -ENOSPC;
2598         }
2599         return err ? err : len;
2600 }
2601
2602 static struct md_sysfs_entry md_size =
2603 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
2604
2605
2606 /* Metadata version.
2607  * This is either 'none' for arrays with externally managed metadata,
2608  * or N.M for internally known formats
2609  */
2610 static ssize_t
2611 metadata_show(mddev_t *mddev, char *page)
2612 {
2613         if (mddev->persistent)
2614                 return sprintf(page, "%d.%d\n",
2615                                mddev->major_version, mddev->minor_version);
2616         else
2617                 return sprintf(page, "none\n");
2618 }
2619
2620 static ssize_t
2621 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2622 {
2623         int major, minor;
2624         char *e;
2625         if (!list_empty(&mddev->disks))
2626                 return -EBUSY;
2627
2628         if (cmd_match(buf, "none")) {
2629                 mddev->persistent = 0;
2630                 mddev->major_version = 0;
2631                 mddev->minor_version = 90;
2632                 return len;
2633         }
2634         major = simple_strtoul(buf, &e, 10);
2635         if (e==buf || *e != '.')
2636                 return -EINVAL;
2637         buf = e+1;
2638         minor = simple_strtoul(buf, &e, 10);
2639         if (e==buf || (*e && *e != '\n') )
2640                 return -EINVAL;
2641         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2642                 return -ENOENT;
2643         mddev->major_version = major;
2644         mddev->minor_version = minor;
2645         mddev->persistent = 1;
2646         return len;
2647 }
2648
2649 static struct md_sysfs_entry md_metadata =
2650 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
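
/* Accepted values by example: "0.90" selects the original superblock
 * format, "1.2" a version-1 superblock, and "none" marks the metadata as
 * externally managed.  The version can only be changed while the array
 * has no member devices.
 */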
2651
2652 static ssize_t
2653 action_show(mddev_t *mddev, char *page)
2654 {
2655         char *type = "idle";
2656         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2657             test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2658                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2659                         type = "reshape";
2660                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2661                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2662                                 type = "resync";
2663                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2664                                 type = "check";
2665                         else
2666                                 type = "repair";
2667                 } else
2668                         type = "recover";
2669         }
2670         return sprintf(page, "%s\n", type);
2671 }
2672
2673 static ssize_t
2674 action_store(mddev_t *mddev, const char *page, size_t len)
2675 {
2676         if (!mddev->pers || !mddev->pers->sync_request)
2677                 return -EINVAL;
2678
2679         if (cmd_match(page, "idle")) {
2680                 if (mddev->sync_thread) {
2681                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2682                         md_unregister_thread(mddev->sync_thread);
2683                         mddev->sync_thread = NULL;
2684                         mddev->recovery = 0;
2685                 }
2686         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2687                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2688                 return -EBUSY;
2689         else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2690                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2691         else if (cmd_match(page, "reshape")) {
2692                 int err;
2693                 if (mddev->pers->start_reshape == NULL)
2694                         return -EINVAL;
2695                 err = mddev->pers->start_reshape(mddev);
2696                 if (err)
2697                         return err;
2698         } else {
2699                 if (cmd_match(page, "check"))
2700                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2701                 else if (!cmd_match(page, "repair"))
2702                         return -EINVAL;
2703                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2704                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2705         }
2706         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2707         md_wakeup_thread(mddev->thread);
2708         return len;
2709 }
2710
2711 static ssize_t
2712 mismatch_cnt_show(mddev_t *mddev, char *page)
2713 {
2714         return sprintf(page, "%llu\n",
2715                        (unsigned long long) mddev->resync_mismatches);
2716 }
2717
2718 static struct md_sysfs_entry md_scan_mode =
2719 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
2720
2721
2722 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
2723
2724 static ssize_t
2725 sync_min_show(mddev_t *mddev, char *page)
2726 {
2727         return sprintf(page, "%d (%s)\n", speed_min(mddev),
2728                        mddev->sync_speed_min ? "local": "system");
2729 }
2730
2731 static ssize_t
2732 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2733 {
2734         int min;
2735         char *e;
2736         if (strncmp(buf, "system", 6)==0) {
2737                 mddev->sync_speed_min = 0;
2738                 return len;
2739         }
2740         min = simple_strtoul(buf, &e, 10);
2741         if (buf == e || (*e && *e != '\n') || min <= 0)
2742                 return -EINVAL;
2743         mddev->sync_speed_min = min;
2744         return len;
2745 }
2746
2747 static struct md_sysfs_entry md_sync_min =
2748 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2749
2750 static ssize_t
2751 sync_max_show(mddev_t *mddev, char *page)
2752 {
2753         return sprintf(page, "%d (%s)\n", speed_max(mddev),
2754                        mddev->sync_speed_max ? "local": "system");
2755 }
2756
2757 static ssize_t
2758 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2759 {
2760         int max;
2761         char *e;
2762         if (strncmp(buf, "system", 6)==0) {
2763                 mddev->sync_speed_max = 0;
2764                 return len;
2765         }
2766         max = simple_strtoul(buf, &e, 10);
2767         if (buf == e || (*e && *e != '\n') || max <= 0)
2768                 return -EINVAL;
2769         mddev->sync_speed_max = max;
2770         return len;
2771 }
2772
2773 static struct md_sysfs_entry md_sync_max =
2774 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
2775
2776
2777 static ssize_t
2778 sync_speed_show(mddev_t *mddev, char *page)
2779 {
2780         unsigned long resync, dt, db;
2781         resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
2782         dt = ((jiffies - mddev->resync_mark) / HZ);
2783         if (!dt) dt++;
2784         db = resync - (mddev->resync_mark_cnt);
2785         return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2786 }
2787
2788 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
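
/* Worked numbers for the calculation above: if db = 51200 sectors were
 * completed over dt = 10 seconds, the file reports 51200 / 10 / 2 =
 * 2560 K/sec; the final division by two converts 512-byte sectors into
 * kilobytes.
 */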
2789
2790 static ssize_t
2791 sync_completed_show(mddev_t *mddev, char *page)
2792 {
2793         unsigned long max_blocks, resync;
2794
2795         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2796                 max_blocks = mddev->resync_max_sectors;
2797         else
2798                 max_blocks = mddev->size << 1;
2799
2800         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2801         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2802 }
2803
2804 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
2805
2806 static ssize_t
2807 suspend_lo_show(mddev_t *mddev, char *page)
2808 {
2809         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2810 }
2811
2812 static ssize_t
2813 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2814 {
2815         char *e;
2816         unsigned long long new = simple_strtoull(buf, &e, 10);
2817
2818         if (mddev->pers->quiesce == NULL)
2819                 return -EINVAL;
2820         if (buf == e || (*e && *e != '\n'))
2821                 return -EINVAL;
2822         if (new >= mddev->suspend_hi ||
2823             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2824                 mddev->suspend_lo = new;
2825                 mddev->pers->quiesce(mddev, 2);
2826                 return len;
2827         } else
2828                 return -EINVAL;
2829 }
2830 static struct md_sysfs_entry md_suspend_lo =
2831 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2832
2833
2834 static ssize_t
2835 suspend_hi_show(mddev_t *mddev, char *page)
2836 {
2837         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2838 }
2839
2840 static ssize_t
2841 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2842 {
2843         char *e;
2844         unsigned long long new = simple_strtoull(buf, &e, 10);
2845
2846         if (mddev->pers->quiesce == NULL)
2847                 return -EINVAL;
2848         if (buf == e || (*e && *e != '\n'))
2849                 return -EINVAL;
2850         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2851             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2852                 mddev->suspend_hi = new;
2853                 mddev->pers->quiesce(mddev, 1);
2854                 mddev->pers->quiesce(mddev, 0);
2855                 return len;
2856         } else
2857                 return -EINVAL;
2858 }
2859 static struct md_sysfs_entry md_suspend_hi =
2860 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2861
2862
2863 static struct attribute *md_default_attrs[] = {
2864         &md_level.attr,
2865         &md_layout.attr,
2866         &md_raid_disks.attr,
2867         &md_chunk_size.attr,
2868         &md_size.attr,
2869         &md_resync_start.attr,
2870         &md_metadata.attr,
2871         &md_new_device.attr,
2872         &md_safe_delay.attr,
2873         &md_array_state.attr,
2874         NULL,
2875 };
2876
2877 static struct attribute *md_redundancy_attrs[] = {
2878         &md_scan_mode.attr,
2879         &md_mismatches.attr,
2880         &md_sync_min.attr,
2881         &md_sync_max.attr,
2882         &md_sync_speed.attr,
2883         &md_sync_completed.attr,
2884         &md_suspend_lo.attr,
2885         &md_suspend_hi.attr,
2886         &md_bitmap.attr,
2887         NULL,
2888 };
2889 static struct attribute_group md_redundancy_group = {
2890         .name = NULL,
2891         .attrs = md_redundancy_attrs,
2892 };
2893
2894
2895 static ssize_t
2896 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2897 {
2898         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2899         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2900         ssize_t rv;
2901
2902         if (!entry->show)
2903                 return -EIO;
2904         rv = mddev_lock(mddev);
2905         if (!rv) {
2906                 rv = entry->show(mddev, page);
2907                 mddev_unlock(mddev);
2908         }
2909         return rv;
2910 }
2911
2912 static ssize_t
2913 md_attr_store(struct kobject *kobj, struct attribute *attr,
2914               const char *page, size_t length)
2915 {
2916         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2917         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2918         ssize_t rv;
2919
2920         if (!entry->store)
2921                 return -EIO;
2922         if (!capable(CAP_SYS_ADMIN))
2923                 return -EACCES;
2924         rv = mddev_lock(mddev);
2925         if (!rv) {
2926                 rv = entry->store(mddev, page, length);
2927                 mddev_unlock(mddev);
2928         }
2929         return rv;
2930 }
2931
2932 static void md_free(struct kobject *ko)
2933 {
2934         mddev_t *mddev = container_of(ko, mddev_t, kobj);
2935         kfree(mddev);
2936 }
2937
2938 static struct sysfs_ops md_sysfs_ops = {
2939         .show   = md_attr_show,
2940         .store  = md_attr_store,
2941 };
2942 static struct kobj_type md_ktype = {
2943         .release        = md_free,
2944         .sysfs_ops      = &md_sysfs_ops,
2945         .default_attrs  = md_default_attrs,
2946 };
2947
2948 int mdp_major = 0;
2949
2950 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2951 {
2952         static DEFINE_MUTEX(disks_mutex);
2953         mddev_t *mddev = mddev_find(dev);
2954         struct gendisk *disk;
2955         int partitioned = (MAJOR(dev) != MD_MAJOR);
2956         int shift = partitioned ? MdpMinorShift : 0;
2957         int unit = MINOR(dev) >> shift;
2958
2959         if (!mddev)
2960                 return NULL;
2961
2962         mutex_lock(&disks_mutex);
2963         if (mddev->gendisk) {
2964                 mutex_unlock(&disks_mutex);
2965                 mddev_put(mddev);
2966                 return NULL;
2967         }
2968         disk = alloc_disk(1 << shift);
2969         if (!disk) {
2970                 mutex_unlock(&disks_mutex);
2971                 mddev_put(mddev);
2972                 return NULL;
2973         }
2974         disk->major = MAJOR(dev);
2975         disk->first_minor = unit << shift;
2976         if (partitioned)
2977                 sprintf(disk->disk_name, "md_d%d", unit);
2978         else
2979                 sprintf(disk->disk_name, "md%d", unit);
2980         disk->fops = &md_fops;
2981         disk->private_data = mddev;
2982         disk->queue = mddev->queue;
2983         add_disk(disk);
2984         mddev->gendisk = disk;
2985         mutex_unlock(&disks_mutex);
2986         mddev->kobj.parent = &disk->kobj;
2987         mddev->kobj.k_name = NULL;
2988         snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2989         mddev->kobj.ktype = &md_ktype;
2990         if (kobject_register(&mddev->kobj))
2991                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
2992                        disk->disk_name);
2993         return NULL;
2994 }
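
/* Naming sketch: a minor on MD_MAJOR maps to one whole-device node
 * ("md0", "md1", ...), while the alternate mdp major reserves
 * 1 << MdpMinorShift minors per array, so "md_d1" starts at minor 64 and
 * can expose partitions such as md_d1p1.
 */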
2995
2996 static void md_safemode_timeout(unsigned long data)
2997 {
2998         mddev_t *mddev = (mddev_t *) data;
2999
3000         mddev->safemode = 1;
3001         md_wakeup_thread(mddev->thread);
3002 }
3003
3004 static int start_dirty_degraded;
3005
3006 static int do_md_run(mddev_t * mddev)
3007 {
3008         int err;
3009         int chunk_size;
3010         struct list_head *tmp;
3011         mdk_rdev_t *rdev;
3012         struct gendisk *disk;
3013         struct mdk_personality *pers;
3014         char b[BDEVNAME_SIZE];
3015
3016         if (list_empty(&mddev->disks))
3017                 /* cannot run an array with no devices.. */
3018                 return -EINVAL;
3019
3020         if (mddev->pers)
3021                 return -EBUSY;
3022
3023         /*
3024          * Analyze all RAID superblock(s)
3025          */
3026         if (!mddev->raid_disks)
3027                 analyze_sbs(mddev);
3028
3029         chunk_size = mddev->chunk_size;
3030
3031         if (chunk_size) {
3032                 if (chunk_size > MAX_CHUNK_SIZE) {
3033                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
3034                                 chunk_size, MAX_CHUNK_SIZE);
3035                         return -EINVAL;
3036                 }
3037                 /*
3038                  * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
3039                  */
3040                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3041                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3042                         return -EINVAL;
3043                 }
3044                 if (chunk_size < PAGE_SIZE) {
3045                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3046                                 chunk_size, PAGE_SIZE);
3047                         return -EINVAL;
3048                 }
3049
3050                 /* devices must have minimum size of one chunk */
3051                 ITERATE_RDEV(mddev,rdev,tmp) {
3052                         if (test_bit(Faulty, &rdev->flags))
3053                                 continue;
3054                         if (rdev->size < chunk_size / 1024) {
3055                                 printk(KERN_WARNING
3056                                         "md: Dev %s smaller than chunk_size:"
3057                                         " %lluk < %dk\n",
3058                                         bdevname(rdev->bdev,b),
3059                                         (unsigned long long)rdev->size,
3060                                         chunk_size / 1024);
3061                                 return -EINVAL;
3062                         }
3063                 }
3064         }
3065
3066 #ifdef CONFIG_KMOD
3067         if (mddev->level != LEVEL_NONE)
3068                 request_module("md-level-%d", mddev->level);
3069         else if (mddev->clevel[0])
3070                 request_module("md-%s", mddev->clevel);
3071 #endif
3072
3073         /*
3074          * Drop all container device buffers, from now on
3075          * the only valid external interface is through the md
3076          * device.
3077          * Also find largest hardsector size
3078          */
3079         ITERATE_RDEV(mddev,rdev,tmp) {
3080                 if (test_bit(Faulty, &rdev->flags))
3081                         continue;
3082                 sync_blockdev(rdev->bdev);
3083                 invalidate_bdev(rdev->bdev);
3084         }
3085
3086         md_probe(mddev->unit, NULL, NULL);
3087         disk = mddev->gendisk;
3088         if (!disk)
3089                 return -ENOMEM;
3090
3091         spin_lock(&pers_lock);
3092         pers = find_pers(mddev->level, mddev->clevel);
3093         if (!pers || !try_module_get(pers->owner)) {
3094                 spin_unlock(&pers_lock);
3095                 if (mddev->level != LEVEL_NONE)
3096                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3097                                mddev->level);
3098                 else
3099                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3100                                mddev->clevel);
3101                 return -EINVAL;
3102         }
3103         mddev->pers = pers;
3104         spin_unlock(&pers_lock);
3105         mddev->level = pers->level;
3106         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3107
3108         if (mddev->reshape_position != MaxSector &&
3109             pers->start_reshape == NULL) {
3110                 /* This personality cannot handle reshaping... */
3111                 mddev->pers = NULL;
3112                 module_put(pers->owner);
3113                 return -EINVAL;
3114         }
3115
3116         if (pers->sync_request) {
3117                 /* Warn if this is a potentially silly
3118                  * configuration.
3119                  */
3120                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3121                 mdk_rdev_t *rdev2;
3122                 struct list_head *tmp2;
3123                 int warned = 0;
3124                 ITERATE_RDEV(mddev, rdev, tmp) {
3125                         ITERATE_RDEV(mddev, rdev2, tmp2) {
3126                                 if (rdev < rdev2 &&
3127                                     rdev->bdev->bd_contains ==
3128                                     rdev2->bdev->bd_contains) {
3129                                         printk(KERN_WARNING
3130                                                "%s: WARNING: %s appears to be"
3131                                                " on the same physical disk as"
3132                                                " %s.\n",
3133                                                mdname(mddev),
3134                                                bdevname(rdev->bdev,b),
3135                                                bdevname(rdev2->bdev,b2));
3136                                         warned = 1;
3137                                 }
3138                         }
3139                 }
3140                 if (warned)
3141                         printk(KERN_WARNING
3142                                "True protection against single-disk"
3143                                " failure might be compromised.\n");
3144         }
3145
3146         mddev->recovery = 0;
3147         mddev->resync_max_sectors = mddev->size << 1; /* may be overridden by personality */
3148         mddev->barriers_work = 1;
3149         mddev->ok_start_degraded = start_dirty_degraded;
3150
3151         if (start_readonly)
3152                 mddev->ro = 2; /* read-only, but switch on first write */
3153
3154         err = mddev->pers->run(mddev);
3155         if (!err && mddev->pers->sync_request) {
3156                 err = bitmap_create(mddev);
3157                 if (err) {
3158                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3159                                mdname(mddev), err);
3160                         mddev->pers->stop(mddev);
3161                 }
3162         }
3163         if (err) {
3164                 printk(KERN_ERR "md: pers->run() failed ...\n");
3165                 module_put(mddev->pers->owner);
3166                 mddev->pers = NULL;
3167                 bitmap_destroy(mddev);
3168                 return err;
3169         }
3170         if (mddev->pers->sync_request) {
3171                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3172                         printk(KERN_WARNING
3173                                "md: cannot register extra attributes for %s\n",
3174                                mdname(mddev));
3175         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
3176                 mddev->ro = 0;
3177
3178         atomic_set(&mddev->writes_pending,0);
3179         mddev->safemode = 0;
3180         mddev->safemode_timer.function = md_safemode_timeout;
3181         mddev->safemode_timer.data = (unsigned long) mddev;
3182         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3183         mddev->in_sync = 1;
3184
3185         ITERATE_RDEV(mddev,rdev,tmp)
3186                 if (rdev->raid_disk >= 0) {
3187                         char nm[20];
3188                         sprintf(nm, "rd%d", rdev->raid_disk);
3189                         if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3190                                 printk("md: cannot register %s for %s\n",
3191                                        nm, mdname(mddev));
3192                 }
3193         
3194         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3195         
3196         if (mddev->flags)
3197                 md_update_sb(mddev, 0);
3198
3199         set_capacity(disk, mddev->array_size<<1);
3200
3201         /* If we call blk_queue_make_request here, it will
3202          * re-initialise max_sectors etc which may have been
3203          * refined inside -> run.  So just set the bits we need to set.
3204          * Most initialisation happened when we called
3205          * blk_queue_make_request(..., md_fail_request)
3206          * earlier.
3207          */
3208         mddev->queue->queuedata = mddev;
3209         mddev->queue->make_request_fn = mddev->pers->make_request;
3210
3211         /* If there is a partially-recovered drive we need to
3212          * start recovery here.  If we leave it to md_check_recovery,
3213          * it will remove the drives and not do the right thing
3214          */
3215         if (mddev->degraded && !mddev->sync_thread) {
3216                 struct list_head *rtmp;
3217                 int spares = 0;
3218                 ITERATE_RDEV(mddev,rdev,rtmp)
3219                         if (rdev->raid_disk >= 0 &&
3220                             !test_bit(In_sync, &rdev->flags) &&
3221                             !test_bit(Faulty, &rdev->flags))
3222                                 /* complete an interrupted recovery */
3223                                 spares++;
3224                 if (spares && mddev->pers->sync_request) {
3225                         mddev->recovery = 0;
3226                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3227                         mddev->sync_thread = md_register_thread(md_do_sync,
3228                                                                 mddev,
3229                                                                 "%s_resync");
3230                         if (!mddev->sync_thread) {
3231                                 printk(KERN_ERR "%s: could not start resync"
3232                                        " thread...\n",
3233                                        mdname(mddev));
3234                                 /* leave the spares where they are, it shouldn't hurt */
3235                                 mddev->recovery = 0;
3236                         }
3237                 }
3238         }
3239         md_wakeup_thread(mddev->thread);
3240         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3241
3242         mddev->changed = 1;
3243         md_new_event(mddev);
3244         kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
3245         return 0;
3246 }
3247
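/*
 * restart_array() backs the RESTART_ARRAY_RW ioctl: it switches a running,
 * read-only array back to read-write and wakes the md and resync threads
 * so any pending recovery can proceed.  It fails with -ENXIO if the array
 * has no devices and -EBUSY if it is already writable.
 */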
3248 static int restart_array(mddev_t *mddev)
3249 {
3250         struct gendisk *disk = mddev->gendisk;
3251         int err;
3252
3253         /*
3254          * Complain if it has no devices
3255          */
3256         err = -ENXIO;
3257         if (list_empty(&mddev->disks))
3258                 goto out;
3259
3260         if (mddev->pers) {
3261                 err = -EBUSY;
3262                 if (!mddev->ro)
3263                         goto out;
3264
3265                 mddev->safemode = 0;
3266                 mddev->ro = 0;
3267                 set_disk_ro(disk, 0);
3268
3269                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3270                         mdname(mddev));
3271                 /*
3272                  * Kick recovery or resync if necessary
3273                  */
3274                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3275                 md_wakeup_thread(mddev->thread);
3276                 md_wakeup_thread(mddev->sync_thread);
3277                 err = 0;
3278         } else
3279                 err = -EINVAL;
3280
3281 out:
3282         return err;
3283 }
3284
3285 /* similar to deny_write_access, but accounts for our holding a reference
3286  * to the file ourselves */
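/* A negative i_writecount makes get_write_access() fail with -ETXTBSY,
 * so later attempts to open the bitmap file for writing are refused. */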
3287 static int deny_bitmap_write_access(struct file * file)
3288 {
3289         struct inode *inode = file->f_mapping->host;
3290
3291         spin_lock(&inode->i_lock);
3292         if (atomic_read(&inode->i_writecount) > 1) {
3293                 spin_unlock(&inode->i_lock);
3294                 return -ETXTBSY;
3295         }
3296         atomic_set(&inode->i_writecount, -1);
3297         spin_unlock(&inode->i_lock);
3298
3299         return 0;
3300 }
3301
3302 static void restore_bitmap_write_access(struct file *file)
3303 {
3304         struct inode *inode = file->f_mapping->host;
3305
3306         spin_lock(&inode->i_lock);
3307         atomic_set(&inode->i_writecount, 1);
3308         spin_unlock(&inode->i_lock);
3309 }
3310
3311 /* mode:
3312  *   0 - completely stop and disassemble array
3313  *   1 - switch to readonly
3314  *   2 - stop but do not disassemble array
3315  */
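/* Mode 0 backs the STOP_ARRAY ioctl, mode 1 backs STOP_ARRAY_RO (see md_ioctl()). */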
3316 static int do_md_stop(mddev_t * mddev, int mode)
3317 {
3318         int err = 0;
3319         struct gendisk *disk = mddev->gendisk;
3320
3321         if (mddev->pers) {
3322                 if (atomic_read(&mddev->active)>2) {
3323                         printk("md: %s still in use.\n",mdname(mddev));
3324                         return -EBUSY;
3325                 }
3326
3327                 if (mddev->sync_thread) {
3328                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3329                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3330                         md_unregister_thread(mddev->sync_thread);
3331                         mddev->sync_thread = NULL;
3332                 }
3333
3334                 del_timer_sync(&mddev->safemode_timer);
3335
3336                 invalidate_partition(disk, 0);
3337
3338                 switch(mode) {
3339                 case 1: /* readonly */
3340                         err  = -ENXIO;
3341                         if (mddev->ro==1)
3342                                 goto out;
3343                         mddev->ro = 1;
3344                         break;
3345                 case 0: /* disassemble */
3346                 case 2: /* stop */
3347                         bitmap_flush(mddev);
3348                         md_super_wait(mddev);
3349                         if (mddev->ro)
3350                                 set_disk_ro(disk, 0);
3351                         blk_queue_make_request(mddev->queue, md_fail_request);
3352                         mddev->pers->stop(mddev);
3353                         mddev->queue->merge_bvec_fn = NULL;
3354                         mddev->queue->unplug_fn = NULL;
3355                         mddev->queue->issue_flush_fn = NULL;
3356                         mddev->queue->backing_dev_info.congested_fn = NULL;
3357                         if (mddev->pers->sync_request)
3358                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3359
3360                         module_put(mddev->pers->owner);
3361                         mddev->pers = NULL;
3362
3363                         set_capacity(disk, 0);
3364                         mddev->changed = 1;
3365
3366                         if (mddev->ro)
3367                                 mddev->ro = 0;
3368                 }
3369                 if (!mddev->in_sync || mddev->flags) {
3370                         /* mark array as shut down cleanly */
3371                         mddev->in_sync = 1;
3372                         md_update_sb(mddev, 1);
3373                 }
3374                 if (mode == 1)
3375                         set_disk_ro(disk, 1);
3376                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3377         }
3378
3379         /*
3380          * Free resources if final stop
3381          */
3382         if (mode == 0) {
3383                 mdk_rdev_t *rdev;
3384                 struct list_head *tmp;
3385
3386                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3387
3388                 bitmap_destroy(mddev);
3389                 if (mddev->bitmap_file) {
3390                         restore_bitmap_write_access(mddev->bitmap_file);
3391                         fput(mddev->bitmap_file);
3392                         mddev->bitmap_file = NULL;
3393                 }
3394                 mddev->bitmap_offset = 0;
3395
3396                 ITERATE_RDEV(mddev,rdev,tmp)
3397                         if (rdev->raid_disk >= 0) {
3398                                 char nm[20];
3399                                 sprintf(nm, "rd%d", rdev->raid_disk);
3400                                 sysfs_remove_link(&mddev->kobj, nm);
3401                         }
3402
3403                 /* make sure all delayed_delete calls have finished */
3404                 flush_scheduled_work();
3405
3406                 export_array(mddev);
3407
3408                 mddev->array_size = 0;
3409                 mddev->size = 0;
3410                 mddev->raid_disks = 0;
3411                 mddev->recovery_cp = 0;
3412
3413         } else if (mddev->pers)
3414                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3415                         mdname(mddev));
3416         err = 0;
3417         md_new_event(mddev);
3418 out:
3419         return err;
3420 }
3421
3422 #ifndef MODULE
3423 static void autorun_array(mddev_t *mddev)
3424 {
3425         mdk_rdev_t *rdev;
3426         struct list_head *tmp;
3427         int err;
3428
3429         if (list_empty(&mddev->disks))
3430                 return;
3431
3432         printk(KERN_INFO "md: running: ");
3433
3434         ITERATE_RDEV(mddev,rdev,tmp) {
3435                 char b[BDEVNAME_SIZE];
3436                 printk("<%s>", bdevname(rdev->bdev,b));
3437         }
3438         printk("\n");
3439
3440         err = do_md_run (mddev);
3441         if (err) {
3442                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3443                 do_md_stop (mddev, 0);
3444         }
3445 }
3446
3447 /*
3448  * let's try to run arrays based on all disks that have arrived
3449  * until now. (those are in pending_raid_disks)
3450  *
3451  * the method: pick the first pending disk, collect all disks with
3452  * the same UUID, remove all from the pending list and put them into
3453  * the 'same_array' list. Then order this list based on superblock
3454  * update time (freshest comes first), kick out 'old' disks and
3455  * compare superblocks. If everything's fine then run it.
3456  *
3457  * If "unit" is allocated, then bump its reference count
3458  */
3459 static void autorun_devices(int part)
3460 {
3461         struct list_head *tmp;
3462         mdk_rdev_t *rdev0, *rdev;
3463         mddev_t *mddev;
3464         char b[BDEVNAME_SIZE];
3465
3466         printk(KERN_INFO "md: autorun ...\n");
3467         while (!list_empty(&pending_raid_disks)) {
3468                 int unit;
3469                 dev_t dev;
3470                 LIST_HEAD(candidates);
3471                 rdev0 = list_entry(pending_raid_disks.next,
3472                                          mdk_rdev_t, same_set);
3473
3474                 printk(KERN_INFO "md: considering %s ...\n",
3475                         bdevname(rdev0->bdev,b));
3476                 INIT_LIST_HEAD(&candidates);
3477                 ITERATE_RDEV_PENDING(rdev,tmp)
3478                         if (super_90_load(rdev, rdev0, 0) >= 0) {
3479                                 printk(KERN_INFO "md:  adding %s ...\n",
3480                                         bdevname(rdev->bdev,b));
3481                                 list_move(&rdev->same_set, &candidates);
3482                         }
3483                 /*
3484                  * now we have a set of devices, with all of them having
3485                  * mostly sane superblocks. It's time to allocate the
3486                  * mddev.
3487                  */
3488                 if (part) {
3489                         dev = MKDEV(mdp_major,
3490                                     rdev0->preferred_minor << MdpMinorShift);
3491                         unit = MINOR(dev) >> MdpMinorShift;
3492                 } else {
3493                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3494                         unit = MINOR(dev);
3495                 }
3496                 if (rdev0->preferred_minor != unit) {
3497                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3498                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3499                         break;
3500                 }
3501
3502                 md_probe(dev, NULL, NULL);
3503                 mddev = mddev_find(dev);
3504                 if (!mddev) {
3505                         printk(KERN_ERR 
3506                                 "md: cannot allocate memory for md drive.\n");
3507                         break;
3508                 }
3509                 if (mddev_lock(mddev)) 
3510                         printk(KERN_WARNING "md: %s locked, cannot run\n",
3511                                mdname(mddev));
3512                 else if (mddev->raid_disks || mddev->major_version
3513                          || !list_empty(&mddev->disks)) {
3514                         printk(KERN_WARNING 
3515                                 "md: %s already running, cannot run %s\n",
3516                                 mdname(mddev), bdevname(rdev0->bdev,b));
3517                         mddev_unlock(mddev);
3518                 } else {
3519                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
3520                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3521                                 list_del_init(&rdev->same_set);
3522                                 if (bind_rdev_to_array(rdev, mddev))
3523                                         export_rdev(rdev);
3524                         }
3525                         autorun_array(mddev);
3526                         mddev_unlock(mddev);
3527                 }
3528                 /* on success, candidates will be empty, on error
3529                  * they won't...
3530                  */
3531                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3532                         export_rdev(rdev);
3533                 mddev_put(mddev);
3534         }
3535         printk(KERN_INFO "md: ... autorun DONE.\n");
3536 }
3537 #endif /* !MODULE */
3538
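/*
 * The helpers below back the md ioctl interface (dispatched from md_ioctl()
 * further down).  For illustration only -- not part of the kernel source --
 * a userspace caller would use them roughly like this:
 *
 *	mdu_version_t ver;
 *	int fd = open("/dev/md0", O_RDONLY);	// hypothetical device node
 *	ioctl(fd, RAID_VERSION, &ver);		// handled by get_version()
 *	// ver.major/minor/patchlevel now hold the MD_*_VERSION constants
 */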
3539 static int get_version(void __user * arg)
3540 {
3541         mdu_version_t ver;
3542
3543         ver.major = MD_MAJOR_VERSION;
3544         ver.minor = MD_MINOR_VERSION;
3545         ver.patchlevel = MD_PATCHLEVEL_VERSION;
3546
3547         if (copy_to_user(arg, &ver, sizeof(ver)))
3548                 return -EFAULT;
3549
3550         return 0;
3551 }
3552
3553 static int get_array_info(mddev_t * mddev, void __user * arg)
3554 {
3555         mdu_array_info_t info;
3556         int nr,working,active,failed,spare;
3557         mdk_rdev_t *rdev;
3558         struct list_head *tmp;
3559
3560         nr=working=active=failed=spare=0;
3561         ITERATE_RDEV(mddev,rdev,tmp) {
3562                 nr++;
3563                 if (test_bit(Faulty, &rdev->flags))
3564                         failed++;
3565                 else {
3566                         working++;
3567                         if (test_bit(In_sync, &rdev->flags))
3568                                 active++;       
3569                         else
3570                                 spare++;
3571                 }
3572         }
3573
3574         info.major_version = mddev->major_version;
3575         info.minor_version = mddev->minor_version;
3576         info.patch_version = MD_PATCHLEVEL_VERSION;
3577         info.ctime         = mddev->ctime;
3578         info.level         = mddev->level;
3579         info.size          = mddev->size;
3580         if (info.size != mddev->size) /* overflow */
3581                 info.size = -1;
3582         info.nr_disks      = nr;
3583         info.raid_disks    = mddev->raid_disks;
3584         info.md_minor      = mddev->md_minor;
3585         info.not_persistent= !mddev->persistent;
3586
3587         info.utime         = mddev->utime;
3588         info.state         = 0;
3589         if (mddev->in_sync)
3590                 info.state = (1<<MD_SB_CLEAN);
3591         if (mddev->bitmap && mddev->bitmap_offset)
3592                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
3593         info.active_disks  = active;
3594         info.working_disks = working;
3595         info.failed_disks  = failed;
3596         info.spare_disks   = spare;
3597
3598         info.layout        = mddev->layout;
3599         info.chunk_size    = mddev->chunk_size;
3600
3601         if (copy_to_user(arg, &info, sizeof(info)))
3602                 return -EFAULT;
3603
3604         return 0;
3605 }
3606
3607 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3608 {
3609         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3610         char *ptr, *buf = NULL;
3611         int err = -ENOMEM;
3612
3613         md_allow_write(mddev);
3614
3615         file = kmalloc(sizeof(*file), GFP_KERNEL);
3616         if (!file)
3617                 goto out;
3618
3619         /* bitmap disabled, zero the first byte and copy out */
3620         if (!mddev->bitmap || !mddev->bitmap->file) {
3621                 file->pathname[0] = '\0';
3622                 goto copy_out;
3623         }
3624
3625         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3626         if (!buf)
3627                 goto out;
3628
3629         ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3630         if (!ptr)
3631                 goto out;
3632
3633         strcpy(file->pathname, ptr);
3634
3635 copy_out:
3636         err = 0;
3637         if (copy_to_user(arg, file, sizeof(*file)))
3638                 err = -EFAULT;
3639 out:
3640         kfree(buf);
3641         kfree(file);
3642         return err;
3643 }
3644
3645 static int get_disk_info(mddev_t * mddev, void __user * arg)
3646 {
3647         mdu_disk_info_t info;
3648         unsigned int nr;
3649         mdk_rdev_t *rdev;
3650
3651         if (copy_from_user(&info, arg, sizeof(info)))
3652                 return -EFAULT;
3653
3654         nr = info.number;
3655
3656         rdev = find_rdev_nr(mddev, nr);
3657         if (rdev) {
3658                 info.major = MAJOR(rdev->bdev->bd_dev);
3659                 info.minor = MINOR(rdev->bdev->bd_dev);
3660                 info.raid_disk = rdev->raid_disk;
3661                 info.state = 0;
3662                 if (test_bit(Faulty, &rdev->flags))
3663                         info.state |= (1<<MD_DISK_FAULTY);
3664                 else if (test_bit(In_sync, &rdev->flags)) {
3665                         info.state |= (1<<MD_DISK_ACTIVE);
3666                         info.state |= (1<<MD_DISK_SYNC);
3667                 }
3668                 if (test_bit(WriteMostly, &rdev->flags))
3669                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
3670         } else {
3671                 info.major = info.minor = 0;
3672                 info.raid_disk = -1;
3673                 info.state = (1<<MD_DISK_REMOVED);
3674         }
3675
3676         if (copy_to_user(arg, &info, sizeof(info)))
3677                 return -EFAULT;
3678
3679         return 0;
3680 }
3681
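/*
 * add_new_disk() serves the ADD_NEW_DISK ioctl in three situations:
 *   - array not yet assembled (raid_disks == 0): import a device that
 *     carries a superblock and check its UUID against the others
 *   - array running (mddev->pers set): hot-add a spare, validating it
 *     against the array's superblock format
 *   - otherwise: a new version-0.90 array is being built, so accept a
 *     device without a superblock and take its parameters from the ioctl
 */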
3682 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3683 {
3684         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3685         mdk_rdev_t *rdev;
3686         dev_t dev = MKDEV(info->major,info->minor);
3687
3688         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3689                 return -EOVERFLOW;
3690
3691         if (!mddev->raid_disks) {
3692                 int err;
3693                 /* expecting a device which has a superblock */
3694                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3695                 if (IS_ERR(rdev)) {
3696                         printk(KERN_WARNING 
3697                                 "md: md_import_device returned %ld\n",
3698                                 PTR_ERR(rdev));
3699                         return PTR_ERR(rdev);
3700                 }
3701                 if (!list_empty(&mddev->disks)) {
3702                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3703                                                         mdk_rdev_t, same_set);
3704                         int err = super_types[mddev->major_version]
3705                                 .load_super(rdev, rdev0, mddev->minor_version);
3706                         if (err < 0) {
3707                                 printk(KERN_WARNING 
3708                                         "md: %s has different UUID to %s\n",
3709                                         bdevname(rdev->bdev,b), 
3710                                         bdevname(rdev0->bdev,b2));
3711                                 export_rdev(rdev);
3712                                 return -EINVAL;
3713                         }
3714                 }
3715                 err = bind_rdev_to_array(rdev, mddev);
3716                 if (err)
3717                         export_rdev(rdev);
3718                 return err;
3719         }
3720
3721         /*
3722          * add_new_disk can be used once the array is assembled
3723          * to add "hot spares".  They must already have a superblock
3724          * written
3725          */
3726         if (mddev->pers) {
3727                 int err;
3728                 if (!mddev->pers->hot_add_disk) {
3729                         printk(KERN_WARNING 
3730                                 "%s: personality does not support diskops!\n",
3731                                mdname(mddev));
3732                         return -EINVAL;
3733                 }
3734                 if (mddev->persistent)
3735                         rdev = md_import_device(dev, mddev->major_version,
3736                                                 mddev->minor_version);
3737                 else
3738                         rdev = md_import_device(dev, -1, -1);
3739                 if (IS_ERR(rdev)) {
3740                         printk(KERN_WARNING 
3741                                 "md: md_import_device returned %ld\n",
3742                                 PTR_ERR(rdev));
3743                         return PTR_ERR(rdev);
3744                 }
3745                 /* set save_raid_disk if appropriate */
3746                 if (!mddev->persistent) {
3747                         if (info->state & (1<<MD_DISK_SYNC)  &&
3748                             info->raid_disk < mddev->raid_disks)
3749                                 rdev->raid_disk = info->raid_disk;
3750                         else
3751                                 rdev->raid_disk = -1;
3752                 } else
3753                         super_types[mddev->major_version].
3754                                 validate_super(mddev, rdev);
3755                 rdev->saved_raid_disk = rdev->raid_disk;
3756
3757                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3758                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3759                         set_bit(WriteMostly, &rdev->flags);
3760
3761                 rdev->raid_disk = -1;
3762                 err = bind_rdev_to_array(rdev, mddev);
3763                 if (!err && !mddev->pers->hot_remove_disk) {
3764                         /* If there is hot_add_disk but no hot_remove_disk
3765                          * then added disks are for geometry changes,
3766                          * and should be added immediately.
3767                          */
3768                         super_types[mddev->major_version].
3769                                 validate_super(mddev, rdev);
3770                         err = mddev->pers->hot_add_disk(mddev, rdev);
3771                         if (err)
3772                                 unbind_rdev_from_array(rdev);
3773                 }
3774                 if (err)
3775                         export_rdev(rdev);
3776
3777                 md_update_sb(mddev, 1);
3778                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3779                 md_wakeup_thread(mddev->thread);
3780                 return err;
3781         }
3782
3783         /* otherwise, add_new_disk is only allowed
3784          * for major_version==0 superblocks
3785          */
3786         if (mddev->major_version != 0) {
3787                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3788                        mdname(mddev));
3789                 return -EINVAL;
3790         }
3791
3792         if (!(info->state & (1<<MD_DISK_FAULTY))) {
3793                 int err;
3794                 rdev = md_import_device (dev, -1, 0);
3795                 if (IS_ERR(rdev)) {
3796                         printk(KERN_WARNING 
3797                                 "md: error, md_import_device() returned %ld\n",
3798                                 PTR_ERR(rdev));
3799                         return PTR_ERR(rdev);
3800                 }
3801                 rdev->desc_nr = info->number;
3802                 if (info->raid_disk < mddev->raid_disks)
3803                         rdev->raid_disk = info->raid_disk;
3804                 else
3805                         rdev->raid_disk = -1;
3806
3807                 rdev->flags = 0;
3808
3809                 if (rdev->raid_disk < mddev->raid_disks)
3810                         if (info->state & (1<<MD_DISK_SYNC))
3811                                 set_bit(In_sync, &rdev->flags);
3812
3813                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3814                         set_bit(WriteMostly, &rdev->flags);
3815
3816                 if (!mddev->persistent) {
3817                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
3818                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3819                 } else 
3820                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3821                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3822
3823                 err = bind_rdev_to_array(rdev, mddev);
3824                 if (err) {
3825                         export_rdev(rdev);
3826                         return err;
3827                 }
3828         }
3829
3830         return 0;
3831 }
3832
3833 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3834 {
3835         char b[BDEVNAME_SIZE];
3836         mdk_rdev_t *rdev;
3837
3838         if (!mddev->pers)
3839                 return -ENODEV;
3840
3841         rdev = find_rdev(mddev, dev);
3842         if (!rdev)
3843                 return -ENXIO;
3844
3845         if (rdev->raid_disk >= 0)
3846                 goto busy;
3847
3848         kick_rdev_from_array(rdev);
3849         md_update_sb(mddev, 1);
3850         md_new_event(mddev);
3851
3852         return 0;
3853 busy:
3854         printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3855                 bdevname(rdev->bdev,b), mdname(mddev));
3856         return -EBUSY;
3857 }
3858
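/*
 * hot_add_disk() serves the legacy HOT_ADD_DISK ioctl: it imports a raw
 * device (no superblock expected), binds it to a running version-0.90
 * array as a spare, and kicks recovery so it can be rebuilt onto.
 */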
3859 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3860 {
3861         char b[BDEVNAME_SIZE];
3862         int err;
3863         unsigned int size;
3864         mdk_rdev_t *rdev;
3865
3866         if (!mddev->pers)
3867                 return -ENODEV;
3868
3869         if (mddev->major_version != 0) {
3870                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3871                         " version-0 superblocks.\n",
3872                         mdname(mddev));
3873                 return -EINVAL;
3874         }
3875         if (!mddev->pers->hot_add_disk) {
3876                 printk(KERN_WARNING 
3877                         "%s: personality does not support diskops!\n",
3878                         mdname(mddev));
3879                 return -EINVAL;
3880         }
3881
3882         rdev = md_import_device (dev, -1, 0);
3883         if (IS_ERR(rdev)) {
3884                 printk(KERN_WARNING 
3885                         "md: error, md_import_device() returned %ld\n",
3886                         PTR_ERR(rdev));
3887                 return -EINVAL;
3888         }
3889
3890         if (mddev->persistent)
3891                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3892         else
3893                 rdev->sb_offset =
3894                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3895
3896         size = calc_dev_size(rdev, mddev->chunk_size);
3897         rdev->size = size;
3898
3899         if (test_bit(Faulty, &rdev->flags)) {
3900                 printk(KERN_WARNING 
3901                         "md: can not hot-add faulty %s disk to %s!\n",
3902                         bdevname(rdev->bdev,b), mdname(mddev));
3903                 err = -EINVAL;
3904                 goto abort_export;
3905         }
3906         clear_bit(In_sync, &rdev->flags);
3907         rdev->desc_nr = -1;
3908         rdev->saved_raid_disk = -1;
3909         err = bind_rdev_to_array(rdev, mddev);
3910         if (err)
3911                 goto abort_export;
3912
3913         /*
3914          * The rest had better be atomic; we can have disk failures
3915          * noticed in interrupt contexts ...
3916          */
3917
3918         if (rdev->desc_nr == mddev->max_disks) {
3919                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3920                         mdname(mddev));
3921                 err = -EBUSY;
3922                 goto abort_unbind_export;
3923         }
3924
3925         rdev->raid_disk = -1;
3926
3927         md_update_sb(mddev, 1);
3928
3929         /*
3930          * Kick recovery, maybe this spare has to be added to the
3931          * array immediately.
3932          */
3933         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3934         md_wakeup_thread(mddev->thread);
3935         md_new_event(mddev);
3936         return 0;
3937
3938 abort_unbind_export:
3939         unbind_rdev_from_array(rdev);
3940
3941 abort_export:
3942         export_rdev(rdev);
3943         return err;
3944 }
3945
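/*
 * set_bitmap_file() serves SET_BITMAP_FILE: a non-negative fd attaches a
 * file-backed write-intent bitmap (other writers to the file are locked
 * out first), while fd < 0 detaches and releases the current one.  On a
 * running array the personality is quiesced around the change.
 */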
3946 static int set_bitmap_file(mddev_t *mddev, int fd)
3947 {
3948         int err;
3949
3950         if (mddev->pers) {
3951                 if (!mddev->pers->quiesce)
3952                         return -EBUSY;
3953                 if (mddev->recovery || mddev->sync_thread)
3954                         return -EBUSY;
3955                 /* we should be able to change the bitmap.. */
3956         }
3957
3958
3959         if (fd >= 0) {
3960                 if (mddev->bitmap)
3961                         return -EEXIST; /* cannot add when bitmap is present */
3962                 mddev->bitmap_file = fget(fd);
3963
3964                 if (mddev->bitmap_file == NULL) {
3965                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3966                                mdname(mddev));
3967                         return -EBADF;
3968                 }
3969
3970                 err = deny_bitmap_write_access(mddev->bitmap_file);
3971                 if (err) {
3972                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3973                                mdname(mddev));
3974                         fput(mddev->bitmap_file);
3975                         mddev->bitmap_file = NULL;
3976                         return err;
3977                 }
3978                 mddev->bitmap_offset = 0; /* file overrides offset */
3979         } else if (mddev->bitmap == NULL)
3980                 return -ENOENT; /* cannot remove what isn't there */
3981         err = 0;
3982         if (mddev->pers) {
3983                 mddev->pers->quiesce(mddev, 1);
3984                 if (fd >= 0)
3985                         err = bitmap_create(mddev);
3986                 if (fd < 0 || err) {
3987                         bitmap_destroy(mddev);
3988                         fd = -1; /* make sure to put the file */
3989                 }
3990                 mddev->pers->quiesce(mddev, 0);
3991         }
3992         if (fd < 0) {
3993                 if (mddev->bitmap_file) {
3994                         restore_bitmap_write_access(mddev->bitmap_file);
3995                         fput(mddev->bitmap_file);
3996                 }
3997                 mddev->bitmap_file = NULL;
3998         }
3999
4000         return err;
4001 }
4002
4003 /*
4004  * set_array_info is used two different ways
4005  * The original usage is when creating a new array.
4006  * In this usage, raid_disks is > 0 and it together with
4007  *  level, size, not_persistent, layout and chunk_size determine the
4008  *  shape of the array.
4009  *  This will always create an array with a type-0.90.0 superblock.
4010  * The newer usage is when assembling an array.
4011  *  In this case raid_disks will be 0, and the major_version field is
4012  *  used to determine which style super-blocks are to be found on the devices.
4013  *  The minor and patch _version numbers are also kept in case the
4014  *  super_block handler wishes to interpret them.
4015  */
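/*
 * Illustrative only (not part of the kernel source): creating a new array
 * this way from userspace would look roughly like
 *
 *	mdu_array_info_t info = {
 *		.level = 1, .raid_disks = 2,	// hypothetical 2-disk RAID1
 *		.size = 0, .chunk_size = 0,
 *	};
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 *	// ...then ADD_NEW_DISK for each member, followed by RUN_ARRAY
 */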
4016 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4017 {
4018
4019         if (info->raid_disks == 0) {
4020                 /* just setting version number for superblock loading */
4021                 if (info->major_version < 0 ||
4022                     info->major_version >= ARRAY_SIZE(super_types) ||
4023                     super_types[info->major_version].name == NULL) {
4024                         /* maybe try to auto-load a module? */
4025                         printk(KERN_INFO 
4026                                 "md: superblock version %d not known\n",
4027                                 info->major_version);
4028                         return -EINVAL;
4029                 }
4030                 mddev->major_version = info->major_version;
4031                 mddev->minor_version = info->minor_version;
4032                 mddev->patch_version = info->patch_version;
4033                 mddev->persistent = !info->not_persistent;
4034                 return 0;
4035         }
4036         mddev->major_version = MD_MAJOR_VERSION;
4037         mddev->minor_version = MD_MINOR_VERSION;
4038         mddev->patch_version = MD_PATCHLEVEL_VERSION;
4039         mddev->ctime         = get_seconds();
4040
4041         mddev->level         = info->level;
4042         mddev->clevel[0]     = 0;
4043         mddev->size          = info->size;
4044         mddev->raid_disks    = info->raid_disks;
4045         /* don't set md_minor, it is determined by which /dev/md* was
4046          * opened
4047          */
4048         if (info->state & (1<<MD_SB_CLEAN))
4049                 mddev->recovery_cp = MaxSector;
4050         else
4051                 mddev->recovery_cp = 0;
4052         mddev->persistent    = ! info->not_persistent;
4053
4054         mddev->layout        = info->layout;
4055         mddev->chunk_size    = info->chunk_size;
4056
4057         mddev->max_disks     = MD_SB_DISKS;
4058
4059         mddev->flags         = 0;
4060         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4061
4062         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4063         mddev->bitmap_offset = 0;
4064
4065         mddev->reshape_position = MaxSector;
4066
4067         /*
4068          * Generate a 128 bit UUID
4069          */
4070         get_random_bytes(mddev->uuid, 16);
4071
4072         mddev->new_level = mddev->level;
4073         mddev->new_chunk = mddev->chunk_size;
4074         mddev->new_layout = mddev->layout;
4075         mddev->delta_disks = 0;
4076
4077         return 0;
4078 }
4079
4080 static int update_size(mddev_t *mddev, unsigned long size)
4081 {
4082         mdk_rdev_t * rdev;
4083         int rv;
4084         struct list_head *tmp;
4085         int fit = (size == 0);
4086
4087         if (mddev->pers->resize == NULL)
4088                 return -EINVAL;
4089         /* The "size" is the amount of each device that is used.
4090          * This can only make sense for arrays with redundancy.
4091          * linear and raid0 always use whatever space is available
4092          * We can only consider changing the size if no resync
4093          * or reconstruction is happening, and if the new size
4094          * is acceptable. It must fit before the sb_offset or,
4095          * if that is < data_offset, it must fit before the
4096          * size of each device.
4097          * If size is zero, we find the largest size that fits.
4098          */
4099         if (mddev->sync_thread)
4100                 return -EBUSY;
4101         ITERATE_RDEV(mddev,rdev,tmp) {
4102                 sector_t avail;
4103                 avail = rdev->size * 2;
4104
4105                 if (fit && (size == 0 || size > avail/2))
4106                         size = avail/2;
4107                 if (avail < ((sector_t)size << 1))
4108                         return -ENOSPC;
4109         }
4110         rv = mddev->pers->resize(mddev, (sector_t)size *2);
4111         if (!rv) {
4112                 struct block_device *bdev;
4113
4114                 bdev = bdget_disk(mddev->gendisk, 0);
4115                 if (bdev) {
4116                         mutex_lock(&bdev->bd_inode->i_mutex);
4117                         i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4118                         mutex_unlock(&bdev->bd_inode->i_mutex);
4119                         bdput(bdev);
4120                 }
4121         }
4122         return rv;
4123 }
4124
4125 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4126 {
4127         int rv;
4128         /* change the number of raid disks */
4129         if (mddev->pers->check_reshape == NULL)
4130                 return -EINVAL;
4131         if (raid_disks <= 0 ||
4132             raid_disks >= mddev->max_disks)
4133                 return -EINVAL;
4134         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4135                 return -EBUSY;
4136         mddev->delta_disks = raid_disks - mddev->raid_disks;
4137
4138         rv = mddev->pers->check_reshape(mddev);
4139         return rv;
4140 }
4141
4142
4143 /*
4144  * update_array_info is used to change the configuration of an
4145  * on-line array.
4146  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
4147  * fields in the info are checked against the array.
4148  * Any differences that cannot be handled will cause an error.
4149  * Normally, only one change can be managed at a time.
4150  */
4151 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4152 {
4153         int rv = 0;
4154         int cnt = 0;
4155         int state = 0;
4156
4157         /* calculate expected state, ignoring low bits */
4158         if (mddev->bitmap && mddev->bitmap_offset)
4159                 state |= (1 << MD_SB_BITMAP_PRESENT);
4160
4161         if (mddev->major_version != info->major_version ||
4162             mddev->minor_version != info->minor_version ||
4163 /*          mddev->patch_version != info->patch_version || */
4164             mddev->ctime         != info->ctime         ||
4165             mddev->level         != info->level         ||
4166 /*          mddev->layout        != info->layout        || */
4167             !mddev->persistent   != info->not_persistent||
4168             mddev->chunk_size    != info->chunk_size    ||
4169             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4170             ((state^info->state) & 0xfffffe00)
4171                 )
4172                 return -EINVAL;
4173         /* Check there is only one change */
4174         if (info->size >= 0 && mddev->size != info->size) cnt++;
4175         if (mddev->raid_disks != info->raid_disks) cnt++;
4176         if (mddev->layout != info->layout) cnt++;
4177         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4178         if (cnt == 0) return 0;
4179         if (cnt > 1) return -EINVAL;
4180
4181         if (mddev->layout != info->layout) {
4182                 /* Change layout
4183                  * we don't need to do anything at the md level, the
4184                  * personality will take care of it all.
4185                  */
4186                 if (mddev->pers->reconfig == NULL)
4187                         return -EINVAL;
4188                 else
4189                         return mddev->pers->reconfig(mddev, info->layout, -1);
4190         }
4191         if (info->size >= 0 && mddev->size != info->size)
4192                 rv = update_size(mddev, info->size);
4193
4194         if (mddev->raid_disks    != info->raid_disks)
4195                 rv = update_raid_disks(mddev, info->raid_disks);
4196
4197         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4198                 if (mddev->pers->quiesce == NULL)
4199                         return -EINVAL;
4200                 if (mddev->recovery || mddev->sync_thread)
4201                         return -EBUSY;
4202                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4203                         /* add the bitmap */
4204                         if (mddev->bitmap)
4205                                 return -EEXIST;
4206                         if (mddev->default_bitmap_offset == 0)
4207                                 return -EINVAL;
4208                         mddev->bitmap_offset = mddev->default_bitmap_offset;
4209                         mddev->pers->quiesce(mddev, 1);
4210                         rv = bitmap_create(mddev);
4211                         if (rv)
4212                                 bitmap_destroy(mddev);
4213                         mddev->pers->quiesce(mddev, 0);
4214                 } else {
4215                         /* remove the bitmap */
4216                         if (!mddev->bitmap)
4217                                 return -ENOENT;
4218                         if (mddev->bitmap->file)
4219                                 return -EINVAL;
4220                         mddev->pers->quiesce(mddev, 1);
4221                         bitmap_destroy(mddev);
4222                         mddev->pers->quiesce(mddev, 0);
4223                         mddev->bitmap_offset = 0;
4224                 }
4225         }
4226         md_update_sb(mddev, 1);
4227         return rv;
4228 }
4229
4230 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4231 {
4232         mdk_rdev_t *rdev;
4233
4234         if (mddev->pers == NULL)
4235                 return -ENODEV;
4236
4237         rdev = find_rdev(mddev, dev);
4238         if (!rdev)
4239                 return -ENODEV;
4240
4241         md_error(mddev, rdev);
4242         return 0;
4243 }
4244
4245 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4246 {
4247         mddev_t *mddev = bdev->bd_disk->private_data;
4248
4249         geo->heads = 2;
4250         geo->sectors = 4;
4251         geo->cylinders = get_capacity(mddev->gendisk) / 8;
4252         return 0;
4253 }
4254
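/*
 * md_ioctl() is the block-device ioctl entry point.  Commands that do not
 * touch a particular array (RAID_VERSION, PRINT_RAID_DEBUG, RAID_AUTORUN)
 * are handled first; everything else takes the mddev reconfig lock, lets
 * SET_ARRAY_INFO create or prepare assembly of the array, then splits the
 * rest into commands a read-only array may run (GET_*_INFO, STOP_ARRAY*,
 * RESTART_ARRAY_RW) and superblock-changing commands, which are refused on
 * a read-only array unless it was started auto-read-only.
 */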
4255 static int md_ioctl(struct inode *inode, struct file *file,
4256                         unsigned int cmd, unsigned long arg)
4257 {
4258         int err = 0;
4259         void __user *argp = (void __user *)arg;
4260         mddev_t *mddev = NULL;
4261
4262         if (!capable(CAP_SYS_ADMIN))
4263                 return -EACCES;
4264
4265         /*
4266          * Commands dealing with the RAID driver but not any
4267          * particular array:
4268          */
4269         switch (cmd)
4270         {
4271                 case RAID_VERSION:
4272                         err = get_version(argp);
4273                         goto done;
4274
4275                 case PRINT_RAID_DEBUG:
4276                         err = 0;
4277                         md_print_devices();
4278                         goto done;
4279
4280 #ifndef MODULE
4281                 case RAID_AUTORUN:
4282                         err = 0;
4283                         autostart_arrays(arg);
4284                         goto done;
4285 #endif
4286                 default:;
4287         }
4288
4289         /*
4290          * Commands creating/starting a new array:
4291          */
4292
4293         mddev = inode->i_bdev->bd_disk->private_data;
4294
4295         if (!mddev) {
4296                 BUG();
4297                 goto abort;
4298         }
4299
4300         err = mddev_lock(mddev);
4301         if (err) {
4302                 printk(KERN_INFO 
4303                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
4304                         err, cmd);
4305                 goto abort;
4306         }
4307
4308         switch (cmd)
4309         {
4310                 case SET_ARRAY_INFO:
4311                         {
4312                                 mdu_array_info_t info;
4313                                 if (!arg)
4314                                         memset(&info, 0, sizeof(info));
4315                                 else if (copy_from_user(&info, argp, sizeof(info))) {
4316                                         err = -EFAULT;
4317                                         goto abort_unlock;
4318                                 }
4319                                 if (mddev->pers) {
4320                                         err = update_array_info(mddev, &info);
4321                                         if (err) {
4322                                                 printk(KERN_WARNING "md: couldn't update"
4323                                                        " array info. %d\n", err);
4324                                                 goto abort_unlock;
4325                                         }
4326                                         goto done_unlock;
4327                                 }
4328                                 if (!list_empty(&mddev->disks)) {
4329                                         printk(KERN_WARNING
4330                                                "md: array %s already has disks!\n",
4331                                                mdname(mddev));
4332                                         err = -EBUSY;
4333                                         goto abort_unlock;
4334                                 }
4335                                 if (mddev->raid_disks) {
4336                                         printk(KERN_WARNING
4337                                                "md: array %s already initialised!\n",
4338                                                mdname(mddev));
4339                                         err = -EBUSY;
4340                                         goto abort_unlock;
4341                                 }
4342                                 err = set_array_info(mddev, &info);
4343                                 if (err) {
4344                                         printk(KERN_WARNING "md: couldn't set"
4345                                                " array info. %d\n", err);
4346                                         goto abort_unlock;
4347                                 }
4348                         }
4349                         goto done_unlock;
4350
4351                 default:;
4352         }
4353
4354         /*
4355          * Commands querying/configuring an existing array:
4356          */
4357         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4358          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4359         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4360                         && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4361                         && cmd != GET_BITMAP_FILE) {
4362                 err = -ENODEV;
4363                 goto abort_unlock;
4364         }
4365
4366         /*
4367          * Commands even a read-only array can execute:
4368          */
4369         switch (cmd)
4370         {
4371                 case GET_ARRAY_INFO:
4372                         err = get_array_info(mddev, argp);
4373                         goto done_unlock;
4374
4375                 case GET_BITMAP_FILE:
4376                         err = get_bitmap_file(mddev, argp);
4377                         goto done_unlock;
4378
4379                 case GET_DISK_INFO:
4380                         err = get_disk_info(mddev, argp);
4381                         goto done_unlock;
4382
4383                 case RESTART_ARRAY_RW:
4384                         err = restart_array(mddev);
4385                         goto done_unlock;
4386
4387                 case STOP_ARRAY:
4388                         err = do_md_stop (mddev, 0);
4389                         goto done_unlock;
4390
4391                 case STOP_ARRAY_RO:
4392                         err = do_md_stop (mddev, 1);
4393                         goto done_unlock;
4394
4395         /*
4396          * We have a problem here : there is no easy way to give a CHS
4397          * virtual geometry. We currently pretend that we have a 2 heads
4398          * 4 sectors (with a BIG number of cylinders...). This drives
4399          * dosfs just mad... ;-)
4400          */
4401         }
4402
4403         /*
4404          * The remaining ioctls are changing the state of the
4405          * superblock, so we do not allow them on read-only arrays.
4406          * However non-MD ioctls (e.g. get-size) will still come through
4407          * here and hit the 'default' below, so only disallow
4408          * 'md' ioctls, and switch to rw mode if started auto-readonly.
4409          */
4410         if (_IOC_TYPE(cmd) == MD_MAJOR &&
4411             mddev->ro && mddev->pers) {
4412                 if (mddev->ro == 2) {
4413                         mddev->ro = 0;
4414                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4415                 md_wakeup_thread(mddev->thread);
4416
4417                 } else {
4418                         err = -EROFS;
4419                         goto abort_unlock;
4420                 }
4421         }
4422
4423         switch (cmd)
4424         {
4425                 case ADD_NEW_DISK:
4426                 {
4427                         mdu_disk_info_t info;
4428                         if (copy_from_user(&info, argp, sizeof(info)))
4429                                 err = -EFAULT;
4430                         else
4431                                 err = add_new_disk(mddev, &info);
4432                         goto done_unlock;
4433                 }
4434
4435                 case HOT_REMOVE_DISK:
4436                         err = hot_remove_disk(mddev, new_decode_dev(arg));
4437                         goto done_unlock;
4438
4439                 case HOT_ADD_DISK:
4440                         err = hot_add_disk(mddev, new_decode_dev(arg));
4441                         goto done_unlock;
4442
4443                 case SET_DISK_FAULTY:
4444                         err = set_disk_faulty(mddev, new_decode_dev(arg));
4445                         goto done_unlock;
4446
4447                 case RUN_ARRAY:
4448                         err = do_md_run (mddev);
4449                         goto done_unlock;
4450
4451                 case SET_BITMAP_FILE:
4452                         err = set_bitmap_file(mddev, (int)arg);
4453                         goto done_unlock;
4454
4455                 default:
4456                         err = -EINVAL;
4457                         goto abort_unlock;
4458         }
4459
4460 done_unlock:
4461 abort_unlock:
4462         mddev_unlock(mddev);
4463
4464         return err;
4465 done:
4466         if (err)
4467                 MD_BUG();
4468 abort:
4469         return err;
4470 }
4471
4472 static int md_open(struct inode *inode, struct file *file)
4473 {
4474         /*
4475          * Succeed if we can lock the mddev, which confirms that
4476          * it isn't being stopped right now.
4477          */
4478         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4479         int err;
4480
4481         if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
4482                 goto out;
4483
4484         err = 0;
4485         mddev_get(mddev);
4486         mddev_unlock(mddev);
4487
4488         check_disk_change(inode->i_bdev);
4489  out:
4490         return err;
4491 }
4492
4493 static int md_release(struct inode *inode, struct file * file)
4494 {
4495         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4496
4497         BUG_ON(!mddev);
4498         mddev_put(mddev);
4499
4500         return 0;
4501 }
4502
4503 static int md_media_changed(struct gendisk *disk)
4504 {
4505         mddev_t *mddev = disk->private_data;
4506
4507         return mddev->changed;
4508 }
4509
4510 static int md_revalidate(struct gendisk *disk)
4511 {
4512         mddev_t *mddev = disk->private_data;
4513
4514         mddev->changed = 0;
4515         return 0;
4516 }
4517 static struct block_device_operations md_fops =
4518 {
4519         .owner          = THIS_MODULE,
4520         .open           = md_open,
4521         .release        = md_release,
4522         .ioctl          = md_ioctl,
4523         .getgeo         = md_getgeo,
4524         .media_changed  = md_media_changed,
4525         .revalidate_disk= md_revalidate,
4526 };
4527
4528 static int md_thread(void * arg)
4529 {
4530         mdk_thread_t *thread = arg;
4531
4532         /*
4533          * md_thread is a 'system-thread', its priority should be very
4534          * high. We avoid resource deadlocks individually in each
4535          * raid personality. (RAID5 does preallocation) We also use RR and
4536          * the very same RT priority as kswapd, thus we will never get
4537          * into a priority inversion deadlock.
4538          *
4539          * we definitely have to have equal or higher priority than
4540          * bdflush, otherwise bdflush will deadlock if there are too
4541          * many dirty RAID5 blocks.
4542          */
4543
4544         current->flags |= PF_NOFREEZE;
4545         allow_signal(SIGKILL);
4546         while (!kthread_should_stop()) {
4547
4548                 /* We need to wait INTERRUPTIBLE so that
4549                  * we don't add to the load-average.
4550                  * That means we need to be sure no signals are
4551                  * pending
4552                  */
4553                 if (signal_pending(current))
4554                         flush_signals(current);
4555
4556                 wait_event_interruptible_timeout
4557                         (thread->wqueue,
4558                          test_bit(THREAD_WAKEUP, &thread->flags)
4559                          || kthread_should_stop(),
4560                          thread->timeout);
4561
4562                 clear_bit(THREAD_WAKEUP, &thread->flags);
4563
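                /* Note: run() is invoked on every wakeup, including plain
                 * timeouts, so per-array threads also get periodic work done
                 * (cf. the md_check_recovery() comment further down).
                 */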
4564                 thread->run(thread->mddev);
4565         }
4566
4567         return 0;
4568 }
4569
4570 void md_wakeup_thread(mdk_thread_t *thread)
4571 {
4572         if (thread) {
4573                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4574                 set_bit(THREAD_WAKEUP, &thread->flags);
4575                 wake_up(&thread->wqueue);
4576         }
4577 }
4578
4579 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4580                                  const char *name)
4581 {
4582         mdk_thread_t *thread;
4583
4584         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4585         if (!thread)
4586                 return NULL;
4587
4588         init_waitqueue_head(&thread->wqueue);
4589
4590         thread->run = run;
4591         thread->mddev = mddev;
4592         thread->timeout = MAX_SCHEDULE_TIMEOUT;
4593         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4594         if (IS_ERR(thread->tsk)) {
4595                 kfree(thread);
4596                 return NULL;
4597         }
4598         return thread;
4599 }
4600
4601 void md_unregister_thread(mdk_thread_t *thread)
4602 {
4603         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
4604
4605         kthread_stop(thread->tsk);
4606         kfree(thread);
4607 }
4608
4609 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4610 {
4611         if (!mddev) {
4612                 MD_BUG();
4613                 return;
4614         }
4615
4616         if (!rdev || test_bit(Faulty, &rdev->flags))
4617                 return;
4618 /*
4619         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4620                 mdname(mddev),
4621                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4622                 __builtin_return_address(0),__builtin_return_address(1),
4623                 __builtin_return_address(2),__builtin_return_address(3));
4624 */
4625         if (!mddev->pers)
4626                 return;
4627         if (!mddev->pers->error_handler)
4628                 return;
4629         mddev->pers->error_handler(mddev,rdev);
4630         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4631         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4632         md_wakeup_thread(mddev->thread);
4633         md_new_event_inintr(mddev);
4634 }
4635
4636 /* seq_file implementation /proc/mdstat */
4637
4638 static void status_unused(struct seq_file *seq)
4639 {
4640         int i = 0;
4641         mdk_rdev_t *rdev;
4642         struct list_head *tmp;
4643
4644         seq_printf(seq, "unused devices: ");
4645
4646         ITERATE_RDEV_PENDING(rdev,tmp) {
4647                 char b[BDEVNAME_SIZE];
4648                 i++;
4649                 seq_printf(seq, "%s ",
4650                               bdevname(rdev->bdev,b));
4651         }
4652         if (!i)
4653                 seq_printf(seq, "<none>");
4654
4655         seq_printf(seq, "\n");
4656 }
4657
4658
4659 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4660 {
4661         sector_t max_blocks, resync, res;
4662         unsigned long dt, db, rt;
4663         int scale;
4664         unsigned int per_milli;
4665
4666         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4667
4668         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4669                 max_blocks = mddev->resync_max_sectors >> 1;
4670         else
4671                 max_blocks = mddev->size;
4672
4673         /*
4674          * Should not happen.
4675          */
4676         if (!max_blocks) {
4677                 MD_BUG();
4678                 return;
4679         }
4680         /* Pick 'scale' such that (resync>>scale)*1000 will fit
4681          * in a sector_t, and (max_blocks>>scale) will fit in a
4682          * u32, as those are the requirements for sector_div.
4683          * Thus 'scale' must be at least 10
4684          */
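        /* (Illustrative: for any realistic array max_blocks/2 stays well
         * below 1ULL<<42, so the loop below leaves scale at 10.)
         */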
4685         scale = 10;
4686         if (sizeof(sector_t) > sizeof(unsigned long)) {
4687                 while ( max_blocks/2 > (1ULL<<(scale+32)))
4688                         scale++;
4689         }
4690         res = (resync>>scale)*1000;
4691         sector_div(res, (u32)((max_blocks>>scale)+1));
4692
4693         per_milli = res;
4694         {
4695                 int i, x = per_milli/50, y = 20-x;
4696                 seq_printf(seq, "[");
4697                 for (i = 0; i < x; i++)
4698                         seq_printf(seq, "=");
4699                 seq_printf(seq, ">");
4700                 for (i = 0; i < y; i++)
4701                         seq_printf(seq, ".");
4702                 seq_printf(seq, "] ");
4703         }
4704         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4705                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4706                     "reshape" :
4707                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
4708                      "check" :
4709                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4710                       "resync" : "recovery"))),
4711                    per_milli/10, per_milli % 10,
4712                    (unsigned long long) resync,
4713                    (unsigned long long) max_blocks);
4714
4715         /*
4716          * We do not want to overflow, so the order of operands and
4717          * the * 100 / 100 trick are important. We do a +1 to be
4718          * safe against division by zero. We only estimate anyway.
4719          *
4720          * dt: time from mark until now
4721          * db: blocks written from mark until now
4722          * rt: remaining time
4723          */
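        /* Illustrative only: with dt = 30s and db = 60000 sectors written
         * since the mark (~1000 blocks/sec), 1000000 blocks remaining give
         * rt = (30 * (1000000 / 301)) / 100 = 996 seconds, i.e. close to
         * remaining/rate without ever overflowing an unsigned long.
         */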
4724         dt = ((jiffies - mddev->resync_mark) / HZ);
4725         if (!dt) dt++;
4726         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
4727                 - mddev->resync_mark_cnt;
4728         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
4729
4730         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4731
4732         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
4733 }
4734
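/* The /proc/mdstat iterator uses two sentinel cursors: (void*)1 stands for
 * the "Personalities" header line and (void*)2 for the trailing
 * "unused devices" line; real mddevs are returned with a reference held
 * (mddev_get) which md_seq_next()/md_seq_stop() drop again.
 */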
4735 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4736 {
4737         struct list_head *tmp;
4738         loff_t l = *pos;
4739         mddev_t *mddev;
4740
4741         if (l >= 0x10000)
4742                 return NULL;
4743         if (!l--)
4744                 /* header */
4745                 return (void*)1;
4746
4747         spin_lock(&all_mddevs_lock);
4748         list_for_each(tmp,&all_mddevs)
4749                 if (!l--) {
4750                         mddev = list_entry(tmp, mddev_t, all_mddevs);
4751                         mddev_get(mddev);
4752                         spin_unlock(&all_mddevs_lock);
4753                         return mddev;
4754                 }
4755         spin_unlock(&all_mddevs_lock);
4756         if (!l--)
4757                 return (void*)2;/* tail */
4758         return NULL;
4759 }
4760
4761 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4762 {
4763         struct list_head *tmp;
4764         mddev_t *next_mddev, *mddev = v;
4765         
4766         ++*pos;
4767         if (v == (void*)2)
4768                 return NULL;
4769
4770         spin_lock(&all_mddevs_lock);
4771         if (v == (void*)1)
4772                 tmp = all_mddevs.next;
4773         else
4774                 tmp = mddev->all_mddevs.next;
4775         if (tmp != &all_mddevs)
4776                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4777         else {
4778                 next_mddev = (void*)2;
4779                 *pos = 0x10000;
4780         }               
4781         spin_unlock(&all_mddevs_lock);
4782
4783         if (v != (void*)1)
4784                 mddev_put(mddev);
4785         return next_mddev;
4786
4787 }
4788
4789 static void md_seq_stop(struct seq_file *seq, void *v)
4790 {
4791         mddev_t *mddev = v;
4792
4793         if (mddev && v != (void*)1 && v != (void*)2)
4794                 mddev_put(mddev);
4795 }
4796
4797 struct mdstat_info {
4798         int event;
4799 };
4800
4801 static int md_seq_show(struct seq_file *seq, void *v)
4802 {
4803         mddev_t *mddev = v;
4804         sector_t size;
4805         struct list_head *tmp2;
4806         mdk_rdev_t *rdev;
4807         struct mdstat_info *mi = seq->private;
4808         struct bitmap *bitmap;
4809
4810         if (v == (void*)1) {
4811                 struct mdk_personality *pers;
4812                 seq_printf(seq, "Personalities : ");
4813                 spin_lock(&pers_lock);
4814                 list_for_each_entry(pers, &pers_list, list)
4815                         seq_printf(seq, "[%s] ", pers->name);
4816
4817                 spin_unlock(&pers_lock);
4818                 seq_printf(seq, "\n");
4819                 mi->event = atomic_read(&md_event_count);
4820                 return 0;
4821         }
4822         if (v == (void*)2) {
4823                 status_unused(seq);
4824                 return 0;
4825         }
4826
4827         if (mddev_lock(mddev) < 0)
4828                 return -EINTR;
4829
4830         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4831                 seq_printf(seq, "%s : %sactive", mdname(mddev),
4832                                                 mddev->pers ? "" : "in");
4833                 if (mddev->pers) {
4834                         if (mddev->ro==1)
4835                                 seq_printf(seq, " (read-only)");
4836                         if (mddev->ro==2)
4837                                 seq_printf(seq, "(auto-read-only)");
4838                         seq_printf(seq, " %s", mddev->pers->name);
4839                 }
4840
4841                 size = 0;
4842                 ITERATE_RDEV(mddev,rdev,tmp2) {
4843                         char b[BDEVNAME_SIZE];
4844                         seq_printf(seq, " %s[%d]",
4845                                 bdevname(rdev->bdev,b), rdev->desc_nr);
4846                         if (test_bit(WriteMostly, &rdev->flags))
4847                                 seq_printf(seq, "(W)");
4848                         if (test_bit(Faulty, &rdev->flags)) {
4849                                 seq_printf(seq, "(F)");
4850                                 continue;
4851                         } else if (rdev->raid_disk < 0)
4852                                 seq_printf(seq, "(S)"); /* spare */
4853                         size += rdev->size;
4854                 }
4855
4856                 if (!list_empty(&mddev->disks)) {
4857                         if (mddev->pers)
4858                                 seq_printf(seq, "\n      %llu blocks",
4859                                         (unsigned long long)mddev->array_size);
4860                         else
4861                                 seq_printf(seq, "\n      %llu blocks",
4862                                         (unsigned long long)size);
4863                 }
4864                 if (mddev->persistent) {
4865                         if (mddev->major_version != 0 ||
4866                             mddev->minor_version != 90) {
4867                                 seq_printf(seq," super %d.%d",
4868                                            mddev->major_version,
4869                                            mddev->minor_version);
4870                         }
4871                 } else
4872                         seq_printf(seq, " super non-persistent");
4873
4874                 if (mddev->pers) {
4875                         mddev->pers->status (seq, mddev);
4876                         seq_printf(seq, "\n      ");
4877                         if (mddev->pers->sync_request) {
4878                                 if (mddev->curr_resync > 2) {
4879                                         status_resync (seq, mddev);
4880                                         seq_printf(seq, "\n      ");
4881                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4882                                         seq_printf(seq, "\tresync=DELAYED\n      ");
4883                                 else if (mddev->recovery_cp < MaxSector)
4884                                         seq_printf(seq, "\tresync=PENDING\n      ");
4885                         }
4886                 } else
4887                         seq_printf(seq, "\n       ");
4888
4889                 if ((bitmap = mddev->bitmap)) {
4890                         unsigned long chunk_kb;
4891                         unsigned long flags;
4892                         spin_lock_irqsave(&bitmap->lock, flags);
4893                         chunk_kb = bitmap->chunksize >> 10;
4894                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4895                                 "%lu%s chunk",
4896                                 bitmap->pages - bitmap->missing_pages,
4897                                 bitmap->pages,
4898                                 (bitmap->pages - bitmap->missing_pages)
4899                                         << (PAGE_SHIFT - 10),
4900                                 chunk_kb ? chunk_kb : bitmap->chunksize,
4901                                 chunk_kb ? "KB" : "B");
4902                         if (bitmap->file) {
4903                                 seq_printf(seq, ", file: ");
4904                                 seq_path(seq, bitmap->file->f_path.mnt,
4905                                          bitmap->file->f_path.dentry," \t\n");
4906                         }
4907
4908                         seq_printf(seq, "\n");
4909                         spin_unlock_irqrestore(&bitmap->lock, flags);
4910                 }
4911
4912                 seq_printf(seq, "\n");
4913         }
4914         mddev_unlock(mddev);
4915         
4916         return 0;
4917 }
4918
4919 static struct seq_operations md_seq_ops = {
4920         .start  = md_seq_start,
4921         .next   = md_seq_next,
4922         .stop   = md_seq_stop,
4923         .show   = md_seq_show,
4924 };
4925
4926 static int md_seq_open(struct inode *inode, struct file *file)
4927 {
4928         int error;
4929         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4930         if (mi == NULL)
4931                 return -ENOMEM;
4932
4933         error = seq_open(file, &md_seq_ops);
4934         if (error)
4935                 kfree(mi);
4936         else {
4937                 struct seq_file *p = file->private_data;
4938                 p->private = mi;
4939                 mi->event = atomic_read(&md_event_count);
4940         }
4941         return error;
4942 }
4943
4944 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4945 {
4946         struct seq_file *m = filp->private_data;
4947         struct mdstat_info *mi = m->private;
4948         int mask;
4949
4950         poll_wait(filp, &md_event_waiters, wait);
4951
4952         /* always allow read */
4953         mask = POLLIN | POLLRDNORM;
4954
4955         if (mi->event != atomic_read(&md_event_count))
4956                 mask |= POLLERR | POLLPRI;
4957         return mask;
4958 }
4959
4960 static const struct file_operations md_seq_fops = {
4961         .owner          = THIS_MODULE,
4962         .open           = md_seq_open,
4963         .read           = seq_read,
4964         .llseek         = seq_lseek,
4965         .release        = seq_release_private,
4966         .poll           = mdstat_poll,
4967 };
4968
4969 int register_md_personality(struct mdk_personality *p)
4970 {
4971         spin_lock(&pers_lock);
4972         list_add_tail(&p->list, &pers_list);
4973         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
4974         spin_unlock(&pers_lock);
4975         return 0;
4976 }
4977
4978 int unregister_md_personality(struct mdk_personality *p)
4979 {
4980         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
4981         spin_lock(&pers_lock);
4982         list_del_init(&p->list);
4983         spin_unlock(&pers_lock);
4984         return 0;
4985 }
4986
4987 static int is_mddev_idle(mddev_t *mddev)
4988 {
4989         mdk_rdev_t * rdev;
4990         struct list_head *tmp;
4991         int idle;
4992         unsigned long curr_events;
4993
4994         idle = 1;
4995         ITERATE_RDEV(mddev,rdev,tmp) {
4996                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
4997                 curr_events = disk_stat_read(disk, sectors[0]) + 
4998                                 disk_stat_read(disk, sectors[1]) - 
4999                                 atomic_read(&disk->sync_io);
5000                 /* The difference between curr_events and last_events
5001                  * will be affected by any new non-sync IO (making
5002                  * curr_events bigger) and any difference in the amount of
5003                  * in-flight sync IO (making curr_events bigger or smaller)
5004                  * The amount in-flight is currently limited to
5005                  * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
5006                  * which is at most 4096 sectors.
5007                  * These numbers are fairly fragile and should be made
5008                  * more robust, probably by enforcing the
5009                  * 'window size' that md_do_sync sort-of uses.
5010                  *
5011                  * Note: the following is an unsigned comparison.
5012                  */
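                /* i.e. treat the disk as busy if curr_events has moved more
                 * than 4096 sectors in either direction since last_events.
                 */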
5013                 if ((curr_events - rdev->last_events + 4096) > 8192) {
5014                         rdev->last_events = curr_events;
5015                         idle = 0;
5016                 }
5017         }
5018         return idle;
5019 }
5020
5021 void md_done_sync(mddev_t *mddev, int blocks, int ok)
5022 {
5023         /* another "blocks" (512byte) blocks have been synced */
5024         atomic_sub(blocks, &mddev->recovery_active);
5025         wake_up(&mddev->recovery_wait);
5026         if (!ok) {
5027                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5028                 md_wakeup_thread(mddev->thread);
5029                 // stop recovery, signal do_sync ....
5030         }
5031 }
5032
5033
5034 /* md_write_start(mddev, bi)
5035  * If we need to update some array metadata (e.g. 'active' flag
5036  * in superblock) before writing, schedule a superblock update
5037  * and wait for it to complete.
5038  */
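/* Writes accounted here are balanced by md_write_end() below; the
 * writes_pending count is what md_check_recovery() consults before
 * marking the array clean again.
 */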
5039 void md_write_start(mddev_t *mddev, struct bio *bi)
5040 {
5041         if (bio_data_dir(bi) != WRITE)
5042                 return;
5043
5044         BUG_ON(mddev->ro == 1);
5045         if (mddev->ro == 2) {
5046                 /* need to switch to read/write */
5047                 mddev->ro = 0;
5048                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5049                 md_wakeup_thread(mddev->thread);
5050         }
5051         atomic_inc(&mddev->writes_pending);
5052         if (mddev->in_sync) {
5053                 spin_lock_irq(&mddev->write_lock);
5054                 if (mddev->in_sync) {
5055                         mddev->in_sync = 0;
5056                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5057                         md_wakeup_thread(mddev->thread);
5058                 }
5059                 spin_unlock_irq(&mddev->write_lock);
5060         }
5061         wait_event(mddev->sb_wait, mddev->flags==0);
5062 }
5063
5064 void md_write_end(mddev_t *mddev)
5065 {
5066         if (atomic_dec_and_test(&mddev->writes_pending)) {
5067                 if (mddev->safemode == 2)
5068                         md_wakeup_thread(mddev->thread);
5069                 else if (mddev->safemode_delay)
5070                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5071         }
5072 }
5073
5074 /* md_allow_write(mddev)
5075  * Calling this ensures that the array is marked 'active' so that writes
5076  * may proceed without blocking.  It is important to call this before
5077  * attempting a GFP_KERNEL allocation while holding the mddev lock.
5078  * Must be called with mddev_lock held.
5079  */
5080 void md_allow_write(mddev_t *mddev)
5081 {
5082         if (!mddev->pers)
5083                 return;
5084         if (mddev->ro)
5085                 return;
5086
5087         spin_lock_irq(&mddev->write_lock);
5088         if (mddev->in_sync) {
5089                 mddev->in_sync = 0;
5090                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5091                 if (mddev->safemode_delay &&
5092                     mddev->safemode == 0)
5093                         mddev->safemode = 1;
5094                 spin_unlock_irq(&mddev->write_lock);
5095                 md_update_sb(mddev, 0);
5096         } else
5097                 spin_unlock_irq(&mddev->write_lock);
5098 }
5099 EXPORT_SYMBOL_GPL(md_allow_write);
5100
5101 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
5102
5103 #define SYNC_MARKS      10
5104 #define SYNC_MARK_STEP  (3*HZ)
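/* i.e. resync speed is averaged over a sliding window of up to
 * SYNC_MARKS * SYNC_MARK_STEP, roughly the last 30 seconds. */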
5105 void md_do_sync(mddev_t *mddev)
5106 {
5107         mddev_t *mddev2;
5108         unsigned int currspeed = 0,
5109                  window;
5110         sector_t max_sectors,j, io_sectors;
5111         unsigned long mark[SYNC_MARKS];
5112         sector_t mark_cnt[SYNC_MARKS];
5113         int last_mark,m;
5114         struct list_head *tmp;
5115         sector_t last_check;
5116         int skipped = 0;
5117         struct list_head *rtmp;
5118         mdk_rdev_t *rdev;
5119         char *desc;
5120
5121         /* just in case the thread restarts... */
5122         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5123                 return;
5124         if (mddev->ro) /* never try to sync a read-only array */
5125                 return;
5126
5127         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5128                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5129                         desc = "data-check";
5130                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5131                         desc = "requested-resync";
5132                 else
5133                         desc = "resync";
5134         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5135                 desc = "reshape";
5136         else
5137                 desc = "recovery";
5138
5139         /* we overload curr_resync somewhat here.
5140          * 0 == not engaged in resync at all
5141          * 2 == checking that there is no conflict with another sync
5142          * 1 == like 2, but have yielded to allow conflicting resync to
5143          *              commence
5144          * other == active in resync - this many blocks
5145          *
5146          * Before starting a resync we must have set curr_resync to
5147          * 2, and then checked that every "conflicting" array has curr_resync
5148          * less than ours.  When we find one that is the same or higher
5149          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
5150          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
5151          * This will mean we have to start checking from the beginning again.
5152          *
5153          */
5154
5155         do {
5156                 mddev->curr_resync = 2;
5157
5158         try_again:
5159                 if (kthread_should_stop()) {
5160                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5161                         goto skip;
5162                 }
5163                 ITERATE_MDDEV(mddev2,tmp) {
5164                         if (mddev2 == mddev)
5165                                 continue;
5166                         if (mddev2->curr_resync && 
5167                             match_mddev_units(mddev,mddev2)) {
5168                                 DEFINE_WAIT(wq);
5169                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
5170                                         /* arbitrarily yield */
5171                                         mddev->curr_resync = 1;
5172                                         wake_up(&resync_wait);
5173                                 }
5174                                 if (mddev > mddev2 && mddev->curr_resync == 1)
5175                                         /* no need to wait here, we can wait the next
5176                                          * time 'round when curr_resync == 2
5177                                          */
5178                                         continue;
5179                                 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5180                                 if (!kthread_should_stop() &&
5181                                     mddev2->curr_resync >= mddev->curr_resync) {
5182                                         printk(KERN_INFO "md: delaying %s of %s"
5183                                                " until %s has finished (they"
5184                                                " share one or more physical units)\n",
5185                                                desc, mdname(mddev), mdname(mddev2));
5186                                         mddev_put(mddev2);
5187                                         schedule();
5188                                         finish_wait(&resync_wait, &wq);
5189                                         goto try_again;
5190                                 }
5191                                 finish_wait(&resync_wait, &wq);
5192                         }
5193                 }
5194         } while (mddev->curr_resync < 2);
5195
5196         j = 0;
5197         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5198                 /* resync follows the size requested by the personality,
5199                  * which defaults to physical size, but can be virtual size
5200                  */
5201                 max_sectors = mddev->resync_max_sectors;
5202                 mddev->resync_mismatches = 0;
5203                 /* we don't use the checkpoint if there's a bitmap */
5204                 if (!mddev->bitmap &&
5205                     !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5206                         j = mddev->recovery_cp;
5207         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5208                 max_sectors = mddev->size << 1;
5209         else {
5210                 /* recovery follows the physical size of devices */
5211                 max_sectors = mddev->size << 1;
5212                 j = MaxSector;
5213                 ITERATE_RDEV(mddev,rdev,rtmp)
5214                         if (rdev->raid_disk >= 0 &&
5215                             !test_bit(Faulty, &rdev->flags) &&
5216                             !test_bit(In_sync, &rdev->flags) &&
5217                             rdev->recovery_offset < j)
5218                                 j = rdev->recovery_offset;
5219         }
5220
5221         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
5222         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
5223                 " %d KB/sec/disk.\n", speed_min(mddev));
5224         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5225                "(but not more than %d KB/sec) for %s.\n",
5226                speed_max(mddev), desc);
5227
5228         is_mddev_idle(mddev); /* this also initializes IO event counters */
5229
5230         io_sectors = 0;
5231         for (m = 0; m < SYNC_MARKS; m++) {
5232                 mark[m] = jiffies;
5233                 mark_cnt[m] = io_sectors;
5234         }
5235         last_mark = 0;
5236         mddev->resync_mark = mark[last_mark];
5237         mddev->resync_mark_cnt = mark_cnt[last_mark];
5238
5239         /*
5240          * Tune reconstruction:
5241          */
5242         window = 32*(PAGE_SIZE/512);
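        /* e.g. 256 sectors with 4k pages - the "128k window" reported below */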
5243         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5244                 window/2,(unsigned long long) max_sectors/2);
5245
5246         atomic_set(&mddev->recovery_active, 0);
5247         init_waitqueue_head(&mddev->recovery_wait);
5248         last_check = 0;
5249
5250         if (j>2) {
5251                 printk(KERN_INFO 
5252                        "md: resuming %s of %s from checkpoint.\n",
5253                        desc, mdname(mddev));
5254                 mddev->curr_resync = j;
5255         }
5256
5257         while (j < max_sectors) {
5258                 sector_t sectors;
5259
5260                 skipped = 0;
5261                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5262                                             currspeed < speed_min(mddev));
5263                 if (sectors == 0) {
5264                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5265                         goto out;
5266                 }
5267
5268                 if (!skipped) { /* actual IO requested */
5269                         io_sectors += sectors;
5270                         atomic_add(sectors, &mddev->recovery_active);
5271                 }
5272
5273                 j += sectors;
5274                 if (j>1) mddev->curr_resync = j;
5275                 mddev->curr_mark_cnt = io_sectors;
5276                 if (last_check == 0)
5277                         /* this is the earliest that the rebuild will be
5278                          * visible in /proc/mdstat
5279                          */
5280                         md_new_event(mddev);
5281
5282                 if (last_check + window > io_sectors || j == max_sectors)
5283                         continue;
5284
5285                 last_check = io_sectors;
5286
5287                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
5288                     test_bit(MD_RECOVERY_ERR, &mddev->recovery))
5289                         break;
5290
5291         repeat:
5292                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5293                         /* step marks */
5294                         int next = (last_mark+1) % SYNC_MARKS;
5295
5296                         mddev->resync_mark = mark[next];
5297                         mddev->resync_mark_cnt = mark_cnt[next];
5298                         mark[next] = jiffies;
5299                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5300                         last_mark = next;
5301                 }
5302
5303
5304                 if (kthread_should_stop()) {
5305                         /*
5306                          * got a signal, exit.
5307                          */
5308                         printk(KERN_INFO 
5309                                 "md: md_do_sync() got signal ... exiting\n");
5310                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5311                         goto out;
5312                 }
5313
5314                 /*
5315                  * this loop exits only if we are slower than
5316                  * the 'hard' speed limit, or the system was IO-idle for
5317                  * a jiffy.
5318                  * the system might be non-idle CPU-wise, but we only care
5319                  * about not overloading the IO subsystem. (things like an
5320                  * e2fsck being done on the RAID array should execute fast)
5321                  */
5322                 mddev->queue->unplug_fn(mddev->queue);
5323                 cond_resched();
5324
5325                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5326                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
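                /* currspeed is thus KB/sec averaged since the oldest mark;
                 * the +1 terms keep the divisor and the result non-zero.
                 */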
5327
5328                 if (currspeed > speed_min(mddev)) {
5329                         if ((currspeed > speed_max(mddev)) ||
5330                                         !is_mddev_idle(mddev)) {
5331                                 msleep(500);
5332                                 goto repeat;
5333                         }
5334                 }
5335         }
5336         printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
5337         /*
5338          * this also signals 'finished resyncing' to md_stop
5339          */
5340  out:
5341         mddev->queue->unplug_fn(mddev->queue);
5342
5343         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5344
5345         /* tell personality that we are finished */
5346         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5347
5348         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5349             !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5350             mddev->curr_resync > 2) {
5351                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5352                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5353                                 if (mddev->curr_resync >= mddev->recovery_cp) {
5354                                         printk(KERN_INFO
5355                                                "md: checkpointing %s of %s.\n",
5356                                                desc, mdname(mddev));
5357                                         mddev->recovery_cp = mddev->curr_resync;
5358                                 }
5359                         } else
5360                                 mddev->recovery_cp = MaxSector;
5361                 } else {
5362                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5363                                 mddev->curr_resync = MaxSector;
5364                         ITERATE_RDEV(mddev,rdev,rtmp)
5365                                 if (rdev->raid_disk >= 0 &&
5366                                     !test_bit(Faulty, &rdev->flags) &&
5367                                     !test_bit(In_sync, &rdev->flags) &&
5368                                     rdev->recovery_offset < mddev->curr_resync)
5369                                         rdev->recovery_offset = mddev->curr_resync;
5370                 }
5371         }
5372         set_bit(MD_CHANGE_DEVS, &mddev->flags);
5373
5374  skip:
5375         mddev->curr_resync = 0;
5376         wake_up(&resync_wait);
5377         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5378         md_wakeup_thread(mddev->thread);
5379 }
5380 EXPORT_SYMBOL_GPL(md_do_sync);
5381
5382
5383 static int remove_and_add_spares(mddev_t *mddev)
5384 {
5385         mdk_rdev_t *rdev;
5386         struct list_head *rtmp;
5387         int spares = 0;
5388
5389         ITERATE_RDEV(mddev,rdev,rtmp)
5390                 if (rdev->raid_disk >= 0 &&
5391                     (test_bit(Faulty, &rdev->flags) ||
5392                      ! test_bit(In_sync, &rdev->flags)) &&
5393                     atomic_read(&rdev->nr_pending)==0) {
5394                         if (mddev->pers->hot_remove_disk(
5395                                     mddev, rdev->raid_disk)==0) {
5396                                 char nm[20];
5397                                 sprintf(nm,"rd%d", rdev->raid_disk);
5398                                 sysfs_remove_link(&mddev->kobj, nm);
5399                                 rdev->raid_disk = -1;
5400                         }
5401                 }
5402
5403         if (mddev->degraded) {
5404                 ITERATE_RDEV(mddev,rdev,rtmp)
5405                         if (rdev->raid_disk < 0
5406                             && !test_bit(Faulty, &rdev->flags)) {
5407                                 rdev->recovery_offset = 0;
5408                                 if (mddev->pers->hot_add_disk(mddev,rdev)) {
5409                                         char nm[20];
5410                                         sprintf(nm, "rd%d", rdev->raid_disk);
5411                                         if (sysfs_create_link(&mddev->kobj,
5412                                                               &rdev->kobj, nm))
5413                                                 printk(KERN_WARNING
5414                                                        "md: cannot register "
5415                                                        "%s for %s\n",
5416                                                        nm, mdname(mddev));
5417                                         spares++;
5418                                         md_new_event(mddev);
5419                                 } else
5420                                         break;
5421                         }
5422         }
5423         return spares;
5424 }
5425 /*
5426  * This routine is regularly called by all per-raid-array threads to
5427  * deal with generic issues like resync and super-block update.
5428  * Raid personalities that don't have a thread (linear/raid0) do not
5429  * need this as they never do any recovery or update the superblock.
5430  *
5431  * It does not do any resync itself, but rather "forks" off other threads
5432  * to do that as needed.
5433  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
5434  * "->recovery" and create a thread at ->sync_thread.
5435  * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
5436  * and wakes up this thread which will reap the thread and finish up.
5437  * This thread also removes any faulty devices (with nr_pending == 0).
5438  *
5439  * The overall approach is:
5440  *  1/ if the superblock needs updating, update it.
5441  *  2/ If a recovery thread is running, don't do anything else.
5442  *  3/ If recovery has finished, clean up, possibly marking spares active.
5443  *  4/ If there are any faulty devices, remove them.
5444  *  5/ If array is degraded, try to add spare devices
5445  *  6/ If array has spares or is not in-sync, start a resync thread.
5446  */
5447 void md_check_recovery(mddev_t *mddev)
5448 {
5449         mdk_rdev_t *rdev;
5450         struct list_head *rtmp;
5451
5452
5453         if (mddev->bitmap)
5454                 bitmap_daemon_work(mddev->bitmap);
5455
5456         if (mddev->ro)
5457                 return;
5458
5459         if (signal_pending(current)) {
5460                 if (mddev->pers->sync_request) {
5461                         printk(KERN_INFO "md: %s in immediate safe mode\n",
5462                                mdname(mddev));
5463                         mddev->safemode = 2;
5464                 }
5465                 flush_signals(current);
5466         }
5467
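        /* Fast path: nothing to do unless the superblock is dirty, recovery
         * needs to be started or reaped, or safemode wants the array marked
         * clean.
         */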
5468         if ( ! (
5469                 mddev->flags ||
5470                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
5471                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
5472                 (mddev->safemode == 1) ||
5473                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
5474                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
5475                 ))
5476                 return;
5477
5478         if (mddev_trylock(mddev)) {
5479                 int spares = 0;
5480
5481                 spin_lock_irq(&mddev->write_lock);
5482                 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
5483                     !mddev->in_sync && mddev->recovery_cp == MaxSector) {
5484                         mddev->in_sync = 1;
5485                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5486                 }
5487                 if (mddev->safemode == 1)
5488                         mddev->safemode = 0;
5489                 spin_unlock_irq(&mddev->write_lock);
5490
5491                 if (mddev->flags)
5492                         md_update_sb(mddev, 0);
5493
5494
5495                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
5496                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
5497                         /* resync/recovery still happening */
5498                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5499                         goto unlock;
5500                 }
5501                 if (mddev->sync_thread) {
5502                         /* resync has finished, collect result */
5503                         md_unregister_thread(mddev->sync_thread);
5504                         mddev->sync_thread = NULL;
5505                         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5506                             !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5507                                 /* success...*/
5508                                 /* activate any spares */
5509                                 mddev->pers->spare_active(mddev);
5510                         }
5511                         md_update_sb(mddev, 1);
5512
5513                         /* if array is no longer degraded, then any saved_raid_disk
5514                          * information must be scrapped
5515                          */
5516                         if (!mddev->degraded)
5517                                 ITERATE_RDEV(mddev,rdev,rtmp)
5518                                         rdev->saved_raid_disk = -1;
5519
5520                         mddev->recovery = 0;
5521                         /* flag recovery needed just to double check */
5522                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5523                         md_new_event(mddev);
5524                         goto unlock;
5525                 }
5526                 /* Clear some bits that don't mean anything, but
5527                  * might be left set
5528                  */
5529                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5530                 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
5531                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
5532                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
5533
5534                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
5535                         goto unlock;
5536                 /* no recovery is running.
5537                  * remove any failed drives, then
5538                  * add spares if possible.
5539                  * Spares are also removed and re-added, to allow
5540                  * the personality to fail the re-add.
5541                  */
5542
5543                 if (mddev->reshape_position != MaxSector) {
5544                         if (mddev->pers->check_reshape(mddev) != 0)
5545                                 /* Cannot proceed */
5546                                 goto unlock;
5547                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5548                 } else if ((spares = remove_and_add_spares(mddev))) {
5549                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5550                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5551                 } else if (mddev->recovery_cp < MaxSector) {
5552                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5553                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5554                         /* nothing to be done ... */
5555                         goto unlock;
5556
5557                 if (mddev->pers->sync_request) {
5558                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5559                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
5560                                 /* We are adding a device or devices to an array
5561                                  * which has the bitmap stored on all devices.
5562                                  * So make sure all bitmap pages get written
5563                                  */
5564                                 bitmap_write_all(mddev->bitmap);
5565                         }
5566                         mddev->sync_thread = md_register_thread(md_do_sync,
5567                                                                 mddev,
5568                                                                 "%s_resync");
5569                         if (!mddev->sync_thread) {
5570                                 printk(KERN_ERR "%s: could not start resync"
5571                                         " thread...\n", 
5572                                         mdname(mddev));
5573                                 /* leave the spares where they are, it shouldn't hurt */
5574                                 mddev->recovery = 0;
5575                         } else
5576                                 md_wakeup_thread(mddev->sync_thread);
5577                         md_new_event(mddev);
5578                 }
5579         unlock:
5580                 mddev_unlock(mddev);
5581         }
5582 }
5583
5584 static int md_notify_reboot(struct notifier_block *this,
5585                             unsigned long code, void *x)
5586 {
5587         struct list_head *tmp;
5588         mddev_t *mddev;
5589
5590         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
5591
5592                 printk(KERN_INFO "md: stopping all md devices.\n");
5593
5594                 ITERATE_MDDEV(mddev,tmp)
5595                         if (mddev_trylock(mddev)) {
5596                                 do_md_stop (mddev, 1);
5597                                 mddev_unlock(mddev);
5598                         }
5599                 /*
5600                  * certain more exotic SCSI devices are known to be
5601                  * volatile with respect to too-early system reboots. While the
5602                  * right place to handle this issue is the given
5603                  * driver, we do want to have a safe RAID driver ...
5604                  */
5605                 mdelay(1000*1);
5606         }
5607         return NOTIFY_DONE;
5608 }
5609
5610 static struct notifier_block md_notifier = {
5611         .notifier_call  = md_notify_reboot,
5612         .next           = NULL,
5613         .priority       = INT_MAX, /* before any real devices */
5614 };
5615
5616 static void md_geninit(void)
5617 {
5618         struct proc_dir_entry *p;
5619
5620         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
5621
5622         p = create_proc_entry("mdstat", S_IRUGO, NULL);
5623         if (p)
5624                 p->proc_fops = &md_seq_fops;
5625 }
5626
5627 static int __init md_init(void)
5628 {
5629         if (register_blkdev(MAJOR_NR, "md"))
5630                 return -1;
5631         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
5632                 unregister_blkdev(MAJOR_NR, "md");
5633                 return -1;
5634         }
5635         blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
5636                             md_probe, NULL, NULL);
5637         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
5638                             md_probe, NULL, NULL);
5639
5640         register_reboot_notifier(&md_notifier);
5641         raid_table_header = register_sysctl_table(raid_root_table);
5642
5643         md_geninit();
5644         return (0);
5645 }
5646
5647
5648 #ifndef MODULE
5649
5650 /*
5651  * Searches all registered partitions for autorun RAID arrays
5652  * at boot time.
5653  */
5654 static dev_t detected_devices[128];
5655 static int dev_cnt;
5656
5657 void md_autodetect_dev(dev_t dev)
5658 {
5659         if (dev_cnt >= 0 && dev_cnt < 127)
5660                 detected_devices[dev_cnt++] = dev;
5661 }
5662
5663
5664 static void autostart_arrays(int part)
5665 {
5666         mdk_rdev_t *rdev;
5667         int i;
5668
5669         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
5670
5671         for (i = 0; i < dev_cnt; i++) {
5672                 dev_t dev = detected_devices[i];
5673
5674                 rdev = md_import_device(dev,0, 0);
5675                 if (IS_ERR(rdev))
5676                         continue;
5677
5678                 if (test_bit(Faulty, &rdev->flags)) {
5679                         MD_BUG();
5680                         continue;
5681                 }
5682                 list_add(&rdev->same_set, &pending_raid_disks);
5683         }
5684         dev_cnt = 0;
5685
5686         autorun_devices(part);
5687 }
5688
5689 #endif /* !MODULE */
5690
5691 static __exit void md_exit(void)
5692 {
5693         mddev_t *mddev;
5694         struct list_head *tmp;
5695
5696         blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
5697         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
5698
5699         unregister_blkdev(MAJOR_NR,"md");
5700         unregister_blkdev(mdp_major, "mdp");
5701         unregister_reboot_notifier(&md_notifier);
5702         unregister_sysctl_table(raid_table_header);
5703         remove_proc_entry("mdstat", NULL);
5704         ITERATE_MDDEV(mddev,tmp) {
5705                 struct gendisk *disk = mddev->gendisk;
5706                 if (!disk)
5707                         continue;
5708                 export_array(mddev);
5709                 del_gendisk(disk);
5710                 put_disk(disk);
5711                 mddev->gendisk = NULL;
5712                 mddev_put(mddev);
5713         }
5714 }
5715
5716 module_init(md_init)
5717 module_exit(md_exit)
5718
5719 static int get_ro(char *buffer, struct kernel_param *kp)
5720 {
5721         return sprintf(buffer, "%d", start_readonly);
5722 }
5723 static int set_ro(const char *val, struct kernel_param *kp)
5724 {
5725         char *e;
5726         int num = simple_strtoul(val, &e, 10);
5727         if (*val && (*e == '\0' || *e == '\n')) {
5728                 start_readonly = num;
5729                 return 0;
5730         }
5731         return -EINVAL;
5732 }
5733
5734 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
5735 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
5736
5737
5738 EXPORT_SYMBOL(register_md_personality);
5739 EXPORT_SYMBOL(unregister_md_personality);
5740 EXPORT_SYMBOL(md_error);
5741 EXPORT_SYMBOL(md_done_sync);
5742 EXPORT_SYMBOL(md_write_start);
5743 EXPORT_SYMBOL(md_write_end);
5744 EXPORT_SYMBOL(md_register_thread);
5745 EXPORT_SYMBOL(md_unregister_thread);
5746 EXPORT_SYMBOL(md_wakeup_thread);
5747 EXPORT_SYMBOL(md_check_recovery);
5748 MODULE_LICENSE("GPL");
5749 MODULE_ALIAS("md");
5750 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);