]> err.no Git - linux-2.6/blob - drivers/md/md.c
[PATCH] md: move warning about creating a raid array on partitions of the one device
[linux-2.6] / drivers / md / md.c
1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/module.h>
36 #include <linux/kthread.h>
37 #include <linux/linkage.h>
38 #include <linux/raid/md.h>
39 #include <linux/raid/bitmap.h>
40 #include <linux/sysctl.h>
41 #include <linux/buffer_head.h> /* for invalidate_bdev */
42 #include <linux/poll.h>
43 #include <linux/mutex.h>
44 #include <linux/ctype.h>
45 #include <linux/freezer.h>
46
47 #include <linux/init.h>
48
49 #include <linux/file.h>
50
51 #ifdef CONFIG_KMOD
52 #include <linux/kmod.h>
53 #endif
54
55 #include <asm/unaligned.h>
56
57 #define MAJOR_NR MD_MAJOR
58 #define MD_DRIVER
59
60 /* 63 partitions with the alternate major number (mdp) */
61 #define MdpMinorShift 6
62
63 #define DEBUG 0
64 #define dprintk(x...) ((void)(DEBUG && printk(x)))
65
66
67 #ifndef MODULE
68 static void autostart_arrays (int part);
69 #endif
70
71 static LIST_HEAD(pers_list);
72 static DEFINE_SPINLOCK(pers_lock);
73
74 static void md_print_devices(void);
75
76 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
77
78 /*
79  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
80  * is 1000 KB/sec, so the extra system load does not show up that much.
81  * Increase it if you want to have more _guaranteed_ speed. Note that
82  * the RAID driver will use the maximum available bandwidth if the IO
83  * subsystem is idle. There is also an 'absolute maximum' reconstruction
84  * speed limit - in case reconstruction slows down your system despite
85  * idle IO detection.
86  *
87  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
88  * or /sys/block/mdX/md/sync_speed_{min,max}
89  */
90
91 static int sysctl_speed_limit_min = 1000;
92 static int sysctl_speed_limit_max = 200000;
93 static inline int speed_min(mddev_t *mddev)
94 {
95         return mddev->sync_speed_min ?
96                 mddev->sync_speed_min : sysctl_speed_limit_min;
97 }
98
99 static inline int speed_max(mddev_t *mddev)
100 {
101         return mddev->sync_speed_max ?
102                 mddev->sync_speed_max : sysctl_speed_limit_max;
103 }
104
105 static struct ctl_table_header *raid_table_header;
106
107 static ctl_table raid_table[] = {
108         {
109                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
110                 .procname       = "speed_limit_min",
111                 .data           = &sysctl_speed_limit_min,
112                 .maxlen         = sizeof(int),
113                 .mode           = S_IRUGO|S_IWUSR,
114                 .proc_handler   = &proc_dointvec,
115         },
116         {
117                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
118                 .procname       = "speed_limit_max",
119                 .data           = &sysctl_speed_limit_max,
120                 .maxlen         = sizeof(int),
121                 .mode           = S_IRUGO|S_IWUSR,
122                 .proc_handler   = &proc_dointvec,
123         },
124         { .ctl_name = 0 }
125 };
126
127 static ctl_table raid_dir_table[] = {
128         {
129                 .ctl_name       = DEV_RAID,
130                 .procname       = "raid",
131                 .maxlen         = 0,
132                 .mode           = S_IRUGO|S_IXUGO,
133                 .child          = raid_table,
134         },
135         { .ctl_name = 0 }
136 };
137
138 static ctl_table raid_root_table[] = {
139         {
140                 .ctl_name       = CTL_DEV,
141                 .procname       = "dev",
142                 .maxlen         = 0,
143                 .mode           = 0555,
144                 .child          = raid_dir_table,
145         },
146         { .ctl_name = 0 }
147 };
148
149 static struct block_device_operations md_fops;
150
151 static int start_readonly;
152
153 /*
154  * We have a system wide 'event count' that is incremented
155  * on any 'interesting' event, and readers of /proc/mdstat
156  * can use 'poll' or 'select' to find out when the event
157  * count increases.
158  *
159  * Events are:
160  *  start array, stop array, error, add device, remove device,
161  *  start build, activate spare
162  */
163 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
164 static atomic_t md_event_count;
165 void md_new_event(mddev_t *mddev)
166 {
167         atomic_inc(&md_event_count);
168         wake_up(&md_event_waiters);
169         sysfs_notify(&mddev->kobj, NULL, "sync_action");
170 }
171 EXPORT_SYMBOL_GPL(md_new_event);
172
173 /* Alternate version that can be called from interrupts
174  * when calling sysfs_notify isn't needed.
175  */
176 static void md_new_event_inintr(mddev_t *mddev)
177 {
178         atomic_inc(&md_event_count);
179         wake_up(&md_event_waiters);
180 }
181
182 /*
183  * Enables to iterate over all existing md arrays
184  * all_mddevs_lock protects this list.
185  */
186 static LIST_HEAD(all_mddevs);
187 static DEFINE_SPINLOCK(all_mddevs_lock);
188
189
190 /*
191  * iterates through all used mddevs in the system.
192  * We take care to grab the all_mddevs_lock whenever navigating
193  * the list, and to always hold a refcount when unlocked.
194  * Any code which breaks out of this loop while own
195  * a reference to the current mddev and must mddev_put it.
196  */
197 #define ITERATE_MDDEV(mddev,tmp)                                        \
198                                                                         \
199         for (({ spin_lock(&all_mddevs_lock);                            \
200                 tmp = all_mddevs.next;                                  \
201                 mddev = NULL;});                                        \
202              ({ if (tmp != &all_mddevs)                                 \
203                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
204                 spin_unlock(&all_mddevs_lock);                          \
205                 if (mddev) mddev_put(mddev);                            \
206                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
207                 tmp != &all_mddevs;});                                  \
208              ({ spin_lock(&all_mddevs_lock);                            \
209                 tmp = tmp->next;})                                      \
210                 )
211
212
213 static int md_fail_request (request_queue_t *q, struct bio *bio)
214 {
215         bio_io_error(bio, bio->bi_size);
216         return 0;
217 }
218
219 static inline mddev_t *mddev_get(mddev_t *mddev)
220 {
221         atomic_inc(&mddev->active);
222         return mddev;
223 }
224
225 static void mddev_put(mddev_t *mddev)
226 {
227         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
228                 return;
229         if (!mddev->raid_disks && list_empty(&mddev->disks)) {
230                 list_del(&mddev->all_mddevs);
231                 spin_unlock(&all_mddevs_lock);
232                 blk_cleanup_queue(mddev->queue);
233                 kobject_unregister(&mddev->kobj);
234         } else
235                 spin_unlock(&all_mddevs_lock);
236 }
237
238 static mddev_t * mddev_find(dev_t unit)
239 {
240         mddev_t *mddev, *new = NULL;
241
242  retry:
243         spin_lock(&all_mddevs_lock);
244         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
245                 if (mddev->unit == unit) {
246                         mddev_get(mddev);
247                         spin_unlock(&all_mddevs_lock);
248                         kfree(new);
249                         return mddev;
250                 }
251
252         if (new) {
253                 list_add(&new->all_mddevs, &all_mddevs);
254                 spin_unlock(&all_mddevs_lock);
255                 return new;
256         }
257         spin_unlock(&all_mddevs_lock);
258
259         new = kzalloc(sizeof(*new), GFP_KERNEL);
260         if (!new)
261                 return NULL;
262
263         new->unit = unit;
264         if (MAJOR(unit) == MD_MAJOR)
265                 new->md_minor = MINOR(unit);
266         else
267                 new->md_minor = MINOR(unit) >> MdpMinorShift;
268
269         mutex_init(&new->reconfig_mutex);
270         INIT_LIST_HEAD(&new->disks);
271         INIT_LIST_HEAD(&new->all_mddevs);
272         init_timer(&new->safemode_timer);
273         atomic_set(&new->active, 1);
274         spin_lock_init(&new->write_lock);
275         init_waitqueue_head(&new->sb_wait);
276
277         new->queue = blk_alloc_queue(GFP_KERNEL);
278         if (!new->queue) {
279                 kfree(new);
280                 return NULL;
281         }
282         set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
283
284         blk_queue_make_request(new->queue, md_fail_request);
285
286         goto retry;
287 }
288
289 static inline int mddev_lock(mddev_t * mddev)
290 {
291         return mutex_lock_interruptible(&mddev->reconfig_mutex);
292 }
293
294 static inline int mddev_trylock(mddev_t * mddev)
295 {
296         return mutex_trylock(&mddev->reconfig_mutex);
297 }
298
299 static inline void mddev_unlock(mddev_t * mddev)
300 {
301         mutex_unlock(&mddev->reconfig_mutex);
302
303         md_wakeup_thread(mddev->thread);
304 }
305
306 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
307 {
308         mdk_rdev_t * rdev;
309         struct list_head *tmp;
310
311         ITERATE_RDEV(mddev,rdev,tmp) {
312                 if (rdev->desc_nr == nr)
313                         return rdev;
314         }
315         return NULL;
316 }
317
318 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
319 {
320         struct list_head *tmp;
321         mdk_rdev_t *rdev;
322
323         ITERATE_RDEV(mddev,rdev,tmp) {
324                 if (rdev->bdev->bd_dev == dev)
325                         return rdev;
326         }
327         return NULL;
328 }
329
330 static struct mdk_personality *find_pers(int level, char *clevel)
331 {
332         struct mdk_personality *pers;
333         list_for_each_entry(pers, &pers_list, list) {
334                 if (level != LEVEL_NONE && pers->level == level)
335                         return pers;
336                 if (strcmp(pers->name, clevel)==0)
337                         return pers;
338         }
339         return NULL;
340 }
341
342 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
343 {
344         sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
345         return MD_NEW_SIZE_BLOCKS(size);
346 }
347
348 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
349 {
350         sector_t size;
351
352         size = rdev->sb_offset;
353
354         if (chunk_size)
355                 size &= ~((sector_t)chunk_size/1024 - 1);
356         return size;
357 }
358
359 static int alloc_disk_sb(mdk_rdev_t * rdev)
360 {
361         if (rdev->sb_page)
362                 MD_BUG();
363
364         rdev->sb_page = alloc_page(GFP_KERNEL);
365         if (!rdev->sb_page) {
366                 printk(KERN_ALERT "md: out of memory.\n");
367                 return -EINVAL;
368         }
369
370         return 0;
371 }
372
373 static void free_disk_sb(mdk_rdev_t * rdev)
374 {
375         if (rdev->sb_page) {
376                 put_page(rdev->sb_page);
377                 rdev->sb_loaded = 0;
378                 rdev->sb_page = NULL;
379                 rdev->sb_offset = 0;
380                 rdev->size = 0;
381         }
382 }
383
384
385 static int super_written(struct bio *bio, unsigned int bytes_done, int error)
386 {
387         mdk_rdev_t *rdev = bio->bi_private;
388         mddev_t *mddev = rdev->mddev;
389         if (bio->bi_size)
390                 return 1;
391
392         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
393                 printk("md: super_written gets error=%d, uptodate=%d\n",
394                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
395                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
396                 md_error(mddev, rdev);
397         }
398
399         if (atomic_dec_and_test(&mddev->pending_writes))
400                 wake_up(&mddev->sb_wait);
401         bio_put(bio);
402         return 0;
403 }
404
405 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
406 {
407         struct bio *bio2 = bio->bi_private;
408         mdk_rdev_t *rdev = bio2->bi_private;
409         mddev_t *mddev = rdev->mddev;
410         if (bio->bi_size)
411                 return 1;
412
413         if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
414             error == -EOPNOTSUPP) {
415                 unsigned long flags;
416                 /* barriers don't appear to be supported :-( */
417                 set_bit(BarriersNotsupp, &rdev->flags);
418                 mddev->barriers_work = 0;
419                 spin_lock_irqsave(&mddev->write_lock, flags);
420                 bio2->bi_next = mddev->biolist;
421                 mddev->biolist = bio2;
422                 spin_unlock_irqrestore(&mddev->write_lock, flags);
423                 wake_up(&mddev->sb_wait);
424                 bio_put(bio);
425                 return 0;
426         }
427         bio_put(bio2);
428         bio->bi_private = rdev;
429         return super_written(bio, bytes_done, error);
430 }
431
432 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
433                    sector_t sector, int size, struct page *page)
434 {
435         /* write first size bytes of page to sector of rdev
436          * Increment mddev->pending_writes before returning
437          * and decrement it on completion, waking up sb_wait
438          * if zero is reached.
439          * If an error occurred, call md_error
440          *
441          * As we might need to resubmit the request if BIO_RW_BARRIER
442          * causes ENOTSUPP, we allocate a spare bio...
443          */
444         struct bio *bio = bio_alloc(GFP_NOIO, 1);
445         int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
446
447         bio->bi_bdev = rdev->bdev;
448         bio->bi_sector = sector;
449         bio_add_page(bio, page, size, 0);
450         bio->bi_private = rdev;
451         bio->bi_end_io = super_written;
452         bio->bi_rw = rw;
453
454         atomic_inc(&mddev->pending_writes);
455         if (!test_bit(BarriersNotsupp, &rdev->flags)) {
456                 struct bio *rbio;
457                 rw |= (1<<BIO_RW_BARRIER);
458                 rbio = bio_clone(bio, GFP_NOIO);
459                 rbio->bi_private = bio;
460                 rbio->bi_end_io = super_written_barrier;
461                 submit_bio(rw, rbio);
462         } else
463                 submit_bio(rw, bio);
464 }
465
466 void md_super_wait(mddev_t *mddev)
467 {
468         /* wait for all superblock writes that were scheduled to complete.
469          * if any had to be retried (due to BARRIER problems), retry them
470          */
471         DEFINE_WAIT(wq);
472         for(;;) {
473                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
474                 if (atomic_read(&mddev->pending_writes)==0)
475                         break;
476                 while (mddev->biolist) {
477                         struct bio *bio;
478                         spin_lock_irq(&mddev->write_lock);
479                         bio = mddev->biolist;
480                         mddev->biolist = bio->bi_next ;
481                         bio->bi_next = NULL;
482                         spin_unlock_irq(&mddev->write_lock);
483                         submit_bio(bio->bi_rw, bio);
484                 }
485                 schedule();
486         }
487         finish_wait(&mddev->sb_wait, &wq);
488 }
489
490 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
491 {
492         if (bio->bi_size)
493                 return 1;
494
495         complete((struct completion*)bio->bi_private);
496         return 0;
497 }
498
499 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
500                    struct page *page, int rw)
501 {
502         struct bio *bio = bio_alloc(GFP_NOIO, 1);
503         struct completion event;
504         int ret;
505
506         rw |= (1 << BIO_RW_SYNC);
507
508         bio->bi_bdev = bdev;
509         bio->bi_sector = sector;
510         bio_add_page(bio, page, size, 0);
511         init_completion(&event);
512         bio->bi_private = &event;
513         bio->bi_end_io = bi_complete;
514         submit_bio(rw, bio);
515         wait_for_completion(&event);
516
517         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
518         bio_put(bio);
519         return ret;
520 }
521 EXPORT_SYMBOL_GPL(sync_page_io);
522
523 static int read_disk_sb(mdk_rdev_t * rdev, int size)
524 {
525         char b[BDEVNAME_SIZE];
526         if (!rdev->sb_page) {
527                 MD_BUG();
528                 return -EINVAL;
529         }
530         if (rdev->sb_loaded)
531                 return 0;
532
533
534         if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
535                 goto fail;
536         rdev->sb_loaded = 1;
537         return 0;
538
539 fail:
540         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
541                 bdevname(rdev->bdev,b));
542         return -EINVAL;
543 }
544
545 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
546 {
547         if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
548                 (sb1->set_uuid1 == sb2->set_uuid1) &&
549                 (sb1->set_uuid2 == sb2->set_uuid2) &&
550                 (sb1->set_uuid3 == sb2->set_uuid3))
551
552                 return 1;
553
554         return 0;
555 }
556
557
558 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
559 {
560         int ret;
561         mdp_super_t *tmp1, *tmp2;
562
563         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
564         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
565
566         if (!tmp1 || !tmp2) {
567                 ret = 0;
568                 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
569                 goto abort;
570         }
571
572         *tmp1 = *sb1;
573         *tmp2 = *sb2;
574
575         /*
576          * nr_disks is not constant
577          */
578         tmp1->nr_disks = 0;
579         tmp2->nr_disks = 0;
580
581         if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
582                 ret = 0;
583         else
584                 ret = 1;
585
586 abort:
587         kfree(tmp1);
588         kfree(tmp2);
589         return ret;
590 }
591
592 static unsigned int calc_sb_csum(mdp_super_t * sb)
593 {
594         unsigned int disk_csum, csum;
595
596         disk_csum = sb->sb_csum;
597         sb->sb_csum = 0;
598         csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
599         sb->sb_csum = disk_csum;
600         return csum;
601 }
602
603
604 /*
605  * Handle superblock details.
606  * We want to be able to handle multiple superblock formats
607  * so we have a common interface to them all, and an array of
608  * different handlers.
609  * We rely on user-space to write the initial superblock, and support
610  * reading and updating of superblocks.
611  * Interface methods are:
612  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
613  *      loads and validates a superblock on dev.
614  *      if refdev != NULL, compare superblocks on both devices
615  *    Return:
616  *      0 - dev has a superblock that is compatible with refdev
617  *      1 - dev has a superblock that is compatible and newer than refdev
618  *          so dev should be used as the refdev in future
619  *     -EINVAL superblock incompatible or invalid
620  *     -othererror e.g. -EIO
621  *
622  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
623  *      Verify that dev is acceptable into mddev.
624  *       The first time, mddev->raid_disks will be 0, and data from
625  *       dev should be merged in.  Subsequent calls check that dev
626  *       is new enough.  Return 0 or -EINVAL
627  *
628  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
629  *     Update the superblock for rdev with data in mddev
630  *     This does not write to disc.
631  *
632  */
633
634 struct super_type  {
635         char            *name;
636         struct module   *owner;
637         int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
638         int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
639         void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
640 };
641
642 /*
643  * load_super for 0.90.0 
644  */
645 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
646 {
647         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
648         mdp_super_t *sb;
649         int ret;
650         sector_t sb_offset;
651
652         /*
653          * Calculate the position of the superblock,
654          * it's at the end of the disk.
655          *
656          * It also happens to be a multiple of 4Kb.
657          */
658         sb_offset = calc_dev_sboffset(rdev->bdev);
659         rdev->sb_offset = sb_offset;
660
661         ret = read_disk_sb(rdev, MD_SB_BYTES);
662         if (ret) return ret;
663
664         ret = -EINVAL;
665
666         bdevname(rdev->bdev, b);
667         sb = (mdp_super_t*)page_address(rdev->sb_page);
668
669         if (sb->md_magic != MD_SB_MAGIC) {
670                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
671                        b);
672                 goto abort;
673         }
674
675         if (sb->major_version != 0 ||
676             sb->minor_version < 90 ||
677             sb->minor_version > 91) {
678                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
679                         sb->major_version, sb->minor_version,
680                         b);
681                 goto abort;
682         }
683
684         if (sb->raid_disks <= 0)
685                 goto abort;
686
687         if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
688                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
689                         b);
690                 goto abort;
691         }
692
693         rdev->preferred_minor = sb->md_minor;
694         rdev->data_offset = 0;
695         rdev->sb_size = MD_SB_BYTES;
696
697         if (sb->level == LEVEL_MULTIPATH)
698                 rdev->desc_nr = -1;
699         else
700                 rdev->desc_nr = sb->this_disk.number;
701
702         if (refdev == 0)
703                 ret = 1;
704         else {
705                 __u64 ev1, ev2;
706                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
707                 if (!uuid_equal(refsb, sb)) {
708                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
709                                 b, bdevname(refdev->bdev,b2));
710                         goto abort;
711                 }
712                 if (!sb_equal(refsb, sb)) {
713                         printk(KERN_WARNING "md: %s has same UUID"
714                                " but different superblock to %s\n",
715                                b, bdevname(refdev->bdev, b2));
716                         goto abort;
717                 }
718                 ev1 = md_event(sb);
719                 ev2 = md_event(refsb);
720                 if (ev1 > ev2)
721                         ret = 1;
722                 else 
723                         ret = 0;
724         }
725         rdev->size = calc_dev_size(rdev, sb->chunk_size);
726
727         if (rdev->size < sb->size && sb->level > 1)
728                 /* "this cannot possibly happen" ... */
729                 ret = -EINVAL;
730
731  abort:
732         return ret;
733 }
734
735 /*
736  * validate_super for 0.90.0
737  */
738 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
739 {
740         mdp_disk_t *desc;
741         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
742         __u64 ev1 = md_event(sb);
743
744         rdev->raid_disk = -1;
745         rdev->flags = 0;
746         if (mddev->raid_disks == 0) {
747                 mddev->major_version = 0;
748                 mddev->minor_version = sb->minor_version;
749                 mddev->patch_version = sb->patch_version;
750                 mddev->persistent = ! sb->not_persistent;
751                 mddev->chunk_size = sb->chunk_size;
752                 mddev->ctime = sb->ctime;
753                 mddev->utime = sb->utime;
754                 mddev->level = sb->level;
755                 mddev->clevel[0] = 0;
756                 mddev->layout = sb->layout;
757                 mddev->raid_disks = sb->raid_disks;
758                 mddev->size = sb->size;
759                 mddev->events = ev1;
760                 mddev->bitmap_offset = 0;
761                 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
762
763                 if (mddev->minor_version >= 91) {
764                         mddev->reshape_position = sb->reshape_position;
765                         mddev->delta_disks = sb->delta_disks;
766                         mddev->new_level = sb->new_level;
767                         mddev->new_layout = sb->new_layout;
768                         mddev->new_chunk = sb->new_chunk;
769                 } else {
770                         mddev->reshape_position = MaxSector;
771                         mddev->delta_disks = 0;
772                         mddev->new_level = mddev->level;
773                         mddev->new_layout = mddev->layout;
774                         mddev->new_chunk = mddev->chunk_size;
775                 }
776
777                 if (sb->state & (1<<MD_SB_CLEAN))
778                         mddev->recovery_cp = MaxSector;
779                 else {
780                         if (sb->events_hi == sb->cp_events_hi && 
781                                 sb->events_lo == sb->cp_events_lo) {
782                                 mddev->recovery_cp = sb->recovery_cp;
783                         } else
784                                 mddev->recovery_cp = 0;
785                 }
786
787                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
788                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
789                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
790                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
791
792                 mddev->max_disks = MD_SB_DISKS;
793
794                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
795                     mddev->bitmap_file == NULL) {
796                         if (mddev->level != 1 && mddev->level != 4
797                             && mddev->level != 5 && mddev->level != 6
798                             && mddev->level != 10) {
799                                 /* FIXME use a better test */
800                                 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
801                                 return -EINVAL;
802                         }
803                         mddev->bitmap_offset = mddev->default_bitmap_offset;
804                 }
805
806         } else if (mddev->pers == NULL) {
807                 /* Insist on good event counter while assembling */
808                 ++ev1;
809                 if (ev1 < mddev->events) 
810                         return -EINVAL;
811         } else if (mddev->bitmap) {
812                 /* if adding to array with a bitmap, then we can accept an
813                  * older device ... but not too old.
814                  */
815                 if (ev1 < mddev->bitmap->events_cleared)
816                         return 0;
817         } else {
818                 if (ev1 < mddev->events)
819                         /* just a hot-add of a new device, leave raid_disk at -1 */
820                         return 0;
821         }
822
823         if (mddev->level != LEVEL_MULTIPATH) {
824                 desc = sb->disks + rdev->desc_nr;
825
826                 if (desc->state & (1<<MD_DISK_FAULTY))
827                         set_bit(Faulty, &rdev->flags);
828                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
829                             desc->raid_disk < mddev->raid_disks */) {
830                         set_bit(In_sync, &rdev->flags);
831                         rdev->raid_disk = desc->raid_disk;
832                 }
833                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
834                         set_bit(WriteMostly, &rdev->flags);
835         } else /* MULTIPATH are always insync */
836                 set_bit(In_sync, &rdev->flags);
837         return 0;
838 }
839
840 /*
841  * sync_super for 0.90.0
842  */
843 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
844 {
845         mdp_super_t *sb;
846         struct list_head *tmp;
847         mdk_rdev_t *rdev2;
848         int next_spare = mddev->raid_disks;
849
850
851         /* make rdev->sb match mddev data..
852          *
853          * 1/ zero out disks
854          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
855          * 3/ any empty disks < next_spare become removed
856          *
857          * disks[0] gets initialised to REMOVED because
858          * we cannot be sure from other fields if it has
859          * been initialised or not.
860          */
861         int i;
862         int active=0, working=0,failed=0,spare=0,nr_disks=0;
863
864         rdev->sb_size = MD_SB_BYTES;
865
866         sb = (mdp_super_t*)page_address(rdev->sb_page);
867
868         memset(sb, 0, sizeof(*sb));
869
870         sb->md_magic = MD_SB_MAGIC;
871         sb->major_version = mddev->major_version;
872         sb->patch_version = mddev->patch_version;
873         sb->gvalid_words  = 0; /* ignored */
874         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
875         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
876         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
877         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
878
879         sb->ctime = mddev->ctime;
880         sb->level = mddev->level;
881         sb->size  = mddev->size;
882         sb->raid_disks = mddev->raid_disks;
883         sb->md_minor = mddev->md_minor;
884         sb->not_persistent = !mddev->persistent;
885         sb->utime = mddev->utime;
886         sb->state = 0;
887         sb->events_hi = (mddev->events>>32);
888         sb->events_lo = (u32)mddev->events;
889
890         if (mddev->reshape_position == MaxSector)
891                 sb->minor_version = 90;
892         else {
893                 sb->minor_version = 91;
894                 sb->reshape_position = mddev->reshape_position;
895                 sb->new_level = mddev->new_level;
896                 sb->delta_disks = mddev->delta_disks;
897                 sb->new_layout = mddev->new_layout;
898                 sb->new_chunk = mddev->new_chunk;
899         }
900         mddev->minor_version = sb->minor_version;
901         if (mddev->in_sync)
902         {
903                 sb->recovery_cp = mddev->recovery_cp;
904                 sb->cp_events_hi = (mddev->events>>32);
905                 sb->cp_events_lo = (u32)mddev->events;
906                 if (mddev->recovery_cp == MaxSector)
907                         sb->state = (1<< MD_SB_CLEAN);
908         } else
909                 sb->recovery_cp = 0;
910
911         sb->layout = mddev->layout;
912         sb->chunk_size = mddev->chunk_size;
913
914         if (mddev->bitmap && mddev->bitmap_file == NULL)
915                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
916
917         sb->disks[0].state = (1<<MD_DISK_REMOVED);
918         ITERATE_RDEV(mddev,rdev2,tmp) {
919                 mdp_disk_t *d;
920                 int desc_nr;
921                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
922                     && !test_bit(Faulty, &rdev2->flags))
923                         desc_nr = rdev2->raid_disk;
924                 else
925                         desc_nr = next_spare++;
926                 rdev2->desc_nr = desc_nr;
927                 d = &sb->disks[rdev2->desc_nr];
928                 nr_disks++;
929                 d->number = rdev2->desc_nr;
930                 d->major = MAJOR(rdev2->bdev->bd_dev);
931                 d->minor = MINOR(rdev2->bdev->bd_dev);
932                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
933                     && !test_bit(Faulty, &rdev2->flags))
934                         d->raid_disk = rdev2->raid_disk;
935                 else
936                         d->raid_disk = rdev2->desc_nr; /* compatibility */
937                 if (test_bit(Faulty, &rdev2->flags))
938                         d->state = (1<<MD_DISK_FAULTY);
939                 else if (test_bit(In_sync, &rdev2->flags)) {
940                         d->state = (1<<MD_DISK_ACTIVE);
941                         d->state |= (1<<MD_DISK_SYNC);
942                         active++;
943                         working++;
944                 } else {
945                         d->state = 0;
946                         spare++;
947                         working++;
948                 }
949                 if (test_bit(WriteMostly, &rdev2->flags))
950                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
951         }
952         /* now set the "removed" and "faulty" bits on any missing devices */
953         for (i=0 ; i < mddev->raid_disks ; i++) {
954                 mdp_disk_t *d = &sb->disks[i];
955                 if (d->state == 0 && d->number == 0) {
956                         d->number = i;
957                         d->raid_disk = i;
958                         d->state = (1<<MD_DISK_REMOVED);
959                         d->state |= (1<<MD_DISK_FAULTY);
960                         failed++;
961                 }
962         }
963         sb->nr_disks = nr_disks;
964         sb->active_disks = active;
965         sb->working_disks = working;
966         sb->failed_disks = failed;
967         sb->spare_disks = spare;
968
969         sb->this_disk = sb->disks[rdev->desc_nr];
970         sb->sb_csum = calc_sb_csum(sb);
971 }
972
973 /*
974  * version 1 superblock
975  */
976
977 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
978 {
979         __le32 disk_csum;
980         u32 csum;
981         unsigned long long newcsum;
982         int size = 256 + le32_to_cpu(sb->max_dev)*2;
983         __le32 *isuper = (__le32*)sb;
984         int i;
985
986         disk_csum = sb->sb_csum;
987         sb->sb_csum = 0;
988         newcsum = 0;
989         for (i=0; size>=4; size -= 4 )
990                 newcsum += le32_to_cpu(*isuper++);
991
992         if (size == 2)
993                 newcsum += le16_to_cpu(*(__le16*) isuper);
994
995         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
996         sb->sb_csum = disk_csum;
997         return cpu_to_le32(csum);
998 }
999
1000 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1001 {
1002         struct mdp_superblock_1 *sb;
1003         int ret;
1004         sector_t sb_offset;
1005         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1006         int bmask;
1007
1008         /*
1009          * Calculate the position of the superblock.
1010          * It is always aligned to a 4K boundary and
1011          * depeding on minor_version, it can be:
1012          * 0: At least 8K, but less than 12K, from end of device
1013          * 1: At start of device
1014          * 2: 4K from start of device.
1015          */
1016         switch(minor_version) {
1017         case 0:
1018                 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1019                 sb_offset -= 8*2;
1020                 sb_offset &= ~(sector_t)(4*2-1);
1021                 /* convert from sectors to K */
1022                 sb_offset /= 2;
1023                 break;
1024         case 1:
1025                 sb_offset = 0;
1026                 break;
1027         case 2:
1028                 sb_offset = 4;
1029                 break;
1030         default:
1031                 return -EINVAL;
1032         }
1033         rdev->sb_offset = sb_offset;
1034
1035         /* superblock is rarely larger than 1K, but it can be larger,
1036          * and it is safe to read 4k, so we do that
1037          */
1038         ret = read_disk_sb(rdev, 4096);
1039         if (ret) return ret;
1040
1041
1042         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1043
1044         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1045             sb->major_version != cpu_to_le32(1) ||
1046             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1047             le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1048             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1049                 return -EINVAL;
1050
1051         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1052                 printk("md: invalid superblock checksum on %s\n",
1053                         bdevname(rdev->bdev,b));
1054                 return -EINVAL;
1055         }
1056         if (le64_to_cpu(sb->data_size) < 10) {
1057                 printk("md: data_size too small on %s\n",
1058                        bdevname(rdev->bdev,b));
1059                 return -EINVAL;
1060         }
1061         rdev->preferred_minor = 0xffff;
1062         rdev->data_offset = le64_to_cpu(sb->data_offset);
1063         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1064
1065         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1066         bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1067         if (rdev->sb_size & bmask)
1068                 rdev-> sb_size = (rdev->sb_size | bmask)+1;
1069
1070         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1071                 rdev->desc_nr = -1;
1072         else
1073                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1074
1075         if (refdev == 0)
1076                 ret = 1;
1077         else {
1078                 __u64 ev1, ev2;
1079                 struct mdp_superblock_1 *refsb = 
1080                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
1081
1082                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1083                     sb->level != refsb->level ||
1084                     sb->layout != refsb->layout ||
1085                     sb->chunksize != refsb->chunksize) {
1086                         printk(KERN_WARNING "md: %s has strangely different"
1087                                 " superblock to %s\n",
1088                                 bdevname(rdev->bdev,b),
1089                                 bdevname(refdev->bdev,b2));
1090                         return -EINVAL;
1091                 }
1092                 ev1 = le64_to_cpu(sb->events);
1093                 ev2 = le64_to_cpu(refsb->events);
1094
1095                 if (ev1 > ev2)
1096                         ret = 1;
1097                 else
1098                         ret = 0;
1099         }
1100         if (minor_version) 
1101                 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1102         else
1103                 rdev->size = rdev->sb_offset;
1104         if (rdev->size < le64_to_cpu(sb->data_size)/2)
1105                 return -EINVAL;
1106         rdev->size = le64_to_cpu(sb->data_size)/2;
1107         if (le32_to_cpu(sb->chunksize))
1108                 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1109
1110         if (le64_to_cpu(sb->size) > rdev->size*2)
1111                 return -EINVAL;
1112         return ret;
1113 }
1114
1115 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1116 {
1117         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1118         __u64 ev1 = le64_to_cpu(sb->events);
1119
1120         rdev->raid_disk = -1;
1121         rdev->flags = 0;
1122         if (mddev->raid_disks == 0) {
1123                 mddev->major_version = 1;
1124                 mddev->patch_version = 0;
1125                 mddev->persistent = 1;
1126                 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1127                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1128                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1129                 mddev->level = le32_to_cpu(sb->level);
1130                 mddev->clevel[0] = 0;
1131                 mddev->layout = le32_to_cpu(sb->layout);
1132                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1133                 mddev->size = le64_to_cpu(sb->size)/2;
1134                 mddev->events = ev1;
1135                 mddev->bitmap_offset = 0;
1136                 mddev->default_bitmap_offset = 1024 >> 9;
1137                 
1138                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1139                 memcpy(mddev->uuid, sb->set_uuid, 16);
1140
1141                 mddev->max_disks =  (4096-256)/2;
1142
1143                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1144                     mddev->bitmap_file == NULL ) {
1145                         if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1146                             && mddev->level != 10) {
1147                                 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1148                                 return -EINVAL;
1149                         }
1150                         mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1151                 }
1152                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1153                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1154                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1155                         mddev->new_level = le32_to_cpu(sb->new_level);
1156                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1157                         mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1158                 } else {
1159                         mddev->reshape_position = MaxSector;
1160                         mddev->delta_disks = 0;
1161                         mddev->new_level = mddev->level;
1162                         mddev->new_layout = mddev->layout;
1163                         mddev->new_chunk = mddev->chunk_size;
1164                 }
1165
1166         } else if (mddev->pers == NULL) {
1167                 /* Insist of good event counter while assembling */
1168                 ++ev1;
1169                 if (ev1 < mddev->events)
1170                         return -EINVAL;
1171         } else if (mddev->bitmap) {
1172                 /* If adding to array with a bitmap, then we can accept an
1173                  * older device, but not too old.
1174                  */
1175                 if (ev1 < mddev->bitmap->events_cleared)
1176                         return 0;
1177         } else {
1178                 if (ev1 < mddev->events)
1179                         /* just a hot-add of a new device, leave raid_disk at -1 */
1180                         return 0;
1181         }
1182         if (mddev->level != LEVEL_MULTIPATH) {
1183                 int role;
1184                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1185                 switch(role) {
1186                 case 0xffff: /* spare */
1187                         break;
1188                 case 0xfffe: /* faulty */
1189                         set_bit(Faulty, &rdev->flags);
1190                         break;
1191                 default:
1192                         if ((le32_to_cpu(sb->feature_map) &
1193                              MD_FEATURE_RECOVERY_OFFSET))
1194                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1195                         else
1196                                 set_bit(In_sync, &rdev->flags);
1197                         rdev->raid_disk = role;
1198                         break;
1199                 }
1200                 if (sb->devflags & WriteMostly1)
1201                         set_bit(WriteMostly, &rdev->flags);
1202         } else /* MULTIPATH are always insync */
1203                 set_bit(In_sync, &rdev->flags);
1204
1205         return 0;
1206 }
1207
1208 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1209 {
1210         struct mdp_superblock_1 *sb;
1211         struct list_head *tmp;
1212         mdk_rdev_t *rdev2;
1213         int max_dev, i;
1214         /* make rdev->sb match mddev and rdev data. */
1215
1216         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1217
1218         sb->feature_map = 0;
1219         sb->pad0 = 0;
1220         sb->recovery_offset = cpu_to_le64(0);
1221         memset(sb->pad1, 0, sizeof(sb->pad1));
1222         memset(sb->pad2, 0, sizeof(sb->pad2));
1223         memset(sb->pad3, 0, sizeof(sb->pad3));
1224
1225         sb->utime = cpu_to_le64((__u64)mddev->utime);
1226         sb->events = cpu_to_le64(mddev->events);
1227         if (mddev->in_sync)
1228                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1229         else
1230                 sb->resync_offset = cpu_to_le64(0);
1231
1232         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1233
1234         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1235         sb->size = cpu_to_le64(mddev->size<<1);
1236
1237         if (mddev->bitmap && mddev->bitmap_file == NULL) {
1238                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1239                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1240         }
1241
1242         if (rdev->raid_disk >= 0 &&
1243             !test_bit(In_sync, &rdev->flags) &&
1244             rdev->recovery_offset > 0) {
1245                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1246                 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1247         }
1248
1249         if (mddev->reshape_position != MaxSector) {
1250                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1251                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1252                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1253                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1254                 sb->new_level = cpu_to_le32(mddev->new_level);
1255                 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1256         }
1257
1258         max_dev = 0;
1259         ITERATE_RDEV(mddev,rdev2,tmp)
1260                 if (rdev2->desc_nr+1 > max_dev)
1261                         max_dev = rdev2->desc_nr+1;
1262         
1263         sb->max_dev = cpu_to_le32(max_dev);
1264         for (i=0; i<max_dev;i++)
1265                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1266         
1267         ITERATE_RDEV(mddev,rdev2,tmp) {
1268                 i = rdev2->desc_nr;
1269                 if (test_bit(Faulty, &rdev2->flags))
1270                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1271                 else if (test_bit(In_sync, &rdev2->flags))
1272                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1273                 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1274                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1275                 else
1276                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1277         }
1278
1279         sb->sb_csum = calc_sb_1_csum(sb);
1280 }
1281
1282
1283 static struct super_type super_types[] = {
1284         [0] = {
1285                 .name   = "0.90.0",
1286                 .owner  = THIS_MODULE,
1287                 .load_super     = super_90_load,
1288                 .validate_super = super_90_validate,
1289                 .sync_super     = super_90_sync,
1290         },
1291         [1] = {
1292                 .name   = "md-1",
1293                 .owner  = THIS_MODULE,
1294                 .load_super     = super_1_load,
1295                 .validate_super = super_1_validate,
1296                 .sync_super     = super_1_sync,
1297         },
1298 };
1299
1300 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1301 {
1302         struct list_head *tmp, *tmp2;
1303         mdk_rdev_t *rdev, *rdev2;
1304
1305         ITERATE_RDEV(mddev1,rdev,tmp)
1306                 ITERATE_RDEV(mddev2, rdev2, tmp2)
1307                         if (rdev->bdev->bd_contains ==
1308                             rdev2->bdev->bd_contains)
1309                                 return 1;
1310
1311         return 0;
1312 }
1313
1314 static LIST_HEAD(pending_raid_disks);
1315
1316 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1317 {
1318         char b[BDEVNAME_SIZE];
1319         struct kobject *ko;
1320         char *s;
1321
1322         if (rdev->mddev) {
1323                 MD_BUG();
1324                 return -EINVAL;
1325         }
1326         /* make sure rdev->size exceeds mddev->size */
1327         if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1328                 if (mddev->pers)
1329                         /* Cannot change size, so fail */
1330                         return -ENOSPC;
1331                 else
1332                         mddev->size = rdev->size;
1333         }
1334
1335         /* Verify rdev->desc_nr is unique.
1336          * If it is -1, assign a free number, else
1337          * check number is not in use
1338          */
1339         if (rdev->desc_nr < 0) {
1340                 int choice = 0;
1341                 if (mddev->pers) choice = mddev->raid_disks;
1342                 while (find_rdev_nr(mddev, choice))
1343                         choice++;
1344                 rdev->desc_nr = choice;
1345         } else {
1346                 if (find_rdev_nr(mddev, rdev->desc_nr))
1347                         return -EBUSY;
1348         }
1349         bdevname(rdev->bdev,b);
1350         if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
1351                 return -ENOMEM;
1352         while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
1353                 *s = '!';
1354                         
1355         list_add(&rdev->same_set, &mddev->disks);
1356         rdev->mddev = mddev;
1357         printk(KERN_INFO "md: bind<%s>\n", b);
1358
1359         rdev->kobj.parent = &mddev->kobj;
1360         kobject_add(&rdev->kobj);
1361
1362         if (rdev->bdev->bd_part)
1363                 ko = &rdev->bdev->bd_part->kobj;
1364         else
1365                 ko = &rdev->bdev->bd_disk->kobj;
1366         sysfs_create_link(&rdev->kobj, ko, "block");
1367         bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
1368         return 0;
1369 }
1370
1371 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1372 {
1373         char b[BDEVNAME_SIZE];
1374         if (!rdev->mddev) {
1375                 MD_BUG();
1376                 return;
1377         }
1378         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1379         list_del_init(&rdev->same_set);
1380         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1381         rdev->mddev = NULL;
1382         sysfs_remove_link(&rdev->kobj, "block");
1383         kobject_del(&rdev->kobj);
1384 }
1385
1386 /*
1387  * prevent the device from being mounted, repartitioned or
1388  * otherwise reused by a RAID array (or any other kernel
1389  * subsystem), by bd_claiming the device.
1390  */
1391 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1392 {
1393         int err = 0;
1394         struct block_device *bdev;
1395         char b[BDEVNAME_SIZE];
1396
1397         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1398         if (IS_ERR(bdev)) {
1399                 printk(KERN_ERR "md: could not open %s.\n",
1400                         __bdevname(dev, b));
1401                 return PTR_ERR(bdev);
1402         }
1403         err = bd_claim(bdev, rdev);
1404         if (err) {
1405                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1406                         bdevname(bdev, b));
1407                 blkdev_put(bdev);
1408                 return err;
1409         }
1410         rdev->bdev = bdev;
1411         return err;
1412 }
1413
1414 static void unlock_rdev(mdk_rdev_t *rdev)
1415 {
1416         struct block_device *bdev = rdev->bdev;
1417         rdev->bdev = NULL;
1418         if (!bdev)
1419                 MD_BUG();
1420         bd_release(bdev);
1421         blkdev_put(bdev);
1422 }
1423
1424 void md_autodetect_dev(dev_t dev);
1425
1426 static void export_rdev(mdk_rdev_t * rdev)
1427 {
1428         char b[BDEVNAME_SIZE];
1429         printk(KERN_INFO "md: export_rdev(%s)\n",
1430                 bdevname(rdev->bdev,b));
1431         if (rdev->mddev)
1432                 MD_BUG();
1433         free_disk_sb(rdev);
1434         list_del_init(&rdev->same_set);
1435 #ifndef MODULE
1436         md_autodetect_dev(rdev->bdev->bd_dev);
1437 #endif
1438         unlock_rdev(rdev);
1439         kobject_put(&rdev->kobj);
1440 }
1441
1442 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1443 {
1444         unbind_rdev_from_array(rdev);
1445         export_rdev(rdev);
1446 }
1447
1448 static void export_array(mddev_t *mddev)
1449 {
1450         struct list_head *tmp;
1451         mdk_rdev_t *rdev;
1452
1453         ITERATE_RDEV(mddev,rdev,tmp) {
1454                 if (!rdev->mddev) {
1455                         MD_BUG();
1456                         continue;
1457                 }
1458                 kick_rdev_from_array(rdev);
1459         }
1460         if (!list_empty(&mddev->disks))
1461                 MD_BUG();
1462         mddev->raid_disks = 0;
1463         mddev->major_version = 0;
1464 }
1465
1466 static void print_desc(mdp_disk_t *desc)
1467 {
1468         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1469                 desc->major,desc->minor,desc->raid_disk,desc->state);
1470 }
1471
1472 static void print_sb(mdp_super_t *sb)
1473 {
1474         int i;
1475
1476         printk(KERN_INFO 
1477                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1478                 sb->major_version, sb->minor_version, sb->patch_version,
1479                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1480                 sb->ctime);
1481         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1482                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1483                 sb->md_minor, sb->layout, sb->chunk_size);
1484         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1485                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1486                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1487                 sb->failed_disks, sb->spare_disks,
1488                 sb->sb_csum, (unsigned long)sb->events_lo);
1489
1490         printk(KERN_INFO);
1491         for (i = 0; i < MD_SB_DISKS; i++) {
1492                 mdp_disk_t *desc;
1493
1494                 desc = sb->disks + i;
1495                 if (desc->number || desc->major || desc->minor ||
1496                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1497                         printk("     D %2d: ", i);
1498                         print_desc(desc);
1499                 }
1500         }
1501         printk(KERN_INFO "md:     THIS: ");
1502         print_desc(&sb->this_disk);
1503
1504 }
1505
1506 static void print_rdev(mdk_rdev_t *rdev)
1507 {
1508         char b[BDEVNAME_SIZE];
1509         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1510                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1511                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1512                 rdev->desc_nr);
1513         if (rdev->sb_loaded) {
1514                 printk(KERN_INFO "md: rdev superblock:\n");
1515                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1516         } else
1517                 printk(KERN_INFO "md: no rdev superblock!\n");
1518 }
1519
1520 static void md_print_devices(void)
1521 {
1522         struct list_head *tmp, *tmp2;
1523         mdk_rdev_t *rdev;
1524         mddev_t *mddev;
1525         char b[BDEVNAME_SIZE];
1526
1527         printk("\n");
1528         printk("md:     **********************************\n");
1529         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1530         printk("md:     **********************************\n");
1531         ITERATE_MDDEV(mddev,tmp) {
1532
1533                 if (mddev->bitmap)
1534                         bitmap_print_sb(mddev->bitmap);
1535                 else
1536                         printk("%s: ", mdname(mddev));
1537                 ITERATE_RDEV(mddev,rdev,tmp2)
1538                         printk("<%s>", bdevname(rdev->bdev,b));
1539                 printk("\n");
1540
1541                 ITERATE_RDEV(mddev,rdev,tmp2)
1542                         print_rdev(rdev);
1543         }
1544         printk("md:     **********************************\n");
1545         printk("\n");
1546 }
1547
1548
1549 static void sync_sbs(mddev_t * mddev, int nospares)
1550 {
1551         /* Update each superblock (in-memory image), but
1552          * if we are allowed to, skip spares which already
1553          * have the right event counter, or have one earlier
1554          * (which would mean they aren't being marked as dirty
1555          * with the rest of the array)
1556          */
1557         mdk_rdev_t *rdev;
1558         struct list_head *tmp;
1559
1560         ITERATE_RDEV(mddev,rdev,tmp) {
1561                 if (rdev->sb_events == mddev->events ||
1562                     (nospares &&
1563                      rdev->raid_disk < 0 &&
1564                      (rdev->sb_events&1)==0 &&
1565                      rdev->sb_events+1 == mddev->events)) {
1566                         /* Don't update this superblock */
1567                         rdev->sb_loaded = 2;
1568                 } else {
1569                         super_types[mddev->major_version].
1570                                 sync_super(mddev, rdev);
1571                         rdev->sb_loaded = 1;
1572                 }
1573         }
1574 }
1575
1576 static void md_update_sb(mddev_t * mddev, int force_change)
1577 {
1578         int err;
1579         struct list_head *tmp;
1580         mdk_rdev_t *rdev;
1581         int sync_req;
1582         int nospares = 0;
1583
1584 repeat:
1585         spin_lock_irq(&mddev->write_lock);
1586
1587         set_bit(MD_CHANGE_PENDING, &mddev->flags);
1588         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1589                 force_change = 1;
1590         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1591                 /* just a clean<-> dirty transition, possibly leave spares alone,
1592                  * though if events isn't the right even/odd, we will have to do
1593                  * spares after all
1594                  */
1595                 nospares = 1;
1596         if (force_change)
1597                 nospares = 0;
1598         if (mddev->degraded)
1599                 /* If the array is degraded, then skipping spares is both
1600                  * dangerous and fairly pointless.
1601                  * Dangerous because a device that was removed from the array
1602                  * might have a event_count that still looks up-to-date,
1603                  * so it can be re-added without a resync.
1604                  * Pointless because if there are any spares to skip,
1605                  * then a recovery will happen and soon that array won't
1606                  * be degraded any more and the spare can go back to sleep then.
1607                  */
1608                 nospares = 0;
1609
1610         sync_req = mddev->in_sync;
1611         mddev->utime = get_seconds();
1612
1613         /* If this is just a dirty<->clean transition, and the array is clean
1614          * and 'events' is odd, we can roll back to the previous clean state */
1615         if (nospares
1616             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1617             && (mddev->events & 1)
1618             && mddev->events != 1)
1619                 mddev->events--;
1620         else {
1621                 /* otherwise we have to go forward and ... */
1622                 mddev->events ++;
1623                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1624                         /* .. if the array isn't clean, insist on an odd 'events' */
1625                         if ((mddev->events&1)==0) {
1626                                 mddev->events++;
1627                                 nospares = 0;
1628                         }
1629                 } else {
1630                         /* otherwise insist on an even 'events' (for clean states) */
1631                         if ((mddev->events&1)) {
1632                                 mddev->events++;
1633                                 nospares = 0;
1634                         }
1635                 }
1636         }
1637
1638         if (!mddev->events) {
1639                 /*
1640                  * oops, this 64-bit counter should never wrap.
1641                  * Either we are in around ~1 trillion A.C., assuming
1642                  * 1 reboot per second, or we have a bug:
1643                  */
1644                 MD_BUG();
1645                 mddev->events --;
1646         }
1647         sync_sbs(mddev, nospares);
1648
1649         /*
1650          * do not write anything to disk if using
1651          * nonpersistent superblocks
1652          */
1653         if (!mddev->persistent) {
1654                 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1655                 spin_unlock_irq(&mddev->write_lock);
1656                 wake_up(&mddev->sb_wait);
1657                 return;
1658         }
1659         spin_unlock_irq(&mddev->write_lock);
1660
1661         dprintk(KERN_INFO 
1662                 "md: updating %s RAID superblock on device (in sync %d)\n",
1663                 mdname(mddev),mddev->in_sync);
1664
1665         err = bitmap_update_sb(mddev->bitmap);
1666         ITERATE_RDEV(mddev,rdev,tmp) {
1667                 char b[BDEVNAME_SIZE];
1668                 dprintk(KERN_INFO "md: ");
1669                 if (rdev->sb_loaded != 1)
1670                         continue; /* no noise on spare devices */
1671                 if (test_bit(Faulty, &rdev->flags))
1672                         dprintk("(skipping faulty ");
1673
1674                 dprintk("%s ", bdevname(rdev->bdev,b));
1675                 if (!test_bit(Faulty, &rdev->flags)) {
1676                         md_super_write(mddev,rdev,
1677                                        rdev->sb_offset<<1, rdev->sb_size,
1678                                        rdev->sb_page);
1679                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1680                                 bdevname(rdev->bdev,b),
1681                                 (unsigned long long)rdev->sb_offset);
1682                         rdev->sb_events = mddev->events;
1683
1684                 } else
1685                         dprintk(")\n");
1686                 if (mddev->level == LEVEL_MULTIPATH)
1687                         /* only need to write one superblock... */
1688                         break;
1689         }
1690         md_super_wait(mddev);
1691         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1692
1693         spin_lock_irq(&mddev->write_lock);
1694         if (mddev->in_sync != sync_req ||
1695             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1696                 /* have to write it out again */
1697                 spin_unlock_irq(&mddev->write_lock);
1698                 goto repeat;
1699         }
1700         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1701         spin_unlock_irq(&mddev->write_lock);
1702         wake_up(&mddev->sb_wait);
1703
1704 }
1705
1706 /* words written to sysfs files may, or my not, be \n terminated.
1707  * We want to accept with case. For this we use cmd_match.
1708  */
1709 static int cmd_match(const char *cmd, const char *str)
1710 {
1711         /* See if cmd, written into a sysfs file, matches
1712          * str.  They must either be the same, or cmd can
1713          * have a trailing newline
1714          */
1715         while (*cmd && *str && *cmd == *str) {
1716                 cmd++;
1717                 str++;
1718         }
1719         if (*cmd == '\n')
1720                 cmd++;
1721         if (*str || *cmd)
1722                 return 0;
1723         return 1;
1724 }
1725
1726 struct rdev_sysfs_entry {
1727         struct attribute attr;
1728         ssize_t (*show)(mdk_rdev_t *, char *);
1729         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1730 };
1731
1732 static ssize_t
1733 state_show(mdk_rdev_t *rdev, char *page)
1734 {
1735         char *sep = "";
1736         int len=0;
1737
1738         if (test_bit(Faulty, &rdev->flags)) {
1739                 len+= sprintf(page+len, "%sfaulty",sep);
1740                 sep = ",";
1741         }
1742         if (test_bit(In_sync, &rdev->flags)) {
1743                 len += sprintf(page+len, "%sin_sync",sep);
1744                 sep = ",";
1745         }
1746         if (test_bit(WriteMostly, &rdev->flags)) {
1747                 len += sprintf(page+len, "%swrite_mostly",sep);
1748                 sep = ",";
1749         }
1750         if (!test_bit(Faulty, &rdev->flags) &&
1751             !test_bit(In_sync, &rdev->flags)) {
1752                 len += sprintf(page+len, "%sspare", sep);
1753                 sep = ",";
1754         }
1755         return len+sprintf(page+len, "\n");
1756 }
1757
1758 static ssize_t
1759 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1760 {
1761         /* can write
1762          *  faulty  - simulates and error
1763          *  remove  - disconnects the device
1764          *  writemostly - sets write_mostly
1765          *  -writemostly - clears write_mostly
1766          */
1767         int err = -EINVAL;
1768         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1769                 md_error(rdev->mddev, rdev);
1770                 err = 0;
1771         } else if (cmd_match(buf, "remove")) {
1772                 if (rdev->raid_disk >= 0)
1773                         err = -EBUSY;
1774                 else {
1775                         mddev_t *mddev = rdev->mddev;
1776                         kick_rdev_from_array(rdev);
1777                         if (mddev->pers)
1778                                 md_update_sb(mddev, 1);
1779                         md_new_event(mddev);
1780                         err = 0;
1781                 }
1782         } else if (cmd_match(buf, "writemostly")) {
1783                 set_bit(WriteMostly, &rdev->flags);
1784                 err = 0;
1785         } else if (cmd_match(buf, "-writemostly")) {
1786                 clear_bit(WriteMostly, &rdev->flags);
1787                 err = 0;
1788         }
1789         return err ? err : len;
1790 }
1791 static struct rdev_sysfs_entry rdev_state =
1792 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
1793
1794 static ssize_t
1795 super_show(mdk_rdev_t *rdev, char *page)
1796 {
1797         if (rdev->sb_loaded && rdev->sb_size) {
1798                 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1799                 return rdev->sb_size;
1800         } else
1801                 return 0;
1802 }
1803 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1804
1805 static ssize_t
1806 errors_show(mdk_rdev_t *rdev, char *page)
1807 {
1808         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1809 }
1810
1811 static ssize_t
1812 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1813 {
1814         char *e;
1815         unsigned long n = simple_strtoul(buf, &e, 10);
1816         if (*buf && (*e == 0 || *e == '\n')) {
1817                 atomic_set(&rdev->corrected_errors, n);
1818                 return len;
1819         }
1820         return -EINVAL;
1821 }
1822 static struct rdev_sysfs_entry rdev_errors =
1823 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1824
1825 static ssize_t
1826 slot_show(mdk_rdev_t *rdev, char *page)
1827 {
1828         if (rdev->raid_disk < 0)
1829                 return sprintf(page, "none\n");
1830         else
1831                 return sprintf(page, "%d\n", rdev->raid_disk);
1832 }
1833
1834 static ssize_t
1835 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1836 {
1837         char *e;
1838         int slot = simple_strtoul(buf, &e, 10);
1839         if (strncmp(buf, "none", 4)==0)
1840                 slot = -1;
1841         else if (e==buf || (*e && *e!= '\n'))
1842                 return -EINVAL;
1843         if (rdev->mddev->pers)
1844                 /* Cannot set slot in active array (yet) */
1845                 return -EBUSY;
1846         if (slot >= rdev->mddev->raid_disks)
1847                 return -ENOSPC;
1848         rdev->raid_disk = slot;
1849         /* assume it is working */
1850         rdev->flags = 0;
1851         set_bit(In_sync, &rdev->flags);
1852         return len;
1853 }
1854
1855
1856 static struct rdev_sysfs_entry rdev_slot =
1857 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
1858
1859 static ssize_t
1860 offset_show(mdk_rdev_t *rdev, char *page)
1861 {
1862         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1863 }
1864
1865 static ssize_t
1866 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1867 {
1868         char *e;
1869         unsigned long long offset = simple_strtoull(buf, &e, 10);
1870         if (e==buf || (*e && *e != '\n'))
1871                 return -EINVAL;
1872         if (rdev->mddev->pers)
1873                 return -EBUSY;
1874         rdev->data_offset = offset;
1875         return len;
1876 }
1877
1878 static struct rdev_sysfs_entry rdev_offset =
1879 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
1880
1881 static ssize_t
1882 rdev_size_show(mdk_rdev_t *rdev, char *page)
1883 {
1884         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1885 }
1886
1887 static ssize_t
1888 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1889 {
1890         char *e;
1891         unsigned long long size = simple_strtoull(buf, &e, 10);
1892         if (e==buf || (*e && *e != '\n'))
1893                 return -EINVAL;
1894         if (rdev->mddev->pers)
1895                 return -EBUSY;
1896         rdev->size = size;
1897         if (size < rdev->mddev->size || rdev->mddev->size == 0)
1898                 rdev->mddev->size = size;
1899         return len;
1900 }
1901
1902 static struct rdev_sysfs_entry rdev_size =
1903 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
1904
1905 static struct attribute *rdev_default_attrs[] = {
1906         &rdev_state.attr,
1907         &rdev_super.attr,
1908         &rdev_errors.attr,
1909         &rdev_slot.attr,
1910         &rdev_offset.attr,
1911         &rdev_size.attr,
1912         NULL,
1913 };
1914 static ssize_t
1915 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1916 {
1917         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1918         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1919
1920         if (!entry->show)
1921                 return -EIO;
1922         return entry->show(rdev, page);
1923 }
1924
1925 static ssize_t
1926 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1927               const char *page, size_t length)
1928 {
1929         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1930         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1931
1932         if (!entry->store)
1933                 return -EIO;
1934         if (!capable(CAP_SYS_ADMIN))
1935                 return -EACCES;
1936         return entry->store(rdev, page, length);
1937 }
1938
1939 static void rdev_free(struct kobject *ko)
1940 {
1941         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1942         kfree(rdev);
1943 }
1944 static struct sysfs_ops rdev_sysfs_ops = {
1945         .show           = rdev_attr_show,
1946         .store          = rdev_attr_store,
1947 };
1948 static struct kobj_type rdev_ktype = {
1949         .release        = rdev_free,
1950         .sysfs_ops      = &rdev_sysfs_ops,
1951         .default_attrs  = rdev_default_attrs,
1952 };
1953
1954 /*
1955  * Import a device. If 'super_format' >= 0, then sanity check the superblock
1956  *
1957  * mark the device faulty if:
1958  *
1959  *   - the device is nonexistent (zero size)
1960  *   - the device has no valid superblock
1961  *
1962  * a faulty rdev _never_ has rdev->sb set.
1963  */
1964 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1965 {
1966         char b[BDEVNAME_SIZE];
1967         int err;
1968         mdk_rdev_t *rdev;
1969         sector_t size;
1970
1971         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1972         if (!rdev) {
1973                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1974                 return ERR_PTR(-ENOMEM);
1975         }
1976
1977         if ((err = alloc_disk_sb(rdev)))
1978                 goto abort_free;
1979
1980         err = lock_rdev(rdev, newdev);
1981         if (err)
1982                 goto abort_free;
1983
1984         rdev->kobj.parent = NULL;
1985         rdev->kobj.ktype = &rdev_ktype;
1986         kobject_init(&rdev->kobj);
1987
1988         rdev->desc_nr = -1;
1989         rdev->saved_raid_disk = -1;
1990         rdev->raid_disk = -1;
1991         rdev->flags = 0;
1992         rdev->data_offset = 0;
1993         rdev->sb_events = 0;
1994         atomic_set(&rdev->nr_pending, 0);
1995         atomic_set(&rdev->read_errors, 0);
1996         atomic_set(&rdev->corrected_errors, 0);
1997
1998         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1999         if (!size) {
2000                 printk(KERN_WARNING 
2001                         "md: %s has zero or unknown size, marking faulty!\n",
2002                         bdevname(rdev->bdev,b));
2003                 err = -EINVAL;
2004                 goto abort_free;
2005         }
2006
2007         if (super_format >= 0) {
2008                 err = super_types[super_format].
2009                         load_super(rdev, NULL, super_minor);
2010                 if (err == -EINVAL) {
2011                         printk(KERN_WARNING 
2012                                 "md: %s has invalid sb, not importing!\n",
2013                                 bdevname(rdev->bdev,b));
2014                         goto abort_free;
2015                 }
2016                 if (err < 0) {
2017                         printk(KERN_WARNING 
2018                                 "md: could not read %s's sb, not importing!\n",
2019                                 bdevname(rdev->bdev,b));
2020                         goto abort_free;
2021                 }
2022         }
2023         INIT_LIST_HEAD(&rdev->same_set);
2024
2025         return rdev;
2026
2027 abort_free:
2028         if (rdev->sb_page) {
2029                 if (rdev->bdev)
2030                         unlock_rdev(rdev);
2031                 free_disk_sb(rdev);
2032         }
2033         kfree(rdev);
2034         return ERR_PTR(err);
2035 }
2036
2037 /*
2038  * Check a full RAID array for plausibility
2039  */
2040
2041
2042 static void analyze_sbs(mddev_t * mddev)
2043 {
2044         int i;
2045         struct list_head *tmp;
2046         mdk_rdev_t *rdev, *freshest;
2047         char b[BDEVNAME_SIZE];
2048
2049         freshest = NULL;
2050         ITERATE_RDEV(mddev,rdev,tmp)
2051                 switch (super_types[mddev->major_version].
2052                         load_super(rdev, freshest, mddev->minor_version)) {
2053                 case 1:
2054                         freshest = rdev;
2055                         break;
2056                 case 0:
2057                         break;
2058                 default:
2059                         printk( KERN_ERR \
2060                                 "md: fatal superblock inconsistency in %s"
2061                                 " -- removing from array\n", 
2062                                 bdevname(rdev->bdev,b));
2063                         kick_rdev_from_array(rdev);
2064                 }
2065
2066
2067         super_types[mddev->major_version].
2068                 validate_super(mddev, freshest);
2069
2070         i = 0;
2071         ITERATE_RDEV(mddev,rdev,tmp) {
2072                 if (rdev != freshest)
2073                         if (super_types[mddev->major_version].
2074                             validate_super(mddev, rdev)) {
2075                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2076                                         " from array!\n",
2077                                         bdevname(rdev->bdev,b));
2078                                 kick_rdev_from_array(rdev);
2079                                 continue;
2080                         }
2081                 if (mddev->level == LEVEL_MULTIPATH) {
2082                         rdev->desc_nr = i++;
2083                         rdev->raid_disk = rdev->desc_nr;
2084                         set_bit(In_sync, &rdev->flags);
2085                 }
2086         }
2087
2088
2089
2090         if (mddev->recovery_cp != MaxSector &&
2091             mddev->level >= 1)
2092                 printk(KERN_ERR "md: %s: raid array is not clean"
2093                        " -- starting background reconstruction\n",
2094                        mdname(mddev));
2095
2096 }
2097
2098 static ssize_t
2099 safe_delay_show(mddev_t *mddev, char *page)
2100 {
2101         int msec = (mddev->safemode_delay*1000)/HZ;
2102         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2103 }
2104 static ssize_t
2105 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2106 {
2107         int scale=1;
2108         int dot=0;
2109         int i;
2110         unsigned long msec;
2111         char buf[30];
2112         char *e;
2113         /* remove a period, and count digits after it */
2114         if (len >= sizeof(buf))
2115                 return -EINVAL;
2116         strlcpy(buf, cbuf, len);
2117         buf[len] = 0;
2118         for (i=0; i<len; i++) {
2119                 if (dot) {
2120                         if (isdigit(buf[i])) {
2121                                 buf[i-1] = buf[i];
2122                                 scale *= 10;
2123                         }
2124                         buf[i] = 0;
2125                 } else if (buf[i] == '.') {
2126                         dot=1;
2127                         buf[i] = 0;
2128                 }
2129         }
2130         msec = simple_strtoul(buf, &e, 10);
2131         if (e == buf || (*e && *e != '\n'))
2132                 return -EINVAL;
2133         msec = (msec * 1000) / scale;
2134         if (msec == 0)
2135                 mddev->safemode_delay = 0;
2136         else {
2137                 mddev->safemode_delay = (msec*HZ)/1000;
2138                 if (mddev->safemode_delay == 0)
2139                         mddev->safemode_delay = 1;
2140         }
2141         return len;
2142 }
2143 static struct md_sysfs_entry md_safe_delay =
2144 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
2145
2146 static ssize_t
2147 level_show(mddev_t *mddev, char *page)
2148 {
2149         struct mdk_personality *p = mddev->pers;
2150         if (p)
2151                 return sprintf(page, "%s\n", p->name);
2152         else if (mddev->clevel[0])
2153                 return sprintf(page, "%s\n", mddev->clevel);
2154         else if (mddev->level != LEVEL_NONE)
2155                 return sprintf(page, "%d\n", mddev->level);
2156         else
2157                 return 0;
2158 }
2159
2160 static ssize_t
2161 level_store(mddev_t *mddev, const char *buf, size_t len)
2162 {
2163         int rv = len;
2164         if (mddev->pers)
2165                 return -EBUSY;
2166         if (len == 0)
2167                 return 0;
2168         if (len >= sizeof(mddev->clevel))
2169                 return -ENOSPC;
2170         strncpy(mddev->clevel, buf, len);
2171         if (mddev->clevel[len-1] == '\n')
2172                 len--;
2173         mddev->clevel[len] = 0;
2174         mddev->level = LEVEL_NONE;
2175         return rv;
2176 }
2177
2178 static struct md_sysfs_entry md_level =
2179 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2180
2181
2182 static ssize_t
2183 layout_show(mddev_t *mddev, char *page)
2184 {
2185         /* just a number, not meaningful for all levels */
2186         return sprintf(page, "%d\n", mddev->layout);
2187 }
2188
2189 static ssize_t
2190 layout_store(mddev_t *mddev, const char *buf, size_t len)
2191 {
2192         char *e;
2193         unsigned long n = simple_strtoul(buf, &e, 10);
2194         if (mddev->pers)
2195                 return -EBUSY;
2196
2197         if (!*buf || (*e && *e != '\n'))
2198                 return -EINVAL;
2199
2200         mddev->layout = n;
2201         return len;
2202 }
2203 static struct md_sysfs_entry md_layout =
2204 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2205
2206
2207 static ssize_t
2208 raid_disks_show(mddev_t *mddev, char *page)
2209 {
2210         if (mddev->raid_disks == 0)
2211                 return 0;
2212         return sprintf(page, "%d\n", mddev->raid_disks);
2213 }
2214
2215 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2216
2217 static ssize_t
2218 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2219 {
2220         char *e;
2221         int rv = 0;
2222         unsigned long n = simple_strtoul(buf, &e, 10);
2223
2224         if (!*buf || (*e && *e != '\n'))
2225                 return -EINVAL;
2226
2227         if (mddev->pers)
2228                 rv = update_raid_disks(mddev, n);
2229         else
2230                 mddev->raid_disks = n;
2231         return rv ? rv : len;
2232 }
2233 static struct md_sysfs_entry md_raid_disks =
2234 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2235
2236 static ssize_t
2237 chunk_size_show(mddev_t *mddev, char *page)
2238 {
2239         return sprintf(page, "%d\n", mddev->chunk_size);
2240 }
2241
2242 static ssize_t
2243 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2244 {
2245         /* can only set chunk_size if array is not yet active */
2246         char *e;
2247         unsigned long n = simple_strtoul(buf, &e, 10);
2248
2249         if (mddev->pers)
2250                 return -EBUSY;
2251         if (!*buf || (*e && *e != '\n'))
2252                 return -EINVAL;
2253
2254         mddev->chunk_size = n;
2255         return len;
2256 }
2257 static struct md_sysfs_entry md_chunk_size =
2258 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2259
2260 static ssize_t
2261 resync_start_show(mddev_t *mddev, char *page)
2262 {
2263         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2264 }
2265
2266 static ssize_t
2267 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2268 {
2269         /* can only set chunk_size if array is not yet active */
2270         char *e;
2271         unsigned long long n = simple_strtoull(buf, &e, 10);
2272
2273         if (mddev->pers)
2274                 return -EBUSY;
2275         if (!*buf || (*e && *e != '\n'))
2276                 return -EINVAL;
2277
2278         mddev->recovery_cp = n;
2279         return len;
2280 }
2281 static struct md_sysfs_entry md_resync_start =
2282 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2283
2284 /*
2285  * The array state can be:
2286  *
2287  * clear
2288  *     No devices, no size, no level
2289  *     Equivalent to STOP_ARRAY ioctl
2290  * inactive
2291  *     May have some settings, but array is not active
2292  *        all IO results in error
2293  *     When written, doesn't tear down array, but just stops it
2294  * suspended (not supported yet)
2295  *     All IO requests will block. The array can be reconfigured.
2296  *     Writing this, if accepted, will block until array is quiessent
2297  * readonly
2298  *     no resync can happen.  no superblocks get written.
2299  *     write requests fail
2300  * read-auto
2301  *     like readonly, but behaves like 'clean' on a write request.
2302  *
2303  * clean - no pending writes, but otherwise active.
2304  *     When written to inactive array, starts without resync
2305  *     If a write request arrives then
2306  *       if metadata is known, mark 'dirty' and switch to 'active'.
2307  *       if not known, block and switch to write-pending
2308  *     If written to an active array that has pending writes, then fails.
2309  * active
2310  *     fully active: IO and resync can be happening.
2311  *     When written to inactive array, starts with resync
2312  *
2313  * write-pending
2314  *     clean, but writes are blocked waiting for 'active' to be written.
2315  *
2316  * active-idle
2317  *     like active, but no writes have been seen for a while (100msec).
2318  *
2319  */
2320 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2321                    write_pending, active_idle, bad_word};
2322 static char *array_states[] = {
2323         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2324         "write-pending", "active-idle", NULL };
2325
2326 static int match_word(const char *word, char **list)
2327 {
2328         int n;
2329         for (n=0; list[n]; n++)
2330                 if (cmd_match(word, list[n]))
2331                         break;
2332         return n;
2333 }
2334
2335 static ssize_t
2336 array_state_show(mddev_t *mddev, char *page)
2337 {
2338         enum array_state st = inactive;
2339
2340         if (mddev->pers)
2341                 switch(mddev->ro) {
2342                 case 1:
2343                         st = readonly;
2344                         break;
2345                 case 2:
2346                         st = read_auto;
2347                         break;
2348                 case 0:
2349                         if (mddev->in_sync)
2350                                 st = clean;
2351                         else if (mddev->safemode)
2352                                 st = active_idle;
2353                         else
2354                                 st = active;
2355                 }
2356         else {
2357                 if (list_empty(&mddev->disks) &&
2358                     mddev->raid_disks == 0 &&
2359                     mddev->size == 0)
2360                         st = clear;
2361                 else
2362                         st = inactive;
2363         }
2364         return sprintf(page, "%s\n", array_states[st]);
2365 }
2366
2367 static int do_md_stop(mddev_t * mddev, int ro);
2368 static int do_md_run(mddev_t * mddev);
2369 static int restart_array(mddev_t *mddev);
2370
2371 static ssize_t
2372 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2373 {
2374         int err = -EINVAL;
2375         enum array_state st = match_word(buf, array_states);
2376         switch(st) {
2377         case bad_word:
2378                 break;
2379         case clear:
2380                 /* stopping an active array */
2381                 if (mddev->pers) {
2382                         if (atomic_read(&mddev->active) > 1)
2383                                 return -EBUSY;
2384                         err = do_md_stop(mddev, 0);
2385                 }
2386                 break;
2387         case inactive:
2388                 /* stopping an active array */
2389                 if (mddev->pers) {
2390                         if (atomic_read(&mddev->active) > 1)
2391                                 return -EBUSY;
2392                         err = do_md_stop(mddev, 2);
2393                 }
2394                 break;
2395         case suspended:
2396                 break; /* not supported yet */
2397         case readonly:
2398                 if (mddev->pers)
2399                         err = do_md_stop(mddev, 1);
2400                 else {
2401                         mddev->ro = 1;
2402                         err = do_md_run(mddev);
2403                 }
2404                 break;
2405         case read_auto:
2406                 /* stopping an active array */
2407                 if (mddev->pers) {
2408                         err = do_md_stop(mddev, 1);
2409                         if (err == 0)
2410                                 mddev->ro = 2; /* FIXME mark devices writable */
2411                 } else {
2412                         mddev->ro = 2;
2413                         err = do_md_run(mddev);
2414                 }
2415                 break;
2416         case clean:
2417                 if (mddev->pers) {
2418                         restart_array(mddev);
2419                         spin_lock_irq(&mddev->write_lock);
2420                         if (atomic_read(&mddev->writes_pending) == 0) {
2421                                 mddev->in_sync = 1;
2422                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
2423                         }
2424                         spin_unlock_irq(&mddev->write_lock);
2425                 } else {
2426                         mddev->ro = 0;
2427                         mddev->recovery_cp = MaxSector;
2428                         err = do_md_run(mddev);
2429                 }
2430                 break;
2431         case active:
2432                 if (mddev->pers) {
2433                         restart_array(mddev);
2434                         clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2435                         wake_up(&mddev->sb_wait);
2436                         err = 0;
2437                 } else {
2438                         mddev->ro = 0;
2439                         err = do_md_run(mddev);
2440                 }
2441                 break;
2442         case write_pending:
2443         case active_idle:
2444                 /* these cannot be set */
2445                 break;
2446         }
2447         if (err)
2448                 return err;
2449         else
2450                 return len;
2451 }
2452 static struct md_sysfs_entry md_array_state =
2453 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
2454
2455 static ssize_t
2456 null_show(mddev_t *mddev, char *page)
2457 {
2458         return -EINVAL;
2459 }
2460
2461 static ssize_t
2462 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2463 {
2464         /* buf must be %d:%d\n? giving major and minor numbers */
2465         /* The new device is added to the array.
2466          * If the array has a persistent superblock, we read the
2467          * superblock to initialise info and check validity.
2468          * Otherwise, only checking done is that in bind_rdev_to_array,
2469          * which mainly checks size.
2470          */
2471         char *e;
2472         int major = simple_strtoul(buf, &e, 10);
2473         int minor;
2474         dev_t dev;
2475         mdk_rdev_t *rdev;
2476         int err;
2477
2478         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2479                 return -EINVAL;
2480         minor = simple_strtoul(e+1, &e, 10);
2481         if (*e && *e != '\n')
2482                 return -EINVAL;
2483         dev = MKDEV(major, minor);
2484         if (major != MAJOR(dev) ||
2485             minor != MINOR(dev))
2486                 return -EOVERFLOW;
2487
2488
2489         if (mddev->persistent) {
2490                 rdev = md_import_device(dev, mddev->major_version,
2491                                         mddev->minor_version);
2492                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2493                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2494                                                        mdk_rdev_t, same_set);
2495                         err = super_types[mddev->major_version]
2496                                 .load_super(rdev, rdev0, mddev->minor_version);
2497                         if (err < 0)
2498                                 goto out;
2499                 }
2500         } else
2501                 rdev = md_import_device(dev, -1, -1);
2502
2503         if (IS_ERR(rdev))
2504                 return PTR_ERR(rdev);
2505         err = bind_rdev_to_array(rdev, mddev);
2506  out:
2507         if (err)
2508                 export_rdev(rdev);
2509         return err ? err : len;
2510 }
2511
2512 static struct md_sysfs_entry md_new_device =
2513 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
2514
2515 static ssize_t
2516 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2517 {
2518         char *end;
2519         unsigned long chunk, end_chunk;
2520
2521         if (!mddev->bitmap)
2522                 goto out;
2523         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2524         while (*buf) {
2525                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2526                 if (buf == end) break;
2527                 if (*end == '-') { /* range */
2528                         buf = end + 1;
2529                         end_chunk = simple_strtoul(buf, &end, 0);
2530                         if (buf == end) break;
2531                 }
2532                 if (*end && !isspace(*end)) break;
2533                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2534                 buf = end;
2535                 while (isspace(*buf)) buf++;
2536         }
2537         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2538 out:
2539         return len;
2540 }
2541
2542 static struct md_sysfs_entry md_bitmap =
2543 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
2544
2545 static ssize_t
2546 size_show(mddev_t *mddev, char *page)
2547 {
2548         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2549 }
2550
2551 static int update_size(mddev_t *mddev, unsigned long size);
2552
2553 static ssize_t
2554 size_store(mddev_t *mddev, const char *buf, size_t len)
2555 {
2556         /* If array is inactive, we can reduce the component size, but
2557          * not increase it (except from 0).
2558          * If array is active, we can try an on-line resize
2559          */
2560         char *e;
2561         int err = 0;
2562         unsigned long long size = simple_strtoull(buf, &e, 10);
2563         if (!*buf || *buf == '\n' ||
2564             (*e && *e != '\n'))
2565                 return -EINVAL;
2566
2567         if (mddev->pers) {
2568                 err = update_size(mddev, size);
2569                 md_update_sb(mddev, 1);
2570         } else {
2571                 if (mddev->size == 0 ||
2572                     mddev->size > size)
2573                         mddev->size = size;
2574                 else
2575                         err = -ENOSPC;
2576         }
2577         return err ? err : len;
2578 }
2579
2580 static struct md_sysfs_entry md_size =
2581 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
2582
2583
2584 /* Metdata version.
2585  * This is either 'none' for arrays with externally managed metadata,
2586  * or N.M for internally known formats
2587  */
2588 static ssize_t
2589 metadata_show(mddev_t *mddev, char *page)
2590 {
2591         if (mddev->persistent)
2592                 return sprintf(page, "%d.%d\n",
2593                                mddev->major_version, mddev->minor_version);
2594         else
2595                 return sprintf(page, "none\n");
2596 }
2597
2598 static ssize_t
2599 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2600 {
2601         int major, minor;
2602         char *e;
2603         if (!list_empty(&mddev->disks))
2604                 return -EBUSY;
2605
2606         if (cmd_match(buf, "none")) {
2607                 mddev->persistent = 0;
2608                 mddev->major_version = 0;
2609                 mddev->minor_version = 90;
2610                 return len;
2611         }
2612         major = simple_strtoul(buf, &e, 10);
2613         if (e==buf || *e != '.')
2614                 return -EINVAL;
2615         buf = e+1;
2616         minor = simple_strtoul(buf, &e, 10);
2617         if (e==buf || (*e && *e != '\n') )
2618                 return -EINVAL;
2619         if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2620             super_types[major].name == NULL)
2621                 return -ENOENT;
2622         mddev->major_version = major;
2623         mddev->minor_version = minor;
2624         mddev->persistent = 1;
2625         return len;
2626 }
2627
2628 static struct md_sysfs_entry md_metadata =
2629 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2630
2631 static ssize_t
2632 action_show(mddev_t *mddev, char *page)
2633 {
2634         char *type = "idle";
2635         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2636             test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2637                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2638                         type = "reshape";
2639                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2640                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2641                                 type = "resync";
2642                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2643                                 type = "check";
2644                         else
2645                                 type = "repair";
2646                 } else
2647                         type = "recover";
2648         }
2649         return sprintf(page, "%s\n", type);
2650 }
2651
2652 static ssize_t
2653 action_store(mddev_t *mddev, const char *page, size_t len)
2654 {
2655         if (!mddev->pers || !mddev->pers->sync_request)
2656                 return -EINVAL;
2657
2658         if (cmd_match(page, "idle")) {
2659                 if (mddev->sync_thread) {
2660                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2661                         md_unregister_thread(mddev->sync_thread);
2662                         mddev->sync_thread = NULL;
2663                         mddev->recovery = 0;
2664                 }
2665         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2666                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2667                 return -EBUSY;
2668         else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2669                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2670         else if (cmd_match(page, "reshape")) {
2671                 int err;
2672                 if (mddev->pers->start_reshape == NULL)
2673                         return -EINVAL;
2674                 err = mddev->pers->start_reshape(mddev);
2675                 if (err)
2676                         return err;
2677         } else {
2678                 if (cmd_match(page, "check"))
2679                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2680                 else if (!cmd_match(page, "repair"))
2681                         return -EINVAL;
2682                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2683                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2684         }
2685         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2686         md_wakeup_thread(mddev->thread);
2687         return len;
2688 }
2689
2690 static ssize_t
2691 mismatch_cnt_show(mddev_t *mddev, char *page)
2692 {
2693         return sprintf(page, "%llu\n",
2694                        (unsigned long long) mddev->resync_mismatches);
2695 }
2696
2697 static struct md_sysfs_entry md_scan_mode =
2698 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
2699
2700
2701 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
2702
2703 static ssize_t
2704 sync_min_show(mddev_t *mddev, char *page)
2705 {
2706         return sprintf(page, "%d (%s)\n", speed_min(mddev),
2707                        mddev->sync_speed_min ? "local": "system");
2708 }
2709
2710 static ssize_t
2711 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2712 {
2713         int min;
2714         char *e;
2715         if (strncmp(buf, "system", 6)==0) {
2716                 mddev->sync_speed_min = 0;
2717                 return len;
2718         }
2719         min = simple_strtoul(buf, &e, 10);
2720         if (buf == e || (*e && *e != '\n') || min <= 0)
2721                 return -EINVAL;
2722         mddev->sync_speed_min = min;
2723         return len;
2724 }
2725
2726 static struct md_sysfs_entry md_sync_min =
2727 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2728
2729 static ssize_t
2730 sync_max_show(mddev_t *mddev, char *page)
2731 {
2732         return sprintf(page, "%d (%s)\n", speed_max(mddev),
2733                        mddev->sync_speed_max ? "local": "system");
2734 }
2735
2736 static ssize_t
2737 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2738 {
2739         int max;
2740         char *e;
2741         if (strncmp(buf, "system", 6)==0) {
2742                 mddev->sync_speed_max = 0;
2743                 return len;
2744         }
2745         max = simple_strtoul(buf, &e, 10);
2746         if (buf == e || (*e && *e != '\n') || max <= 0)
2747                 return -EINVAL;
2748         mddev->sync_speed_max = max;
2749         return len;
2750 }
2751
2752 static struct md_sysfs_entry md_sync_max =
2753 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
2754
2755
2756 static ssize_t
2757 sync_speed_show(mddev_t *mddev, char *page)
2758 {
2759         unsigned long resync, dt, db;
2760         resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
2761         dt = ((jiffies - mddev->resync_mark) / HZ);
2762         if (!dt) dt++;
2763         db = resync - (mddev->resync_mark_cnt);
2764         return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2765 }
2766
2767 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
2768
2769 static ssize_t
2770 sync_completed_show(mddev_t *mddev, char *page)
2771 {
2772         unsigned long max_blocks, resync;
2773
2774         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2775                 max_blocks = mddev->resync_max_sectors;
2776         else
2777                 max_blocks = mddev->size << 1;
2778
2779         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2780         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2781 }
2782
2783 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
2784
2785 static ssize_t
2786 suspend_lo_show(mddev_t *mddev, char *page)
2787 {
2788         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2789 }
2790
2791 static ssize_t
2792 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2793 {
2794         char *e;
2795         unsigned long long new = simple_strtoull(buf, &e, 10);
2796
2797         if (mddev->pers->quiesce == NULL)
2798                 return -EINVAL;
2799         if (buf == e || (*e && *e != '\n'))
2800                 return -EINVAL;
2801         if (new >= mddev->suspend_hi ||
2802             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2803                 mddev->suspend_lo = new;
2804                 mddev->pers->quiesce(mddev, 2);
2805                 return len;
2806         } else
2807                 return -EINVAL;
2808 }
2809 static struct md_sysfs_entry md_suspend_lo =
2810 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2811
2812
2813 static ssize_t
2814 suspend_hi_show(mddev_t *mddev, char *page)
2815 {
2816         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2817 }
2818
2819 static ssize_t
2820 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2821 {
2822         char *e;
2823         unsigned long long new = simple_strtoull(buf, &e, 10);
2824
2825         if (mddev->pers->quiesce == NULL)
2826                 return -EINVAL;
2827         if (buf == e || (*e && *e != '\n'))
2828                 return -EINVAL;
2829         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2830             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2831                 mddev->suspend_hi = new;
2832                 mddev->pers->quiesce(mddev, 1);
2833                 mddev->pers->quiesce(mddev, 0);
2834                 return len;
2835         } else
2836                 return -EINVAL;
2837 }
2838 static struct md_sysfs_entry md_suspend_hi =
2839 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2840
2841
2842 static struct attribute *md_default_attrs[] = {
2843         &md_level.attr,
2844         &md_layout.attr,
2845         &md_raid_disks.attr,
2846         &md_chunk_size.attr,
2847         &md_size.attr,
2848         &md_resync_start.attr,
2849         &md_metadata.attr,
2850         &md_new_device.attr,
2851         &md_safe_delay.attr,
2852         &md_array_state.attr,
2853         NULL,
2854 };
2855
2856 static struct attribute *md_redundancy_attrs[] = {
2857         &md_scan_mode.attr,
2858         &md_mismatches.attr,
2859         &md_sync_min.attr,
2860         &md_sync_max.attr,
2861         &md_sync_speed.attr,
2862         &md_sync_completed.attr,
2863         &md_suspend_lo.attr,
2864         &md_suspend_hi.attr,
2865         &md_bitmap.attr,
2866         NULL,
2867 };
2868 static struct attribute_group md_redundancy_group = {
2869         .name = NULL,
2870         .attrs = md_redundancy_attrs,
2871 };
2872
2873
2874 static ssize_t
2875 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2876 {
2877         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2878         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2879         ssize_t rv;
2880
2881         if (!entry->show)
2882                 return -EIO;
2883         rv = mddev_lock(mddev);
2884         if (!rv) {
2885                 rv = entry->show(mddev, page);
2886                 mddev_unlock(mddev);
2887         }
2888         return rv;
2889 }
2890
2891 static ssize_t
2892 md_attr_store(struct kobject *kobj, struct attribute *attr,
2893               const char *page, size_t length)
2894 {
2895         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2896         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2897         ssize_t rv;
2898
2899         if (!entry->store)
2900                 return -EIO;
2901         if (!capable(CAP_SYS_ADMIN))
2902                 return -EACCES;
2903         rv = mddev_lock(mddev);
2904         if (!rv) {
2905                 rv = entry->store(mddev, page, length);
2906                 mddev_unlock(mddev);
2907         }
2908         return rv;
2909 }
2910
2911 static void md_free(struct kobject *ko)
2912 {
2913         mddev_t *mddev = container_of(ko, mddev_t, kobj);
2914         kfree(mddev);
2915 }
2916
2917 static struct sysfs_ops md_sysfs_ops = {
2918         .show   = md_attr_show,
2919         .store  = md_attr_store,
2920 };
2921 static struct kobj_type md_ktype = {
2922         .release        = md_free,
2923         .sysfs_ops      = &md_sysfs_ops,
2924         .default_attrs  = md_default_attrs,
2925 };
2926
2927 int mdp_major = 0;
2928
2929 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2930 {
2931         static DEFINE_MUTEX(disks_mutex);
2932         mddev_t *mddev = mddev_find(dev);
2933         struct gendisk *disk;
2934         int partitioned = (MAJOR(dev) != MD_MAJOR);
2935         int shift = partitioned ? MdpMinorShift : 0;
2936         int unit = MINOR(dev) >> shift;
2937
2938         if (!mddev)
2939                 return NULL;
2940
2941         mutex_lock(&disks_mutex);
2942         if (mddev->gendisk) {
2943                 mutex_unlock(&disks_mutex);
2944                 mddev_put(mddev);
2945                 return NULL;
2946         }
2947         disk = alloc_disk(1 << shift);
2948         if (!disk) {
2949                 mutex_unlock(&disks_mutex);
2950                 mddev_put(mddev);
2951                 return NULL;
2952         }
2953         disk->major = MAJOR(dev);
2954         disk->first_minor = unit << shift;
2955         if (partitioned)
2956                 sprintf(disk->disk_name, "md_d%d", unit);
2957         else
2958                 sprintf(disk->disk_name, "md%d", unit);
2959         disk->fops = &md_fops;
2960         disk->private_data = mddev;
2961         disk->queue = mddev->queue;
2962         add_disk(disk);
2963         mddev->gendisk = disk;
2964         mutex_unlock(&disks_mutex);
2965         mddev->kobj.parent = &disk->kobj;
2966         mddev->kobj.k_name = NULL;
2967         snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2968         mddev->kobj.ktype = &md_ktype;
2969         kobject_register(&mddev->kobj);
2970         return NULL;
2971 }
2972
2973 static void md_safemode_timeout(unsigned long data)
2974 {
2975         mddev_t *mddev = (mddev_t *) data;
2976
2977         mddev->safemode = 1;
2978         md_wakeup_thread(mddev->thread);
2979 }
2980
2981 static int start_dirty_degraded;
2982
2983 static int do_md_run(mddev_t * mddev)
2984 {
2985         int err;
2986         int chunk_size;
2987         struct list_head *tmp;
2988         mdk_rdev_t *rdev;
2989         struct gendisk *disk;
2990         struct mdk_personality *pers;
2991         char b[BDEVNAME_SIZE];
2992
2993         if (list_empty(&mddev->disks))
2994                 /* cannot run an array with no devices.. */
2995                 return -EINVAL;
2996
2997         if (mddev->pers)
2998                 return -EBUSY;
2999
3000         /*
3001          * Analyze all RAID superblock(s)
3002          */
3003         if (!mddev->raid_disks)
3004                 analyze_sbs(mddev);
3005
3006         chunk_size = mddev->chunk_size;
3007
3008         if (chunk_size) {
3009                 if (chunk_size > MAX_CHUNK_SIZE) {
3010                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
3011                                 chunk_size, MAX_CHUNK_SIZE);
3012                         return -EINVAL;
3013                 }
3014                 /*
3015                  * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
3016                  */
3017                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3018                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3019                         return -EINVAL;
3020                 }
3021                 if (chunk_size < PAGE_SIZE) {
3022                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3023                                 chunk_size, PAGE_SIZE);
3024                         return -EINVAL;
3025                 }
3026
3027                 /* devices must have minimum size of one chunk */
3028                 ITERATE_RDEV(mddev,rdev,tmp) {
3029                         if (test_bit(Faulty, &rdev->flags))
3030                                 continue;
3031                         if (rdev->size < chunk_size / 1024) {
3032                                 printk(KERN_WARNING
3033                                         "md: Dev %s smaller than chunk_size:"
3034                                         " %lluk < %dk\n",
3035                                         bdevname(rdev->bdev,b),
3036                                         (unsigned long long)rdev->size,
3037                                         chunk_size / 1024);
3038                                 return -EINVAL;
3039                         }
3040                 }
3041         }
3042
3043 #ifdef CONFIG_KMOD
3044         if (mddev->level != LEVEL_NONE)
3045                 request_module("md-level-%d", mddev->level);
3046         else if (mddev->clevel[0])
3047                 request_module("md-%s", mddev->clevel);
3048 #endif
3049
3050         /*
3051          * Drop all container device buffers, from now on
3052          * the only valid external interface is through the md
3053          * device.
3054          * Also find largest hardsector size
3055          */
3056         ITERATE_RDEV(mddev,rdev,tmp) {
3057                 if (test_bit(Faulty, &rdev->flags))
3058                         continue;
3059                 sync_blockdev(rdev->bdev);
3060                 invalidate_bdev(rdev->bdev, 0);
3061         }
3062
3063         md_probe(mddev->unit, NULL, NULL);
3064         disk = mddev->gendisk;
3065         if (!disk)
3066                 return -ENOMEM;
3067
3068         spin_lock(&pers_lock);
3069         pers = find_pers(mddev->level, mddev->clevel);
3070         if (!pers || !try_module_get(pers->owner)) {
3071                 spin_unlock(&pers_lock);
3072                 if (mddev->level != LEVEL_NONE)
3073                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3074                                mddev->level);
3075                 else
3076                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3077                                mddev->clevel);
3078                 return -EINVAL;
3079         }
3080         mddev->pers = pers;
3081         spin_unlock(&pers_lock);
3082         mddev->level = pers->level;
3083         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3084
3085         if (mddev->reshape_position != MaxSector &&
3086             pers->start_reshape == NULL) {
3087                 /* This personality cannot handle reshaping... */
3088                 mddev->pers = NULL;
3089                 module_put(pers->owner);
3090                 return -EINVAL;
3091         }
3092
3093         if (pers->sync_request) {
3094                 /* Warn if this is a potentially silly
3095                  * configuration.
3096                  */
3097                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3098                 mdk_rdev_t *rdev2;
3099                 struct list_head *tmp2;
3100                 int warned = 0;
3101                 ITERATE_RDEV(mddev, rdev, tmp) {
3102                         ITERATE_RDEV(mddev, rdev2, tmp2) {
3103                                 if (rdev < rdev2 &&
3104                                     rdev->bdev->bd_contains ==
3105                                     rdev2->bdev->bd_contains) {
3106                                         printk(KERN_WARNING
3107                                                "%s: WARNING: %s appears to be"
3108                                                " on the same physical disk as"
3109                                                " %s.\n",
3110                                                mdname(mddev),
3111                                                bdevname(rdev->bdev,b),
3112                                                bdevname(rdev2->bdev,b2));
3113                                         warned = 1;
3114                                 }
3115                         }
3116                 }
3117                 if (warned)
3118                         printk(KERN_WARNING
3119                                "True protection against single-disk"
3120                                " failure might be compromised.\n");
3121         }
3122
3123         mddev->recovery = 0;
3124         mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3125         mddev->barriers_work = 1;
3126         mddev->ok_start_degraded = start_dirty_degraded;
3127
3128         if (start_readonly)
3129                 mddev->ro = 2; /* read-only, but switch on first write */
3130
3131         err = mddev->pers->run(mddev);
3132         if (!err && mddev->pers->sync_request) {
3133                 err = bitmap_create(mddev);
3134                 if (err) {
3135                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3136                                mdname(mddev), err);
3137                         mddev->pers->stop(mddev);
3138                 }
3139         }
3140         if (err) {
3141                 printk(KERN_ERR "md: pers->run() failed ...\n");
3142                 module_put(mddev->pers->owner);
3143                 mddev->pers = NULL;
3144                 bitmap_destroy(mddev);
3145                 return err;
3146         }
3147         if (mddev->pers->sync_request)
3148                 sysfs_create_group(&mddev->kobj, &md_redundancy_group);
3149         else if (mddev->ro == 2) /* auto-readonly not meaningful */
3150                 mddev->ro = 0;
3151
3152         atomic_set(&mddev->writes_pending,0);
3153         mddev->safemode = 0;
3154         mddev->safemode_timer.function = md_safemode_timeout;
3155         mddev->safemode_timer.data = (unsigned long) mddev;
3156         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3157         mddev->in_sync = 1;
3158
3159         ITERATE_RDEV(mddev,rdev,tmp)
3160                 if (rdev->raid_disk >= 0) {
3161                         char nm[20];
3162                         sprintf(nm, "rd%d", rdev->raid_disk);
3163                         sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
3164                 }
3165         
3166         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3167         
3168         if (mddev->flags)
3169                 md_update_sb(mddev, 0);
3170
3171         set_capacity(disk, mddev->array_size<<1);
3172
3173         /* If we call blk_queue_make_request here, it will
3174          * re-initialise max_sectors etc which may have been
3175          * refined inside -> run.  So just set the bits we need to set.
3176          * Most initialisation happended when we called
3177          * blk_queue_make_request(..., md_fail_request)
3178          * earlier.
3179          */
3180         mddev->queue->queuedata = mddev;
3181         mddev->queue->make_request_fn = mddev->pers->make_request;
3182
3183         /* If there is a partially-recovered drive we need to
3184          * start recovery here.  If we leave it to md_check_recovery,
3185          * it will remove the drives and not do the right thing
3186          */
3187         if (mddev->degraded && !mddev->sync_thread) {
3188                 struct list_head *rtmp;
3189                 int spares = 0;
3190                 ITERATE_RDEV(mddev,rdev,rtmp)
3191                         if (rdev->raid_disk >= 0 &&
3192                             !test_bit(In_sync, &rdev->flags) &&
3193                             !test_bit(Faulty, &rdev->flags))
3194                                 /* complete an interrupted recovery */
3195                                 spares++;
3196                 if (spares && mddev->pers->sync_request) {
3197                         mddev->recovery = 0;
3198                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3199                         mddev->sync_thread = md_register_thread(md_do_sync,
3200                                                                 mddev,
3201                                                                 "%s_resync");
3202                         if (!mddev->sync_thread) {
3203                                 printk(KERN_ERR "%s: could not start resync"
3204                                        " thread...\n",
3205                                        mdname(mddev));
3206                                 /* leave the spares where they are, it shouldn't hurt */
3207                                 mddev->recovery = 0;
3208                         }
3209                 }
3210         }
3211         md_wakeup_thread(mddev->thread);
3212         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3213
3214         mddev->changed = 1;
3215         md_new_event(mddev);
3216         kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
3217         return 0;
3218 }
3219
3220 static int restart_array(mddev_t *mddev)
3221 {
3222         struct gendisk *disk = mddev->gendisk;
3223         int err;
3224
3225         /*
3226          * Complain if it has no devices
3227          */
3228         err = -ENXIO;
3229         if (list_empty(&mddev->disks))
3230                 goto out;
3231
3232         if (mddev->pers) {
3233                 err = -EBUSY;
3234                 if (!mddev->ro)
3235                         goto out;
3236
3237                 mddev->safemode = 0;
3238                 mddev->ro = 0;
3239                 set_disk_ro(disk, 0);
3240
3241                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3242                         mdname(mddev));
3243                 /*
3244                  * Kick recovery or resync if necessary
3245                  */
3246                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3247                 md_wakeup_thread(mddev->thread);
3248                 md_wakeup_thread(mddev->sync_thread);
3249                 err = 0;
3250         } else
3251                 err = -EINVAL;
3252
3253 out:
3254         return err;
3255 }
3256
3257 /* similar to deny_write_access, but accounts for our holding a reference
3258  * to the file ourselves */
3259 static int deny_bitmap_write_access(struct file * file)
3260 {
3261         struct inode *inode = file->f_mapping->host;
3262
3263         spin_lock(&inode->i_lock);
3264         if (atomic_read(&inode->i_writecount) > 1) {
3265                 spin_unlock(&inode->i_lock);
3266                 return -ETXTBSY;
3267         }
3268         atomic_set(&inode->i_writecount, -1);
3269         spin_unlock(&inode->i_lock);
3270
3271         return 0;
3272 }
3273
3274 static void restore_bitmap_write_access(struct file *file)
3275 {
3276         struct inode *inode = file->f_mapping->host;
3277
3278         spin_lock(&inode->i_lock);
3279         atomic_set(&inode->i_writecount, 1);
3280         spin_unlock(&inode->i_lock);
3281 }
3282
3283 /* mode:
3284  *   0 - completely stop and dis-assemble array
3285  *   1 - switch to readonly
3286  *   2 - stop but do not disassemble array
3287  */
3288 static int do_md_stop(mddev_t * mddev, int mode)
3289 {
3290         int err = 0;
3291         struct gendisk *disk = mddev->gendisk;
3292
3293         if (mddev->pers) {
3294                 if (atomic_read(&mddev->active)>2) {
3295                         printk("md: %s still in use.\n",mdname(mddev));
3296                         return -EBUSY;
3297                 }
3298
3299                 if (mddev->sync_thread) {
3300                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3301                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3302                         md_unregister_thread(mddev->sync_thread);
3303                         mddev->sync_thread = NULL;
3304                 }
3305
3306                 del_timer_sync(&mddev->safemode_timer);
3307
3308                 invalidate_partition(disk, 0);
3309
3310                 switch(mode) {
3311                 case 1: /* readonly */
3312                         err  = -ENXIO;
3313                         if (mddev->ro==1)
3314                                 goto out;
3315                         mddev->ro = 1;
3316                         break;
3317                 case 0: /* disassemble */
3318                 case 2: /* stop */
3319                         bitmap_flush(mddev);
3320                         md_super_wait(mddev);
3321                         if (mddev->ro)
3322                                 set_disk_ro(disk, 0);
3323                         blk_queue_make_request(mddev->queue, md_fail_request);
3324                         mddev->pers->stop(mddev);
3325                         if (mddev->pers->sync_request)
3326                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3327
3328                         module_put(mddev->pers->owner);
3329                         mddev->pers = NULL;
3330
3331                         set_capacity(disk, 0);
3332                         mddev->changed = 1;
3333
3334                         if (mddev->ro)
3335                                 mddev->ro = 0;
3336                 }
3337                 if (!mddev->in_sync || mddev->flags) {
3338                         /* mark array as shutdown cleanly */
3339                         mddev->in_sync = 1;
3340                         md_update_sb(mddev, 1);
3341                 }
3342                 if (mode == 1)
3343                         set_disk_ro(disk, 1);
3344                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3345         }
3346
3347         /*
3348          * Free resources if final stop
3349          */
3350         if (mode == 0) {
3351                 mdk_rdev_t *rdev;
3352                 struct list_head *tmp;
3353
3354                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3355
3356                 bitmap_destroy(mddev);
3357                 if (mddev->bitmap_file) {
3358                         restore_bitmap_write_access(mddev->bitmap_file);
3359                         fput(mddev->bitmap_file);
3360                         mddev->bitmap_file = NULL;
3361                 }
3362                 mddev->bitmap_offset = 0;
3363
3364                 ITERATE_RDEV(mddev,rdev,tmp)
3365                         if (rdev->raid_disk >= 0) {
3366                                 char nm[20];
3367                                 sprintf(nm, "rd%d", rdev->raid_disk);
3368                                 sysfs_remove_link(&mddev->kobj, nm);
3369                         }
3370
3371                 export_array(mddev);
3372
3373                 mddev->array_size = 0;
3374                 mddev->size = 0;
3375                 mddev->raid_disks = 0;
3376                 mddev->recovery_cp = 0;
3377
3378         } else if (mddev->pers)
3379                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3380                         mdname(mddev));
3381         err = 0;
3382         md_new_event(mddev);
3383 out:
3384         return err;
3385 }
3386
3387 #ifndef MODULE
3388 static void autorun_array(mddev_t *mddev)
3389 {
3390         mdk_rdev_t *rdev;
3391         struct list_head *tmp;
3392         int err;
3393
3394         if (list_empty(&mddev->disks))
3395                 return;
3396
3397         printk(KERN_INFO "md: running: ");
3398
3399         ITERATE_RDEV(mddev,rdev,tmp) {
3400                 char b[BDEVNAME_SIZE];
3401                 printk("<%s>", bdevname(rdev->bdev,b));
3402         }
3403         printk("\n");
3404
3405         err = do_md_run (mddev);
3406         if (err) {
3407                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3408                 do_md_stop (mddev, 0);
3409         }
3410 }
3411
3412 /*
3413  * lets try to run arrays based on all disks that have arrived
3414  * until now. (those are in pending_raid_disks)
3415  *
3416  * the method: pick the first pending disk, collect all disks with
3417  * the same UUID, remove all from the pending list and put them into
3418  * the 'same_array' list. Then order this list based on superblock
3419  * update time (freshest comes first), kick out 'old' disks and
3420  * compare superblocks. If everything's fine then run it.
3421  *
3422  * If "unit" is allocated, then bump its reference count
3423  */
3424 static void autorun_devices(int part)
3425 {
3426         struct list_head *tmp;
3427         mdk_rdev_t *rdev0, *rdev;
3428         mddev_t *mddev;
3429         char b[BDEVNAME_SIZE];
3430
3431         printk(KERN_INFO "md: autorun ...\n");
3432         while (!list_empty(&pending_raid_disks)) {
3433                 int unit;
3434                 dev_t dev;
3435                 LIST_HEAD(candidates);
3436                 rdev0 = list_entry(pending_raid_disks.next,
3437                                          mdk_rdev_t, same_set);
3438
3439                 printk(KERN_INFO "md: considering %s ...\n",
3440                         bdevname(rdev0->bdev,b));
3441                 INIT_LIST_HEAD(&candidates);
3442                 ITERATE_RDEV_PENDING(rdev,tmp)
3443                         if (super_90_load(rdev, rdev0, 0) >= 0) {
3444                                 printk(KERN_INFO "md:  adding %s ...\n",
3445                                         bdevname(rdev->bdev,b));
3446                                 list_move(&rdev->same_set, &candidates);
3447                         }
3448                 /*
3449                  * now we have a set of devices, with all of them having
3450                  * mostly sane superblocks. It's time to allocate the
3451                  * mddev.
3452                  */
3453                 if (part) {
3454                         dev = MKDEV(mdp_major,
3455                                     rdev0->preferred_minor << MdpMinorShift);
3456                         unit = MINOR(dev) >> MdpMinorShift;
3457                 } else {
3458                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3459                         unit = MINOR(dev);
3460                 }
3461                 if (rdev0->preferred_minor != unit) {
3462                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3463                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3464                         break;
3465                 }
3466
3467                 md_probe(dev, NULL, NULL);
3468                 mddev = mddev_find(dev);
3469                 if (!mddev) {
3470                         printk(KERN_ERR 
3471                                 "md: cannot allocate memory for md drive.\n");
3472                         break;
3473                 }
3474                 if (mddev_lock(mddev)) 
3475                         printk(KERN_WARNING "md: %s locked, cannot run\n",
3476                                mdname(mddev));
3477                 else if (mddev->raid_disks || mddev->major_version
3478                          || !list_empty(&mddev->disks)) {
3479                         printk(KERN_WARNING 
3480                                 "md: %s already running, cannot run %s\n",
3481                                 mdname(mddev), bdevname(rdev0->bdev,b));
3482                         mddev_unlock(mddev);
3483                 } else {
3484                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
3485                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3486                                 list_del_init(&rdev->same_set);
3487                                 if (bind_rdev_to_array(rdev, mddev))
3488                                         export_rdev(rdev);
3489                         }
3490                         autorun_array(mddev);
3491                         mddev_unlock(mddev);
3492                 }
3493                 /* on success, candidates will be empty, on error
3494                  * it won't...
3495                  */
3496                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3497                         export_rdev(rdev);
3498                 mddev_put(mddev);
3499         }
3500         printk(KERN_INFO "md: ... autorun DONE.\n");
3501 }
3502 #endif /* !MODULE */
3503
3504 static int get_version(void __user * arg)
3505 {
3506         mdu_version_t ver;
3507
3508         ver.major = MD_MAJOR_VERSION;
3509         ver.minor = MD_MINOR_VERSION;
3510         ver.patchlevel = MD_PATCHLEVEL_VERSION;
3511
3512         if (copy_to_user(arg, &ver, sizeof(ver)))
3513                 return -EFAULT;
3514
3515         return 0;
3516 }
3517
3518 static int get_array_info(mddev_t * mddev, void __user * arg)
3519 {
3520         mdu_array_info_t info;
3521         int nr,working,active,failed,spare;
3522         mdk_rdev_t *rdev;
3523         struct list_head *tmp;
3524
3525         nr=working=active=failed=spare=0;
3526         ITERATE_RDEV(mddev,rdev,tmp) {
3527                 nr++;
3528                 if (test_bit(Faulty, &rdev->flags))
3529                         failed++;
3530                 else {
3531                         working++;
3532                         if (test_bit(In_sync, &rdev->flags))
3533                                 active++;       
3534                         else
3535                                 spare++;
3536                 }
3537         }
3538
3539         info.major_version = mddev->major_version;
3540         info.minor_version = mddev->minor_version;
3541         info.patch_version = MD_PATCHLEVEL_VERSION;
3542         info.ctime         = mddev->ctime;
3543         info.level         = mddev->level;
3544         info.size          = mddev->size;
3545         if (info.size != mddev->size) /* overflow */
3546                 info.size = -1;
3547         info.nr_disks      = nr;
3548         info.raid_disks    = mddev->raid_disks;
3549         info.md_minor      = mddev->md_minor;
3550         info.not_persistent= !mddev->persistent;
3551
3552         info.utime         = mddev->utime;
3553         info.state         = 0;
3554         if (mddev->in_sync)
3555                 info.state = (1<<MD_SB_CLEAN);
3556         if (mddev->bitmap && mddev->bitmap_offset)
3557                 info.state = (1<<MD_SB_BITMAP_PRESENT);
3558         info.active_disks  = active;
3559         info.working_disks = working;
3560         info.failed_disks  = failed;
3561         info.spare_disks   = spare;
3562
3563         info.layout        = mddev->layout;
3564         info.chunk_size    = mddev->chunk_size;
3565
3566         if (copy_to_user(arg, &info, sizeof(info)))
3567                 return -EFAULT;
3568
3569         return 0;
3570 }
3571
3572 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3573 {
3574         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3575         char *ptr, *buf = NULL;
3576         int err = -ENOMEM;
3577
3578         md_allow_write(mddev);
3579
3580         file = kmalloc(sizeof(*file), GFP_KERNEL);
3581         if (!file)
3582                 goto out;
3583
3584         /* bitmap disabled, zero the first byte and copy out */
3585         if (!mddev->bitmap || !mddev->bitmap->file) {
3586                 file->pathname[0] = '\0';
3587                 goto copy_out;
3588         }
3589
3590         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3591         if (!buf)
3592                 goto out;
3593
3594         ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3595         if (!ptr)
3596                 goto out;
3597
3598         strcpy(file->pathname, ptr);
3599
3600 copy_out:
3601         err = 0;
3602         if (copy_to_user(arg, file, sizeof(*file)))
3603                 err = -EFAULT;
3604 out:
3605         kfree(buf);
3606         kfree(file);
3607         return err;
3608 }
3609
3610 static int get_disk_info(mddev_t * mddev, void __user * arg)
3611 {
3612         mdu_disk_info_t info;
3613         unsigned int nr;
3614         mdk_rdev_t *rdev;
3615
3616         if (copy_from_user(&info, arg, sizeof(info)))
3617                 return -EFAULT;
3618
3619         nr = info.number;
3620
3621         rdev = find_rdev_nr(mddev, nr);
3622         if (rdev) {
3623                 info.major = MAJOR(rdev->bdev->bd_dev);
3624                 info.minor = MINOR(rdev->bdev->bd_dev);
3625                 info.raid_disk = rdev->raid_disk;
3626                 info.state = 0;
3627                 if (test_bit(Faulty, &rdev->flags))
3628                         info.state |= (1<<MD_DISK_FAULTY);
3629                 else if (test_bit(In_sync, &rdev->flags)) {
3630                         info.state |= (1<<MD_DISK_ACTIVE);
3631                         info.state |= (1<<MD_DISK_SYNC);
3632                 }
3633                 if (test_bit(WriteMostly, &rdev->flags))
3634                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
3635         } else {
3636                 info.major = info.minor = 0;
3637                 info.raid_disk = -1;
3638                 info.state = (1<<MD_DISK_REMOVED);
3639         }
3640
3641         if (copy_to_user(arg, &info, sizeof(info)))
3642                 return -EFAULT;
3643
3644         return 0;
3645 }
3646
3647 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3648 {
3649         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3650         mdk_rdev_t *rdev;
3651         dev_t dev = MKDEV(info->major,info->minor);
3652
3653         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3654                 return -EOVERFLOW;
3655
3656         if (!mddev->raid_disks) {
3657                 int err;
3658                 /* expecting a device which has a superblock */
3659                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3660                 if (IS_ERR(rdev)) {
3661                         printk(KERN_WARNING 
3662                                 "md: md_import_device returned %ld\n",
3663                                 PTR_ERR(rdev));
3664                         return PTR_ERR(rdev);
3665                 }
3666                 if (!list_empty(&mddev->disks)) {
3667                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3668                                                         mdk_rdev_t, same_set);
3669                         int err = super_types[mddev->major_version]
3670                                 .load_super(rdev, rdev0, mddev->minor_version);
3671                         if (err < 0) {
3672                                 printk(KERN_WARNING 
3673                                         "md: %s has different UUID to %s\n",
3674                                         bdevname(rdev->bdev,b), 
3675                                         bdevname(rdev0->bdev,b2));
3676                                 export_rdev(rdev);
3677                                 return -EINVAL;
3678                         }
3679                 }
3680                 err = bind_rdev_to_array(rdev, mddev);
3681                 if (err)
3682                         export_rdev(rdev);
3683                 return err;
3684         }
3685
3686         /*
3687          * add_new_disk can be used once the array is assembled
3688          * to add "hot spares".  They must already have a superblock
3689          * written
3690          */
3691         if (mddev->pers) {
3692                 int err;
3693                 if (!mddev->pers->hot_add_disk) {
3694                         printk(KERN_WARNING 
3695                                 "%s: personality does not support diskops!\n",
3696                                mdname(mddev));
3697                         return -EINVAL;
3698                 }
3699                 if (mddev->persistent)
3700                         rdev = md_import_device(dev, mddev->major_version,
3701                                                 mddev->minor_version);
3702                 else
3703                         rdev = md_import_device(dev, -1, -1);
3704                 if (IS_ERR(rdev)) {
3705                         printk(KERN_WARNING 
3706                                 "md: md_import_device returned %ld\n",
3707                                 PTR_ERR(rdev));
3708                         return PTR_ERR(rdev);
3709                 }
3710                 /* set save_raid_disk if appropriate */
3711                 if (!mddev->persistent) {
3712                         if (info->state & (1<<MD_DISK_SYNC)  &&
3713                             info->raid_disk < mddev->raid_disks)
3714                                 rdev->raid_disk = info->raid_disk;
3715                         else
3716                                 rdev->raid_disk = -1;
3717                 } else
3718                         super_types[mddev->major_version].
3719                                 validate_super(mddev, rdev);
3720                 rdev->saved_raid_disk = rdev->raid_disk;
3721
3722                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3723                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3724                         set_bit(WriteMostly, &rdev->flags);
3725
3726                 rdev->raid_disk = -1;
3727                 err = bind_rdev_to_array(rdev, mddev);
3728                 if (!err && !mddev->pers->hot_remove_disk) {
3729                         /* If there is hot_add_disk but no hot_remove_disk
3730                          * then added disks for geometry changes,
3731                          * and should be added immediately.
3732                          */
3733                         super_types[mddev->major_version].
3734                                 validate_super(mddev, rdev);
3735                         err = mddev->pers->hot_add_disk(mddev, rdev);
3736                         if (err)
3737                                 unbind_rdev_from_array(rdev);
3738                 }
3739                 if (err)
3740                         export_rdev(rdev);
3741
3742                 md_update_sb(mddev, 1);
3743                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3744                 md_wakeup_thread(mddev->thread);
3745                 return err;
3746         }
3747
3748         /* otherwise, add_new_disk is only allowed
3749          * for major_version==0 superblocks
3750          */
3751         if (mddev->major_version != 0) {
3752                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3753                        mdname(mddev));
3754                 return -EINVAL;
3755         }
3756
3757         if (!(info->state & (1<<MD_DISK_FAULTY))) {
3758                 int err;
3759                 rdev = md_import_device (dev, -1, 0);
3760                 if (IS_ERR(rdev)) {
3761                         printk(KERN_WARNING 
3762                                 "md: error, md_import_device() returned %ld\n",
3763                                 PTR_ERR(rdev));
3764                         return PTR_ERR(rdev);
3765                 }
3766                 rdev->desc_nr = info->number;
3767                 if (info->raid_disk < mddev->raid_disks)
3768                         rdev->raid_disk = info->raid_disk;
3769                 else
3770                         rdev->raid_disk = -1;
3771
3772                 rdev->flags = 0;
3773
3774                 if (rdev->raid_disk < mddev->raid_disks)
3775                         if (info->state & (1<<MD_DISK_SYNC))
3776                                 set_bit(In_sync, &rdev->flags);
3777
3778                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3779                         set_bit(WriteMostly, &rdev->flags);
3780
3781                 if (!mddev->persistent) {
3782                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
3783                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3784                 } else 
3785                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3786                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3787
3788                 err = bind_rdev_to_array(rdev, mddev);
3789                 if (err) {
3790                         export_rdev(rdev);
3791                         return err;
3792                 }
3793         }
3794
3795         return 0;
3796 }
3797
3798 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3799 {
3800         char b[BDEVNAME_SIZE];
3801         mdk_rdev_t *rdev;
3802
3803         if (!mddev->pers)
3804                 return -ENODEV;
3805
3806         rdev = find_rdev(mddev, dev);
3807         if (!rdev)
3808                 return -ENXIO;
3809
3810         if (rdev->raid_disk >= 0)
3811                 goto busy;
3812
3813         kick_rdev_from_array(rdev);
3814         md_update_sb(mddev, 1);
3815         md_new_event(mddev);
3816
3817         return 0;
3818 busy:
3819         printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3820                 bdevname(rdev->bdev,b), mdname(mddev));
3821         return -EBUSY;
3822 }
3823
3824 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3825 {
3826         char b[BDEVNAME_SIZE];
3827         int err;
3828         unsigned int size;
3829         mdk_rdev_t *rdev;
3830
3831         if (!mddev->pers)
3832                 return -ENODEV;
3833
3834         if (mddev->major_version != 0) {
3835                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3836                         " version-0 superblocks.\n",
3837                         mdname(mddev));
3838                 return -EINVAL;
3839         }
3840         if (!mddev->pers->hot_add_disk) {
3841                 printk(KERN_WARNING 
3842                         "%s: personality does not support diskops!\n",
3843                         mdname(mddev));
3844                 return -EINVAL;
3845         }
3846
3847         rdev = md_import_device (dev, -1, 0);
3848         if (IS_ERR(rdev)) {
3849                 printk(KERN_WARNING 
3850                         "md: error, md_import_device() returned %ld\n",
3851                         PTR_ERR(rdev));
3852                 return -EINVAL;
3853         }
3854
3855         if (mddev->persistent)
3856                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3857         else
3858                 rdev->sb_offset =
3859                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3860
3861         size = calc_dev_size(rdev, mddev->chunk_size);
3862         rdev->size = size;
3863
3864         if (test_bit(Faulty, &rdev->flags)) {
3865                 printk(KERN_WARNING 
3866                         "md: can not hot-add faulty %s disk to %s!\n",
3867                         bdevname(rdev->bdev,b), mdname(mddev));
3868                 err = -EINVAL;
3869                 goto abort_export;
3870         }
3871         clear_bit(In_sync, &rdev->flags);
3872         rdev->desc_nr = -1;
3873         rdev->saved_raid_disk = -1;
3874         err = bind_rdev_to_array(rdev, mddev);
3875         if (err)
3876                 goto abort_export;
3877
3878         /*
3879          * The rest should better be atomic, we can have disk failures
3880          * noticed in interrupt contexts ...
3881          */
3882
3883         if (rdev->desc_nr == mddev->max_disks) {
3884                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3885                         mdname(mddev));
3886                 err = -EBUSY;
3887                 goto abort_unbind_export;
3888         }
3889
3890         rdev->raid_disk = -1;
3891
3892         md_update_sb(mddev, 1);
3893
3894         /*
3895          * Kick recovery, maybe this spare has to be added to the
3896          * array immediately.
3897          */
3898         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3899         md_wakeup_thread(mddev->thread);
3900         md_new_event(mddev);
3901         return 0;
3902
3903 abort_unbind_export:
3904         unbind_rdev_from_array(rdev);
3905
3906 abort_export:
3907         export_rdev(rdev);
3908         return err;
3909 }
3910
3911 static int set_bitmap_file(mddev_t *mddev, int fd)
3912 {
3913         int err;
3914
3915         if (mddev->pers) {
3916                 if (!mddev->pers->quiesce)
3917                         return -EBUSY;
3918                 if (mddev->recovery || mddev->sync_thread)
3919                         return -EBUSY;
3920                 /* we should be able to change the bitmap.. */
3921         }
3922
3923
3924         if (fd >= 0) {
3925                 if (mddev->bitmap)
3926                         return -EEXIST; /* cannot add when bitmap is present */
3927                 mddev->bitmap_file = fget(fd);
3928
3929                 if (mddev->bitmap_file == NULL) {
3930                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3931                                mdname(mddev));
3932                         return -EBADF;
3933                 }
3934
3935                 err = deny_bitmap_write_access(mddev->bitmap_file);
3936                 if (err) {
3937                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3938                                mdname(mddev));
3939                         fput(mddev->bitmap_file);
3940                         mddev->bitmap_file = NULL;
3941                         return err;
3942                 }
3943                 mddev->bitmap_offset = 0; /* file overrides offset */
3944         } else if (mddev->bitmap == NULL)
3945                 return -ENOENT; /* cannot remove what isn't there */
3946         err = 0;
3947         if (mddev->pers) {
3948                 mddev->pers->quiesce(mddev, 1);
3949                 if (fd >= 0)
3950                         err = bitmap_create(mddev);
3951                 if (fd < 0 || err) {
3952                         bitmap_destroy(mddev);
3953                         fd = -1; /* make sure to put the file */
3954                 }
3955                 mddev->pers->quiesce(mddev, 0);
3956         }
3957         if (fd < 0) {
3958                 if (mddev->bitmap_file) {
3959                         restore_bitmap_write_access(mddev->bitmap_file);
3960                         fput(mddev->bitmap_file);
3961                 }
3962                 mddev->bitmap_file = NULL;
3963         }
3964
3965         return err;
3966 }
3967
3968 /*
3969  * set_array_info is used two different ways
3970  * The original usage is when creating a new array.
3971  * In this usage, raid_disks is > 0 and it together with
3972  *  level, size, not_persistent,layout,chunksize determine the
3973  *  shape of the array.
3974  *  This will always create an array with a type-0.90.0 superblock.
3975  * The newer usage is when assembling an array.
3976  *  In this case raid_disks will be 0, and the major_version field is
3977  *  use to determine which style super-blocks are to be found on the devices.
3978  *  The minor and patch _version numbers are also kept incase the
3979  *  super_block handler wishes to interpret them.
3980  */
3981 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3982 {
3983
3984         if (info->raid_disks == 0) {
3985                 /* just setting version number for superblock loading */
3986                 if (info->major_version < 0 ||
3987                     info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
3988                     super_types[info->major_version].name == NULL) {
3989                         /* maybe try to auto-load a module? */
3990                         printk(KERN_INFO 
3991                                 "md: superblock version %d not known\n",
3992                                 info->major_version);
3993                         return -EINVAL;
3994                 }
3995                 mddev->major_version = info->major_version;
3996                 mddev->minor_version = info->minor_version;
3997                 mddev->patch_version = info->patch_version;
3998                 mddev->persistent = !info->not_persistent;
3999                 return 0;
4000         }
4001         mddev->major_version = MD_MAJOR_VERSION;
4002         mddev->minor_version = MD_MINOR_VERSION;
4003         mddev->patch_version = MD_PATCHLEVEL_VERSION;
4004         mddev->ctime         = get_seconds();
4005
4006         mddev->level         = info->level;
4007         mddev->clevel[0]     = 0;
4008         mddev->size          = info->size;
4009         mddev->raid_disks    = info->raid_disks;
4010         /* don't set md_minor, it is determined by which /dev/md* was
4011          * openned
4012          */
4013         if (info->state & (1<<MD_SB_CLEAN))
4014                 mddev->recovery_cp = MaxSector;
4015         else
4016                 mddev->recovery_cp = 0;
4017         mddev->persistent    = ! info->not_persistent;
4018
4019         mddev->layout        = info->layout;
4020         mddev->chunk_size    = info->chunk_size;
4021
4022         mddev->max_disks     = MD_SB_DISKS;
4023
4024         mddev->flags         = 0;
4025         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4026
4027         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4028         mddev->bitmap_offset = 0;
4029
4030         mddev->reshape_position = MaxSector;
4031
4032         /*
4033          * Generate a 128 bit UUID
4034          */
4035         get_random_bytes(mddev->uuid, 16);
4036
4037         mddev->new_level = mddev->level;
4038         mddev->new_chunk = mddev->chunk_size;
4039         mddev->new_layout = mddev->layout;
4040         mddev->delta_disks = 0;
4041
4042         return 0;
4043 }
4044
4045 static int update_size(mddev_t *mddev, unsigned long size)
4046 {
4047         mdk_rdev_t * rdev;
4048         int rv;
4049         struct list_head *tmp;
4050         int fit = (size == 0);
4051
4052         if (mddev->pers->resize == NULL)
4053                 return -EINVAL;
4054         /* The "size" is the amount of each device that is used.
4055          * This can only make sense for arrays with redundancy.
4056          * linear and raid0 always use whatever space is available
4057          * We can only consider changing the size if no resync
4058          * or reconstruction is happening, and if the new size
4059          * is acceptable. It must fit before the sb_offset or,
4060          * if that is <data_offset, it must fit before the
4061          * size of each device.
4062          * If size is zero, we find the largest size that fits.
4063          */
4064         if (mddev->sync_thread)
4065                 return -EBUSY;
4066         ITERATE_RDEV(mddev,rdev,tmp) {
4067                 sector_t avail;
4068                 avail = rdev->size * 2;
4069
4070                 if (fit && (size == 0 || size > avail/2))
4071                         size = avail/2;
4072                 if (avail < ((sector_t)size << 1))
4073                         return -ENOSPC;
4074         }
4075         rv = mddev->pers->resize(mddev, (sector_t)size *2);
4076         if (!rv) {
4077                 struct block_device *bdev;
4078
4079                 bdev = bdget_disk(mddev->gendisk, 0);
4080                 if (bdev) {
4081                         mutex_lock(&bdev->bd_inode->i_mutex);
4082                         i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4083                         mutex_unlock(&bdev->bd_inode->i_mutex);
4084                         bdput(bdev);
4085                 }
4086         }
4087         return rv;
4088 }
4089
4090 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4091 {
4092         int rv;
4093         /* change the number of raid disks */
4094         if (mddev->pers->check_reshape == NULL)
4095                 return -EINVAL;
4096         if (raid_disks <= 0 ||
4097             raid_disks >= mddev->max_disks)
4098                 return -EINVAL;
4099         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4100                 return -EBUSY;
4101         mddev->delta_disks = raid_disks - mddev->raid_disks;
4102
4103         rv = mddev->pers->check_reshape(mddev);
4104         return rv;
4105 }
4106
4107
4108 /*
4109  * update_array_info is used to change the configuration of an
4110  * on-line array.
4111  * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
4112  * fields in the info are checked against the array.
4113  * Any differences that cannot be handled will cause an error.
4114  * Normally, only one change can be managed at a time.
4115  */
4116 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4117 {
4118         int rv = 0;
4119         int cnt = 0;
4120         int state = 0;
4121
4122         /* calculate expected state,ignoring low bits */
4123         if (mddev->bitmap && mddev->bitmap_offset)
4124                 state |= (1 << MD_SB_BITMAP_PRESENT);
4125
4126         if (mddev->major_version != info->major_version ||
4127             mddev->minor_version != info->minor_version ||
4128 /*          mddev->patch_version != info->patch_version || */
4129             mddev->ctime         != info->ctime         ||
4130             mddev->level         != info->level         ||
4131 /*          mddev->layout        != info->layout        || */
4132             !mddev->persistent   != info->not_persistent||
4133             mddev->chunk_size    != info->chunk_size    ||
4134             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4135             ((state^info->state) & 0xfffffe00)
4136                 )
4137                 return -EINVAL;
4138         /* Check there is only one change */
4139         if (info->size >= 0 && mddev->size != info->size) cnt++;
4140         if (mddev->raid_disks != info->raid_disks) cnt++;
4141         if (mddev->layout != info->layout) cnt++;
4142         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4143         if (cnt == 0) return 0;
4144         if (cnt > 1) return -EINVAL;
4145
4146         if (mddev->layout != info->layout) {
4147                 /* Change layout
4148                  * we don't need to do anything at the md level, the
4149                  * personality will take care of it all.
4150                  */
4151                 if (mddev->pers->reconfig == NULL)
4152                         return -EINVAL;
4153                 else
4154                         return mddev->pers->reconfig(mddev, info->layout, -1);
4155         }
4156         if (info->size >= 0 && mddev->size != info->size)
4157                 rv = update_size(mddev, info->size);
4158
4159         if (mddev->raid_disks    != info->raid_disks)
4160                 rv = update_raid_disks(mddev, info->raid_disks);
4161
4162         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4163                 if (mddev->pers->quiesce == NULL)
4164                         return -EINVAL;
4165                 if (mddev->recovery || mddev->sync_thread)
4166                         return -EBUSY;
4167                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4168                         /* add the bitmap */
4169                         if (mddev->bitmap)
4170                                 return -EEXIST;
4171                         if (mddev->default_bitmap_offset == 0)
4172                                 return -EINVAL;
4173                         mddev->bitmap_offset = mddev->default_bitmap_offset;
4174                         mddev->pers->quiesce(mddev, 1);
4175                         rv = bitmap_create(mddev);
4176                         if (rv)
4177                                 bitmap_destroy(mddev);
4178                         mddev->pers->quiesce(mddev, 0);
4179                 } else {
4180                         /* remove the bitmap */
4181                         if (!mddev->bitmap)
4182                                 return -ENOENT;
4183                         if (mddev->bitmap->file)
4184                                 return -EINVAL;
4185                         mddev->pers->quiesce(mddev, 1);
4186                         bitmap_destroy(mddev);
4187                         mddev->pers->quiesce(mddev, 0);
4188                         mddev->bitmap_offset = 0;
4189                 }
4190         }
4191         md_update_sb(mddev, 1);
4192         return rv;
4193 }
4194
4195 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4196 {
4197         mdk_rdev_t *rdev;
4198
4199         if (mddev->pers == NULL)
4200                 return -ENODEV;
4201
4202         rdev = find_rdev(mddev, dev);
4203         if (!rdev)
4204                 return -ENODEV;
4205
4206         md_error(mddev, rdev);
4207         return 0;
4208 }
4209
4210 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4211 {
4212         mddev_t *mddev = bdev->bd_disk->private_data;
4213
4214         geo->heads = 2;
4215         geo->sectors = 4;
4216         geo->cylinders = get_capacity(mddev->gendisk) / 8;
4217         return 0;
4218 }
4219
4220 static int md_ioctl(struct inode *inode, struct file *file,
4221                         unsigned int cmd, unsigned long arg)
4222 {
4223         int err = 0;
4224         void __user *argp = (void __user *)arg;
4225         mddev_t *mddev = NULL;
4226
4227         if (!capable(CAP_SYS_ADMIN))
4228                 return -EACCES;
4229
4230         /*
4231          * Commands dealing with the RAID driver but not any
4232          * particular array:
4233          */
4234         switch (cmd)
4235         {
4236                 case RAID_VERSION:
4237                         err = get_version(argp);
4238                         goto done;
4239
4240                 case PRINT_RAID_DEBUG:
4241                         err = 0;
4242                         md_print_devices();
4243                         goto done;
4244
4245 #ifndef MODULE
4246                 case RAID_AUTORUN:
4247                         err = 0;
4248                         autostart_arrays(arg);
4249                         goto done;
4250 #endif
4251                 default:;
4252         }
4253
4254         /*
4255          * Commands creating/starting a new array:
4256          */
4257
4258         mddev = inode->i_bdev->bd_disk->private_data;
4259
4260         if (!mddev) {
4261                 BUG();
4262                 goto abort;
4263         }
4264
4265         err = mddev_lock(mddev);
4266         if (err) {
4267                 printk(KERN_INFO 
4268                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
4269                         err, cmd);
4270                 goto abort;
4271         }
4272
4273         switch (cmd)
4274         {
4275                 case SET_ARRAY_INFO:
4276                         {
4277                                 mdu_array_info_t info;
4278                                 if (!arg)
4279                                         memset(&info, 0, sizeof(info));
4280                                 else if (copy_from_user(&info, argp, sizeof(info))) {
4281                                         err = -EFAULT;
4282                                         goto abort_unlock;
4283                                 }
4284                                 if (mddev->pers) {
4285                                         err = update_array_info(mddev, &info);
4286                                         if (err) {
4287                                                 printk(KERN_WARNING "md: couldn't update"
4288                                                        " array info. %d\n", err);
4289                                                 goto abort_unlock;
4290                                         }
4291                                         goto done_unlock;
4292                                 }
4293                                 if (!list_empty(&mddev->disks)) {
4294                                         printk(KERN_WARNING
4295                                                "md: array %s already has disks!\n",
4296                                                mdname(mddev));
4297                                         err = -EBUSY;
4298                                         goto abort_unlock;
4299                                 }
4300                                 if (mddev->raid_disks) {
4301                                         printk(KERN_WARNING
4302                                                "md: array %s already initialised!\n",
4303                                                mdname(mddev));
4304                                         err = -EBUSY;
4305                                         goto abort_unlock;
4306                                 }
4307                                 err = set_array_info(mddev, &info);
4308                                 if (err) {
4309                                         printk(KERN_WARNING "md: couldn't set"
4310                                                " array info. %d\n", err);
4311                                         goto abort_unlock;
4312                                 }
4313                         }
4314                         goto done_unlock;
4315
4316                 default:;
4317         }
4318
4319         /*
4320          * Commands querying/configuring an existing array:
4321          */
4322         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4323          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4324         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4325                         && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4326                         && cmd != GET_BITMAP_FILE) {
4327                 err = -ENODEV;
4328                 goto abort_unlock;
4329         }
4330
4331         /*
4332          * Commands even a read-only array can execute:
4333          */
4334         switch (cmd)
4335         {
4336                 case GET_ARRAY_INFO:
4337                         err = get_array_info(mddev, argp);
4338                         goto done_unlock;
4339
4340                 case GET_BITMAP_FILE:
4341                         err = get_bitmap_file(mddev, argp);
4342                         goto done_unlock;
4343
4344                 case GET_DISK_INFO:
4345                         err = get_disk_info(mddev, argp);
4346                         goto done_unlock;
4347
4348                 case RESTART_ARRAY_RW:
4349                         err = restart_array(mddev);
4350                         goto done_unlock;
4351
4352                 case STOP_ARRAY:
4353                         err = do_md_stop (mddev, 0);
4354                         goto done_unlock;
4355
4356                 case STOP_ARRAY_RO:
4357                         err = do_md_stop (mddev, 1);
4358                         goto done_unlock;
4359
4360         /*
4361          * We have a problem here : there is no easy way to give a CHS
4362          * virtual geometry. We currently pretend that we have a 2 heads
4363          * 4 sectors (with a BIG number of cylinders...). This drives
4364          * dosfs just mad... ;-)
4365          */
4366         }
4367
4368         /*
4369          * The remaining ioctls are changing the state of the
4370          * superblock, so we do not allow them on read-only arrays.
4371          * However non-MD ioctls (e.g. get-size) will still come through
4372          * here and hit the 'default' below, so only disallow
4373          * 'md' ioctls, and switch to rw mode if started auto-readonly.
4374          */
4375         if (_IOC_TYPE(cmd) == MD_MAJOR &&
4376             mddev->ro && mddev->pers) {
4377                 if (mddev->ro == 2) {
4378                         mddev->ro = 0;
4379                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4380                 md_wakeup_thread(mddev->thread);
4381
4382                 } else {
4383                         err = -EROFS;
4384                         goto abort_unlock;
4385                 }
4386         }
4387
4388         switch (cmd)
4389         {
4390                 case ADD_NEW_DISK:
4391                 {
4392                         mdu_disk_info_t info;
4393                         if (copy_from_user(&info, argp, sizeof(info)))
4394                                 err = -EFAULT;
4395                         else
4396                                 err = add_new_disk(mddev, &info);
4397                         goto done_unlock;
4398                 }
4399
4400                 case HOT_REMOVE_DISK:
4401                         err = hot_remove_disk(mddev, new_decode_dev(arg));
4402                         goto done_unlock;
4403
4404                 case HOT_ADD_DISK:
4405                         err = hot_add_disk(mddev, new_decode_dev(arg));
4406                         goto done_unlock;
4407
4408                 case SET_DISK_FAULTY:
4409                         err = set_disk_faulty(mddev, new_decode_dev(arg));
4410                         goto done_unlock;
4411
4412                 case RUN_ARRAY:
4413                         err = do_md_run (mddev);
4414                         goto done_unlock;
4415
4416                 case SET_BITMAP_FILE:
4417                         err = set_bitmap_file(mddev, (int)arg);
4418                         goto done_unlock;
4419
4420                 default:
4421                         err = -EINVAL;
4422                         goto abort_unlock;
4423         }
4424
4425 done_unlock:
4426 abort_unlock:
4427         mddev_unlock(mddev);
4428
4429         return err;
4430 done:
4431         if (err)
4432                 MD_BUG();
4433 abort:
4434         return err;
4435 }
4436
4437 static int md_open(struct inode *inode, struct file *file)
4438 {
4439         /*
4440          * Succeed if we can lock the mddev, which confirms that
4441          * it isn't being stopped right now.
4442          */
4443         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4444         int err;
4445
4446         if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
4447                 goto out;
4448
4449         err = 0;
4450         mddev_get(mddev);
4451         mddev_unlock(mddev);
4452
4453         check_disk_change(inode->i_bdev);
4454  out:
4455         return err;
4456 }
4457
4458 static int md_release(struct inode *inode, struct file * file)
4459 {
4460         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4461
4462         BUG_ON(!mddev);
4463         mddev_put(mddev);
4464
4465         return 0;
4466 }
4467
4468 static int md_media_changed(struct gendisk *disk)
4469 {
4470         mddev_t *mddev = disk->private_data;
4471
4472         return mddev->changed;
4473 }
4474
4475 static int md_revalidate(struct gendisk *disk)
4476 {
4477         mddev_t *mddev = disk->private_data;
4478
4479         mddev->changed = 0;
4480         return 0;
4481 }
4482 static struct block_device_operations md_fops =
4483 {
4484         .owner          = THIS_MODULE,
4485         .open           = md_open,
4486         .release        = md_release,
4487         .ioctl          = md_ioctl,
4488         .getgeo         = md_getgeo,
4489         .media_changed  = md_media_changed,
4490         .revalidate_disk= md_revalidate,
4491 };
4492
4493 static int md_thread(void * arg)
4494 {
4495         mdk_thread_t *thread = arg;
4496
4497         /*
4498          * md_thread is a 'system-thread', it's priority should be very
4499          * high. We avoid resource deadlocks individually in each
4500          * raid personality. (RAID5 does preallocation) We also use RR and
4501          * the very same RT priority as kswapd, thus we will never get
4502          * into a priority inversion deadlock.
4503          *
4504          * we definitely have to have equal or higher priority than
4505          * bdflush, otherwise bdflush will deadlock if there are too
4506          * many dirty RAID5 blocks.
4507          */
4508
4509         current->flags |= PF_NOFREEZE;
4510         allow_signal(SIGKILL);
4511         while (!kthread_should_stop()) {
4512
4513                 /* We need to wait INTERRUPTIBLE so that
4514                  * we don't add to the load-average.
4515                  * That means we need to be sure no signals are
4516                  * pending
4517                  */
4518                 if (signal_pending(current))
4519                         flush_signals(current);
4520
4521                 wait_event_interruptible_timeout
4522                         (thread->wqueue,
4523                          test_bit(THREAD_WAKEUP, &thread->flags)
4524                          || kthread_should_stop(),
4525                          thread->timeout);
4526
4527                 clear_bit(THREAD_WAKEUP, &thread->flags);
4528
4529                 thread->run(thread->mddev);
4530         }
4531
4532         return 0;
4533 }
4534
4535 void md_wakeup_thread(mdk_thread_t *thread)
4536 {
4537         if (thread) {
4538                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4539                 set_bit(THREAD_WAKEUP, &thread->flags);
4540                 wake_up(&thread->wqueue);
4541         }
4542 }
4543
4544 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4545                                  const char *name)
4546 {
4547         mdk_thread_t *thread;
4548
4549         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4550         if (!thread)
4551                 return NULL;
4552
4553         init_waitqueue_head(&thread->wqueue);
4554
4555         thread->run = run;
4556         thread->mddev = mddev;
4557         thread->timeout = MAX_SCHEDULE_TIMEOUT;
4558         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4559         if (IS_ERR(thread->tsk)) {
4560                 kfree(thread);
4561                 return NULL;
4562         }
4563         return thread;
4564 }
4565
4566 void md_unregister_thread(mdk_thread_t *thread)
4567 {
4568         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
4569
4570         kthread_stop(thread->tsk);
4571         kfree(thread);
4572 }
4573
4574 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4575 {
4576         if (!mddev) {
4577                 MD_BUG();
4578                 return;
4579         }
4580
4581         if (!rdev || test_bit(Faulty, &rdev->flags))
4582                 return;
4583 /*
4584         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4585                 mdname(mddev),
4586                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4587                 __builtin_return_address(0),__builtin_return_address(1),
4588                 __builtin_return_address(2),__builtin_return_address(3));
4589 */
4590         if (!mddev->pers)
4591                 return;
4592         if (!mddev->pers->error_handler)
4593                 return;
4594         mddev->pers->error_handler(mddev,rdev);
4595         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4596         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4597         md_wakeup_thread(mddev->thread);
4598         md_new_event_inintr(mddev);
4599 }
4600
4601 /* seq_file implementation /proc/mdstat */
4602
4603 static void status_unused(struct seq_file *seq)
4604 {
4605         int i = 0;
4606         mdk_rdev_t *rdev;
4607         struct list_head *tmp;
4608
4609         seq_printf(seq, "unused devices: ");
4610
4611         ITERATE_RDEV_PENDING(rdev,tmp) {
4612                 char b[BDEVNAME_SIZE];
4613                 i++;
4614                 seq_printf(seq, "%s ",
4615                               bdevname(rdev->bdev,b));
4616         }
4617         if (!i)
4618                 seq_printf(seq, "<none>");
4619
4620         seq_printf(seq, "\n");
4621 }
4622
4623
4624 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4625 {
4626         sector_t max_blocks, resync, res;
4627         unsigned long dt, db, rt;
4628         int scale;
4629         unsigned int per_milli;
4630
4631         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4632
4633         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4634                 max_blocks = mddev->resync_max_sectors >> 1;
4635         else
4636                 max_blocks = mddev->size;
4637
4638         /*
4639          * Should not happen.
4640          */
4641         if (!max_blocks) {
4642                 MD_BUG();
4643                 return;
4644         }
4645         /* Pick 'scale' such that (resync>>scale)*1000 will fit
4646          * in a sector_t, and (max_blocks>>scale) will fit in a
4647          * u32, as those are the requirements for sector_div.
4648          * Thus 'scale' must be at least 10
4649          */
4650         scale = 10;
4651         if (sizeof(sector_t) > sizeof(unsigned long)) {
4652                 while ( max_blocks/2 > (1ULL<<(scale+32)))
4653                         scale++;
4654         }
4655         res = (resync>>scale)*1000;
4656         sector_div(res, (u32)((max_blocks>>scale)+1));
4657
4658         per_milli = res;
4659         {
4660                 int i, x = per_milli/50, y = 20-x;
4661                 seq_printf(seq, "[");
4662                 for (i = 0; i < x; i++)
4663                         seq_printf(seq, "=");
4664                 seq_printf(seq, ">");
4665                 for (i = 0; i < y; i++)
4666                         seq_printf(seq, ".");
4667                 seq_printf(seq, "] ");
4668         }
4669         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4670                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4671                     "reshape" :
4672                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
4673                      "check" :
4674                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4675                       "resync" : "recovery"))),
4676                    per_milli/10, per_milli % 10,
4677                    (unsigned long long) resync,
4678                    (unsigned long long) max_blocks);
4679
4680         /*
4681          * We do not want to overflow, so the order of operands and
4682          * the * 100 / 100 trick are important. We do a +1 to be
4683          * safe against division by zero. We only estimate anyway.
4684          *
4685          * dt: time from mark until now
4686          * db: blocks written from mark until now
4687          * rt: remaining time
4688          */
4689         dt = ((jiffies - mddev->resync_mark) / HZ);
4690         if (!dt) dt++;
4691         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
4692                 - mddev->resync_mark_cnt;
4693         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
4694
4695         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4696
4697         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
4698 }
4699
4700 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4701 {
4702         struct list_head *tmp;
4703         loff_t l = *pos;
4704         mddev_t *mddev;
4705
4706         if (l >= 0x10000)
4707                 return NULL;
4708         if (!l--)
4709                 /* header */
4710                 return (void*)1;
4711
4712         spin_lock(&all_mddevs_lock);
4713         list_for_each(tmp,&all_mddevs)
4714                 if (!l--) {
4715                         mddev = list_entry(tmp, mddev_t, all_mddevs);
4716                         mddev_get(mddev);
4717                         spin_unlock(&all_mddevs_lock);
4718                         return mddev;
4719                 }
4720         spin_unlock(&all_mddevs_lock);
4721         if (!l--)
4722                 return (void*)2;/* tail */
4723         return NULL;
4724 }
4725
4726 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4727 {
4728         struct list_head *tmp;
4729         mddev_t *next_mddev, *mddev = v;
4730         
4731         ++*pos;
4732         if (v == (void*)2)
4733                 return NULL;
4734
4735         spin_lock(&all_mddevs_lock);
4736         if (v == (void*)1)
4737                 tmp = all_mddevs.next;
4738         else
4739                 tmp = mddev->all_mddevs.next;
4740         if (tmp != &all_mddevs)
4741                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4742         else {
4743                 next_mddev = (void*)2;
4744                 *pos = 0x10000;
4745         }               
4746         spin_unlock(&all_mddevs_lock);
4747
4748         if (v != (void*)1)
4749                 mddev_put(mddev);
4750         return next_mddev;
4751
4752 }
4753
4754 static void md_seq_stop(struct seq_file *seq, void *v)
4755 {
4756         mddev_t *mddev = v;
4757
4758         if (mddev && v != (void*)1 && v != (void*)2)
4759                 mddev_put(mddev);
4760 }
4761
4762 struct mdstat_info {
4763         int event;
4764 };
4765
4766 static int md_seq_show(struct seq_file *seq, void *v)
4767 {
4768         mddev_t *mddev = v;
4769         sector_t size;
4770         struct list_head *tmp2;
4771         mdk_rdev_t *rdev;
4772         struct mdstat_info *mi = seq->private;
4773         struct bitmap *bitmap;
4774
4775         if (v == (void*)1) {
4776                 struct mdk_personality *pers;
4777                 seq_printf(seq, "Personalities : ");
4778                 spin_lock(&pers_lock);
4779                 list_for_each_entry(pers, &pers_list, list)
4780                         seq_printf(seq, "[%s] ", pers->name);
4781
4782                 spin_unlock(&pers_lock);
4783                 seq_printf(seq, "\n");
4784                 mi->event = atomic_read(&md_event_count);
4785                 return 0;
4786         }
4787         if (v == (void*)2) {
4788                 status_unused(seq);
4789                 return 0;
4790         }
4791
4792         if (mddev_lock(mddev) < 0)
4793                 return -EINTR;
4794
4795         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4796                 seq_printf(seq, "%s : %sactive", mdname(mddev),
4797                                                 mddev->pers ? "" : "in");
4798                 if (mddev->pers) {
4799                         if (mddev->ro==1)
4800                                 seq_printf(seq, " (read-only)");
4801                         if (mddev->ro==2)
4802                                 seq_printf(seq, "(auto-read-only)");
4803                         seq_printf(seq, " %s", mddev->pers->name);
4804                 }
4805
4806                 size = 0;
4807                 ITERATE_RDEV(mddev,rdev,tmp2) {
4808                         char b[BDEVNAME_SIZE];
4809                         seq_printf(seq, " %s[%d]",
4810                                 bdevname(rdev->bdev,b), rdev->desc_nr);
4811                         if (test_bit(WriteMostly, &rdev->flags))
4812                                 seq_printf(seq, "(W)");
4813                         if (test_bit(Faulty, &rdev->flags)) {
4814                                 seq_printf(seq, "(F)");
4815                                 continue;
4816                         } else if (rdev->raid_disk < 0)
4817                                 seq_printf(seq, "(S)"); /* spare */
4818                         size += rdev->size;
4819                 }
4820
4821                 if (!list_empty(&mddev->disks)) {
4822                         if (mddev->pers)
4823                                 seq_printf(seq, "\n      %llu blocks",
4824                                         (unsigned long long)mddev->array_size);
4825                         else
4826                                 seq_printf(seq, "\n      %llu blocks",
4827                                         (unsigned long long)size);
4828                 }
4829                 if (mddev->persistent) {
4830                         if (mddev->major_version != 0 ||
4831                             mddev->minor_version != 90) {
4832                                 seq_printf(seq," super %d.%d",
4833                                            mddev->major_version,
4834                                            mddev->minor_version);
4835                         }
4836                 } else
4837                         seq_printf(seq, " super non-persistent");
4838
4839                 if (mddev->pers) {
4840                         mddev->pers->status (seq, mddev);
4841                         seq_printf(seq, "\n      ");
4842                         if (mddev->pers->sync_request) {
4843                                 if (mddev->curr_resync > 2) {
4844                                         status_resync (seq, mddev);
4845                                         seq_printf(seq, "\n      ");
4846                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4847                                         seq_printf(seq, "\tresync=DELAYED\n      ");
4848                                 else if (mddev->recovery_cp < MaxSector)
4849                                         seq_printf(seq, "\tresync=PENDING\n      ");
4850                         }
4851                 } else
4852                         seq_printf(seq, "\n       ");
4853
4854                 if ((bitmap = mddev->bitmap)) {
4855                         unsigned long chunk_kb;
4856                         unsigned long flags;
4857                         spin_lock_irqsave(&bitmap->lock, flags);
4858                         chunk_kb = bitmap->chunksize >> 10;
4859                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4860                                 "%lu%s chunk",
4861                                 bitmap->pages - bitmap->missing_pages,
4862                                 bitmap->pages,
4863                                 (bitmap->pages - bitmap->missing_pages)
4864                                         << (PAGE_SHIFT - 10),
4865                                 chunk_kb ? chunk_kb : bitmap->chunksize,
4866                                 chunk_kb ? "KB" : "B");
4867                         if (bitmap->file) {
4868                                 seq_printf(seq, ", file: ");
4869                                 seq_path(seq, bitmap->file->f_path.mnt,
4870                                          bitmap->file->f_path.dentry," \t\n");
4871                         }
4872
4873                         seq_printf(seq, "\n");
4874                         spin_unlock_irqrestore(&bitmap->lock, flags);
4875                 }
4876
4877                 seq_printf(seq, "\n");
4878         }
4879         mddev_unlock(mddev);
4880         
4881         return 0;
4882 }
4883
4884 static struct seq_operations md_seq_ops = {
4885         .start  = md_seq_start,
4886         .next   = md_seq_next,
4887         .stop   = md_seq_stop,
4888         .show   = md_seq_show,
4889 };
4890
4891 static int md_seq_open(struct inode *inode, struct file *file)
4892 {
4893         int error;
4894         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4895         if (mi == NULL)
4896                 return -ENOMEM;
4897
4898         error = seq_open(file, &md_seq_ops);
4899         if (error)
4900                 kfree(mi);
4901         else {
4902                 struct seq_file *p = file->private_data;
4903                 p->private = mi;
4904                 mi->event = atomic_read(&md_event_count);
4905         }
4906         return error;
4907 }
4908
4909 static int md_seq_release(struct inode *inode, struct file *file)
4910 {
4911         struct seq_file *m = file->private_data;
4912         struct mdstat_info *mi = m->private;
4913         m->private = NULL;
4914         kfree(mi);
4915         return seq_release(inode, file);
4916 }
4917
4918 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4919 {
4920         struct seq_file *m = filp->private_data;
4921         struct mdstat_info *mi = m->private;
4922         int mask;
4923
4924         poll_wait(filp, &md_event_waiters, wait);
4925
4926         /* always allow read */
4927         mask = POLLIN | POLLRDNORM;
4928
4929         if (mi->event != atomic_read(&md_event_count))
4930                 mask |= POLLERR | POLLPRI;
4931         return mask;
4932 }
4933
4934 static const struct file_operations md_seq_fops = {
4935         .owner          = THIS_MODULE,
4936         .open           = md_seq_open,
4937         .read           = seq_read,
4938         .llseek         = seq_lseek,
4939         .release        = md_seq_release,
4940         .poll           = mdstat_poll,
4941 };
4942
4943 int register_md_personality(struct mdk_personality *p)
4944 {
4945         spin_lock(&pers_lock);
4946         list_add_tail(&p->list, &pers_list);
4947         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
4948         spin_unlock(&pers_lock);
4949         return 0;
4950 }
4951
4952 int unregister_md_personality(struct mdk_personality *p)
4953 {
4954         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
4955         spin_lock(&pers_lock);
4956         list_del_init(&p->list);
4957         spin_unlock(&pers_lock);
4958         return 0;
4959 }
4960
4961 static int is_mddev_idle(mddev_t *mddev)
4962 {
4963         mdk_rdev_t * rdev;
4964         struct list_head *tmp;
4965         int idle;
4966         unsigned long curr_events;
4967
4968         idle = 1;
4969         ITERATE_RDEV(mddev,rdev,tmp) {
4970                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
4971                 curr_events = disk_stat_read(disk, sectors[0]) + 
4972                                 disk_stat_read(disk, sectors[1]) - 
4973                                 atomic_read(&disk->sync_io);
4974                 /* The difference between curr_events and last_events
4975                  * will be affected by any new non-sync IO (making
4976                  * curr_events bigger) and any difference in the amount of
4977                  * in-flight syncio (making current_events bigger or smaller)
4978                  * The amount in-flight is currently limited to
4979                  * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
4980                  * which is at most 4096 sectors.
4981                  * These numbers are fairly fragile and should be made
4982                  * more robust, probably by enforcing the
4983                  * 'window size' that md_do_sync sort-of uses.
4984                  *
4985                  * Note: the following is an unsigned comparison.
4986                  */
4987                 if ((curr_events - rdev->last_events + 4096) > 8192) {
4988                         rdev->last_events = curr_events;
4989                         idle = 0;
4990                 }
4991         }
4992         return idle;
4993 }
4994
4995 void md_done_sync(mddev_t *mddev, int blocks, int ok)
4996 {
4997         /* another "blocks" (512byte) blocks have been synced */
4998         atomic_sub(blocks, &mddev->recovery_active);
4999         wake_up(&mddev->recovery_wait);
5000         if (!ok) {
5001                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5002                 md_wakeup_thread(mddev->thread);
5003                 // stop recovery, signal do_sync ....
5004         }
5005 }
5006
5007
5008 /* md_write_start(mddev, bi)
5009  * If we need to update some array metadata (e.g. 'active' flag
5010  * in superblock) before writing, schedule a superblock update
5011  * and wait for it to complete.
5012  */
5013 void md_write_start(mddev_t *mddev, struct bio *bi)
5014 {
5015         if (bio_data_dir(bi) != WRITE)
5016                 return;
5017
5018         BUG_ON(mddev->ro == 1);
5019         if (mddev->ro == 2) {
5020                 /* need to switch to read/write */
5021                 mddev->ro = 0;
5022                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5023                 md_wakeup_thread(mddev->thread);
5024         }
5025         atomic_inc(&mddev->writes_pending);
5026         if (mddev->in_sync) {
5027                 spin_lock_irq(&mddev->write_lock);
5028                 if (mddev->in_sync) {
5029                         mddev->in_sync = 0;
5030                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5031                         md_wakeup_thread(mddev->thread);
5032                 }
5033                 spin_unlock_irq(&mddev->write_lock);
5034         }
5035         wait_event(mddev->sb_wait, mddev->flags==0);
5036 }
5037
5038 void md_write_end(mddev_t *mddev)
5039 {
5040         if (atomic_dec_and_test(&mddev->writes_pending)) {
5041                 if (mddev->safemode == 2)
5042                         md_wakeup_thread(mddev->thread);
5043                 else if (mddev->safemode_delay)
5044                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5045         }
5046 }
5047
5048 /* md_allow_write(mddev)
5049  * Calling this ensures that the array is marked 'active' so that writes
5050  * may proceed without blocking.  It is important to call this before
5051  * attempting a GFP_KERNEL allocation while holding the mddev lock.
5052  * Must be called with mddev_lock held.
5053  */
5054 void md_allow_write(mddev_t *mddev)
5055 {
5056         if (!mddev->pers)
5057                 return;
5058         if (mddev->ro)
5059                 return;
5060
5061         spin_lock_irq(&mddev->write_lock);
5062         if (mddev->in_sync) {
5063                 mddev->in_sync = 0;
5064                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5065                 if (mddev->safemode_delay &&
5066                     mddev->safemode == 0)
5067                         mddev->safemode = 1;
5068                 spin_unlock_irq(&mddev->write_lock);
5069                 md_update_sb(mddev, 0);
5070         } else
5071                 spin_unlock_irq(&mddev->write_lock);
5072 }
5073 EXPORT_SYMBOL_GPL(md_allow_write);
5074
5075 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
5076
5077 #define SYNC_MARKS      10
5078 #define SYNC_MARK_STEP  (3*HZ)
5079 void md_do_sync(mddev_t *mddev)
5080 {
5081         mddev_t *mddev2;
5082         unsigned int currspeed = 0,
5083                  window;
5084         sector_t max_sectors,j, io_sectors;
5085         unsigned long mark[SYNC_MARKS];
5086         sector_t mark_cnt[SYNC_MARKS];
5087         int last_mark,m;
5088         struct list_head *tmp;
5089         sector_t last_check;
5090         int skipped = 0;
5091         struct list_head *rtmp;
5092         mdk_rdev_t *rdev;
5093         char *desc;
5094
5095         /* just incase thread restarts... */
5096         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5097                 return;
5098         if (mddev->ro) /* never try to sync a read-only array */
5099                 return;
5100
5101         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5102                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5103                         desc = "data-check";
5104                 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5105                         desc = "requested-resync";
5106                 else
5107                         desc = "resync";
5108         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5109                 desc = "reshape";
5110         else
5111                 desc = "recovery";
5112
5113         /* we overload curr_resync somewhat here.
5114          * 0 == not engaged in resync at all
5115          * 2 == checking that there is no conflict with another sync
5116          * 1 == like 2, but have yielded to allow conflicting resync to
5117          *              commense
5118          * other == active in resync - this many blocks
5119          *
5120          * Before starting a resync we must have set curr_resync to
5121          * 2, and then checked that every "conflicting" array has curr_resync
5122          * less than ours.  When we find one that is the same or higher
5123          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
5124          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
5125          * This will mean we have to start checking from the beginning again.
5126          *
5127          */
5128
5129         do {
5130                 mddev->curr_resync = 2;
5131
5132         try_again:
5133                 if (kthread_should_stop()) {
5134                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5135                         goto skip;
5136                 }
5137                 ITERATE_MDDEV(mddev2,tmp) {
5138                         if (mddev2 == mddev)
5139                                 continue;
5140                         if (mddev2->curr_resync && 
5141                             match_mddev_units(mddev,mddev2)) {
5142                                 DEFINE_WAIT(wq);
5143                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
5144                                         /* arbitrarily yield */
5145                                         mddev->curr_resync = 1;
5146                                         wake_up(&resync_wait);
5147                                 }
5148                                 if (mddev > mddev2 && mddev->curr_resync == 1)
5149                                         /* no need to wait here, we can wait the next
5150                                          * time 'round when curr_resync == 2
5151                                          */
5152                                         continue;
5153                                 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5154                                 if (!kthread_should_stop() &&
5155                                     mddev2->curr_resync >= mddev->curr_resync) {
5156                                         printk(KERN_INFO "md: delaying %s of %s"
5157                                                " until %s has finished (they"
5158                                                " share one or more physical units)\n",
5159                                                desc, mdname(mddev), mdname(mddev2));
5160                                         mddev_put(mddev2);
5161                                         schedule();
5162                                         finish_wait(&resync_wait, &wq);
5163                                         goto try_again;
5164                                 }
5165                                 finish_wait(&resync_wait, &wq);
5166                         }
5167                 }
5168         } while (mddev->curr_resync < 2);
5169
5170         j = 0;
5171         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5172                 /* resync follows the size requested by the personality,
5173                  * which defaults to physical size, but can be virtual size
5174                  */
5175                 max_sectors = mddev->resync_max_sectors;
5176                 mddev->resync_mismatches = 0;
5177                 /* we don't use the checkpoint if there's a bitmap */
5178                 if (!mddev->bitmap &&
5179                     !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5180                         j = mddev->recovery_cp;
5181         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5182                 max_sectors = mddev->size << 1;
5183         else {
5184                 /* recovery follows the physical size of devices */
5185                 max_sectors = mddev->size << 1;
5186                 j = MaxSector;
5187                 ITERATE_RDEV(mddev,rdev,rtmp)
5188                         if (rdev->raid_disk >= 0 &&
5189                             !test_bit(Faulty, &rdev->flags) &&
5190                             !test_bit(In_sync, &rdev->flags) &&
5191                             rdev->recovery_offset < j)
5192                                 j = rdev->recovery_offset;
5193         }
5194
5195         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
5196         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
5197                 " %d KB/sec/disk.\n", speed_min(mddev));
5198         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5199                "(but not more than %d KB/sec) for %s.\n",
5200                speed_max(mddev), desc);
5201
5202         is_mddev_idle(mddev); /* this also initializes IO event counters */
5203
5204         io_sectors = 0;
5205         for (m = 0; m < SYNC_MARKS; m++) {
5206                 mark[m] = jiffies;
5207                 mark_cnt[m] = io_sectors;
5208         }
5209         last_mark = 0;
5210         mddev->resync_mark = mark[last_mark];
5211         mddev->resync_mark_cnt = mark_cnt[last_mark];
5212
5213         /*
5214          * Tune reconstruction:
5215          */
5216         window = 32*(PAGE_SIZE/512);
5217         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5218                 window/2,(unsigned long long) max_sectors/2);
5219
5220         atomic_set(&mddev->recovery_active, 0);
5221         init_waitqueue_head(&mddev->recovery_wait);
5222         last_check = 0;
5223
5224         if (j>2) {
5225                 printk(KERN_INFO 
5226                        "md: resuming %s of %s from checkpoint.\n",
5227                        desc, mdname(mddev));
5228                 mddev->curr_resync = j;
5229         }
5230
5231         while (j < max_sectors) {
5232                 sector_t sectors;
5233
5234                 skipped = 0;
5235                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5236                                             currspeed < speed_min(mddev));
5237                 if (sectors == 0) {
5238                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5239                         goto out;
5240                 }
5241
5242                 if (!skipped) { /* actual IO requested */
5243                         io_sectors += sectors;
5244                         atomic_add(sectors, &mddev->recovery_active);
5245                 }
5246
5247                 j += sectors;
5248                 if (j>1) mddev->curr_resync = j;
5249                 mddev->curr_mark_cnt = io_sectors;
5250                 if (last_check == 0)
5251                         /* this is the earliers that rebuilt will be
5252                          * visible in /proc/mdstat
5253                          */
5254                         md_new_event(mddev);
5255
5256                 if (last_check + window > io_sectors || j == max_sectors)
5257                         continue;
5258
5259                 last_check = io_sectors;
5260
5261                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
5262                     test_bit(MD_RECOVERY_ERR, &mddev->recovery))
5263                         break;
5264
5265         repeat:
5266                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5267                         /* step marks */
5268                         int next = (last_mark+1) % SYNC_MARKS;
5269
5270                         mddev->resync_mark = mark[next];
5271                         mddev->resync_mark_cnt = mark_cnt[next];
5272                         mark[next] = jiffies;
5273                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5274                         last_mark = next;
5275                 }
5276
5277
5278                 if (kthread_should_stop()) {
5279                         /*
5280                          * got a signal, exit.
5281                          */
5282                         printk(KERN_INFO 
5283                                 "md: md_do_sync() got signal ... exiting\n");
5284                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5285                         goto out;
5286                 }
5287
5288                 /*
5289                  * this loop exits only if either when we are slower than
5290                  * the 'hard' speed limit, or the system was IO-idle for
5291                  * a jiffy.
5292                  * the system might be non-idle CPU-wise, but we only care
5293                  * about not overloading the IO subsystem. (things like an
5294                  * e2fsck being done on the RAID array should execute fast)
5295                  */
5296                 mddev->queue->unplug_fn(mddev->queue);
5297                 cond_resched();
5298
5299                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5300                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
5301
5302                 if (currspeed > speed_min(mddev)) {
5303                         if ((currspeed > speed_max(mddev)) ||
5304                                         !is_mddev_idle(mddev)) {
5305                                 msleep(500);
5306                                 goto repeat;
5307                         }
5308                 }
5309         }
5310         printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
5311         /*
5312          * this also signals 'finished resyncing' to md_stop
5313          */
5314  out:
5315         mddev->queue->unplug_fn(mddev->queue);
5316
5317         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5318
5319         /* tell personality that we are finished */
5320         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5321
5322         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5323             !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5324             mddev->curr_resync > 2) {
5325                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5326                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5327                                 if (mddev->curr_resync >= mddev->recovery_cp) {
5328                                         printk(KERN_INFO
5329                                                "md: checkpointing %s of %s.\n",
5330                                                desc, mdname(mddev));
5331                                         mddev->recovery_cp = mddev->curr_resync;
5332                                 }
5333                         } else
5334                                 mddev->recovery_cp = MaxSector;
5335                 } else {
5336                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5337                                 mddev->curr_resync = MaxSector;
5338                         ITERATE_RDEV(mddev,rdev,rtmp)
5339                                 if (rdev->raid_disk >= 0 &&
5340                                     !test_bit(Faulty, &rdev->flags) &&
5341                                     !test_bit(In_sync, &rdev->flags) &&
5342                                     rdev->recovery_offset < mddev->curr_resync)
5343                                         rdev->recovery_offset = mddev->curr_resync;
5344                 }
5345         }
5346         set_bit(MD_CHANGE_DEVS, &mddev->flags);
5347
5348  skip:
5349         mddev->curr_resync = 0;
5350         wake_up(&resync_wait);
5351         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5352         md_wakeup_thread(mddev->thread);
5353 }
5354 EXPORT_SYMBOL_GPL(md_do_sync);
5355
5356
5357 /*
5358  * This routine is regularly called by all per-raid-array threads to
5359  * deal with generic issues like resync and super-block update.
5360  * Raid personalities that don't have a thread (linear/raid0) do not
5361  * need this as they never do any recovery or update the superblock.
5362  *
5363  * It does not do any resync itself, but rather "forks" off other threads
5364  * to do that as needed.
5365  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
5366  * "->recovery" and create a thread at ->sync_thread.
5367  * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
5368  * and wakeups up this thread which will reap the thread and finish up.
5369  * This thread also removes any faulty devices (with nr_pending == 0).
5370  *
5371  * The overall approach is:
5372  *  1/ if the superblock needs updating, update it.
5373  *  2/ If a recovery thread is running, don't do anything else.
5374  *  3/ If recovery has finished, clean up, possibly marking spares active.
5375  *  4/ If there are any faulty devices, remove them.
5376  *  5/ If array is degraded, try to add spares devices
5377  *  6/ If array has spares or is not in-sync, start a resync thread.
5378  */
5379 void md_check_recovery(mddev_t *mddev)
5380 {
5381         mdk_rdev_t *rdev;
5382         struct list_head *rtmp;
5383
5384
5385         if (mddev->bitmap)
5386                 bitmap_daemon_work(mddev->bitmap);
5387
5388         if (mddev->ro)
5389                 return;
5390
5391         if (signal_pending(current)) {
5392                 if (mddev->pers->sync_request) {
5393                         printk(KERN_INFO "md: %s in immediate safe mode\n",
5394                                mdname(mddev));
5395                         mddev->safemode = 2;
5396                 }
5397                 flush_signals(current);
5398         }
5399
5400         if ( ! (
5401                 mddev->flags ||
5402                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
5403                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
5404                 (mddev->safemode == 1) ||
5405                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
5406                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
5407                 ))
5408                 return;
5409
5410         if (mddev_trylock(mddev)) {
5411                 int spares =0;
5412
5413                 spin_lock_irq(&mddev->write_lock);
5414                 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
5415                     !mddev->in_sync && mddev->recovery_cp == MaxSector) {
5416                         mddev->in_sync = 1;
5417                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5418                 }
5419                 if (mddev->safemode == 1)
5420                         mddev->safemode = 0;
5421                 spin_unlock_irq(&mddev->write_lock);
5422
5423                 if (mddev->flags)
5424                         md_update_sb(mddev, 0);
5425
5426
5427                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
5428                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
5429                         /* resync/recovery still happening */
5430                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5431                         goto unlock;
5432                 }
5433                 if (mddev->sync_thread) {
5434                         /* resync has finished, collect result */
5435                         md_unregister_thread(mddev->sync_thread);
5436                         mddev->sync_thread = NULL;
5437                         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5438                             !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5439                                 /* success...*/
5440                                 /* activate any spares */
5441                                 mddev->pers->spare_active(mddev);
5442                         }
5443                         md_update_sb(mddev, 1);
5444
5445                         /* if array is no-longer degraded, then any saved_raid_disk
5446                          * information must be scrapped
5447                          */
5448                         if (!mddev->degraded)
5449                                 ITERATE_RDEV(mddev,rdev,rtmp)
5450                                         rdev->saved_raid_disk = -1;
5451
5452                         mddev->recovery = 0;
5453                         /* flag recovery needed just to double check */
5454                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5455                         md_new_event(mddev);
5456                         goto unlock;
5457                 }
5458                 /* Clear some bits that don't mean anything, but
5459                  * might be left set
5460                  */
5461                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5462                 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
5463                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
5464                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
5465
5466                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
5467                         goto unlock;
5468                 /* no recovery is running.
5469                  * remove any failed drives, then
5470                  * add spares if possible.
5471                  * Spare are also removed and re-added, to allow
5472                  * the personality to fail the re-add.
5473                  */
5474                 ITERATE_RDEV(mddev,rdev,rtmp)
5475                         if (rdev->raid_disk >= 0 &&
5476                             (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
5477                             atomic_read(&rdev->nr_pending)==0) {
5478                                 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
5479                                         char nm[20];
5480                                         sprintf(nm,"rd%d", rdev->raid_disk);
5481                                         sysfs_remove_link(&mddev->kobj, nm);
5482                                         rdev->raid_disk = -1;
5483                                 }
5484                         }
5485
5486                 if (mddev->degraded) {
5487                         ITERATE_RDEV(mddev,rdev,rtmp)
5488                                 if (rdev->raid_disk < 0
5489                                     && !test_bit(Faulty, &rdev->flags)) {
5490                                         rdev->recovery_offset = 0;
5491                                         if (mddev->pers->hot_add_disk(mddev,rdev)) {
5492                                                 char nm[20];
5493                                                 sprintf(nm, "rd%d", rdev->raid_disk);
5494                                                 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
5495                                                 spares++;
5496                                                 md_new_event(mddev);
5497                                         } else
5498                                                 break;
5499                                 }
5500                 }
5501
5502                 if (spares) {
5503                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5504                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5505                 } else if (mddev->recovery_cp < MaxSector) {
5506                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5507                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5508                         /* nothing to be done ... */
5509                         goto unlock;
5510
5511                 if (mddev->pers->sync_request) {
5512                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5513                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
5514                                 /* We are adding a device or devices to an array
5515                                  * which has the bitmap stored on all devices.
5516                                  * So make sure all bitmap pages get written
5517                                  */
5518                                 bitmap_write_all(mddev->bitmap);
5519                         }
5520                         mddev->sync_thread = md_register_thread(md_do_sync,
5521                                                                 mddev,
5522                                                                 "%s_resync");
5523                         if (!mddev->sync_thread) {
5524                                 printk(KERN_ERR "%s: could not start resync"
5525                                         " thread...\n", 
5526                                         mdname(mddev));
5527                                 /* leave the spares where they are, it shouldn't hurt */
5528                                 mddev->recovery = 0;
5529                         } else
5530                                 md_wakeup_thread(mddev->sync_thread);
5531                         md_new_event(mddev);
5532                 }
5533         unlock:
5534                 mddev_unlock(mddev);
5535         }
5536 }
5537
5538 static int md_notify_reboot(struct notifier_block *this,
5539                             unsigned long code, void *x)
5540 {
5541         struct list_head *tmp;
5542         mddev_t *mddev;
5543
5544         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
5545
5546                 printk(KERN_INFO "md: stopping all md devices.\n");
5547
5548                 ITERATE_MDDEV(mddev,tmp)
5549                         if (mddev_trylock(mddev)) {
5550                                 do_md_stop (mddev, 1);
5551                                 mddev_unlock(mddev);
5552                         }
5553                 /*
5554                  * certain more exotic SCSI devices are known to be
5555                  * volatile wrt too early system reboots. While the
5556                  * right place to handle this issue is the given
5557                  * driver, we do want to have a safe RAID driver ...
5558                  */
5559                 mdelay(1000*1);
5560         }
5561         return NOTIFY_DONE;
5562 }
5563
5564 static struct notifier_block md_notifier = {
5565         .notifier_call  = md_notify_reboot,
5566         .next           = NULL,
5567         .priority       = INT_MAX, /* before any real devices */
5568 };
5569
5570 static void md_geninit(void)
5571 {
5572         struct proc_dir_entry *p;
5573
5574         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
5575
5576         p = create_proc_entry("mdstat", S_IRUGO, NULL);
5577         if (p)
5578                 p->proc_fops = &md_seq_fops;
5579 }
5580
5581 static int __init md_init(void)
5582 {
5583         if (register_blkdev(MAJOR_NR, "md"))
5584                 return -1;
5585         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
5586                 unregister_blkdev(MAJOR_NR, "md");
5587                 return -1;
5588         }
5589         blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
5590                             md_probe, NULL, NULL);
5591         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
5592                             md_probe, NULL, NULL);
5593
5594         register_reboot_notifier(&md_notifier);
5595         raid_table_header = register_sysctl_table(raid_root_table);
5596
5597         md_geninit();
5598         return (0);
5599 }
5600
5601
5602 #ifndef MODULE
5603
5604 /*
5605  * Searches all registered partitions for autorun RAID arrays
5606  * at boot time.
5607  */
5608 static dev_t detected_devices[128];
5609 static int dev_cnt;
5610
5611 void md_autodetect_dev(dev_t dev)
5612 {
5613         if (dev_cnt >= 0 && dev_cnt < 127)
5614                 detected_devices[dev_cnt++] = dev;
5615 }
5616
5617
5618 static void autostart_arrays(int part)
5619 {
5620         mdk_rdev_t *rdev;
5621         int i;
5622
5623         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
5624
5625         for (i = 0; i < dev_cnt; i++) {
5626                 dev_t dev = detected_devices[i];
5627
5628                 rdev = md_import_device(dev,0, 0);
5629                 if (IS_ERR(rdev))
5630                         continue;
5631
5632                 if (test_bit(Faulty, &rdev->flags)) {
5633                         MD_BUG();
5634                         continue;
5635                 }
5636                 list_add(&rdev->same_set, &pending_raid_disks);
5637         }
5638         dev_cnt = 0;
5639
5640         autorun_devices(part);
5641 }
5642
5643 #endif /* !MODULE */
5644
5645 static __exit void md_exit(void)
5646 {
5647         mddev_t *mddev;
5648         struct list_head *tmp;
5649
5650         blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
5651         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
5652
5653         unregister_blkdev(MAJOR_NR,"md");
5654         unregister_blkdev(mdp_major, "mdp");
5655         unregister_reboot_notifier(&md_notifier);
5656         unregister_sysctl_table(raid_table_header);
5657         remove_proc_entry("mdstat", NULL);
5658         ITERATE_MDDEV(mddev,tmp) {
5659                 struct gendisk *disk = mddev->gendisk;
5660                 if (!disk)
5661                         continue;
5662                 export_array(mddev);
5663                 del_gendisk(disk);
5664                 put_disk(disk);
5665                 mddev->gendisk = NULL;
5666                 mddev_put(mddev);
5667         }
5668 }
5669
5670 module_init(md_init)
5671 module_exit(md_exit)
5672
5673 static int get_ro(char *buffer, struct kernel_param *kp)
5674 {
5675         return sprintf(buffer, "%d", start_readonly);
5676 }
5677 static int set_ro(const char *val, struct kernel_param *kp)
5678 {
5679         char *e;
5680         int num = simple_strtoul(val, &e, 10);
5681         if (*val && (*e == '\0' || *e == '\n')) {
5682                 start_readonly = num;
5683                 return 0;
5684         }
5685         return -EINVAL;
5686 }
5687
5688 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
5689 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
5690
5691
5692 EXPORT_SYMBOL(register_md_personality);
5693 EXPORT_SYMBOL(unregister_md_personality);
5694 EXPORT_SYMBOL(md_error);
5695 EXPORT_SYMBOL(md_done_sync);
5696 EXPORT_SYMBOL(md_write_start);
5697 EXPORT_SYMBOL(md_write_end);
5698 EXPORT_SYMBOL(md_register_thread);
5699 EXPORT_SYMBOL(md_unregister_thread);
5700 EXPORT_SYMBOL(md_wakeup_thread);
5701 EXPORT_SYMBOL(md_check_recovery);
5702 MODULE_LICENSE("GPL");
5703 MODULE_ALIAS("md");
5704 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);