/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * $Id: mtdconcat.c,v 1.11 2005/11/07 11:14:20 gleixner Exp $
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
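
/*
 * (Layout note, inferred from mtd_concat_create() below: the structure and
 * the subdev pointer array are carved out of one kmalloc() of
 * SIZEOF_STRUCT_MTD_CONCAT(num_devs), and subdev is then pointed just past
 * the structure, at (concat + 1).)
 */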

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 * (The cast works because the mtd member is the first field of
 * struct mtd_concat.)
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = subdev->read(subdev, from, size, &retsize, buf);

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->write(subdev, to, size, &retsize, buf);

static int
concat_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
		size_t * retlen, u_char * buf, u_char * eccbuf,
		struct nand_oobinfo *oobsel)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			from -= subdev->size;
			continue;
		}

		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		if (subdev->read_ecc)
			err = subdev->read_ecc(subdev, from, size,
					       &retsize, buf, eccbuf, oobsel);
		else
			err = -EINVAL;

		if (eccbuf) {
			eccbuf += subdev->oobsize;
			/* in nand.c at least, eccbufs are
			   tagged with 2 (int)eccstatus'; we
			   must account for these */
			eccbuf += 2 * (sizeof (int));
		}

static int
concat_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
		 size_t * retlen, const u_char * buf, u_char * eccbuf,
		 struct nand_oobinfo *oobsel)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else if (subdev->write_ecc)
			err = subdev->write_ecc(subdev, to, size,
						&retsize, buf, eccbuf, oobsel);
		else
			err = -EINVAL;

		if (eccbuf)
			eccbuf += subdev->oobsize;

static int
concat_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
		  unsigned long count, loff_t to, size_t * retlen,
		  u_char *eccbuf, struct nand_oobinfo *oobsel)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		loff_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}
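
	/*
	 * Note: do_div() divides the 64-bit dividend in place (leaving the
	 * quotient in __to) and returns the remainder, so a non-zero result
	 * above means 'to' is not aligned to the write page size.
	 */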

	/* make a copy of vecs */
	vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;
	memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min(total_len, (size_t)(subdev->size - to));
		wsize = size;	/* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else if (eccbuf)
			err = subdev->writev_ecc(subdev, &vecs_copy[entry_low],
						 entry_high - entry_low + 1, to,
						 &retsize, eccbuf, oobsel);
		else
			err = subdev->writev(subdev, &vecs_copy[entry_low],
					     entry_high - entry_low + 1, to,
					     &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (concat->mtd.type == MTD_NANDFLASH && eccbuf)
			eccbuf += mtd->oobavail * (wsize / mtd->writesize);

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	return concat_writev_ecc(mtd, vecs, count, to, retlen, NULL, NULL);
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
		size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		if (subdev->read_oob)
			err = subdev->read_oob(subdev, from, size,
					       &retsize, buf);
		else
			err = -EINVAL;

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
		 size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else if (subdev->write_oob)
			err = subdev->write_oob(subdev, to, size, &retsize,
						buf);
		else
			err = -EINVAL;

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd->erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}
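
/*
 * concat_dev_erase() performs one synchronous erase on a single subdevice;
 * concat_erase() below reuses a single erase_info and calls it once for
 * every subdevice touched by the request, rewriting addr/len for each leg.
 */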

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	u_int32_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (instr->addr > concat->mtd.size)
		return -EINVAL;

	if (instr->len + instr->addr > concat->mtd.size)
		return -EINVAL;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * independently.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 1))
			return -EINVAL;
	}

	instr->fail_addr = 0xffffffff;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else
			break;
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != 0xffffffff)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);

	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->lock(subdev, ofs, size);

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->unlock(subdev, ofs, size);

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = subdev->suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!concat->subdev[0]->block_isbad)
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = subdev->block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!concat->subdev[0]->block_markbad)
		return 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = subdev->block_markbad(subdev, ofs);
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success (NULL on error). This function does _not_
 * register any devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   char *name)		/* name for the new device */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	u_int32_t max_erasesize, curr_erasesize;
	int num_erase_region;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kmalloc(size, GFP_KERNEL);
	if (!concat) {
		printk("memory allocation error while creating concatenated device \"%s\"\n",
		       name);
		return NULL;
	}
	memset(concat, 0, size);
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.ecctype = subdev[0]->ecctype;
	concat->mtd.eccsize = subdev[0]->eccsize;
	if (subdev[0]->read_ecc)
		concat->mtd.read_ecc = concat_read_ecc;
	if (subdev[0]->write_ecc)
		concat->mtd.write_ecc = concat_write_ecc;
	if (subdev[0]->writev)
		concat->mtd.writev = concat_writev;
	if (subdev[0]->writev_ecc)
		concat->mtd.writev_ecc = concat_writev_ecc;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;
	if (subdev[0]->block_isbad)
		concat->mtd.block_isbad = concat_block_isbad;
	if (subdev[0]->block_markbad)
		concat->mtd.block_markbad = concat_block_markbad;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}
		concat->mtd.size += subdev[i]->size;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    concat->mtd.ecctype != subdev[i]->ecctype ||
		    concat->mtd.eccsize != subdev[i]->eccsize ||
		    !concat->mtd.read_ecc != !subdev[i]->read_ecc ||
		    !concat->mtd.write_ecc != !subdev[i]->write_ecc ||
		    !concat->mtd.read_oob != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	if (concat->mtd.type == MTD_NANDFLASH)
		memcpy(&concat->mtd.oobinfo, &subdev[0]->oobinfo,
		       sizeof(struct nand_oobinfo));

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
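	/*
	 * Worked example (illustrative only): concatenating a chip with a
	 * uniform 64KiB erase size, one with 128KiB and another with 64KiB
	 * gives three erase regions and a super-device erasesize of 128KiB
	 * (the maximum), as computed by the loops below.
	 */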

	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		u_int32_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk("memory allocation error while creating erase"
			       " region list for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					erase_region_p->numblocks =
					    (position - begin) / curr_erasesize;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].erasesize !=
					    curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						erase_region_p->numblocks =
						    (position - begin) / curr_erasesize;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].numblocks *
					    curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		erase_region_p->numblocks = (position - begin) / curr_erasesize;
	}

	return &concat->mtd;
}
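
/*
 * Typical usage (an illustrative sketch only; "flash0" and "flash1" stand
 * for mtd_info pointers obtained elsewhere from the individual chip
 * drivers, and error handling is omitted):
 *
 *	struct mtd_info *parts[2] = { flash0, flash1 };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(parts, 2, "combined-flash");
 *	if (combined)
 *		add_mtd_device(combined);
 */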

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");