/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 * 	- auto unlock sectors on resume for auto locking flash on power up
 */
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/init.h>
28 #include <asm/byteorder.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/bitmap.h>
36 #include <linux/mtd/xip.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/compatmac.h>
40 #include <linux/mtd/cfi.h>
42 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
43 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
45 // debugging, turns off buffer write mode if set to 1
46 #define FORCE_WORD_WRITE 0
48 #define MANUFACTURER_INTEL 0x0089
49 #define I82802AB 0x00ad
50 #define I82802AC 0x00ac
51 #define MANUFACTURER_ST 0x0020
52 #define M50LPW080 0x002F
54 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
55 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
56 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
57 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
58 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
59 static void cfi_intelext_sync (struct mtd_info *);
60 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
61 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
63 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
65 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
66 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
67 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
68 struct otp_info *, size_t);
69 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
70 struct otp_info *, size_t);
72 static int cfi_intelext_suspend (struct mtd_info *);
73 static void cfi_intelext_resume (struct mtd_info *);
74 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
76 static void cfi_intelext_destroy(struct mtd_info *);
78 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
80 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
81 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
83 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
84 size_t *retlen, u_char **mtdbuf);
85 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
88 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
89 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
95 * *********** SETUP AND PROBE BITS ***********
98 static struct mtd_chip_driver cfi_intelext_chipdrv = {
99 .probe = NULL, /* Not usable directly */
100 .destroy = cfi_intelext_destroy,
101 .name = "cfi_cmdset_0001",
102 .module = THIS_MODULE
105 /* #define DEBUG_LOCK_BITS */
106 /* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
/* Debug helper: dump every capability bit advertised by the Intel/Sharp
 * extended query table.  Compiled only when DEBUG_CFI_FEATURES is set.
 * (Restored the function braces, the 'int i;' declaration, the loop
 * braces and the trailing #endif lost in extraction.) */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* BUG FIX: cmdset_priv holds the Intel/Sharp extended query table
	 * here, not the AMD one -- the original declared this as
	 * 'struct cfi_pri_amdstd *', so SuspendCmdSupport was read
	 * through the wrong structure layout. */
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	/* Bit 0 of SuspendCmdSupport == "program allowed during erase
	 * suspend"; clearing it disables erase-suspend-on-write. */
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
/* Build-time opt-out: clear the "Suspend Program" capability
 * (FeatureSupport bit 2, value 4) so the driver never attempts to
 * suspend an in-progress write.
 * (Restored the function braces and trailing #endif lost in extraction.) */
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
187 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
189 struct map_info *map = mtd->priv;
190 struct cfi_private *cfi = map->fldrv_priv;
192 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
193 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
196 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
198 struct map_info *map = mtd->priv;
199 struct cfi_private *cfi = map->fldrv_priv;
201 /* Note this is done after the region info is endian swapped */
202 cfi->cfiq->EraseRegionInfo[1] =
203 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
206 static void fixup_use_point(struct mtd_info *mtd, void *param)
208 struct map_info *map = mtd->priv;
209 if (!mtd->point && map_is_linear(map)) {
210 mtd->point = cfi_intelext_point;
211 mtd->unpoint = cfi_intelext_unpoint;
215 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
217 struct map_info *map = mtd->priv;
218 struct cfi_private *cfi = map->fldrv_priv;
219 if (cfi->cfiq->BufWriteTimeoutTyp) {
220 printk(KERN_INFO "Using buffer write method\n" );
221 mtd->write = cfi_intelext_write_buffers;
222 mtd->writev = cfi_intelext_writev;
227 * Some chips power-up with all sectors locked by default.
229 static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
231 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
232 mtd->flags |= MTD_STUPID_LOCK;
235 static struct cfi_fixup cfi_fixup_table[] = {
236 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
237 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
239 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
240 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
242 #if !FORCE_WORD_WRITE
243 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
245 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
246 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
247 { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
251 static struct cfi_fixup jedec_fixup_table[] = {
252 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
253 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
254 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
257 static struct cfi_fixup fixup_table[] = {
258 /* The CFI vendor ids and the JEDEC vendor IDs appear
259 * to be common. It is like the devices id's are as
260 * well. This table is to pick all cases where
261 * we know that is the case.
263 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
/*
 * Read and validate the Intel/Sharp extended query table at 'adr' in the
 * chip's query space.  Returns a kmalloc()ed table (caller owns it) with
 * the fixed little-endian fields converted to host order.
 * NOTE(review): this excerpt lost several lines in extraction -- the
 * retry label and its "goto need_more" targets, the NULL and bad-version
 * error exits, the declarations of 'nb_parts' and 'i', and a number of
 * closing braces are missing below.
 */
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

	/* cfi_read_pri() allocates and fills the table from the chip */
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");

	/* Only extended query versions 1.0 through 1.4 are understood */
	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	/* Versions >= 1.3 append a variable-sized 'extra' tail whose true
	 * size has to be discovered incrementally; each size check below
	 * originally bailed out to re-read a bigger table when the tail
	 * ran past what was fetched so far. */
	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		if (extp_size < sizeof(*extp) + extra_size)
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		if (extp_size < sizeof(*extp) + extra_size)
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')

		/* walk the per-region descriptors (variable length each) */
		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		/* Table is bigger than what we have: grow and re-read */
		if (extp_size < sizeof(*extp) + extra_size) {
			extp_size = sizeof(*extp) + extra_size;
			/* sanity cap: refuse absurdly large tables */
			if (extp_size > 4096) {
				"%s: cfi_pri_intelext is too fat\n",
/*
 * Entry point for the Intel/Sharp (0x0001) command set.  Allocates the
 * mtd_info, installs the default operation vectors, reads the extended
 * query table when probing in true CFI mode, applies the fixup tables
 * and derives per-chip timing defaults, then finishes via
 * cfi_intelext_setup().  Returns NULL on failure (error exits lost in
 * this excerpt).
 * NOTE(review): truncated in extraction -- the 'int i;' declaration,
 * the !mtd and !extp error returns, 'mtd->priv = map;', a couple of
 * 'else' keywords and several closing braces are missing below.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	/* (allocation-failure path) */
	printk(KERN_ERR "Failed to allocate memory for MTD device\n");
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase = cfi_intelext_erase_varsize;
	mtd->read = cfi_intelext_read;
	mtd->write = cfi_intelext_write_words;
	mtd->sync = cfi_intelext_sync;
	mtd->lock = cfi_intelext_lock;
	mtd->unlock = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume = cfi_intelext_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* CFI timeouts are stored as log2(ms); derive usecs per chip */
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		/* (else branch: fallback default) */
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		/* (else branch: fallback default) */
			cfi->chips[i].erase_time = 2000000;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
438 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
439 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
440 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
441 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
442 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
/*
 * Second-stage setup: compute device size, build the per-chip erase
 * region map (with lock bitmaps), install OTP hooks and run the
 * hardware-partition fixup, then register the reboot notifier.
 * NOTE(review): truncated in extraction -- the 'int i, j;' declaration,
 * the 'setup_err:' error path (to which the two kfree() calls at the
 * bottom belong), the #endif for CONFIG_MTD_OTP, the 'return mtd;'
 * success exit and several closing braces are missing below.
 */
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	/* total size of one interleaved set of chips */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo: high 16 bits encode block size / 256,
		 * low 16 bits encode block count - 1 */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize is the largest region's block size */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			/* one lock bit per block, rounded up to bytes */
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		offset += (ersize * ernum);

	if (offset != devsize) {
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);

	/* (setup_err error path cleanup) */
	kfree(mtd->eraseregions);
	kfree(cfi->cmdset_priv);
/*
 * If the chip advertises multiple hardware partitions (extended query
 * >= 1.3 with FeatureSupport bit 9, "simultaneous operations"), rebuild
 * cfi_private so each partition gets its own flchip, all partitions of
 * one physical chip sharing a flchip_shared arbiter.  On success
 * *pcfi is replaced with the new structure.
 * NOTE(review): truncated in extraction -- 'struct flchip *chip;',
 * 'numparts = 0;', the offs advance for CFI 1.4, the -EINVAL/-ENOMEM
 * error exits, allocation NULL checks, 'chip++;', '*pcfi = newcfi;',
 * the final 'return 0;' and several closing braces are missing below.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip. This means completely recreating
	 * a new cfi_private structure right here which is a blatent code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough. --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')

		/* Number of hardware partitions */
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				sizeof(struct cfi_intelext_blockinfo);

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			/* program regions forbid single-bit rewrites */
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		/* a partition smaller than an erase block is nonsense */
		if ((1 << partshift) < mtd->erasesize) {
			"%s: bad number of hw partitions (%d)\n",
			__FUNCTION__, numparts);

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		/* clone each physical chip into one flchip per partition */
		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
		       "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
642 * *********** CHIP ACCESS FUNCTIONS ***********
/*
 * Arbitrate for access to a (possibly partitioned) chip before starting
 * the operation 'mode' at address 'adr'.  Entered and exited with
 * chip->mutex held; the lock is dropped and retaken around waits.
 * Returns 0 when the caller may proceed, negative errno on timeout
 * (those return statements were lost in this excerpt).
 * NOTE(review): truncated in extraction -- the 'unsigned long timeo;'
 * declaration, the resettime:/retry:/sleep: labels, most 'case' labels
 * of the state switch, 'break'/'return'/'goto' statements, cpu_relax()
 * and schedule() calls and many closing braces are missing below.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	/* one-second deadline for the chip to become ready */
	timeo = jiffies + HZ;

	/* global write/erase engines must be owned on partitioned chips */
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition. So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it. If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep. In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			/* lock-ordering dance: recurse on the contender with
			 * our own chip lock dropped */
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			/* (error path releases contender and returns ret) */
			spin_unlock(contender->mutex);
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);

		/* We now own the engine(s) */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);

	switch (chip->state) {
	/* (case FL_STATUS and the polling loop head were lost here) */
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))

		/* At this point we're fine with write operations
		   in other partitions as they don't conflict. */
		if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))

		if (time_after(jiffies, timeo)) {
			printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
			       map->name, status.x[0]);
		spin_unlock(chip->mutex);
		spin_lock(chip->mutex);
		/* Someone else might have been playing with it. */

	/* (case FL_ERASING guard: only suspend when the chip supports it
	 * and the new mode is read/point, or write with program-after-
	 * erase-suspend capability) */
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))

		/* Erase suspend (0xB0) */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode. Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))

		if (time_after(jiffies, timeo)) {
			/* Urgh. Resume and pretend we weren't here. */
			map_write(map, CMD(0xd0), adr);
			/* Make sure we're in 'read status' mode if it had finished */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_ERASING;
			chip->oldstate = FL_READY;
			printk(KERN_ERR "%s: Chip not ready after erase "
			       "suspended: status = 0x%lx\n", map->name, status.x[0]);

		spin_unlock(chip->mutex);
		spin_lock(chip->mutex);
		/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
		   So we can just loop here. */
		chip->state = FL_STATUS;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
		chip->oldstate = chip->state;
		chip->state = FL_READY;

	/* (default/sleep path) */
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)

		/* sleep on the chip's waitqueue, then start over */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
/*
 * Release a chip previously acquired with get_chip(): hand shared-engine
 * ownership back (possibly to the partition we borrowed it from) and
 * resume any operation that was suspended on our behalf.  Called with
 * chip->mutex held.
 * NOTE(review): truncated in extraction -- the 'if (chip->priv)' guard,
 * wake_up()/'return' statements, the FL_ERASING and default case labels,
 * 'break' statements, DISABLE_VPP and several closing braces are
 * missing below.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
	struct cfi_private *cfi = map->fldrv_priv;

	struct flchip_shared *shared = chip->priv;
	spin_lock(&shared->lock);
	if (shared->writing == chip && chip->oldstate == FL_READY) {
		/* We own the ability to write, but we're done */
		shared->writing = shared->erasing;
		if (shared->writing && shared->writing != chip) {
			/* give back ownership to who we loaned it from */
			struct flchip *loaner = shared->writing;
			spin_lock(loaner->mutex);
			spin_unlock(&shared->lock);
			spin_unlock(chip->mutex);
			put_chip(map, loaner, loaner->start);
			spin_lock(chip->mutex);
			spin_unlock(loaner->mutex);

		shared->erasing = NULL;
		shared->writing = NULL;
	} else if (shared->erasing == chip && shared->writing != chip) {
		/*
		 * We own the ability to erase without the ability
		 * to write, which means the erase was suspended
		 * and some other partition is currently writing.
		 * Don't let the switch below mess things up since
		 * we don't have ownership to resume anything.
		 */
		spin_unlock(&shared->lock);

	spin_unlock(&shared->lock);

	switch(chip->oldstate) {
	/* (case FL_ERASING: resume a suspended erase) */
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   */
		map_write(map, CMD(0xd0), adr);		/* erase resume */
		map_write(map, CMD(0x70), adr);		/* read status */
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;

	/* We should really make set_vpp() count, rather than doing this */

	/* (default case: should never happen) */
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
887 #ifdef CONFIG_MTD_XIP
890 * No interrupt what so ever can be serviced while the flash isn't in array
891 * mode. This is ensured by the xip_disable() and xip_enable() functions
892 * enclosing any code path where the flash is known not to be in array mode.
893 * And within a XIP disabled code path, only functions marked with __xipram
894 * may be called and nothing else (it's a good thing to inspect generated
895 * assembly to make sure inline functions were actually inlined and that gcc
896 * didn't emit calls to its own support functions). Also configuring MTD CFI
897 * support to a single buswidth and a single interleave is also recommended.
/* Enter a no-XIP section: after this, nothing may execute from this
 * flash until xip_enable() is called.  Per the block comment above,
 * this is presumably where interrupts get disabled -- the tail of the
 * function (the 'adr' parameter line, opening brace, the IRQ-disable
 * call and closing brace) was lost in extraction; TODO confirm against
 * the pristine source. */
static void xip_disable(struct map_info *map, struct flchip *chip,
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
/* Leave a no-XIP section: put the chip back into array (read) mode so
 * code can execute from it again, then (per the block comment above)
 * presumably re-enable interrupts -- the 'adr' parameter line, opening
 * brace, the closing brace of the 'if' and the function tail were lost
 * in extraction; TODO confirm against the pristine source. */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		/* 0xFF == read array command */
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	(void) map_read(map, adr);
922 * When a delay is required for the flash operation to complete, the
923 * xip_wait_for_operation() function is polling for both the given timeout
924 * and pending (but still masked) hardware interrupts. Whenever there is an
925 * interrupt pending then the flash erase or write operation is suspended,
926 * array mode restored and interrupts unmasked. Task scheduling might also
927 * happen at that point. The CPU eventually returns from the interrupt or
928 * the call to schedule() and the suspended flash operation is resumed for
929 * the remaining of the delay period.
931 * Warning: this function _will_ fool interrupt latency tracing tools.
/*
 * XIP-aware wait: poll the status register for completion while also
 * watching for pending (masked) interrupts.  When an interrupt is
 * pending and the chip supports suspend, the erase/write is suspended,
 * array mode restored and interrupts briefly unmasked so the system can
 * run, then the operation is resumed.  Returns 0 on completion or
 * -ETIME when 8x the expected operation time elapsed.
 * NOTE(review): truncated in extraction -- the zero-timeout fallback,
 * the outer do {} loop head, inner 'do {', 'break'/'return' statements,
 * local_irq_enable()/cond_resched() between the nop fences, the
 * '&& done < usec);' loop terminator and several closing braces are
 * missing below.
 */
static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time )
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	/* total allowance: 8 times the typical operation time */
	usec = chip_op_time * 8;

	/* suspend only when the chip advertises erase-suspend (bit 1) or
	 * program-suspend (bit 2), and no other op is already suspended
	 * on an interleaved set */
	if (xip_irqpending() && cfip &&
	    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
	     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
	    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
		/*
		 * Let's suspend the erase or write operation when
		 * supported. Note that we currently don't try to
		 * suspend interleaved chips if there is already
		 * another operation suspended (imagine what happens
		 * when one chip was already done with the current
		 * operation while another chip suspended it, then
		 * we resume the whole thing at once). Yes, it
		 */
		map_write(map, CMD(0xb0), adr);		/* suspend */
		map_write(map, CMD(0x70), adr);		/* read status */
		suspended = xip_currtime();
		if (xip_elapsed_since(suspended) > 100000) {
			/*
			 * The chip doesn't want to suspend
			 * after waiting for 100 msecs.
			 * This is a critical error but there
			 * is not much we can do here.
			 */
		status = map_read(map, adr);
		} while (!map_word_andequal(map, status, OK, OK));

		/* Suspend succeeded */
		oldstate = chip->state;
		if (oldstate == FL_ERASING) {
			/* status bit 6 set == erase actually suspended */
			if (!map_word_bitsset(map, status, CMD(0x40)))
			newstate = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
		/* (else: write path, status bit 2) */
			if (!map_word_bitsset(map, status, CMD(0x04)))
			newstate = FL_XIP_WHILE_WRITING;
			chip->write_suspended = 1;
		chip->state = newstate;
		map_write(map, CMD(0xff), adr);		/* back to array mode */
		(void) map_read(map, adr);
		/* pipeline fences around re-enabling interrupts */
		asm volatile (".rep 8; nop; .endr");
		spin_unlock(chip->mutex);
		asm volatile (".rep 8; nop; .endr");

		/*
		 * We're back. However someone else might have
		 * decided to go write to the chip if we are in
		 * a suspended erase state. If so let's wait
		 */
		spin_lock(chip->mutex);
		while (chip->state != newstate) {
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		/* Disallow XIP again */
		local_irq_disable();

		/* Resume the write or erase operation */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->state = oldstate;
		start = xip_currtime();
	} else if (usec >= 1000000/HZ) {
		/*
		 * Try to save on CPU power when waiting delay
		 * is at least a system timer tick period.
		 * No need to be extremely accurate here.
		 */
	status = map_read(map, adr);
	done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)

	return (done >= usec) ? -ETIME : 0;
/*
 * Glue macros selecting between the XIP and non-XIP wait paths.
 * NOTE(review): the surrounding #if/#else/#endif (CONFIG_MTD_XIP)
 * lines appear to have been dropped by extraction — the first pair of
 * definitions is the XIP variant, the second group the non-XIP variant;
 * confirm against the original preprocessor conditionals.
 */
1046 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1047 * the flash is actively programming or erasing since we have to poll for
1048 * the operation to complete anyway. We can't do that in a generic way with
1049 * a XIP setup so do it before the actual flash operation in this case
1050 * and stub it out from INVAL_CACHE_AND_WAIT.
/* XIP build: invalidate the cache up front, then busy-wait in XIP-safe code */
1052 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1053 INVALIDATE_CACHED_RANGE(map, from, size)
1055 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
1056 xip_wait_for_operation(map, chip, cmd_adr, usec)
/* Non-XIP build: xip_{dis,en}able are no-ops; cache invalidation happens
 * inside inval_cache_and_wait_for_operation() while the chip is busy. */
1060 #define xip_disable(map, chip, adr)
1061 #define xip_enable(map, chip, adr)
1062 #define XIP_INVAL_CACHED_RANGE(x...)
1063 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
/*
 * Wait for an in-progress program/erase on @chip to complete while the
 * operation runs, invalidating the CPU cache over the affected range so
 * subsequent array reads see fresh data.  Polls the status register
 * (SR.7, CMD(0x80)) with the chip mutex dropped between polls, sleeping
 * for longer expected delays, and tolerates the operation being
 * suspended/resumed by another thread (waits on chip->wq until the chip
 * returns to the state we started in).
 *
 * Returns 0 on completion; presumably -ETIME on timeout — TODO confirm,
 * the timeout/return lines are not visible in this extraction.
 *
 * NOTE(review): original line numbers are non-contiguous here; loop
 * braces and the error-return path were dropped by extraction.
 */
1065 static int inval_cache_and_wait_for_operation(
1066 struct map_info *map, struct flchip *chip,
1067 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1068 unsigned int chip_op_time)
1070 struct cfi_private *cfi = map->fldrv_priv;
1071 map_word status, status_OK = CMD(0x80);
1072 int chip_state = chip->state;
1073 unsigned int timeo, sleep_time;
/* Drop the lock while invalidating: the flash is busy, nobody can use it */
1075 spin_unlock(chip->mutex);
1077 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1078 spin_lock(chip->mutex);
1080 /* set our timeout to 8 times the expected delay */
1081 timeo = chip_op_time * 8;
1084 sleep_time = chip_op_time / 2;
/* Poll the status register; SR.7 set means ready */
1087 status = map_read(map, cmd_adr);
1088 if (map_word_andequal(map, status, status_OK, status_OK))
/* Re-issue Read Status Register so later reads stay in status mode */
1092 map_write(map, CMD(0x70), cmd_adr);
1093 chip->state = FL_STATUS;
1097 /* OK Still waiting. Drop the lock, wait a while and retry. */
1098 spin_unlock(chip->mutex);
1099 if (sleep_time >= 1000000/HZ) {
1101 * Half of the normal delay still remaining
1102 * can be performed with a sleeping delay instead
1105 msleep(sleep_time/1000);
1106 timeo -= sleep_time;
1107 sleep_time = 1000000/HZ;
1113 spin_lock(chip->mutex);
/* Operation may have been suspended by someone else; wait until it
 * is resumed and the chip is back in our original state */
1115 while (chip->state != chip_state) {
1116 /* Someone's suspended the operation: sleep */
1117 DECLARE_WAITQUEUE(wait, current);
1118 set_current_state(TASK_UNINTERRUPTIBLE);
1119 add_wait_queue(&chip->wq, &wait);
1120 spin_unlock(chip->mutex);
1122 remove_wait_queue(&chip->wq, &wait);
1123 spin_lock(chip->mutex);
1127 /* Done and happy. */
1128 chip->state = FL_STATUS;
/* Convenience wrapper: wait with no cache range to invalidate */
1134 #define WAIT_TIMEOUT(map, chip, adr, udelay) \
1135 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
/*
 * Put one chip into FL_POINT (array-read) mode for mtd->point().
 * Issues Read Array (0xff) if the chip is not already readable and
 * bumps ref_point_counter so nested point() calls are tracked.
 */
1138 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1140 unsigned long cmd_addr;
1141 struct cfi_private *cfi = map->fldrv_priv;
1146 /* Ensure cmd read/writes are aligned. */
1147 cmd_addr = adr & ~(map_bankwidth(map)-1);
1149 spin_lock(chip->mutex);
1151 ret = get_chip(map, chip, cmd_addr, FL_POINT);
/* Switch to array mode only if not already in a readable state */
1154 if (chip->state != FL_POINT && chip->state != FL_READY)
1155 map_write(map, CMD(0xff), cmd_addr);
1157 chip->state = FL_POINT;
/* Refcount; decremented by cfi_intelext_unpoint() */
1158 chip->ref_point_counter++;
1160 spin_unlock(chip->mutex);
/*
 * mtd->point() implementation: hand back a direct pointer into the
 * memory-mapped flash for [from, from+len) and put every chip the range
 * touches into FL_POINT state.  Fails if the map has no virtual mapping,
 * the range exceeds the device, or the chips are not virtually contiguous.
 */
1165 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1167 struct map_info *map = mtd->priv;
1168 struct cfi_private *cfi = map->fldrv_priv;
1169 unsigned long ofs, last_end = 0;
1173 if (!map->virt || (from + len > mtd->size))
1176 /* Now lock the chip(s) to POINT state */
1178 /* ofs: offset within the first chip that the first read should start */
1179 chipnum = (from >> cfi->chipshift);
1180 ofs = from - (chipnum << cfi->chipshift);
/* Direct pointer into the mapped flash window */
1182 *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
1186 unsigned long thislen;
1188 if (chipnum >= cfi->numchips)
1191 /* We cannot point across chips that are virtually disjoint */
1193 last_end = cfi->chips[chipnum].start;
1194 else if (cfi->chips[chipnum].start != last_end)
/* Clamp this chunk to the end of the current chip */
1197 if ((len + ofs -1) >> cfi->chipshift)
1198 thislen = (1<<cfi->chipshift) - ofs;
1202 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1210 last_end += 1 << cfi->chipshift;
/*
 * mtd->unpoint() implementation: undo cfi_intelext_point() over
 * [from, from+len).  Drops each chip's ref_point_counter and returns the
 * chip to FL_READY once the count reaches zero; warns if called on a
 * region that was never pointed.
 */
1216 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1218 struct map_info *map = mtd->priv;
1219 struct cfi_private *cfi = map->fldrv_priv;
1223 /* Now unlock the chip(s) POINT state */
1225 /* ofs: offset within the first chip that the first read should start */
1226 chipnum = (from >> cfi->chipshift);
1227 ofs = from - (chipnum << cfi->chipshift);
1230 unsigned long thislen;
1231 struct flchip *chip;
1233 chip = &cfi->chips[chipnum];
1234 if (chipnum >= cfi->numchips)
/* Clamp this chunk to the end of the current chip */
1237 if ((len + ofs -1) >> cfi->chipshift)
1238 thislen = (1<<cfi->chipshift) - ofs;
1242 spin_lock(chip->mutex);
1243 if (chip->state == FL_POINT) {
1244 chip->ref_point_counter--;
1245 if(chip->ref_point_counter == 0)
1246 chip->state = FL_READY;
1248 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1250 put_chip(map, chip, chip->start);
1251 spin_unlock(chip->mutex);
/*
 * Read @len bytes at @adr from a single chip into @buf.  Puts the chip
 * into array-read mode (0xff) if necessary, copies via map_copy_from(),
 * then releases the chip.
 */
1259 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1261 unsigned long cmd_addr;
1262 struct cfi_private *cfi = map->fldrv_priv;
1267 /* Ensure cmd read/writes are aligned. */
1268 cmd_addr = adr & ~(map_bankwidth(map)-1);
1270 spin_lock(chip->mutex);
1271 ret = get_chip(map, chip, cmd_addr, FL_READY);
1273 spin_unlock(chip->mutex);
/* Switch to Read Array mode unless already readable */
1277 if (chip->state != FL_POINT && chip->state != FL_READY) {
1278 map_write(map, CMD(0xff), cmd_addr);
1280 chip->state = FL_READY;
1283 map_copy_from(map, buf, adr, len);
1285 put_chip(map, chip, cmd_addr);
1287 spin_unlock(chip->mutex);
/*
 * mtd->read() implementation: iterate over the chips covering
 * [from, from+len), delegating each per-chip chunk to do_read_onechip().
 */
1291 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1293 struct map_info *map = mtd->priv;
1294 struct cfi_private *cfi = map->fldrv_priv;
1299 /* ofs: offset within the first chip that the first read should start */
1300 chipnum = (from >> cfi->chipshift);
1301 ofs = from - (chipnum << cfi->chipshift);
1306 unsigned long thislen;
1308 if (chipnum >= cfi->numchips)
/* Clamp this chunk to the end of the current chip */
1311 if ((len + ofs -1) >> cfi->chipshift)
1312 thislen = (1<<cfi->chipshift) - ofs;
1316 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/*
 * Program a single bus-width word @datum at @adr.
 * @mode selects FL_WRITING (normal array program, command 0x40/0x41
 * depending on the primary vendor ID) or FL_OTP_WRITE (protection
 * register program, command 0xc0).  Waits for completion via
 * INVAL_CACHE_AND_WAIT and decodes SR error bits (0x1a: lock/VPP/
 * program failure).
 *
 * NOTE(review): several lines (error-path returns, closing braces)
 * were dropped by extraction; original line numbers are non-contiguous.
 */
1330 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1331 unsigned long adr, map_word datum, int mode)
1333 struct cfi_private *cfi = map->fldrv_priv;
1334 map_word status, write_cmd;
/* 0x41 for P_ID 0x0200 (Sharp-style), 0x40 otherwise */
1341 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
/* OTP (protection register) program command */
1344 write_cmd = CMD(0xc0);
1350 spin_lock(chip->mutex);
1351 ret = get_chip(map, chip, adr, mode);
1353 spin_unlock(chip->mutex);
1357 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1359 xip_disable(map, chip, adr);
/* Two-cycle program: command, then data */
1360 map_write(map, write_cmd, adr);
1361 map_write(map, datum, adr);
1364 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1365 adr, map_bankwidth(map),
1366 chip->word_write_time);
1368 xip_enable(map, chip, adr);
1369 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1373 /* check for errors */
1374 status = map_read(map, adr);
/* SR.1 (lock), SR.3 (VPP), SR.4 (program) error bits */
1375 if (map_word_bitsset(map, status, CMD(0x1a))) {
1376 unsigned long chipstatus = MERGESTATUS(status);
/* Clear Status Register, then back to status mode */
1379 map_write(map, CMD(0x50), adr);
1380 map_write(map, CMD(0x70), adr);
1381 xip_enable(map, chip, adr);
1383 if (chipstatus & 0x02) {
1385 } else if (chipstatus & 0x08) {
1386 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1389 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1396 xip_enable(map, chip, adr);
1397 out: put_chip(map, chip, adr);
1398 spin_unlock(chip->mutex);
/*
 * mtd->write() fallback (word-at-a-time): handles an unaligned head,
 * a run of full bus-width words, and an unaligned tail, padding partial
 * words with 0xff (map_word_ff) so untouched bytes are not reprogrammed.
 * Crosses chip boundaries as needed.
 */
1403 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1405 struct map_info *map = mtd->priv;
1406 struct cfi_private *cfi = map->fldrv_priv;
1415 chipnum = to >> cfi->chipshift;
1416 ofs = to - (chipnum << cfi->chipshift);
1418 /* If it's not bus-aligned, do the first byte write */
1419 if (ofs & (map_bankwidth(map)-1)) {
1420 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1421 int gap = ofs - bus_ofs;
1425 n = min_t(int, len, map_bankwidth(map)-gap);
/* Pad with 0xff so bytes outside [gap, gap+n) stay erased */
1426 datum = map_word_ff(map);
1427 datum = map_word_load_partial(map, datum, buf, gap, n);
1429 ret = do_write_oneword(map, &cfi->chips[chipnum],
1430 bus_ofs, datum, FL_WRITING);
/* Advance to the next chip if we crossed a chip boundary */
1439 if (ofs >> cfi->chipshift) {
1442 if (chipnum == cfi->numchips)
/* Main loop: full aligned words */
1447 while(len >= map_bankwidth(map)) {
1448 map_word datum = map_word_load(map, buf);
1450 ret = do_write_oneword(map, &cfi->chips[chipnum],
1451 ofs, datum, FL_WRITING);
1455 ofs += map_bankwidth(map);
1456 buf += map_bankwidth(map);
1457 (*retlen) += map_bankwidth(map);
1458 len -= map_bankwidth(map);
1460 if (ofs >> cfi->chipshift) {
1463 if (chipnum == cfi->numchips)
/* Trailing partial word, again 0xff-padded */
1468 if (len & (map_bankwidth(map)-1)) {
1471 datum = map_word_ff(map);
1472 datum = map_word_load_partial(map, datum, buf, 0, len);
1474 ret = do_write_oneword(map, &cfi->chips[chipnum],
1475 ofs, datum, FL_WRITING);
/*
 * Program up to one write-buffer's worth of data (wbufsize bytes,
 * aligned to a write-buffer boundary) gathered from the kvec iterator
 * (*pvec, *pvec_seek).  Sequence: Write-to-Buffer (0xe8/0xe9), word
 * count, data words (0xff-padded at the edges), Confirm (0xd0), then
 * wait and decode SR error bits.  Advances *pvec/*pvec_seek past the
 * data consumed.
 *
 * NOTE(review): loop braces and several error-path lines were dropped
 * by extraction; original line numbers are non-contiguous.
 */
1486 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1487 unsigned long adr, const struct kvec **pvec,
1488 unsigned long *pvec_seek, int len)
1490 struct cfi_private *cfi = map->fldrv_priv;
1491 map_word status, write_cmd, datum;
1492 unsigned long cmd_adr;
1493 int ret, wbufsize, word_gap, words;
1494 const struct kvec *vec;
1495 unsigned long vec_seek;
1497 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
/* Commands go to the write-buffer-aligned base address */
1499 cmd_adr = adr & ~(wbufsize-1);
1501 /* Let's determine this according to the interleave only once */
1502 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1504 spin_lock(chip->mutex);
1505 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1507 spin_unlock(chip->mutex);
1511 XIP_INVAL_CACHED_RANGE(map, adr, len);
1513 xip_disable(map, chip, cmd_adr);
1515 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1516 [...], the device will not accept any more Write to Buffer commands".
1517 So we must check here and reset those bits if they're set. Otherwise
1518 we're just pissing in the wind */
1519 if (chip->state != FL_STATUS) {
1520 map_write(map, CMD(0x70), cmd_adr);
1521 chip->state = FL_STATUS;
1523 status = map_read(map, cmd_adr);
1524 if (map_word_bitsset(map, status, CMD(0x30))) {
1525 xip_enable(map, chip, cmd_adr);
1526 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1527 xip_disable(map, chip, cmd_adr);
/* Clear Status Register, then re-read status */
1528 map_write(map, CMD(0x50), cmd_adr);
1529 map_write(map, CMD(0x70), cmd_adr);
1532 chip->state = FL_WRITING_TO_BUFFER;
1533 map_write(map, write_cmd, cmd_adr);
/* Chip answers the e8/e9 command with XSR.7 "buffer available" */
1534 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1536 /* Argh. Not ready for write to buffer */
1537 map_word Xstatus = map_read(map, cmd_adr);
1538 map_write(map, CMD(0x70), cmd_adr);
1539 chip->state = FL_STATUS;
1540 status = map_read(map, cmd_adr);
1541 map_write(map, CMD(0x50), cmd_adr);
1542 map_write(map, CMD(0x70), cmd_adr);
1543 xip_enable(map, chip, cmd_adr);
1544 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1545 map->name, Xstatus.x[0], status.x[0]);
1549 /* Figure out the number of words to write */
/* word_gap = bytes of 0xff padding before adr within its bus word */
1550 word_gap = (-adr & (map_bankwidth(map)-1));
1551 words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1555 word_gap = map_bankwidth(map) - word_gap;
1557 datum = map_word_ff(map);
1560 /* Write length of data to come */
/* Chip expects (word count - 1); presumably adjusted on a dropped line
 * — TODO confirm against the full source */
1561 map_write(map, CMD(words), cmd_adr );
1565 vec_seek = *pvec_seek;
/* Gather loop: fill each bus word from the current kvec */
1567 int n = map_bankwidth(map) - word_gap;
1568 if (n > vec->iov_len - vec_seek)
1569 n = vec->iov_len - vec_seek;
1573 if (!word_gap && len < map_bankwidth(map))
1574 datum = map_word_ff(map);
1576 datum = map_word_load_partial(map, datum,
1577 vec->iov_base + vec_seek,
/* Flush the assembled word once full (or input exhausted) */
1582 if (!len || word_gap == map_bankwidth(map)) {
1583 map_write(map, datum, adr);
1584 adr += map_bankwidth(map);
/* Move on to the next kvec when this one is consumed */
1589 if (vec_seek == vec->iov_len) {
1595 *pvec_seek = vec_seek;
/* Confirm: start the actual buffer-to-flash program */
1598 map_write(map, CMD(0xd0), cmd_adr);
1599 chip->state = FL_WRITING;
1601 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1603 chip->buffer_write_time);
1605 map_write(map, CMD(0x70), cmd_adr);
1606 chip->state = FL_STATUS;
1607 xip_enable(map, chip, cmd_adr);
1608 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1612 /* check for errors */
1613 status = map_read(map, cmd_adr);
/* SR.1 (lock), SR.3 (VPP), SR.4 (program) error bits */
1614 if (map_word_bitsset(map, status, CMD(0x1a))) {
1615 unsigned long chipstatus = MERGESTATUS(status);
1618 map_write(map, CMD(0x50), cmd_adr);
1619 map_write(map, CMD(0x70), cmd_adr);
1620 xip_enable(map, chip, cmd_adr);
1622 if (chipstatus & 0x02) {
1624 } else if (chipstatus & 0x08) {
1625 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1628 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1635 xip_enable(map, chip, cmd_adr);
1636 out: put_chip(map, chip, cmd_adr);
1637 spin_unlock(chip->mutex);
/*
 * mtd->writev() implementation: walk the kvec array, slicing the total
 * write into chunks that never cross a write-buffer boundary, and feed
 * each chunk to do_write_buffer().  Crosses chip boundaries as needed.
 */
1641 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1642 unsigned long count, loff_t to, size_t *retlen)
1644 struct map_info *map = mtd->priv;
1645 struct cfi_private *cfi = map->fldrv_priv;
1646 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1649 unsigned long ofs, vec_seek, i;
/* Total byte count across all iovecs */
1652 for (i = 0; i < count; i++)
1653 len += vecs[i].iov_len;
1659 chipnum = to >> cfi->chipshift;
1660 ofs = to - (chipnum << cfi->chipshift);
1664 /* We must not cross write block boundaries */
1665 int size = wbufsize - (ofs & (wbufsize-1));
1669 ret = do_write_buffer(map, &cfi->chips[chipnum],
1670 ofs, &vecs, &vec_seek, size);
/* Advance to the next chip if we crossed a chip boundary */
1678 if (ofs >> cfi->chipshift) {
1681 if (chipnum == cfi->numchips)
1685 /* Be nice and reschedule with the chip in a usable state for other
/*
 * mtd->write() via the buffered path: wrap the flat buffer in a
 * single-element kvec and delegate to cfi_intelext_writev().
 */
1694 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1695 size_t len, size_t *retlen, const u_char *buf)
1699 vec.iov_base = (void *) buf;
1702 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
/*
 * Erase one block at @adr: Clear Status (0x50), Block Erase (0x20) +
 * Confirm (0xD0), wait for completion, then decode SR error bits
 * (0x3a: command sequence / lock / VPP / erase failure).  An erase
 * failure (SR.5) is retried a bounded number of times.
 *
 * NOTE(review): error-path returns and loop braces were dropped by
 * extraction; original line numbers are non-contiguous.
 */
1705 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1706 unsigned long adr, int len, void *thunk)
1708 struct cfi_private *cfi = map->fldrv_priv;
1716 spin_lock(chip->mutex);
1717 ret = get_chip(map, chip, adr, FL_ERASING);
1719 spin_unlock(chip->mutex);
1723 XIP_INVAL_CACHED_RANGE(map, adr, len);
1725 xip_disable(map, chip, adr);
1727 /* Clear the status register first */
1728 map_write(map, CMD(0x50), adr);
/* Block Erase setup + confirm */
1731 map_write(map, CMD(0x20), adr);
1732 map_write(map, CMD(0xD0), adr);
1733 chip->state = FL_ERASING;
1734 chip->erase_suspended = 0;
1736 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1740 map_write(map, CMD(0x70), adr);
1741 chip->state = FL_STATUS;
1742 xip_enable(map, chip, adr);
1743 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1747 /* We've broken this before. It doesn't hurt to be safe */
1748 map_write(map, CMD(0x70), adr);
1749 chip->state = FL_STATUS;
1750 status = map_read(map, adr);
1752 /* check for errors */
/* SR.1/SR.3/SR.4/SR.5 error bits */
1753 if (map_word_bitsset(map, status, CMD(0x3a))) {
1754 unsigned long chipstatus = MERGESTATUS(status);
1756 /* Reset the error bits */
1757 map_write(map, CMD(0x50), adr);
1758 map_write(map, CMD(0x70), adr);
1759 xip_enable(map, chip, adr);
/* SR.4 and SR.5 both set: bad command sequence */
1761 if ((chipstatus & 0x30) == 0x30) {
1762 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1764 } else if (chipstatus & 0x02) {
1765 /* Protection bit set */
1767 } else if (chipstatus & 0x8) {
1769 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
/* SR.5 alone: erase failed — retry a few times */
1771 } else if (chipstatus & 0x20 && retries--) {
1772 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1773 put_chip(map, chip, adr);
1774 spin_unlock(chip->mutex);
1777 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1784 xip_enable(map, chip, adr);
1785 out: put_chip(map, chip, adr);
1786 spin_unlock(chip->mutex);
/*
 * mtd->erase() implementation: iterate the variable-size erase regions
 * via cfi_varsize_frob() with do_erase_oneblock, then report completion
 * through the MTD erase callback.
 */
1790 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1792 unsigned long ofs, len;
1798 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1802 instr->state = MTD_ERASE_DONE;
1803 mtd_erase_callback(instr);
/*
 * mtd->sync() implementation: acquire every chip into FL_SYNCING so all
 * pending operations drain, then release them all back to their prior
 * state.  The second loop walks backwards over the chips that were
 * successfully acquired.
 */
1808 static void cfi_intelext_sync (struct mtd_info *mtd)
1810 struct map_info *map = mtd->priv;
1811 struct cfi_private *cfi = map->fldrv_priv;
1813 struct flchip *chip;
1816 for (i=0; !ret && i<cfi->numchips; i++) {
1817 chip = &cfi->chips[i];
1819 spin_lock(chip->mutex);
1820 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1823 chip->oldstate = chip->state;
1824 chip->state = FL_SYNCING;
1825 /* No need to wake_up() on this state change -
1826 * as the whole point is that nobody can do anything
1827 * with the chip now anyway.
1830 spin_unlock(chip->mutex);
1833 /* Unlock the chips again */
1835 for (i--; i >=0; i--) {
1836 chip = &cfi->chips[i];
1838 spin_lock(chip->mutex);
1840 if (chip->state == FL_SYNCING) {
1841 chip->state = chip->oldstate;
1842 chip->oldstate = FL_READY;
1845 spin_unlock(chip->mutex);
/*
 * Query the lock status of one block: enter Read Identifier mode (0x90)
 * and read the block lock configuration at offset 2 (scaled by the
 * interleave/device-type factor).  Returns the raw query byte.
 */
1849 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1850 struct flchip *chip,
1852 int len, void *thunk)
1854 struct cfi_private *cfi = map->fldrv_priv;
1855 int status, ofs_factor = cfi->interleave * cfi->device_type;
1858 xip_disable(map, chip, adr+(2*ofs_factor));
1859 map_write(map, CMD(0x90), adr+(2*ofs_factor));
1860 chip->state = FL_JEDEC_QUERY;
1861 status = cfi_read_query(map, adr+(2*ofs_factor));
1862 xip_enable(map, chip, 0);
/* Debug helper (DEBUG_LOCK_BITS only): print one block's lock status */
1866 #ifdef DEBUG_LOCK_BITS
1867 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1868 struct flchip *chip,
1870 int len, void *thunk)
1872 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1873 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
/* Thunk values selecting the operation performed by do_xxlock_oneblock() */
1878 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1879 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
/*
 * Lock or unlock one block: Block Lock setup (0x60) followed by 0x01
 * (set lock) or 0xD0 (clear lock) depending on @thunk, then wait.
 * Chips with Instant Individual Block Locking (extp FeatureSupport
 * bit 5) need no delay; otherwise wait one timer tick.
 */
1881 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1882 unsigned long adr, int len, void *thunk)
1884 struct cfi_private *cfi = map->fldrv_priv;
1885 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1891 spin_lock(chip->mutex);
1892 ret = get_chip(map, chip, adr, FL_LOCKING);
1894 spin_unlock(chip->mutex);
1899 xip_disable(map, chip, adr);
1901 map_write(map, CMD(0x60), adr);
1902 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1903 map_write(map, CMD(0x01), adr);
1904 chip->state = FL_LOCKING;
1905 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1906 map_write(map, CMD(0xD0), adr);
1907 chip->state = FL_UNLOCKING;
1912 * If Instant Individual Block Locking supported then no need
1915 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1917 ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1919 map_write(map, CMD(0x70), adr);
1920 chip->state = FL_STATUS;
1921 xip_enable(map, chip, adr);
1922 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1926 xip_enable(map, chip, adr);
1927 out: put_chip(map, chip, adr);
1928 spin_unlock(chip->mutex);
/*
 * mtd->lock() implementation: lock all blocks in [ofs, ofs+len) via
 * cfi_varsize_frob()/do_xxlock_oneblock, optionally dumping lock status
 * before and after when DEBUG_LOCK_BITS is defined.
 */
1932 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1936 #ifdef DEBUG_LOCK_BITS
1937 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1938 __FUNCTION__, ofs, len);
1939 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1943 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1944 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1946 #ifdef DEBUG_LOCK_BITS
1947 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1949 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
/*
 * mtd->unlock() implementation: mirror of cfi_intelext_lock() using the
 * UNLOCK thunk.
 */
1956 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1960 #ifdef DEBUG_LOCK_BITS
1961 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1962 __FUNCTION__, ofs, len);
1963 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1967 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1968 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1970 #ifdef DEBUG_LOCK_BITS
1971 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1973 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1980 #ifdef CONFIG_MTD_OTP
/* Operation callback type used by cfi_intelext_otp_walk() */
1982 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1983 u_long data_offset, u_char *buf, u_int size,
1984 u_long prot_offset, u_int groupno, u_int groupsize);
/*
 * Read @size bytes from the OTP (protection register) area at @offset.
 * OTP registers are visible in Read Identifier (JEDEC query) mode, so
 * the chip is switched to 0x90 mode, the data copied, and the cache
 * invalidated both before and after so array-mode data is never mixed
 * with OTP data.
 */
1987 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1988 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1990 struct cfi_private *cfi = map->fldrv_priv;
1993 spin_lock(chip->mutex);
1994 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1996 spin_unlock(chip->mutex);
2000 /* let's ensure we're not reading back cached data from array mode */
2001 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2003 xip_disable(map, chip, chip->start);
2004 if (chip->state != FL_JEDEC_QUERY) {
2005 map_write(map, CMD(0x90), chip->start);
2006 chip->state = FL_JEDEC_QUERY;
2008 map_copy_from(map, buf, chip->start + offset, size);
2009 xip_enable(map, chip, chip->start);
2011 /* then ensure we don't keep OTP data in the cache */
2012 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2014 put_chip(map, chip, chip->start);
2015 spin_unlock(chip->mutex);
/*
 * Write @size bytes to the OTP area at @offset, one 0xff-padded
 * bus-width word at a time via do_write_oneword(FL_OTP_WRITE).
 */
2020 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2021 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2026 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2027 int gap = offset - bus_ofs;
2028 int n = min_t(int, size, map_bankwidth(map)-gap);
/* Pad with 0xff so bytes outside [gap, gap+n) are untouched */
2029 map_word datum = map_word_ff(map);
2031 datum = map_word_load_partial(map, datum, buf, gap, n);
2032 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
/*
 * Permanently lock OTP group @grpno by clearing its bit in the
 * protection lock word at @prot (bits program 1 -> 0 only, so this is
 * irreversible).
 */
2045 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2046 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2048 struct cfi_private *cfi = map->fldrv_priv;
2051 /* make sure area matches group boundaries */
2055 datum = map_word_ff(map);
2056 datum = map_word_clr(map, datum, CMD(1 << grpno));
2057 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
/*
 * Generic walker over the chip's OTP protection registers.  For each
 * real chip (virtual interleaved chips skipped via chip_step) it
 * iterates the protection-register fields described in the extended
 * query table, selects the factory or user groups per @user_regs, and
 * either applies @action (read/write/lock) to the bytes in
 * [from, from+len) or, when @action is NULL, fills @buf with
 * struct otp_info descriptors (start, length, locked flag read from
 * the protection lock word).
 *
 * NOTE(review): loop braces and several bookkeeping lines were dropped
 * by extraction; original line numbers are non-contiguous.
 */
2060 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2061 size_t *retlen, u_char *buf,
2062 otp_op_t action, int user_regs)
2064 struct map_info *map = mtd->priv;
2065 struct cfi_private *cfi = map->fldrv_priv;
2066 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2067 struct flchip *chip;
2068 struct cfi_intelext_otpinfo *otp;
2069 u_long devsize, reg_prot_offset, data_offset;
2070 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2071 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2076 /* Check that we actually have some OTP registers */
2077 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2080 /* we need real chips here not virtual ones */
2081 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2082 chip_step = devsize >> cfi->chipshift;
2085 /* Some chips have OTP located in the _top_ partition only.
2086 For example: Intel 28F256L18T (T means top-parameter device) */
2087 if (cfi->mfr == MANUFACTURER_INTEL) {
2092 chip_num = chip_step - 1;
2096 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2097 chip = &cfi->chips[chip_num];
2098 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2100 /* first OTP region */
2102 reg_prot_offset = extp->ProtRegAddr;
2103 reg_fact_groups = 1;
2104 reg_fact_size = 1 << extp->FactProtRegSize;
2105 reg_user_groups = 1;
2106 reg_user_size = 1 << extp->UserProtRegSize;
2109 /* flash geometry fixup */
/* Data words follow the lock word; scale offsets/sizes by interleave */
2110 data_offset = reg_prot_offset + 1;
2111 data_offset *= cfi->interleave * cfi->device_type;
2112 reg_prot_offset *= cfi->interleave * cfi->device_type;
2113 reg_fact_size *= cfi->interleave;
2114 reg_user_size *= cfi->interleave;
/* Select user or factory register groups */
2117 groups = reg_user_groups;
2118 groupsize = reg_user_size;
2119 /* skip over factory reg area */
2120 groupno = reg_fact_groups;
2121 data_offset += reg_fact_groups * reg_fact_size;
2123 groups = reg_fact_groups;
2124 groupsize = reg_fact_size;
2128 while (len > 0 && groups > 0) {
2131 * Special case: if action is NULL
2132 * we fill buf with otp_info records.
2134 struct otp_info *otpinfo;
2136 len -= sizeof(struct otp_info);
/* Read the lock word to derive the "locked" flag */
2139 ret = do_otp_read(map, chip,
2141 (u_char *)&lockword,
2146 otpinfo = (struct otp_info *)buf;
2147 otpinfo->start = from;
2148 otpinfo->length = groupsize;
2150 !map_word_bitsset(map, lockword,
2153 buf += sizeof(*otpinfo);
2154 *retlen += sizeof(*otpinfo);
2155 } else if (from >= groupsize) {
/* Whole group before the requested window: skip it */
2157 data_offset += groupsize;
2159 int size = groupsize;
2160 data_offset += from;
2165 ret = action(map, chip, data_offset,
2166 buf, size, reg_prot_offset,
2167 groupno, groupsize);
2173 data_offset += size;
2179 /* next OTP region */
2180 if (++field == extp->NumProtectionFields)
2182 reg_prot_offset = otp->ProtRegAddr;
2183 reg_fact_groups = otp->FactGroups;
2184 reg_fact_size = 1 << otp->FactProtRegSize;
2185 reg_user_groups = otp->UserGroups;
2186 reg_user_size = 1 << otp->UserProtRegSize;
/* mtd->read_fact_prot_reg(): read factory OTP registers */
2194 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2195 size_t len, size_t *retlen,
2198 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2199 buf, do_otp_read, 0);
/* mtd->read_user_prot_reg(): read user OTP registers */
2202 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2203 size_t len, size_t *retlen,
2206 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2207 buf, do_otp_read, 1);
/* mtd->write_user_prot_reg(): program user OTP registers */
2210 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2211 size_t len, size_t *retlen,
2214 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2215 buf, do_otp_write, 1);
/* mtd->lock_user_prot_reg(): permanently lock user OTP groups */
2218 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2219 loff_t from, size_t len)
2222 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2223 NULL, do_otp_lock, 1);
/* mtd->get_fact_prot_info(): enumerate factory OTP regions into @buf;
 * returns error or number of bytes of otp_info written */
2226 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2227 struct otp_info *buf, size_t len)
2232 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2233 return ret ? : retlen;
/* mtd->get_user_prot_info(): enumerate user OTP regions into @buf;
 * returns error or number of bytes of otp_info written */
2236 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2237 struct otp_info *buf, size_t len)
2242 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2243 return ret ? : retlen;
/*
 * Snapshot the lock bit of every block into each erase region's lockmap
 * bitmap (suspend path for chips that power up with all blocks locked).
 */
2248 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2250 struct mtd_erase_region_info *region;
2251 int block, status, i;
2255 for (i = 0; i < mtd->numeraseregions; i++) {
2256 region = &mtd->eraseregions[i];
2257 if (!region->lockmap)
2260 for (block = 0; block < region->numblocks; block++){
2261 len = region->erasesize;
2262 adr = region->offset + block * len;
2264 status = cfi_varsize_frob(mtd,
2265 do_getlockstatus_oneblock, adr, len, NULL);
/* Bit set = block was locked at suspend time */
2267 set_bit(block, region->lockmap);
2269 clear_bit(block, region->lockmap);
/*
 * mtd->suspend() implementation: optionally save per-block lock state
 * (auto-locking parts, extp bit 5 + MTD_STUPID_LOCK), then move every
 * idle chip to FL_PM_SUSPENDED.  Chips with a pending or active
 * operation refuse suspend; on failure the already-suspended chips are
 * rolled back to their prior state.
 *
 * NOTE(review): several case labels / return lines were dropped by
 * extraction; original line numbers are non-contiguous.
 */
2274 static int cfi_intelext_suspend(struct mtd_info *mtd)
2276 struct map_info *map = mtd->priv;
2277 struct cfi_private *cfi = map->fldrv_priv;
2278 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2280 struct flchip *chip;
2283 if ((mtd->flags & MTD_STUPID_LOCK)
2284 && extp && (extp->FeatureSupport & (1 << 5)))
2285 cfi_intelext_save_locks(mtd);
2287 for (i=0; !ret && i<cfi->numchips; i++) {
2288 chip = &cfi->chips[i];
2290 spin_lock(chip->mutex);
2292 switch (chip->state) {
2296 case FL_JEDEC_QUERY:
2297 if (chip->oldstate == FL_READY) {
2298 /* place the chip in a known state before suspend */
2299 map_write(map, CMD(0xFF), cfi->chips[i].start);
2300 chip->oldstate = chip->state;
2301 chip->state = FL_PM_SUSPENDED;
2302 /* No need to wake_up() on this state change -
2303 * as the whole point is that nobody can do anything
2304 * with the chip now anyway.
2307 /* There seems to be an operation pending. We must wait for it. */
2308 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2313 /* Should we actually wait? Once upon a time these routines weren't
2314 allowed to. Or should we return -EAGAIN, because the upper layers
2315 ought to have already shut down anything which was using the device
2316 anyway? The latter for now. */
2317 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate)
2319 case FL_PM_SUSPENDED:
2322 spin_unlock(chip->mutex);
2325 /* Unlock the chips again */
/* Roll back the chips suspended so far if any chip refused */
2328 for (i--; i >=0; i--) {
2329 chip = &cfi->chips[i];
2331 spin_lock(chip->mutex);
2333 if (chip->state == FL_PM_SUSPENDED) {
2334 /* No need to force it into a known state here,
2335 because we're returning failure, and it didn't
2337 chip->state = chip->oldstate;
2338 chip->oldstate = FL_READY;
2341 spin_unlock(chip->mutex);
/*
 * Resume path counterpart of cfi_intelext_save_locks(): unlock every
 * block whose lockmap bit is clear (i.e. was unlocked before suspend)
 * on auto-locking chips that power up fully locked.
 */
2348 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2350 struct mtd_erase_region_info *region;
2355 for (i = 0; i < mtd->numeraseregions; i++) {
2356 region = &mtd->eraseregions[i];
2357 if (!region->lockmap)
2360 for (block = 0; block < region->numblocks; block++) {
2361 len = region->erasesize;
2362 adr = region->offset + block * len;
2364 if (!test_bit(block, region->lockmap))
2365 cfi_intelext_unlock(mtd, adr, len);
/*
 * mtd->resume() implementation: return each suspended chip to Read
 * Array mode (the chip may have been power-cycled), then restore
 * per-block lock state on auto-locking parts.
 */
2370 static void cfi_intelext_resume(struct mtd_info *mtd)
2372 struct map_info *map = mtd->priv;
2373 struct cfi_private *cfi = map->fldrv_priv;
2374 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2376 struct flchip *chip;
2378 for (i=0; i<cfi->numchips; i++) {
2380 chip = &cfi->chips[i];
2382 spin_lock(chip->mutex);
2384 /* Go to known state. Chip may have been power cycled */
2385 if (chip->state == FL_PM_SUSPENDED) {
2386 map_write(map, CMD(0xFF), cfi->chips[i].start);
2387 chip->oldstate = chip->state = FL_READY;
2391 spin_unlock(chip->mutex);
2394 if ((mtd->flags & MTD_STUPID_LOCK)
2395 && extp && (extp->FeatureSupport & (1 << 5)))
2396 cfi_intelext_restore_locks(mtd);
/*
 * Force every chip back into Read Array mode, completing any ongoing
 * operation first, so a bootloader in flash is readable after a soft
 * reboot.  Used by the reboot notifier and by destroy.
 */
2399 static int cfi_intelext_reset(struct mtd_info *mtd)
2401 struct map_info *map = mtd->priv;
2402 struct cfi_private *cfi = map->fldrv_priv;
2405 for (i=0; i < cfi->numchips; i++) {
2406 struct flchip *chip = &cfi->chips[i];
2408 /* force the completion of any ongoing operation
2409 and switch to array mode so any bootloader in
2410 flash is accessible for soft reboot. */
2411 spin_lock(chip->mutex);
2412 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2414 map_write(map, CMD(0xff), chip->start);
2415 chip->state = FL_READY;
2417 spin_unlock(chip->mutex);
/* Reboot-notifier callback: reset the flash to array mode on shutdown */
2423 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2426 struct mtd_info *mtd;
2428 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2429 cfi_intelext_reset(mtd);
/*
 * Driver teardown: reset the chips to array mode, unregister the reboot
 * notifier, and free the extended query data, per-chip private data,
 * per-region lock bitmaps and the erase-region array.
 */
2433 static void cfi_intelext_destroy(struct mtd_info *mtd)
2435 struct map_info *map = mtd->priv;
2436 struct cfi_private *cfi = map->fldrv_priv;
2437 struct mtd_erase_region_info *region;
2439 cfi_intelext_reset(mtd);
2440 unregister_reboot_notifier(&mtd->reboot_notifier);
2441 kfree(cfi->cmdset_priv);
2443 kfree(cfi->chips[0].priv);
2445 for (i = 0; i < mtd->numeraseregions; i++) {
2446 region = &mtd->eraseregions[i];
2447 if (region->lockmap)
2448 kfree(region->lockmap);
2450 kfree(mtd->eraseregions);
2453 MODULE_LICENSE("GPL");
2454 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2455 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2456 MODULE_ALIAS("cfi_cmdset_0003");
2457 MODULE_ALIAS("cfi_cmdset_0200");