/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 * 	- auto-unlock sectors on resume for auto-locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0
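/*
 * Note: with FORCE_WORD_WRITE set to 1, the #if !FORCE_WORD_WRITE guard
 * in cfi_fixup_table below compiles out fixup_use_write_buffers(), so
 * mtd->write stays at cfi_intelext_write_words().
 */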

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
#define AT49BV640D		0x02de

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);

static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
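
/*
 * chip_ready() waits until a chip can accept the requested operation,
 * suspending an in-progress erase when the extended table says that is
 * supported; get_chip()/put_chip() layer partition arbitration on top
 * of it for chips whose hardware partitions share one program/erase
 * engine.
 */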

/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X: supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X: supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
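
/*
 * CFI packs each EraseRegionInfo word as: bits 0-15 = number of erase
 * blocks minus one, bits 16-31 = block size in 256-byte units, so the
 * 0x3e written above advertises 63 blocks in region 1.
 */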

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks all the cases where we know
	 * that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					map->name);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));

EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
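
/*
 * P_IDs 0x0003 and 0x0200 identify Intel command-set variants that are
 * close enough to 0x0001 for the same implementation to serve all
 * three, hence the aliases above.
 */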

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
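		/*
		 * Assuming numparts is a power of two, __ffs() is its log2:
		 * e.g. a 32 MiB chip (chipshift 25) with 4 hw partitions
		 * gives partshift 23, i.e. four 8 MiB virtual chips.
		 */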

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				map->name, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* Fall through */
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
			   || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			spin_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time )
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec)  \
	xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
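
/*
 * WAIT_TIMEOUT() is the no-invalidation form of the wait above: with
 * inval_adr/inval_len of 0 it only polls for command completion, which
 * is all the lock and buffer-write setup paths below need.
 */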

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
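		/* 0x40 is the classic word-program opcode; chips that
		   report P_ID 0x0200 expect the 0x41 variant instead. */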
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
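	/* MaxBufWriteSize is the log2 of the per-chip buffer size, so
	   e.g. a 32-byte buffer (2^5) behind a 2-way interleave yields
	   a 64-byte write window. */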
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );
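	/* The chip expects the word count encoded as N-1, which is why
	   words was decremented above in the aligned case. */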

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));
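		/* e.g. with a 64-byte window, a write starting 10 bytes
		   into the window is clamped to 54 bytes for this chunk */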

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

static int __xipram do_getlockstatus_oneblock(struct map_info *map,
					      struct flchip *chip,
					      unsigned long adr,
					      int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
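
/*
 * These thunk values are handed to cfi_varsize_frob(), which passes
 * them back into do_xxlock_oneblock() so a single helper can serve
 * both the lock and unlock paths.
 */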

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
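	/* FeatureSupport bit 5 is the "instant individual block locking"
	   flag reported by cfi_tell_features(); without it, allow the
	   chip up to one timer tick to finish. */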

	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
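	/* After the partition fixup, chipshift describes a virtual chip,
	   so chip_step (virtual chips per physical device) lets the loop
	   below visit each physical chip exactly once. */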
	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}
	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}
			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/* check before subtracting: len is a
					   size_t and must not wrap around */
					if (len < sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}
			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
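/*
 * The MTD OTP entry points below are thin wrappers around the walker:
 * factory registers are read-only (user_regs = 0), while user
 * registers can additionally be written and locked (user_regs = 1).
 */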
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}
static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}
static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}
static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;

	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}
static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}
static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif
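/*
 * Userspace reaches the OTP handlers above through the mtdchar OTP
 * ioctls.  A rough sketch of the read path (constants from
 * include/mtd/mtd-abi.h; error handling omitted, /dev/mtd0 is only an
 * example node):
 *
 *	int fd = open("/dev/mtd0", O_RDONLY);
 *	int mode = MTD_OTP_USER;	(or MTD_OTP_FACTORY)
 *	ioctl(fd, OTPSELECT, &mode);	(redirect fd into OTP space)
 *	read(fd, buf, sizeof(buf));	(serviced by do_otp_read())
 */

/*
 * Chips that advertise instant individual block locking (extended
 * query FeatureSupport bit 5) power up with all blocks locked.  When
 * MTD_POWERUP_LOCK is set, the lock state of every block is saved in
 * the per-region lockmap bitmaps before suspend so it can be
 * reestablished on resume.
 */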
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
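/*
 * Note the asymmetry with cfi_intelext_save_locks(): restoring only
 * ever unlocks.  After a power cycle every block comes back locked,
 * so blocks recorded as locked are already in the correct state.
 */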
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
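/*
 * Shared by the reboot notifier and cfi_intelext_destroy(): once a
 * chip reaches FL_SHUTDOWN, get_chip() turns away new operations, so
 * the device stays in read-array mode from here on.
 */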
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i = 0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}
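/*
 * Reboot notifier, registered at setup time: puts the flash back into
 * array mode before the machine soft-reboots, so a bootloader stored
 * in flash remains readable.
 */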
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		/* kfree(NULL) is a no-op, no need to test the lockmap */
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}
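/*
 * The aliases below allow this module to be autoloaded for chips that
 * report the related Intel command-set IDs 0x0003 and 0x0200 instead
 * of 0x0001.
 */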
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");