/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F
#define AT49BV640D      0x02de

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}
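
/*
 * Worked example of the remapping above (illustrative, not from the
 * original source): an Atmel Features byte of 0x87 has bits 0, 1, 2
 * and 7 set (chip erase, erase suspend, program suspend, protection
 * bits).  These map to Intel FeatureSupport bits 0, 1, 2 and 6, so
 * extp->FeatureSupport ends up as 0x47.
 */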

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
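
/*
 * EraseRegionInfo encoding, as decoded later in cfi_intelext_setup():
 * bits 0-15 hold the number of erase blocks minus one and bits 16-31
 * hold the block size divided by 256.  Forcing the low half to 0x3e
 * above therefore declares 0x3e + 1 = 63 erase blocks for region 1
 * while keeping the chip's original block size.
 */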

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * as well.  This table picks all the cases where we
         * know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
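
/*
 * Sketch of the variable-length tail handled above (symbolic, for
 * reference): for an extended query of version 1.3 or later,
 *
 *   extra_size = (NumProtectionFields - 1) * sizeof(otpinfo)
 *              + 2 + <burst read info length byte>
 *              + 1                               (partition count)
 *              + per region: sizeof(regioninfo)
 *                + (NumBlockTypes - 1) * sizeof(blockinfo)
 *
 * Whenever a bounds check finds the buffer too small, need_more frees
 * it and re-reads the PRI with the larger size, capped at a 4096-byte
 * sanity limit.
 */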

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

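        /*
         * CFI encodes typical operation times as powers of two: per the
         * CFI spec, WordWriteTimeoutTyp is 2^n microseconds and
         * BlockEraseTimeoutTyp is 2^n milliseconds.  Illustrative
         * numbers: WordWriteTimeoutTyp == 4 gives 1<<4 = 16 us per word
         * and BlockEraseTimeoutTyp == 10 gives 1000<<10 ~= 1.02 s per
         * block, with fallbacks of 50 ms and 2 s when a chip reports 0.
         */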
        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }
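
        /*
         * Worked example of the decode above (illustrative): with
         * EraseRegionInfo[i] == 0x0100003e and interleave == 1,
         * ersize = ((0x0100003e >> 8) & ~0xff) = 0x10000 (64 KiB) and
         * ernum = (0x0100003e & 0xffff) + 1 = 63, so the region covers
         * 63 blocks of 64 KiB each.
         */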

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }
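
                /*
                 * Illustrative example (not from the original source):
                 * a 16 MiB chip (chipshift == 24) reporting 4 identical
                 * hardware partitions gives partshift = 24 - __ffs(4) = 22,
                 * i.e. four virtual chips of 4 MiB each, and numvirtchips
                 * below becomes numchips * 4.
                 */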

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can access the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                return -EAGAIN;
        }
}
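
/*
 * Status register bits used throughout this file (Intel command set,
 * summarised here for reference): 0x80 = write state machine ready,
 * 0x40 = erase suspended, 0x04 = program suspended, and 0x01 =
 * partition write status (status_PWS above) on multi-partition chips.
 */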

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
                           || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
                /*
                 * OK. We have the possibility of contention on the
                 * write/erase operations, which are global to the real
                 * chip and not per partition.  So let's fight it over
                 * in the partition which currently has authority on
                 * the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
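                /*
                 * Example of these rules in action (illustrative): if
                 * partition A is erasing, shared->writing ==
                 * shared->erasing == A.  A write to partition B must
                 * first win A's mutex via the contender path below;
                 * chip_ready() then suspends A's erase, B takes over
                 * shared->writing, and put_chip() later hands ownership
                 * back so the suspended erase can be resumed.
                 */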
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);

                        if (ret == -EAGAIN) {
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        spin_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
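
/*
 * Illustrative timeline (not from the original source): with
 * chip_op_time == 2000 us the loop below polls for up to 16000 us in
 * total.  If an interrupt becomes pending 500 us in, the operation is
 * suspended (0xb0), the remaining 15500 us budget is kept, the
 * interrupt is serviced from array mode, and the operation is resumed
 * (0xd0) before polling continues.
 */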

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
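
/*
 * Usage sketch (illustrative): callers do
 *
 *   XIP_INVAL_CACHED_RANGE(map, adr, len);
 *   ...issue the flash command...
 *   INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, adr, len, usec);
 *
 * With CONFIG_MTD_XIP the second macro reduces to
 * xip_wait_for_operation() since the invalidation already happened up
 * front; without it, both steps collapse into
 * inval_cache_and_wait_for_operation(), which invalidates the cache
 * while the flash is already busy and then polls for completion.
 */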

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}
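
/*
 * Illustrative numbers for the wait strategy above: a word write with
 * chip_op_time == 2000 us gives sleep_time == 1000 us, which on
 * HZ == 100 (10000 us per tick) is below one tick, so the loop
 * busy-waits with udelay(1).  A block erase with chip_op_time ==
 * 1000000 us first msleep()s for half of that (500 ms) and then
 * continues in tick-sized sleeps until the status register reports
 * ready or the 8x timeout budget is exhausted.
 */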
1208
1209 #endif
1210
1211 #define WAIT_TIMEOUT(map, chip, adr, udelay) \
1212         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
1213
1214
1215 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1216 {
1217         unsigned long cmd_addr;
1218         struct cfi_private *cfi = map->fldrv_priv;
1219         int ret = 0;
1220
1221         adr += chip->start;
1222
1223         /* Ensure cmd read/writes are aligned. */
1224         cmd_addr = adr & ~(map_bankwidth(map)-1);
1225
1226         spin_lock(chip->mutex);
1227
1228         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1229
1230         if (!ret) {
1231                 if (chip->state != FL_POINT && chip->state != FL_READY)
1232                         map_write(map, CMD(0xff), cmd_addr);
1233
1234                 chip->state = FL_POINT;
1235                 chip->ref_point_counter++;
1236         }
1237         spin_unlock(chip->mutex);
1238
1239         return ret;
1240 }
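
/*
 * Usage sketch (illustrative, not part of this driver): point/unpoint
 * hand the caller a direct mapping of the flash array instead of a
 * buffered read.  Through the MTD interface this driver registers,
 * that looks roughly like:
 *
 *	size_t retlen;
 *	void *virt;
 *
 *	if (!mtd->point(mtd, ofs, len, &retlen, &virt, NULL)) {
 *		memcpy(dest, virt, retlen);
 *		mtd->unpoint(mtd, ofs, retlen);
 *	}
 *
 * ref_point_counter lets nested point() calls share the FL_POINT
 * state; the chip drops back to FL_READY only once the last user has
 * unpointed.
 */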
1241
1242 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1243                 size_t *retlen, void **virt, resource_size_t *phys)
1244 {
1245         struct map_info *map = mtd->priv;
1246         struct cfi_private *cfi = map->fldrv_priv;
1247         unsigned long ofs, last_end = 0;
1248         int chipnum;
1249         int ret = 0;
1250
1251         if (!map->virt || (from + len > mtd->size))
1252                 return -EINVAL;
1253
1254         /* Now lock the chip(s) to POINT state */
1255
1256         /* ofs: offset within the first chip at which the first read should start */
1257         chipnum = (from >> cfi->chipshift);
1258         ofs = from - (chipnum << cfi->chipshift);
1259
1260         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1261         *retlen = 0;
1262         if (phys)
1263                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1264
1265         while (len) {
1266                 unsigned long thislen;
1267
1268                 if (chipnum >= cfi->numchips)
1269                         break;
1270
1271                 /* We cannot point across chips that are virtually disjoint */
1272                 if (!last_end)
1273                         last_end = cfi->chips[chipnum].start;
1274                 else if (cfi->chips[chipnum].start != last_end)
1275                         break;
1276
1277                 if ((len + ofs - 1) >> cfi->chipshift)
1278                         thislen = (1<<cfi->chipshift) - ofs;
1279                 else
1280                         thislen = len;
1281
1282                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1283                 if (ret)
1284                         break;
1285
1286                 *retlen += thislen;
1287                 len -= thislen;
1288
1289                 ofs = 0;
1290                 last_end += 1 << cfi->chipshift;
1291                 chipnum++;
1292         }
1293         return 0;
1294 }
1295
1296 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1297 {
1298         struct map_info *map = mtd->priv;
1299         struct cfi_private *cfi = map->fldrv_priv;
1300         unsigned long ofs;
1301         int chipnum;
1302
1303         /* Now release the chip(s) from POINT state */
1304
1305         /* ofs: offset within the first chip at which the first read should start */
1306         chipnum = (from >> cfi->chipshift);
1307         ofs = from - (chipnum << cfi->chipshift);
1308
1309         while (len) {
1310                 unsigned long thislen;
1311                 struct flchip *chip;
1312
1313                 if (chipnum >= cfi->numchips)
1314                         break;
1315                 chip = &cfi->chips[chipnum];
1316
1317                 if ((len + ofs - 1) >> cfi->chipshift)
1318                         thislen = (1<<cfi->chipshift) - ofs;
1319                 else
1320                         thislen = len;
1321
1322                 spin_lock(chip->mutex);
1323                 if (chip->state == FL_POINT) {
1324                         chip->ref_point_counter--;
1325                         if (chip->ref_point_counter == 0)
1326                                 chip->state = FL_READY;
1327                 } else
1328                         printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1329
1330                 put_chip(map, chip, chip->start);
1331                 spin_unlock(chip->mutex);
1332
1333                 len -= thislen;
1334                 ofs = 0;
1335                 chipnum++;
1336         }
1337 }
1338
1339 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1340 {
1341         unsigned long cmd_addr;
1342         struct cfi_private *cfi = map->fldrv_priv;
1343         int ret;
1344
1345         adr += chip->start;
1346
1347         /* Ensure cmd read/writes are aligned. */
1348         cmd_addr = adr & ~(map_bankwidth(map)-1);
1349
1350         spin_lock(chip->mutex);
1351         ret = get_chip(map, chip, cmd_addr, FL_READY);
1352         if (ret) {
1353                 spin_unlock(chip->mutex);
1354                 return ret;
1355         }
1356
1357         if (chip->state != FL_POINT && chip->state != FL_READY) {
1358                 map_write(map, CMD(0xff), cmd_addr);
1359
1360                 chip->state = FL_READY;
1361         }
1362
1363         map_copy_from(map, buf, adr, len);
1364
1365         put_chip(map, chip, cmd_addr);
1366
1367         spin_unlock(chip->mutex);
1368         return 0;
1369 }
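
/*
 * Note on the read path above: if the chip was left in status or query
 * mode, writing 0xFF first returns it to read-array mode, so that
 * map_copy_from() fetches real flash contents rather than status
 * register echoes.  FL_POINT is left untouched, since pointed regions
 * are already readable array data.
 */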
1370
1371 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1372 {
1373         struct map_info *map = mtd->priv;
1374         struct cfi_private *cfi = map->fldrv_priv;
1375         unsigned long ofs;
1376         int chipnum;
1377         int ret = 0;
1378
1379         /* ofs: offset within the first chip at which the first read should start */
1380         chipnum = (from >> cfi->chipshift);
1381         ofs = from - (chipnum << cfi->chipshift);
1382
1383         *retlen = 0;
1384
1385         while (len) {
1386                 unsigned long thislen;
1387
1388                 if (chipnum >= cfi->numchips)
1389                         break;
1390
1391         if ((len + ofs - 1) >> cfi->chipshift)
1392                         thislen = (1<<cfi->chipshift) - ofs;
1393                 else
1394                         thislen = len;
1395
1396                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1397                 if (ret)
1398                         break;
1399
1400                 *retlen += thislen;
1401                 len -= thislen;
1402                 buf += thislen;
1403
1404                 ofs = 0;
1405                 chipnum++;
1406         }
1407         return ret;
1408 }
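
/*
 * Worked example of the chip-splitting arithmetic above (illustrative
 * numbers): with two 16 MiB chips (cfi->chipshift == 24), a read of
 * len = 0x30 bytes at from = 0xFFFFF0 begins on chip 0 at
 * ofs = 0xFFFFF0.  Because (len + ofs - 1) >> 24 is non-zero, the
 * first pass is clipped to thislen = (1 << 24) - ofs = 0x10 bytes,
 * and the remaining 0x20 bytes are read from chip 1 starting at
 * ofs = 0.
 */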
1409
1410 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1411                                      unsigned long adr, map_word datum, int mode)
1412 {
1413         struct cfi_private *cfi = map->fldrv_priv;
1414         map_word status, write_cmd;
1415         int ret = 0;
1416
1417         adr += chip->start;
1418
1419         switch (mode) {
1420         case FL_WRITING:
1421                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1422                 break;
1423         case FL_OTP_WRITE:
1424                 write_cmd = CMD(0xc0);
1425                 break;
1426         default:
1427                 return -EINVAL;
1428         }
1429
1430         spin_lock(chip->mutex);
1431         ret = get_chip(map, chip, adr, mode);
1432         if (ret) {
1433                 spin_unlock(chip->mutex);
1434                 return ret;
1435         }
1436
1437         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1438         ENABLE_VPP(map);
1439         xip_disable(map, chip, adr);
1440         map_write(map, write_cmd, adr);
1441         map_write(map, datum, adr);
1442         chip->state = mode;
1443
1444         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1445                                    adr, map_bankwidth(map),
1446                                    chip->word_write_time);
1447         if (ret) {
1448                 xip_enable(map, chip, adr);
1449                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1450                 goto out;
1451         }
1452
1453         /* check for errors */
1454         status = map_read(map, adr);
1455         if (map_word_bitsset(map, status, CMD(0x1a))) {
1456                 unsigned long chipstatus = MERGESTATUS(status);
1457
1458                 /* reset status */
1459                 map_write(map, CMD(0x50), adr);
1460                 map_write(map, CMD(0x70), adr);
1461                 xip_enable(map, chip, adr);
1462
1463                 if (chipstatus & 0x02) {
1464                         ret = -EROFS;
1465                 } else if (chipstatus & 0x08) {
1466                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1467                         ret = -EIO;
1468                 } else {
1469                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1470                         ret = -EINVAL;
1471                 }
1472
1473                 goto out;
1474         }
1475
1476         xip_enable(map, chip, adr);
1477  out:   put_chip(map, chip, adr);
1478         spin_unlock(chip->mutex);
1479         return ret;
1480 }
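
/*
 * Status register bits decoded above, per the usual Intel CFI
 * convention (e.g. the StrataFlash datasheets):
 *
 *	SR.7 (0x80)  write state machine ready
 *	SR.5 (0x20)  erase error
 *	SR.4 (0x10)  program error
 *	SR.3 (0x08)  VPP low
 *	SR.1 (0x02)  block locked
 *
 * CMD(0x1a) therefore tests SR.4|SR.3|SR.1 across all interleaved
 * chips in one map_word_bitsset() call, and MERGESTATUS() folds the
 * per-chip copies back into a single value for decoding.
 */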
1481
1482
1483 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1484 {
1485         struct map_info *map = mtd->priv;
1486         struct cfi_private *cfi = map->fldrv_priv;
1487         int ret = 0;
1488         int chipnum;
1489         unsigned long ofs;
1490
1491         *retlen = 0;
1492         if (!len)
1493                 return 0;
1494
1495         chipnum = to >> cfi->chipshift;
1496         ofs = to - (chipnum << cfi->chipshift);
1497
1498         /* If it's not bus-aligned, do the first byte write */
1499         if (ofs & (map_bankwidth(map)-1)) {
1500                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1501                 int gap = ofs - bus_ofs;
1502                 int n;
1503                 map_word datum;
1504
1505                 n = min_t(int, len, map_bankwidth(map)-gap);
1506                 datum = map_word_ff(map);
1507                 datum = map_word_load_partial(map, datum, buf, gap, n);
1508
1509                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1510                                                bus_ofs, datum, FL_WRITING);
1511                 if (ret)
1512                         return ret;
1513
1514                 len -= n;
1515                 ofs += n;
1516                 buf += n;
1517                 (*retlen) += n;
1518
1519                 if (ofs >> cfi->chipshift) {
1520                         chipnum++;
1521                         ofs = 0;
1522                         if (chipnum == cfi->numchips)
1523                                 return 0;
1524                 }
1525         }
1526
1527         while(len >= map_bankwidth(map)) {
1528                 map_word datum = map_word_load(map, buf);
1529
1530                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1531                                        ofs, datum, FL_WRITING);
1532                 if (ret)
1533                         return ret;
1534
1535                 ofs += map_bankwidth(map);
1536                 buf += map_bankwidth(map);
1537                 (*retlen) += map_bankwidth(map);
1538                 len -= map_bankwidth(map);
1539
1540                 if (ofs >> cfi->chipshift) {
1541                         chipnum++;
1542                         ofs = 0;
1543                         if (chipnum == cfi->numchips)
1544                                 return 0;
1545                 }
1546         }
1547
1548         if (len & (map_bankwidth(map)-1)) {
1549                 map_word datum;
1550
1551                 datum = map_word_ff(map);
1552                 datum = map_word_load_partial(map, datum, buf, 0, len);
1553
1554                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1555                                        ofs, datum, FL_WRITING);
1556                 if (ret)
1557                         return ret;
1558
1559                 (*retlen) += len;
1560         }
1561
1562         return 0;
1563 }
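
/*
 * Illustrative example of the alignment handling above (made-up
 * numbers): on a map with map_bankwidth(map) == 4, writing len = 7
 * bytes at ofs = 0x102 gives bus_ofs = 0x100 and gap = 2.  The head
 * do_write_oneword() programs n = min(7, 4 - 2) = 2 bytes; the byte
 * lanes it does not own are padded with 0xFF, which leaves them
 * unchanged because flash programming can only clear bits.  The main
 * loop then writes one full 4-byte word at 0x104, and the tail path
 * writes the final byte, again 0xFF-padded.
 */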
1564
1565
1566 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1567                                     unsigned long adr, const struct kvec **pvec,
1568                                     unsigned long *pvec_seek, int len)
1569 {
1570         struct cfi_private *cfi = map->fldrv_priv;
1571         map_word status, write_cmd, datum;
1572         unsigned long cmd_adr;
1573         int ret, wbufsize, word_gap, words;
1574         const struct kvec *vec;
1575         unsigned long vec_seek;
1576         unsigned long initial_adr;
1577         int initial_len = len;
1578
1579         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1580         adr += chip->start;
1581         initial_adr = adr;
1582         cmd_adr = adr & ~(wbufsize-1);
1583
1584         /* Pick the buffer write command once; CMD() already expands it across the interleave */
1585         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1586
1587         spin_lock(chip->mutex);
1588         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1589         if (ret) {
1590                 spin_unlock(chip->mutex);
1591                 return ret;
1592         }
1593
1594         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1595         ENABLE_VPP(map);
1596         xip_disable(map, chip, cmd_adr);
1597
1598         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1599            [...], the device will not accept any more Write to Buffer commands".
1600            So we must check here and reset those bits if they're set; otherwise
1601            our Write to Buffer command would simply be ignored. */
1602         if (chip->state != FL_STATUS) {
1603                 map_write(map, CMD(0x70), cmd_adr);
1604                 chip->state = FL_STATUS;
1605         }
1606         status = map_read(map, cmd_adr);
1607         if (map_word_bitsset(map, status, CMD(0x30))) {
1608                 xip_enable(map, chip, cmd_adr);
1609                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1610                 xip_disable(map, chip, cmd_adr);
1611                 map_write(map, CMD(0x50), cmd_adr);
1612                 map_write(map, CMD(0x70), cmd_adr);
1613         }
1614
1615         chip->state = FL_WRITING_TO_BUFFER;
1616         map_write(map, write_cmd, cmd_adr);
1617         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1618         if (ret) {
1619                 /* Argh. Not ready for write to buffer */
1620                 map_word Xstatus = map_read(map, cmd_adr);
1621                 map_write(map, CMD(0x70), cmd_adr);
1622                 chip->state = FL_STATUS;
1623                 status = map_read(map, cmd_adr);
1624                 map_write(map, CMD(0x50), cmd_adr);
1625                 map_write(map, CMD(0x70), cmd_adr);
1626                 xip_enable(map, chip, cmd_adr);
1627                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1628                                 map->name, Xstatus.x[0], status.x[0]);
1629                 goto out;
1630         }
1631
1632         /* Figure out the number of words to write */
1633         word_gap = (-adr & (map_bankwidth(map)-1));
1634         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1635         if (!word_gap) {
1636                 words--;
1637         } else {
1638                 word_gap = map_bankwidth(map) - word_gap;
1639                 adr -= word_gap;
1640                 datum = map_word_ff(map);
1641         }
1642
1643         /* Write the word count for the data to come (the spec's N-1 encoding) */
1644         map_write(map, CMD(words), cmd_adr);
1645
1646         /* Write data */
1647         vec = *pvec;
1648         vec_seek = *pvec_seek;
1649         do {
1650                 int n = map_bankwidth(map) - word_gap;
1651                 if (n > vec->iov_len - vec_seek)
1652                         n = vec->iov_len - vec_seek;
1653                 if (n > len)
1654                         n = len;
1655
1656                 if (!word_gap && len < map_bankwidth(map))
1657                         datum = map_word_ff(map);
1658
1659                 datum = map_word_load_partial(map, datum,
1660                                               vec->iov_base + vec_seek,
1661                                               word_gap, n);
1662
1663                 len -= n;
1664                 word_gap += n;
1665                 if (!len || word_gap == map_bankwidth(map)) {
1666                         map_write(map, datum, adr);
1667                         adr += map_bankwidth(map);
1668                         word_gap = 0;
1669                 }
1670
1671                 vec_seek += n;
1672                 if (vec_seek == vec->iov_len) {
1673                         vec++;
1674                         vec_seek = 0;
1675                 }
1676         } while (len);
1677         *pvec = vec;
1678         *pvec_seek = vec_seek;
1679
1680         /* GO GO GO */
1681         map_write(map, CMD(0xd0), cmd_adr);
1682         chip->state = FL_WRITING;
1683
1684         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1685                                    initial_adr, initial_len,
1686                                    chip->buffer_write_time);
1687         if (ret) {
1688                 map_write(map, CMD(0x70), cmd_adr);
1689                 chip->state = FL_STATUS;
1690                 xip_enable(map, chip, cmd_adr);
1691                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1692                 goto out;
1693         }
1694
1695         /* check for errors */
1696         status = map_read(map, cmd_adr);
1697         if (map_word_bitsset(map, status, CMD(0x1a))) {
1698                 unsigned long chipstatus = MERGESTATUS(status);
1699
1700                 /* reset status */
1701                 map_write(map, CMD(0x50), cmd_adr);
1702                 map_write(map, CMD(0x70), cmd_adr);
1703                 xip_enable(map, chip, cmd_adr);
1704
1705                 if (chipstatus & 0x02) {
1706                         ret = -EROFS;
1707                 } else if (chipstatus & 0x08) {
1708                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1709                         ret = -EIO;
1710                 } else {
1711                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1712                         ret = -EINVAL;
1713                 }
1714
1715                 goto out;
1716         }
1717
1718         xip_enable(map, chip, cmd_adr);
1719  out:   put_chip(map, chip, cmd_adr);
1720         spin_unlock(chip->mutex);
1721         return ret;
1722 }
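
/*
 * Buffered-write command sequence issued above (standard Intel CFI):
 *
 *	0x70/0x50	read/clear status, so stale SR.4/SR.5 cannot
 *			make the chip reject the buffer command
 *	0xE8 (0xE9 on P_ID 0x0200 parts)  write-to-buffer setup,
 *			readiness reported through SR.7
 *	CMD(words)	word count, in the N-1 encoding
 *	<data words>	buffer contents, 0xFF-padded at the edges
 *	0xD0		confirm: program the buffer into the array
 *
 * Completion and errors are then reported through the status register
 * exactly as for single-word writes.
 */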
1723
1724 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1725                                 unsigned long count, loff_t to, size_t *retlen)
1726 {
1727         struct map_info *map = mtd->priv;
1728         struct cfi_private *cfi = map->fldrv_priv;
1729         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1730         int ret = 0;
1731         int chipnum;
1732         unsigned long ofs, vec_seek, i;
1733         size_t len = 0;
1734
1735         for (i = 0; i < count; i++)
1736                 len += vecs[i].iov_len;
1737
1738         *retlen = 0;
1739         if (!len)
1740                 return 0;
1741
1742         chipnum = to >> cfi->chipshift;
1743         ofs = to - (chipnum << cfi->chipshift);
1744         vec_seek = 0;
1745
1746         do {
1747                 /* We must not cross write block boundaries */
1748                 int size = wbufsize - (ofs & (wbufsize-1));
1749
1750                 if (size > len)
1751                         size = len;
1752                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1753                                       ofs, &vecs, &vec_seek, size);
1754                 if (ret)
1755                         return ret;
1756
1757                 ofs += size;
1758                 (*retlen) += size;
1759                 len -= size;
1760
1761                 if (ofs >> cfi->chipshift) {
1762                         chipnum++;
1763                         ofs = 0;
1764                         if (chipnum == cfi->numchips)
1765                                 return 0;
1766                 }
1767
1768                 /* Be nice and reschedule with the chip in a usable state for other
1769                    processes. */
1770                 cond_resched();
1771
1772         } while (len);
1773
1774         return 0;
1775 }
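
/*
 * Example of the write-block clipping above (illustrative numbers):
 * with wbufsize = 32, a write starting at ofs = 0x1C is clipped to
 * size = 32 - (0x1C & 31) = 4 bytes for the first do_write_buffer()
 * call, so that no single buffer program crosses a write-buffer
 * boundary; the next iteration then starts aligned at ofs = 0x20.
 */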
1776
1777 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1778                                        size_t len, size_t *retlen, const u_char *buf)
1779 {
1780         struct kvec vec;
1781
1782         vec.iov_base = (void *) buf;
1783         vec.iov_len = len;
1784
1785         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1786 }
1787
1788 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1789                                       unsigned long adr, int len, void *thunk)
1790 {
1791         struct cfi_private *cfi = map->fldrv_priv;
1792         map_word status;
1793         int retries = 3;
1794         int ret;
1795
1796         adr += chip->start;
1797
1798  retry:
1799         spin_lock(chip->mutex);
1800         ret = get_chip(map, chip, adr, FL_ERASING);
1801         if (ret) {
1802                 spin_unlock(chip->mutex);
1803                 return ret;
1804         }
1805
1806         XIP_INVAL_CACHED_RANGE(map, adr, len);
1807         ENABLE_VPP(map);
1808         xip_disable(map, chip, adr);
1809
1810         /* Clear the status register first */
1811         map_write(map, CMD(0x50), adr);
1812
1813         /* Now erase */
1814         map_write(map, CMD(0x20), adr);
1815         map_write(map, CMD(0xD0), adr);
1816         chip->state = FL_ERASING;
1817         chip->erase_suspended = 0;
1818
1819         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1820                                    adr, len,
1821                                    chip->erase_time);
1822         if (ret) {
1823                 map_write(map, CMD(0x70), adr);
1824                 chip->state = FL_STATUS;
1825                 xip_enable(map, chip, adr);
1826                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1827                 goto out;
1828         }
1829
1830         /* We've broken this before. It doesn't hurt to be safe */
1831         map_write(map, CMD(0x70), adr);
1832         chip->state = FL_STATUS;
1833         status = map_read(map, adr);
1834
1835         /* check for errors */
1836         if (map_word_bitsset(map, status, CMD(0x3a))) {
1837                 unsigned long chipstatus = MERGESTATUS(status);
1838
1839                 /* Reset the error bits */
1840                 map_write(map, CMD(0x50), adr);
1841                 map_write(map, CMD(0x70), adr);
1842                 xip_enable(map, chip, adr);
1843
1844                 if ((chipstatus & 0x30) == 0x30) {
1845                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1846                         ret = -EINVAL;
1847                 } else if (chipstatus & 0x02) {
1848                         /* Protection bit set */
1849                         ret = -EROFS;
1850                 } else if (chipstatus & 0x8) {
1851                         /* Voltage */
1852                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1853                         ret = -EIO;
1854                 } else if (chipstatus & 0x20 && retries--) {
1855                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1856                         put_chip(map, chip, adr);
1857                         spin_unlock(chip->mutex);
1858                         goto retry;
1859                 } else {
1860                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1861                         ret = -EIO;
1862                 }
1863
1864                 goto out;
1865         }
1866
1867         xip_enable(map, chip, adr);
1868  out:   put_chip(map, chip, adr);
1869         spin_unlock(chip->mutex);
1870         return ret;
1871 }
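
/*
 * Erase sequence used above: 0x50 clears the status register, then
 * 0x20/0xD0 is the two-cycle block erase (setup + confirm).  CMD(0x3a)
 * tests SR.5|SR.4|SR.3|SR.1; SR.4 and SR.5 set together (0x30) signal
 * an invalid command sequence rather than a failed erase, while a
 * plain SR.5 failure is retried up to three times before the block is
 * given up on.
 */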
1872
1873 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1874 {
1875         unsigned long ofs, len;
1876         int ret;
1877
1878         ofs = instr->addr;
1879         len = instr->len;
1880
1881         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1882         if (ret)
1883                 return ret;
1884
1885         instr->state = MTD_ERASE_DONE;
1886         mtd_erase_callback(instr);
1887
1888         return 0;
1889 }
1890
1891 static void cfi_intelext_sync (struct mtd_info *mtd)
1892 {
1893         struct map_info *map = mtd->priv;
1894         struct cfi_private *cfi = map->fldrv_priv;
1895         int i;
1896         struct flchip *chip;
1897         int ret = 0;
1898
1899         for (i = 0; !ret && i < cfi->numchips; i++) {
1900                 chip = &cfi->chips[i];
1901
1902                 spin_lock(chip->mutex);
1903                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1904
1905                 if (!ret) {
1906                         chip->oldstate = chip->state;
1907                         chip->state = FL_SYNCING;
1908                         /* No need to wake_up() on this state change -
1909                          * as the whole point is that nobody can do anything
1910                          * with the chip now anyway.
1911                          */
1912                 }
1913                 spin_unlock(chip->mutex);
1914         }
1915
1916         /* Unlock the chips again */
1917
1918         for (i--; i >= 0; i--) {
1919                 chip = &cfi->chips[i];
1920
1921                 spin_lock(chip->mutex);
1922
1923                 if (chip->state == FL_SYNCING) {
1924                         chip->state = chip->oldstate;
1925                         chip->oldstate = FL_READY;
1926                         wake_up(&chip->wq);
1927                 }
1928                 spin_unlock(chip->mutex);
1929         }
1930 }
1931
1932 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1933                                                 struct flchip *chip,
1934                                                 unsigned long adr,
1935                                                 int len, void *thunk)
1936 {
1937         struct cfi_private *cfi = map->fldrv_priv;
1938         int status, ofs_factor = cfi->interleave * cfi->device_type;
1939
1940         adr += chip->start;
1941         xip_disable(map, chip, adr+(2*ofs_factor));
1942         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1943         chip->state = FL_JEDEC_QUERY;
1944         status = cfi_read_query(map, adr+(2*ofs_factor));
1945         xip_enable(map, chip, 0);
1946         return status;
1947 }
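
/*
 * The query above uses the read-identifier command (0x90): in
 * identifier space, the device word at block base + 2 holds the block
 * lock status (bit 0 = locked; bit 1 = locked-down on parts that
 * support it).  ofs_factor scales that device-word offset into map
 * bytes for the interleave and device width.
 */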
1948
1949 #ifdef DEBUG_LOCK_BITS
1950 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1951                                                 struct flchip *chip,
1952                                                 unsigned long adr,
1953                                                 int len, void *thunk)
1954 {
1955         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1956                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1957         return 0;
1958 }
1959 #endif
1960
1961 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1962 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1963
1964 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1965                                        unsigned long adr, int len, void *thunk)
1966 {
1967         struct cfi_private *cfi = map->fldrv_priv;
1968         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1969         int udelay;
1970         int ret;
1971
1972         adr += chip->start;
1973
1974         spin_lock(chip->mutex);
1975         ret = get_chip(map, chip, adr, FL_LOCKING);
1976         if (ret) {
1977                 spin_unlock(chip->mutex);
1978                 return ret;
1979         }
1980
1981         ENABLE_VPP(map);
1982         xip_disable(map, chip, adr);
1983
1984         map_write(map, CMD(0x60), adr);
1985         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1986                 map_write(map, CMD(0x01), adr);
1987                 chip->state = FL_LOCKING;
1988         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1989                 map_write(map, CMD(0xD0), adr);
1990                 chip->state = FL_UNLOCKING;
1991         } else
1992                 BUG();
1993
1994         /*
1995          * If Instant Individual Block Locking supported then no need
1996          * to delay.
1997          */
1998         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1999
2000         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
2001         if (ret) {
2002                 map_write(map, CMD(0x70), adr);
2003                 chip->state = FL_STATUS;
2004                 xip_enable(map, chip, adr);
2005                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2006                 goto out;
2007         }
2008
2009         xip_enable(map, chip, adr);
2010 out:    put_chip(map, chip, adr);
2011         spin_unlock(chip->mutex);
2012         return ret;
2013 }
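
/*
 * Lock-bit handling above: 0x60 is the configuration setup command,
 * followed by 0x01 (set block lock bit) or 0xD0 (clear lock bits).
 * Parts advertising Instant Individual Block Locking
 * (extp->FeatureSupport bit 5) apply the change immediately, so no
 * wait budget is needed; everything else gets a one-tick expected
 * time, which WAIT_TIMEOUT scales into its usual 8x budget.
 */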
2014
2015 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2016 {
2017         int ret;
2018
2019 #ifdef DEBUG_LOCK_BITS
2020         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2021                __func__, (unsigned long long)ofs, len);
2022         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2023                 ofs, len, NULL);
2024 #endif
2025
2026         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2027                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2028
2029 #ifdef DEBUG_LOCK_BITS
2030         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2031                __func__, ret);
2032         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2033                 ofs, len, NULL);
2034 #endif
2035
2036         return ret;
2037 }
2038
2039 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2040 {
2041         int ret;
2042
2043 #ifdef DEBUG_LOCK_BITS
2044         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2045                __func__, (unsigned long long)ofs, len);
2046         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2047                 ofs, len, NULL);
2048 #endif
2049
2050         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2051                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2052
2053 #ifdef DEBUG_LOCK_BITS
2054         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2055                __func__, ret);
2056         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2057                 ofs, len, NULL);
2058 #endif
2059
2060         return ret;
2061 }
2062
2063 #ifdef CONFIG_MTD_OTP
2064
2065 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2066                         u_long data_offset, u_char *buf, u_int size,
2067                         u_long prot_offset, u_int groupno, u_int groupsize);
2068
2069 static int __xipram
2070 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2071             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2072 {
2073         struct cfi_private *cfi = map->fldrv_priv;
2074         int ret;
2075
2076         spin_lock(chip->mutex);
2077         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2078         if (ret) {
2079                 spin_unlock(chip->mutex);
2080                 return ret;
2081         }
2082
2083         /* let's ensure we're not reading back cached data from array mode */
2084         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2085
2086         xip_disable(map, chip, chip->start);
2087         if (chip->state != FL_JEDEC_QUERY) {
2088                 map_write(map, CMD(0x90), chip->start);
2089                 chip->state = FL_JEDEC_QUERY;
2090         }
2091         map_copy_from(map, buf, chip->start + offset, size);
2092         xip_enable(map, chip, chip->start);
2093
2094         /* then ensure we don't keep OTP data in the cache */
2095         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2096
2097         put_chip(map, chip, chip->start);
2098         spin_unlock(chip->mutex);
2099         return 0;
2100 }
2101
2102 static int
2103 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2104              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2105 {
2106         int ret;
2107
2108         while (size) {
2109                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2110                 int gap = offset - bus_ofs;
2111                 int n = min_t(int, size, map_bankwidth(map)-gap);
2112                 map_word datum = map_word_ff(map);
2113
2114                 datum = map_word_load_partial(map, datum, buf, gap, n);
2115                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2116                 if (ret)
2117                         return ret;
2118
2119                 offset += n;
2120                 buf += n;
2121                 size -= n;
2122         }
2123
2124         return 0;
2125 }
2126
2127 static int
2128 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2129             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2130 {
2131         struct cfi_private *cfi = map->fldrv_priv;
2132         map_word datum;
2133
2134         /* make sure area matches group boundaries */
2135         if (size != grpsz)
2136                 return -EXDEV;
2137
2138         datum = map_word_ff(map);
2139         datum = map_word_clr(map, datum, CMD(1 << grpno));
2140         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2141 }
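
/*
 * OTP locking sketch: each protection register group owns one bit in
 * the protection lock word at 'prot'.  Locking group N programs that
 * word with all bits set except bit N; since programming can only
 * clear bits, this permanently write-protects the group.  The
 * size != grpsz check refuses partial requests, because the lock
 * granularity is the whole group.
 */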
2142
2143 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2144                                  size_t *retlen, u_char *buf,
2145                                  otp_op_t action, int user_regs)
2146 {
2147         struct map_info *map = mtd->priv;
2148         struct cfi_private *cfi = map->fldrv_priv;
2149         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2150         struct flchip *chip;
2151         struct cfi_intelext_otpinfo *otp;
2152         u_long devsize, reg_prot_offset, data_offset;
2153         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2154         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2155         int ret;
2156
2157         *retlen = 0;
2158
2159         /* Check that we actually have some OTP registers */
2160         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2161                 return -ENODATA;
2162
2163         /* we need real chips here, not virtual ones */
2164         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2165         chip_step = devsize >> cfi->chipshift;
2166         chip_num = 0;
2167
2168         /* Some chips have OTP located in the _top_ partition only.
2169            For example: Intel 28F256L18T (T means top-parameter device) */
2170         if (cfi->mfr == MANUFACTURER_INTEL) {
2171                 switch (cfi->id) {
2172                 case 0x880b:
2173                 case 0x880c:
2174                 case 0x880d:
2175                         chip_num = chip_step - 1;
2176                 }
2177         }
2178
2179         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2180                 chip = &cfi->chips[chip_num];
2181                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2182
2183                 /* first OTP region */
2184                 field = 0;
2185                 reg_prot_offset = extp->ProtRegAddr;
2186                 reg_fact_groups = 1;
2187                 reg_fact_size = 1 << extp->FactProtRegSize;
2188                 reg_user_groups = 1;
2189                 reg_user_size = 1 << extp->UserProtRegSize;
2190
2191                 while (len > 0) {
2192                         /* flash geometry fixup */
2193                         data_offset = reg_prot_offset + 1;
2194                         data_offset *= cfi->interleave * cfi->device_type;
2195                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2196                         reg_fact_size *= cfi->interleave;
2197                         reg_user_size *= cfi->interleave;
2198
2199                         if (user_regs) {
2200                                 groups = reg_user_groups;
2201                                 groupsize = reg_user_size;
2202                                 /* skip over factory reg area */
2203                                 groupno = reg_fact_groups;
2204                                 data_offset += reg_fact_groups * reg_fact_size;
2205                         } else {
2206                                 groups = reg_fact_groups;
2207                                 groupsize = reg_fact_size;
2208                                 groupno = 0;
2209                         }
2210
2211                         while (len > 0 && groups > 0) {
2212                                 if (!action) {
2213                                         /*
2214                                          * Special case: if action is NULL
2215                                          * we fill buf with otp_info records.
2216                                          */
2217                                         struct otp_info *otpinfo;
2218                                         map_word lockword;
2219                                         if (len <= sizeof(struct otp_info))
2220                                                 return -ENOSPC;
2221                                         len -= sizeof(struct otp_info);
2222                                         ret = do_otp_read(map, chip,
2223                                                           reg_prot_offset,
2224                                                           (u_char *)&lockword,
2225                                                           map_bankwidth(map),
2226                                                           0, 0,  0);
2227                                         if (ret)
2228                                                 return ret;
2229                                         otpinfo = (struct otp_info *)buf;
2230                                         otpinfo->start = from;
2231                                         otpinfo->length = groupsize;
2232                                         otpinfo->locked =
2233                                            !map_word_bitsset(map, lockword,
2234                                                              CMD(1 << groupno));
2235                                         from += groupsize;
2236                                         buf += sizeof(*otpinfo);
2237                                         *retlen += sizeof(*otpinfo);
2238                                 } else if (from >= groupsize) {
2239                                         from -= groupsize;
2240                                         data_offset += groupsize;
2241                                 } else {
2242                                         int size = groupsize;
2243                                         data_offset += from;
2244                                         size -= from;
2245                                         from = 0;
2246                                         if (size > len)
2247                                                 size = len;
2248                                         ret = action(map, chip, data_offset,
2249                                                      buf, size, reg_prot_offset,
2250                                                      groupno, groupsize);
2251                                         if (ret < 0)
2252                                                 return ret;
2253                                         buf += size;
2254                                         len -= size;
2255                                         *retlen += size;
2256                                         data_offset += size;
2257                                 }
2258                                 groupno++;
2259                                 groups--;
2260                         }
2261
2262                         /* next OTP region */
2263                         if (++field == extp->NumProtectionFields)
2264                                 break;
2265                         reg_prot_offset = otp->ProtRegAddr;
2266                         reg_fact_groups = otp->FactGroups;
2267                         reg_fact_size = 1 << otp->FactProtRegSize;
2268                         reg_user_groups = otp->UserGroups;
2269                         reg_user_size = 1 << otp->UserProtRegSize;
2270                         otp++;
2271                 }
2272         }
2273
2274         return 0;
2275 }
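
/*
 * Geometry fixup example for the walk above (illustrative numbers):
 * on a 2-way interleaved map of 16-bit chips (interleave = 2,
 * device_type = 2), a CFI-reported protection register address of
 * 0x80 becomes reg_prot_offset = 0x80 * 2 * 2 = 0x200 map bytes, with
 * the data area one device word later at (0x80 + 1) * 2 * 2 = 0x204.
 * The register sizes scale by the interleave alone, as each chip in
 * the interleave contributes its own copy of the register.
 */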
2276
2277 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2278                                            size_t len, size_t *retlen,
2279                                             u_char *buf)
2280 {
2281         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2282                                      buf, do_otp_read, 0);
2283 }
2284
2285 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2286                                            size_t len, size_t *retlen,
2287                                             u_char *buf)
2288 {
2289         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2290                                      buf, do_otp_read, 1);
2291 }
2292
2293 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2294                                             size_t len, size_t *retlen,
2295                                              u_char *buf)
2296 {
2297         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2298                                      buf, do_otp_write, 1);
2299 }
2300
2301 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2302                                            loff_t from, size_t len)
2303 {
2304         size_t retlen;
2305         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2306                                      NULL, do_otp_lock, 1);
2307 }
2308
2309 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2310                                            struct otp_info *buf, size_t len)
2311 {
2312         size_t retlen;
2313         int ret;
2314
2315         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2316         return ret ? : retlen;
2317 }
2318
2319 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2320                                            struct otp_info *buf, size_t len)
2321 {
2322         size_t retlen;
2323         int ret;
2324
2325         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2326         return ret ? : retlen;
2327 }
2328
2329 #endif
2330
2331 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2332 {
2333         struct mtd_erase_region_info *region;
2334         int block, status, i;
2335         unsigned long adr;
2336         size_t len;
2337
2338         for (i = 0; i < mtd->numeraseregions; i++) {
2339                 region = &mtd->eraseregions[i];
2340                 if (!region->lockmap)
2341                         continue;
2342
2343                 for (block = 0; block < region->numblocks; block++){
2344                         len = region->erasesize;
2345                         adr = region->offset + block * len;
2346
2347                         status = cfi_varsize_frob(mtd,
2348                                         do_getlockstatus_oneblock, adr, len, NULL);
2349                         if (status)
2350                                 set_bit(block, region->lockmap);
2351                         else
2352                                 clear_bit(block, region->lockmap);
2353                 }
2354         }
2355 }
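
/*
 * save_locks/restore_locks work as a pair around suspend: one bit per
 * erase block records whether the block was locked before suspend,
 * and on resume every block whose bit is clear is explicitly unlocked
 * again.  This matters for power-up-locking parts (MTD_POWERUP_LOCK
 * with FeatureSupport bit 5), which come back from a power cycle with
 * all blocks locked.
 */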
2356
2357 static int cfi_intelext_suspend(struct mtd_info *mtd)
2358 {
2359         struct map_info *map = mtd->priv;
2360         struct cfi_private *cfi = map->fldrv_priv;
2361         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2362         int i;
2363         struct flchip *chip;
2364         int ret = 0;
2365
2366         if ((mtd->flags & MTD_POWERUP_LOCK)
2367             && extp && (extp->FeatureSupport & (1 << 5)))
2368                 cfi_intelext_save_locks(mtd);
2369
2370         for (i = 0; !ret && i < cfi->numchips; i++) {
2371                 chip = &cfi->chips[i];
2372
2373                 spin_lock(chip->mutex);
2374
2375                 switch (chip->state) {
2376                 case FL_READY:
2377                 case FL_STATUS:
2378                 case FL_CFI_QUERY:
2379                 case FL_JEDEC_QUERY:
2380                         if (chip->oldstate == FL_READY) {
2381                                 /* place the chip in a known state before suspend */
2382                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2383                                 chip->oldstate = chip->state;
2384                                 chip->state = FL_PM_SUSPENDED;
2385                                 /* No need to wake_up() on this state change -
2386                                  * as the whole point is that nobody can do anything
2387                                  * with the chip now anyway.
2388                                  */
2389                         } else {
2390                                 /* There seems to be an operation pending. We must wait for it. */
2391                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2392                                 ret = -EAGAIN;
2393                         }
2394                         break;
2395                 default:
2396                         /* Should we actually wait? Once upon a time these routines weren't
2397                            allowed to. Or should we return -EAGAIN, because the upper layers
2398                            ought to have already shut down anything which was using the device
2399                            anyway? The latter for now. */
2400                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2401                         ret = -EAGAIN;
2402                 case FL_PM_SUSPENDED:
2403                         break;
2404                 }
2405                 spin_unlock(chip->mutex);
2406         }
2407
2408         /* Unlock the chips again */
2409
2410         if (ret) {
2411                 for (i--; i >= 0; i--) {
2412                         chip = &cfi->chips[i];
2413
2414                         spin_lock(chip->mutex);
2415
2416                         if (chip->state == FL_PM_SUSPENDED) {
2417                                 /* No need to force it into a known state here,
2418                                    because we're returning failure, and it didn't
2419                                    get power cycled */
2420                                 chip->state = chip->oldstate;
2421                                 chip->oldstate = FL_READY;
2422                                 wake_up(&chip->wq);
2423                         }
2424                         spin_unlock(chip->mutex);
2425                 }
2426         }
2427
2428         return ret;
2429 }
2430
2431 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2432 {
2433         struct mtd_erase_region_info *region;
2434         int block, i;
2435         unsigned long adr;
2436         size_t len;
2437
2438         for (i = 0; i < mtd->numeraseregions; i++) {
2439                 region = &mtd->eraseregions[i];
2440                 if (!region->lockmap)
2441                         continue;
2442
2443                 for (block = 0; block < region->numblocks; block++) {
2444                         len = region->erasesize;
2445                         adr = region->offset + block * len;
2446
2447                         if (!test_bit(block, region->lockmap))
2448                                 cfi_intelext_unlock(mtd, adr, len);
2449                 }
2450         }
2451 }
2452
2453 static void cfi_intelext_resume(struct mtd_info *mtd)
2454 {
2455         struct map_info *map = mtd->priv;
2456         struct cfi_private *cfi = map->fldrv_priv;
2457         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2458         int i;
2459         struct flchip *chip;
2460
2461         for (i = 0; i < cfi->numchips; i++) {
2462
2463                 chip = &cfi->chips[i];
2464
2465                 spin_lock(chip->mutex);
2466
2467                 /* Go to known state. Chip may have been power cycled */
2468                 if (chip->state == FL_PM_SUSPENDED) {
2469                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2470                         chip->oldstate = chip->state = FL_READY;
2471                         wake_up(&chip->wq);
2472                 }
2473
2474                 spin_unlock(chip->mutex);
2475         }
2476
2477         if ((mtd->flags & MTD_POWERUP_LOCK)
2478             && extp && (extp->FeatureSupport & (1 << 5)))
2479                 cfi_intelext_restore_locks(mtd);
2480 }
2481
2482 static int cfi_intelext_reset(struct mtd_info *mtd)
2483 {
2484         struct map_info *map = mtd->priv;
2485         struct cfi_private *cfi = map->fldrv_priv;
2486         int i, ret;
2487
2488         for (i = 0; i < cfi->numchips; i++) {
2489                 struct flchip *chip = &cfi->chips[i];
2490
2491                 /* force the completion of any ongoing operation
2492                    and switch to array mode so any bootloader in
2493                    flash is accessible for soft reboot. */
2494                 spin_lock(chip->mutex);
2495                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2496                 if (!ret) {
2497                         map_write(map, CMD(0xff), chip->start);
2498                         chip->state = FL_SHUTDOWN;
2499                 }
2500                 spin_unlock(chip->mutex);
2501         }
2502
2503         return 0;
2504 }
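
/*
 * Soft-reboot handling: the reboot notifier below funnels into
 * cfi_intelext_reset(), which waits for any in-flight operation and
 * then issues 0xFF to put each chip back into read-array mode, so a
 * bootloader stored in this flash can be fetched and executed after
 * the reboot.
 */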
2505
2506 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2507                                void *v)
2508 {
2509         struct mtd_info *mtd;
2510
2511         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2512         cfi_intelext_reset(mtd);
2513         return NOTIFY_DONE;
2514 }
2515
2516 static void cfi_intelext_destroy(struct mtd_info *mtd)
2517 {
2518         struct map_info *map = mtd->priv;
2519         struct cfi_private *cfi = map->fldrv_priv;
2520         struct mtd_erase_region_info *region;
2521         int i;
2522         cfi_intelext_reset(mtd);
2523         unregister_reboot_notifier(&mtd->reboot_notifier);
2524         kfree(cfi->cmdset_priv);
2525         kfree(cfi->cfiq);
2526         kfree(cfi->chips[0].priv);
2527         kfree(cfi);
2528         for (i = 0; i < mtd->numeraseregions; i++) {
2529                 region = &mtd->eraseregions[i];
2530                 kfree(region->lockmap);
2532         }
2533         kfree(mtd->eraseregions);
2534 }
2535
2536 MODULE_LICENSE("GPL");
2537 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2538 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2539 MODULE_ALIAS("cfi_cmdset_0003");
2540 MODULE_ALIAS("cfi_cmdset_0200");