0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020 #include <linux/module.h>
0021 #include <linux/types.h>
0022 #include <linux/kernel.h>
0023 #include <linux/sched.h>
0024 #include <asm/io.h>
0025 #include <asm/byteorder.h>
0026
0027 #include <linux/errno.h>
0028 #include <linux/slab.h>
0029 #include <linux/delay.h>
0030 #include <linux/interrupt.h>
0031 #include <linux/reboot.h>
0032 #include <linux/bitmap.h>
0033 #include <linux/mtd/xip.h>
0034 #include <linux/mtd/map.h>
0035 #include <linux/mtd/mtd.h>
0036 #include <linux/mtd/cfi.h>
0037
0038
0039
0040
0041
/* Set to 1 to force word-at-a-time programming even on buffered-write parts. */
#define FORCE_WORD_WRITE 0

/* Intel-family device IDs that need special handling below. */
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
#define M28F00AP30 0x8963

/* STMicroelectronics firmware-hub style parts (use fwh_lock). */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
#define M50FLW080B 0x0081

/* Atmel chips with a vendor-specific PRI layout. */
#define AT49BV640D 0x02de
#define AT49BV640DT 0x02db

/* Sharp LH28F640BF family (see fixup_LH28F640BF). */
#define LH28F640BFHE_PTTL90 0x00b0
#define LH28F640BFHE_PBTL90 0x00b1
#define LH28F640BFHE_PTTL70A 0x00b2
#define LH28F640BFHE_PBTL70A 0x00b3
0061
/* Forward declarations for the mtd_info operation callbacks installed below. */
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len);
#ifdef CONFIG_MTD_OTP
/* One-time-programmable (protection register) access, compiled in optionally. */
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					    size_t *, const u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
/* fwh_lock.h needs the chip_ready/get_chip/put_chip prototypes above. */
#include "fwh_lock.h"
0102
0103
0104
0105
0106
0107
0108
/* Chip driver descriptor registered with the map layer; probing is done
 * elsewhere, so .probe stays NULL. */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe = NULL,
	.destroy = cfi_intelext_destroy,
	.name = "cfi_cmdset_0001",
	.module = THIS_MODULE
};
0115
0116
0117
0118
0119 #ifdef DEBUG_CFI_FEATURES
/* Debug helper: decode and print the Intel/Sharp extended query structure. */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	/* Bits 11..31 have no known meaning; report any that are set. */
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	/* Voltages are packed BCD-style: high nibble = volts, low = tenths. */
	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
0168 #endif
0169
0170
/* Atmel chips use a vendor-specific PRI layout; convert it in place into
 * the Intel extended-query format the rest of this driver expects. */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Undo the byteswap done by the generic read path so the raw Atmel
	 * bytes can be copied out below. */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	/* Save the Atmel structure, then clear everything past the 5-byte
	 * PRI header before rebuilding the Intel-format fields. */
	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	/* Translate Atmel feature bits into Intel FeatureSupport bits. */
	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
0210
/* AT49BV640D/DT: advertise instant-individual-block-locking and mark the
 * device as locked at power-up so the MTD core unlocks before writing. */
static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	cfip->FeatureSupport |= (1 << 5);
	mtd->flags |= MTD_POWERUP_LOCK;
}
0220
0221 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
0222
/* Workaround (compile-time opt-in): clear the "program allowed during
 * erase suspend" capability bit so writes never suspend an erase. */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
0233 #endif
0234
0235 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
/* Workaround (compile-time opt-in): clear FeatureSupport bit 2 so write
 * operations are never suspended. */
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
0247 #endif
0248
/* ST M28W320CT: disable buffered writes by zeroing the buffer-write
 * timeouts (fixup_use_write_buffers keys off BufWriteTimeoutTyp). */
static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}
0257
0258 static void fixup_st_m28w320cb(struct mtd_info *mtd)
0259 {
0260 struct map_info *map = mtd->priv;
0261 struct cfi_private *cfi = map->fldrv_priv;
0262
0263
0264 cfi->cfiq->EraseRegionInfo[1] =
0265 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
0266 };
0267
0268 static int is_LH28F640BF(struct cfi_private *cfi)
0269 {
0270
0271 if (cfi->mfr == CFI_MFR_SHARP && (
0272 cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
0273 cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
0274 return 1;
0275 return 0;
0276 }
0277
/* Sharp LH28F640BF: reset the Partition Configuration Register so the
 * device presents one partition of four planes, and disable the
 * simultaneous-operations feature bit. */
static void fixup_LH28F640BF(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	if (is_LH28F640BF(cfi)) {
		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
		/* 0x60/0x04 command pair sets the configuration register. */
		map_write(map, CMD(0x60), 0);
		map_write(map, CMD(0x04), 0);

		/* We have set one single partition thus
		 * Simultaneous Operations are not allowed */
		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
		extp->FeatureSupport &= ~512;
	}
}
0297
0298 static void fixup_use_point(struct mtd_info *mtd)
0299 {
0300 struct map_info *map = mtd->priv;
0301 if (!mtd->_point && map_is_linear(map)) {
0302 mtd->_point = cfi_intelext_point;
0303 mtd->_unpoint = cfi_intelext_unpoint;
0304 }
0305 }
0306
/* Switch to buffered-write handlers when the chip advertises a non-zero
 * typical buffer-write timeout (i.e. supports buffer programming). */
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->_write = cfi_intelext_write_buffers;
		mtd->_writev = cfi_intelext_writev;
	}
}
0317
0318
0319
0320
/* Some chips power up with all blocks locked (FeatureSupport bit 5);
 * tell the MTD core so it auto-unlocks on power-up/resume. */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}
0332
/* Fixups applied when the chip was probed via CFI query (cfi_fixup()
 * walks this table matching manufacturer/device IDs). */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ST, 0x00ba, fixup_st_m28w320ct },
	{ CFI_MFR_ST, 0x00bb, fixup_st_m28w320cb },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
	{ 0, 0, NULL }
};
0353
/* Fixups applied when the chip was probed via JEDEC IDs: these firmware-hub
 * parts need the FWH locking scheme from fwh_lock.h. */
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
	{ CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};
/* Fixups applied unconditionally, after the probe-mode-specific tables. */
static struct cfi_fixup fixup_table[] = {
	/*
	 * The point/unpoint handlers depend only on the map being linear,
	 * not on how the chip was identified.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
	{ 0, 0, NULL }
};
0371
0372 static void cfi_fixup_major_minor(struct cfi_private *cfi,
0373 struct cfi_pri_intelext *extp)
0374 {
0375 if (cfi->mfr == CFI_MFR_INTEL &&
0376 cfi->id == PF38F4476 && extp->MinorVersion == '3')
0377 extp->MinorVersion = '1';
0378 }
0379
0380 static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
0381 {
0382
0383
0384
0385
0386 if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
0387 return 1;
0388 return 0;
0389 }
0390
/*
 * Read the Intel/Sharp extended query (PRI) structure at @adr.  The
 * structure has a variable-length tail (extp->extra[]) whose size depends
 * on fields inside it, so we read, compute the needed size, and if the
 * buffer was too small free it and re-read with the larger size ("again"
 * loop).  Returns a kmalloc'd structure (caller frees) or NULL on error.
 */
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;
	unsigned int extp_size = sizeof(*extp);

again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	cfi_fixup_major_minor(cfi, extp);

	/* Only extended-query versions 1.0 .. 1.5 are understood. */
	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info: one otpinfo is part of the base
		 * struct, the rest live in the extra[] tail. */
		if (extp->NumProtectionFields)
			extra_size += (extp->NumProtectionFields - 1) *
				      sizeof(struct cfi_intelext_otpinfo);
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info: 2 fixed bytes plus a counted list. */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		int nb_parts, i;

		/* Number of hardware-partition region records follows. */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* Version 1.4 inserts a 2-byte partition-region size field. */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			/* One blockinfo is inside rinfo; add the rest. */
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			/* Buffer too small: retry with the computed size,
			 * bounded to a sane 4 KiB maximum. */
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
0480
/*
 * Entry point for the Intel/Sharp (command set 0001) driver: allocate and
 * populate the mtd_info, read the extended query (CFI mode), apply device
 * fixups, and derive per-chip timeout values from the CFI query data.
 * Returns the fully set up mtd_info or NULL on failure.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations; fixups may override some. */
	mtd->_erase = cfi_intelext_erase_varsize;
	mtd->_read = cfi_intelext_read;
	mtd->_write = cfi_intelext_write_words;
	mtd->_sync = cfi_intelext_sync;
	mtd->_lock = cfi_intelext_lock;
	mtd->_unlock = cfi_intelext_unlock;
	mtd->_is_locked = cfi_intelext_is_locked;
	mtd->_suspend = cfi_intelext_suspend;
	mtd->_resume = cfi_intelext_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * Probed via CFI: read the extended query table from the
		 * primary or alternate address and apply CFI-mode fixups.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* Derive per-chip timings from the CFI query; all times are the
	 * "typical" value, with a fallback default when unreported.  The
	 * *_max values use typical << max (both fields are log2 factors). */
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			/* No information; use a conservative default (us). */
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
/* Command sets 0003 and 0200 are handled identically to 0001. */
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
0599
/*
 * Second-stage setup: compute the total size, build the per-region erase
 * geometry (replicated across interleaved chips), install optional OTP
 * handlers, and run the hardware-partition fixup.  On any failure all
 * partially-built state (lockmaps, eraseregions, mtd, cmdset_priv) is
 * freed and NULL is returned.
 */
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kcalloc(mtd->numeraseregions,
				    sizeof(struct mtd_erase_region_info),
				    GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo packs size (upper 16 bits, in 256-byte
		 * units) and block count minus one (lower 16 bits). */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize reports the largest region's block size. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		/* Replicate this region's geometry for every chip. */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			/* One lock bit per block, rounded up. */
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
				goto setup_err;
		}
		offset += (ersize * ernum);
	}

	/* The regions must exactly tile one interleaved chip set. */
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i,(unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	/* Free every lockmap allocated so far, then the region array. */
	if (mtd->eraseregions)
		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
			for (j=0; j<cfi->numchips; j++)
				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}
0679
/*
 * When the chip supports simultaneous operations (extended query >= 1.3,
 * FeatureSupport bit 9), re-model each physical chip as several virtual
 * "chips", one per hardware partition, sharing a flchip_shared lock.  This
 * lets one partition erase while another is read/written.  On success the
 * old cfi_private is replaced (and freed) via *pcfi and map->fldrv_priv.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		int offs = 0;
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int numregions, numparts, partshift, numvirtchips, i, j;

		/* Walk extp->extra[] with the same layout rules used in
		 * read_pri_intelext(): protection register info first. */
		if (extp->NumProtectionFields)
			offs = (extp->NumProtectionFields - 1) *
			       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info: counted list plus 2 fixed bytes. */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions. */
		numregions = extp->extra[offs];
		offs += 1;

		/* Version 1.4 adds a 2-byte partition-region size field. */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions. */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info (1.4): adjust the write unit. */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			/* Larger-than-byte write unit: no bit-level writes. */
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * Each virtual chip covers chipsize / numparts; the shift is
		 * derived from __ffs(numparts) so a partition must still be
		 * at least one erase block in size.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
				 GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		/* One shared-state record per physical chip. */
		shared = kmalloc_array(cfi->numchips,
				       sizeof(struct flchip_shared),
				       GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				/* Clone the physical chip, offset into the
				 * partition, and link the shared state. */
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
0806
0807
0808
0809
/*
 * chip_ready: bring a (virtual) chip into a state where the operation
 * @mode can start at @adr.  Called with chip->mutex held.  Returns 0 on
 * success, -EAGAIN when the caller must retry (the mutex was dropped and
 * re-taken), or a negative error.  May suspend an in-progress erase when
 * the chip advertises erase-suspend support.
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		fallthrough;
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only suspend the erase if the chip supports it and the
		 * requested mode is one that may run during erase suspend. */
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Do not allow suspend if read/write target the block
		 * currently being erased. */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Do not suspend small (32 KiB) erase blocks on the Micron
		 * M28F00AP30 — NOTE(review): looks like a chip-errata
		 * workaround; confirm against the part's errata sheet. */
		if (cfi_is_micron_28F00AP30(cfi, chip) &&
		    (chip->in_progress_block_mask == ~(0x8000-1)))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, chip->in_progress_block_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.
				 * Make sure we're in 'read status' mode if it had finished. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state
			   FL_ERASE_SUSPENDING, so we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore. */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		/* Wait on the chip's queue until someone wakes us, then
		 * tell the caller to re-evaluate. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
0926
/*
 * get_chip: acquire the right to perform @mode on @chip.  For partitioned
 * (shared) chips this arbitrates between virtual chips sharing one die:
 * only one partition may be writing/erasing at a time, so a contending
 * partition may have to be suspended first.  Called with chip->mutex held;
 * may drop and re-take it.  Returns 0 on success or a negative error.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
1022
/*
 * put_chip: release the chip after an operation, resuming any suspended
 * erase and handing shared-chip ownership back to the partition it was
 * "borrowed" from.  Called with chip->mutex held; wakes waiters.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done.
			 * Hand it back to a possibly suspended eraser. */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership to the loaner */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in FL_READY still, which is wrong.

		   Resume the erase (0xd0) and re-enter read-status mode
		   (0x70) so the polling loop sees status data. */
		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
1093
1094 #ifdef CONFIG_MTD_XIP
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
/* Prepare to issue flash commands while executing from flash (XIP):
 * a dummy read flushes the last prefetch, then interrupts are disabled
 * so no code is fetched from the flash while it is in command mode. */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}
1114
/* Counterpart of xip_disable(): return the flash to array-read mode
 * (0xff) if needed, refill the instruction prefetch, and re-enable
 * interrupts so XIP execution can resume. */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
/*
 * Poll for completion of the current flash operation while keeping the
 * system usable under XIP: if an interrupt becomes pending and the chip
 * advertises suspend support (FeatureSupport bit 1 for erase, bit 2 for
 * program), the operation is suspended, normal execution resumes, and
 * the operation is restarted afterwards.
 *
 * Returns 0 on completion, -EIO if a suspend request never completed,
 * -ETIME if the overall timeout (chip_op_time_max, or 500ms default)
 * expired.  Caller holds chip->mutex.
 */
static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;	/* fallback budget: 500 ms */
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * An interrupt is pending and the chip can suspend
			 * this operation.  Issue Suspend (0xb0) + Read Status
			 * (0x70) and wait for the ready bit (SR.7).
			 * Deduct time already spent from the budget.
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip refused to become ready
					 * within 100 ms of the suspend request
					 * — give up on it.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* If the suspend bit (SR.6 erase / SR.2 program) is
			 * clear, the operation actually completed — done. */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			/* Back to array mode so code can run from flash. */
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * Someone else may have grabbed the chip while it was
			 * unlocked (e.g. a read); wait until it is back in the
			 * state we left it in before resuming the operation.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}

			local_irq_disable();

			/* Resume (0xd0) the suspended operation and re-enter
			 * status-read mode; restart the poll clock. */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Budget exceeds a jiffy: let the CPU idle until the
			 * next timer tick instead of burning cycles polling.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
1251
1252
1253
1254
1255
1256
1257
1258
/*
 * XIP build: cache invalidation happens up-front (before IRQs are masked),
 * and waiting is handled by the suspend-aware xip_wait_for_operation(),
 * which ignores the invalidation range and nominal-time arguments.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1264
1265 #else
1266
/*
 * Non-XIP build: the xip_* hooks compile away to nothing, and the
 * combined invalidate+wait is the sleeping implementation below.
 */
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1271
/*
 * Non-XIP wait for completion of a flash operation, invalidating any
 * cached copy of the affected range first.  Polls the status register
 * for the ready bit (SR.7), sleeping between polls, and honours
 * suspend/resume by restarting the timeout when the operation was
 * resumed behind our back.
 *
 * Returns 0 when ready, -ETIME when the chip_op_time_max budget (or the
 * 500 ms default) runs out.  Caller holds chip->mutex; it is dropped
 * around the cache flush and the sleeps.
 */
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	/* Flush stale cached data while not holding the chip mutex. */
	mutex_unlock(&chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;		/* fallback budget: 500 ms */
	reset_timeo = timeo;
	/* First sleep is half the nominal operation time; see below. */
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended our operation: sleep until it
			 * is resumed and the chip is back in our state. */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* A suspend/resume cycle stalls the operation — give it the
		 * full budget again rather than timing out spuriously. */
		if (chip->erase_suspended && chip_state == FL_ERASING) {
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING) {
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}
		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK, wait a bit longer — without holding the mutex. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Sleep half the expected time first, then poll with
			 * one-jiffy sleeps: statistically this minimises both
			 * latency and wasted wakeups for typical operations.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}
1349
1350 #endif
1351
/*
 * Wait for chip readiness with a timeout but no cache invalidation.
 * Fix: the original definition carried a trailing semicolon, so every
 * call site expanded to a double statement — harmless in straight-line
 * code, but it breaks inside a brace-less if/else.  Callers supply
 * their own semicolon.
 */
#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1354
1355
/*
 * Place one chip into FL_POINT (direct-mapped read) mode for the given
 * range, bumping the reference count so nested point() calls pair up
 * with unpoint().  Returns 0 on success or the get_chip() error.
 */
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd_addr is aligned on the bus width. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		/* Issue Read Array (0xff) unless already readable. */
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	mutex_unlock(&chip->mutex);

	return ret;
}
1382
/*
 * MTD point() implementation: expose a directly-mappable view of the
 * flash.  Walks every chip covered by [from, from+len), switching each
 * into FL_POINT mode.  The mapping must be physically contiguous, so
 * the walk stops at the first chip whose start doesn't abut the
 * previous chip's end; *retlen reports how much really was mapped.
 */
static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret;

	if (!map->virt)
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint. */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}
1435
1436 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1437 {
1438 struct map_info *map = mtd->priv;
1439 struct cfi_private *cfi = map->fldrv_priv;
1440 unsigned long ofs;
1441 int chipnum, err = 0;
1442
1443
1444
1445
1446 chipnum = (from >> cfi->chipshift);
1447 ofs = from - (chipnum << cfi->chipshift);
1448
1449 while (len && !err) {
1450 unsigned long thislen;
1451 struct flchip *chip;
1452
1453 chip = &cfi->chips[chipnum];
1454 if (chipnum >= cfi->numchips)
1455 break;
1456
1457 if ((len + ofs -1) >> cfi->chipshift)
1458 thislen = (1<<cfi->chipshift) - ofs;
1459 else
1460 thislen = len;
1461
1462 mutex_lock(&chip->mutex);
1463 if (chip->state == FL_POINT) {
1464 chip->ref_point_counter--;
1465 if(chip->ref_point_counter == 0)
1466 chip->state = FL_READY;
1467 } else {
1468 printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1469 err = -EINVAL;
1470 }
1471
1472 put_chip(map, chip, chip->start);
1473 mutex_unlock(&chip->mutex);
1474
1475 len -= thislen;
1476 ofs = 0;
1477 chipnum++;
1478 }
1479
1480 return err;
1481 }
1482
/*
 * Read len bytes at adr from a single chip into buf: claim the chip,
 * make sure it is in array-read mode (Read Array 0xff if not), copy
 * the data, and release the chip.  Returns 0 or the get_chip() error.
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd_addr is aligned on the bus width. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}
1514
1515 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1516 {
1517 struct map_info *map = mtd->priv;
1518 struct cfi_private *cfi = map->fldrv_priv;
1519 unsigned long ofs;
1520 int chipnum;
1521 int ret = 0;
1522
1523
1524 chipnum = (from >> cfi->chipshift);
1525 ofs = from - (chipnum << cfi->chipshift);
1526
1527 while (len) {
1528 unsigned long thislen;
1529
1530 if (chipnum >= cfi->numchips)
1531 break;
1532
1533 if ((len + ofs -1) >> cfi->chipshift)
1534 thislen = (1<<cfi->chipshift) - ofs;
1535 else
1536 thislen = len;
1537
1538 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1539 if (ret)
1540 break;
1541
1542 *retlen += thislen;
1543 len -= thislen;
1544 buf += thislen;
1545
1546 ofs = 0;
1547 chipnum++;
1548 }
1549 return ret;
1550 }
1551
/*
 * Program one bus-width word at adr.  mode selects normal programming
 * (FL_WRITING: 0x40, or 0x41 on Intel Performance parts) or OTP
 * protection-register programming (FL_OTP_WRITE: 0xc0).
 *
 * Returns 0 on success; -EINVAL for an unknown mode or a locked/invalid
 * status, -EROFS if the block is locked, -EIO on VPP error, or a
 * timeout error from the wait helper.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	/* Program command followed by the data word. */
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* Check status: SR.1 (lock), SR.3 (VPP) or SR.4 (program) error. */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits (0x50) and re-enter status mode. */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
1625
1626
/*
 * MTD write() implementation using single-word programming: handles a
 * misaligned head (padded with 0xff so untouched bytes stay erased),
 * full aligned words, and a partial tail, crossing chip boundaries as
 * needed.  Returns 0 or the first do_write_oneword() error.
 */
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write(s). */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		/* Pad with all-ones: programming a 1 bit leaves flash cells
		 * unchanged, so neighbours are preserved. */
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Middle section: whole bus-width words. */
	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Trailing partial word, again padded with 0xff. */
	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
1704
1705
/*
 * Program up to one write-buffer's worth of data gathered from a kvec
 * array.  Advances *pvec/*pvec_seek past the consumed bytes.  len must
 * not make the write cross a write-buffer boundary (caller's job).
 *
 * Returns 0 on success; error codes mirror do_write_oneword(), plus
 * -ETIME if the chip never accepted the Write-to-Buffer command.
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Sharp LH28F640BF chips take the buffer-program command at the
	 * start address itself, not the aligned buffer address. */
	if (is_LH28F640BF(cfi))
		cmd_adr = adr;

	/* Write-to-Buffer is 0xe8, or 0xe9 on Intel Performance parts. */
	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* The chip refuses Write-to-Buffer while SR.4/SR.5 are set, so
	 * read the status register and clear any stale error bits first. */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	/* The status read after the command doubles as the XSR "buffer
	 * available" handshake. */
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer. */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
				map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write, accounting for a
	 * possible unaligned start (the gap is padded with 0xff). */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
	if (!word_gap) {
		words--;	/* word count register is N-1 */
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );

	/* Write data, assembling bus-width words from the kvec stream. */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);	/* pad a short tail */

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* Confirm (0xd0) kicks off the actual programming. */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time,
				   chip->buffer_write_time_max);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* Check status: SR.1 (lock), SR.3 (VPP) or SR.4 (program) error. */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits and re-enter status mode. */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, cmd_adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
1871
/*
 * MTD writev() implementation: write a gather list of buffers using the
 * chip's write buffer, carving the request so that no single
 * do_write_buffer() call crosses a write-buffer or chip boundary.
 */
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice: writev() can be called from non-preemptible
		 * contexts for long stretches, so yield between buffers. */
		cond_resched();

	} while (len);

	return 0;
}
1923
1924 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1925 size_t len, size_t *retlen, const u_char *buf)
1926 {
1927 struct kvec vec;
1928
1929 vec.iov_base = (void *) buf;
1930 vec.iov_len = len;
1931
1932 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1933 }
1934
/*
 * Erase one block at adr (len = block size).  Retries up to three times
 * on an erase error (SR.5) before giving up.  Returns 0 on success,
 * -EROFS for a locked block, -EIO for VPP or persistent erase failure,
 * -EINVAL for a bad command sequence, or a timeout error.
 */
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Block Erase (0x20) + Confirm (0xd0). */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	/* Remember the block so put_chip() can resume a suspended erase. */
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(len - 1);

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* Check status: SR.1 (lock), SR.3 (VPP), SR.4/SR.5 (erase) errors. */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			DISABLE_VPP(map);
			put_chip(map, chip, adr);
			mutex_unlock(&chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
2024
/*
 * MTD erase() implementation: walk the (possibly variable-size) erase
 * regions covered by the request, erasing one block at a time.
 */
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
				instr->len, NULL);
}
2030
/*
 * MTD sync() implementation: wait for every chip to finish its current
 * operation by claiming them all in FL_SYNCING state, then release them
 * again.  Chips successfully claimed are released in reverse order;
 * a chip that failed get_chip() is simply skipped.
 */
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
2071
/*
 * Query the lock status of one block via the Read Identifier (0x90)
 * command; the per-block lock byte sits at offset 2 (scaled by the
 * interleave/device-type factor).  Returns the raw status byte.
 */
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
					      struct flchip *chip,
					      unsigned long adr,
					      int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}
2088
#ifdef DEBUG_LOCK_BITS
/* Debug helper: print the lock status byte of one block. */
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif
2100
/* Thunk values selecting the action inside do_xxlock_oneblock(). */
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
2103
/*
 * Lock or unlock one block, selected by thunk (DO_XXLOCK_ONEBLOCK_LOCK
 * or DO_XXLOCK_ONEBLOCK_UNLOCK).  Command sequence is Block Lock Setup
 * (0x60) followed by 0x01 (lock) or 0xd0 (unlock).
 */
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int mdelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * On chips with "instant individual block locking" (FeatureSupport
	 * bit 5) the operation completes immediately and no wait is needed;
	 * otherwise allow a generous 1.5 s for the lock bits to update.
	 */
	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
2163
2164 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2165 {
2166 int ret;
2167
2168 #ifdef DEBUG_LOCK_BITS
2169 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2170 __func__, ofs, len);
2171 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2172 ofs, len, NULL);
2173 #endif
2174
2175 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2176 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2177
2178 #ifdef DEBUG_LOCK_BITS
2179 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2180 __func__, ret);
2181 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2182 ofs, len, NULL);
2183 #endif
2184
2185 return ret;
2186 }
2187
2188 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2189 {
2190 int ret;
2191
2192 #ifdef DEBUG_LOCK_BITS
2193 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2194 __func__, ofs, len);
2195 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2196 ofs, len, NULL);
2197 #endif
2198
2199 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2200 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2201
2202 #ifdef DEBUG_LOCK_BITS
2203 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2204 __func__, ret);
2205 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2206 ofs, len, NULL);
2207 #endif
2208
2209 return ret;
2210 }
2211
2212 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2213 uint64_t len)
2214 {
2215 return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2216 ofs, len, NULL) ? 1 : 0;
2217 }
2218
2219 #ifdef CONFIG_MTD_OTP
2220
/* Callback signature used by cfi_intelext_otp_walk() to act on one
 * protection-register group (read, write, or lock). */
typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);
2224
/*
 * Read from a protection register: switch the chip into Read Identifier
 * (0x90) mode and copy the register contents.  The cached range is
 * invalidated before and after because the same addresses alias normal
 * array data outside query mode.
 */
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}
2257
/*
 * Write into a protection register one bus-word at a time, using the
 * OTP programming command (FL_OTP_WRITE → 0xc0) and padding partial
 * words with 0xff so neighbouring OTP bits are left untouched.
 */
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}
2282
/*
 * Lock a protection-register group by clearing its bit in the lock
 * word.  Locking is only meaningful for a whole group, so the request
 * must cover exactly one group (-EXDEV otherwise).
 */
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
2298
2299 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2300 size_t *retlen, u_char *buf,
2301 otp_op_t action, int user_regs)
2302 {
2303 struct map_info *map = mtd->priv;
2304 struct cfi_private *cfi = map->fldrv_priv;
2305 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2306 struct flchip *chip;
2307 struct cfi_intelext_otpinfo *otp;
2308 u_long devsize, reg_prot_offset, data_offset;
2309 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2310 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2311 int ret;
2312
2313 *retlen = 0;
2314
2315
2316 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2317 return -ENODATA;
2318
2319
2320 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2321 chip_step = devsize >> cfi->chipshift;
2322 chip_num = 0;
2323
2324
2325
2326 if (cfi->mfr == CFI_MFR_INTEL) {
2327 switch (cfi->id) {
2328 case 0x880b:
2329 case 0x880c:
2330 case 0x880d:
2331 chip_num = chip_step - 1;
2332 }
2333 }
2334
2335 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2336 chip = &cfi->chips[chip_num];
2337 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2338
2339
2340 field = 0;
2341 reg_prot_offset = extp->ProtRegAddr;
2342 reg_fact_groups = 1;
2343 reg_fact_size = 1 << extp->FactProtRegSize;
2344 reg_user_groups = 1;
2345 reg_user_size = 1 << extp->UserProtRegSize;
2346
2347 while (len > 0) {
2348
2349 data_offset = reg_prot_offset + 1;
2350 data_offset *= cfi->interleave * cfi->device_type;
2351 reg_prot_offset *= cfi->interleave * cfi->device_type;
2352 reg_fact_size *= cfi->interleave;
2353 reg_user_size *= cfi->interleave;
2354
2355 if (user_regs) {
2356 groups = reg_user_groups;
2357 groupsize = reg_user_size;
2358
2359 groupno = reg_fact_groups;
2360 data_offset += reg_fact_groups * reg_fact_size;
2361 } else {
2362 groups = reg_fact_groups;
2363 groupsize = reg_fact_size;
2364 groupno = 0;
2365 }
2366
2367 while (len > 0 && groups > 0) {
2368 if (!action) {
2369
2370
2371
2372
2373 struct otp_info *otpinfo;
2374 map_word lockword;
2375 len -= sizeof(struct otp_info);
2376 if (len <= 0)
2377 return -ENOSPC;
2378 ret = do_otp_read(map, chip,
2379 reg_prot_offset,
2380 (u_char *)&lockword,
2381 map_bankwidth(map),
2382 0, 0, 0);
2383 if (ret)
2384 return ret;
2385 otpinfo = (struct otp_info *)buf;
2386 otpinfo->start = from;
2387 otpinfo->length = groupsize;
2388 otpinfo->locked =
2389 !map_word_bitsset(map, lockword,
2390 CMD(1 << groupno));
2391 from += groupsize;
2392 buf += sizeof(*otpinfo);
2393 *retlen += sizeof(*otpinfo);
2394 } else if (from >= groupsize) {
2395 from -= groupsize;
2396 data_offset += groupsize;
2397 } else {
2398 int size = groupsize;
2399 data_offset += from;
2400 size -= from;
2401 from = 0;
2402 if (size > len)
2403 size = len;
2404 ret = action(map, chip, data_offset,
2405 buf, size, reg_prot_offset,
2406 groupno, groupsize);
2407 if (ret < 0)
2408 return ret;
2409 buf += size;
2410 len -= size;
2411 *retlen += size;
2412 data_offset += size;
2413 }
2414 groupno++;
2415 groups--;
2416 }
2417
2418
2419 if (++field == extp->NumProtectionFields)
2420 break;
2421 reg_prot_offset = otp->ProtRegAddr;
2422 reg_fact_groups = otp->FactGroups;
2423 reg_fact_size = 1 << otp->FactProtRegSize;
2424 reg_user_groups = otp->UserGroups;
2425 reg_user_size = 1 << otp->UserProtRegSize;
2426 otp++;
2427 }
2428 }
2429
2430 return 0;
2431 }
2432
2433 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2434 size_t len, size_t *retlen,
2435 u_char *buf)
2436 {
2437 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2438 buf, do_otp_read, 0);
2439 }
2440
2441 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2442 size_t len, size_t *retlen,
2443 u_char *buf)
2444 {
2445 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2446 buf, do_otp_read, 1);
2447 }
2448
2449 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2450 size_t len, size_t *retlen,
2451 const u_char *buf)
2452 {
2453 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2454 (u_char *)buf, do_otp_write, 1);
2455 }
2456
2457 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2458 loff_t from, size_t len)
2459 {
2460 size_t retlen;
2461 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2462 NULL, do_otp_lock, 1);
2463 }
2464
2465 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2466 size_t *retlen, struct otp_info *buf)
2467
2468 {
2469 return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2470 NULL, 0);
2471 }
2472
2473 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2474 size_t *retlen, struct otp_info *buf)
2475 {
2476 return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2477 NULL, 1);
2478 }
2479
2480 #endif
2481
2482 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2483 {
2484 struct mtd_erase_region_info *region;
2485 int block, status, i;
2486 unsigned long adr;
2487 size_t len;
2488
2489 for (i = 0; i < mtd->numeraseregions; i++) {
2490 region = &mtd->eraseregions[i];
2491 if (!region->lockmap)
2492 continue;
2493
2494 for (block = 0; block < region->numblocks; block++){
2495 len = region->erasesize;
2496 adr = region->offset + block * len;
2497
2498 status = cfi_varsize_frob(mtd,
2499 do_getlockstatus_oneblock, adr, len, NULL);
2500 if (status)
2501 set_bit(block, region->lockmap);
2502 else
2503 clear_bit(block, region->lockmap);
2504 }
2505 }
2506 }
2507
/*
 * Prepare all chips for a power-management suspend.
 *
 * Returns 0 on success, or -EAGAIN if any chip has an operation
 * pending/active; on failure every chip already moved to
 * FL_PM_SUSPENDED is wound back to its previous state.
 */
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	/* Parts that power up locked (feature bit 5 = instant block
	 * locking) lose their lock state; snapshot it for resume. */
	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* Force the chip into read-array mode before
				 * suspending. */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do
				 * anything with the chip now anyway. */
			} else {
				/* An operation was interrupted earlier
				 * (oldstate != FL_READY); refuse to suspend
				 * while it is pending. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Chip is actively erasing/writing (or similar);
			 * we cannot suspend mid-operation. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			break;
		case FL_PM_SUSPENDED:
			/* Already suspended - nothing to do. */
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unwind: if any chip refused, release the ones we already
	 * suspended so the device stays usable. */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force the chip back into a known
				 * state here: we are returning failure, so it
				 * never lost power. */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
2582
2583 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2584 {
2585 struct mtd_erase_region_info *region;
2586 int block, i;
2587 unsigned long adr;
2588 size_t len;
2589
2590 for (i = 0; i < mtd->numeraseregions; i++) {
2591 region = &mtd->eraseregions[i];
2592 if (!region->lockmap)
2593 continue;
2594
2595 for_each_clear_bit(block, region->lockmap, region->numblocks) {
2596 len = region->erasesize;
2597 adr = region->offset + block * len;
2598 cfi_intelext_unlock(mtd, adr, len);
2599 }
2600 }
2601 }
2602
/*
 * Bring all chips back to life after a power-management resume:
 * re-apply chip fixups, force read-array mode, and wake any waiters.
 * Saved block-lock state is restored afterwards if the part powers
 * up locked.
 */
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Only chips we actually suspended need attention. */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Re-apply the LH28F640BF block-lock fixup: the chip
			 * may have been power-cycled and lost it. */
			fixup_LH28F640BF(mtd);
			/* 0xFF = read-array command; back to a known state. */
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	/* Same condition as in suspend: feature bit 5 = instant
	 * individual block locking on a power-up-lock part. */
	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
2633
2634 static int cfi_intelext_reset(struct mtd_info *mtd)
2635 {
2636 struct map_info *map = mtd->priv;
2637 struct cfi_private *cfi = map->fldrv_priv;
2638 int i, ret;
2639
2640 for (i=0; i < cfi->numchips; i++) {
2641 struct flchip *chip = &cfi->chips[i];
2642
2643
2644
2645
2646 mutex_lock(&chip->mutex);
2647 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2648 if (!ret) {
2649 map_write(map, CMD(0xff), chip->start);
2650 chip->state = FL_SHUTDOWN;
2651 put_chip(map, chip, chip->start);
2652 }
2653 mutex_unlock(&chip->mutex);
2654 }
2655
2656 return 0;
2657 }
2658
2659 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2660 void *v)
2661 {
2662 struct mtd_info *mtd;
2663
2664 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2665 cfi_intelext_reset(mtd);
2666 return NOTIFY_DONE;
2667 }
2668
2669 static void cfi_intelext_destroy(struct mtd_info *mtd)
2670 {
2671 struct map_info *map = mtd->priv;
2672 struct cfi_private *cfi = map->fldrv_priv;
2673 struct mtd_erase_region_info *region;
2674 int i;
2675 cfi_intelext_reset(mtd);
2676 unregister_reboot_notifier(&mtd->reboot_notifier);
2677 kfree(cfi->cmdset_priv);
2678 kfree(cfi->cfiq);
2679 kfree(cfi->chips[0].priv);
2680 kfree(cfi);
2681 for (i = 0; i < mtd->numeraseregions; i++) {
2682 region = &mtd->eraseregions[i];
2683 kfree(region->lockmap);
2684 }
2685 kfree(mtd->eraseregions);
2686 }
2687
/* Module metadata; the aliases cover the JEDEC-style 0003/0200 command
 * set IDs handled by this driver. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");