/*
 * Common Flash Interface support:
 *   Generic utility functions not dependent on command set
 *
 * Copyright (C) 2002 Red Hat
 * Copyright (C) 2003 STMicroelectronics Limited
 *
 * This code is covered by the GPL.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>

void cfi_udelay(int us)
{
    if (us >= 1000) {
        msleep(DIV_ROUND_UP(us, 1000));
    } else {
        udelay(us);
        cond_resched();
    }
}
EXPORT_SYMBOL(cfi_udelay);

/*
 * Returns the command address according to the given geometry.
 */
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
                struct map_info *map, struct cfi_private *cfi)
{
    unsigned bankwidth = map_bankwidth(map);
    unsigned interleave = cfi_interleave(cfi);
    unsigned type = cfi->device_type;
    uint32_t addr;

    addr = (cmd_ofs * type) * interleave;

    /* Modify the unlock address if we are in compatibility mode.
     * For 16-bit devices on 8-bit busses
     * and 32-bit devices on 16-bit busses,
     * set the low bit of the alternating bit sequence of the address.
     */
    if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
        addr |= (type >> 1) * interleave;

    return addr;
}
EXPORT_SYMBOL(cfi_build_cmd_addr);
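
/*
 * Illustrative sketch, not part of the original file: a worked example of
 * cfi_build_cmd_addr() for a x16 device (device_type == 2) operated in x8
 * compatibility mode on an 8-bit bus (bankwidth == 1, interleave == 1):
 *
 *   cmd_ofs = 0x555:  addr = 0x555 * 2 * 1 = 0xAAA
 *                     (0x555 & 0xff) != 0xaa  ->  addr stays 0xAAA
 *   cmd_ofs = 0x2AA:  addr = 0x2AA * 2 * 1 = 0x554
 *                     (0x2AA & 0xff) == 0xaa  ->  addr |= (2 >> 1) * 1 = 0x555
 *
 * i.e. the familiar 0xAAA/0x555 unlock addresses for a x16 part in x8 mode.
 */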

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
    map_word val = { {0} };
    int wordwidth, words_per_bus, chip_mode, chips_per_word;
    unsigned long onecmd;
    int i;

    /* We do it this way to give the compiler a fighting chance
       of optimising away all the crap for 'bankwidth' larger than
       an unsigned long, in the common case where that support is
       disabled */
    if (map_bankwidth_is_large(map)) {
        wordwidth = sizeof(unsigned long);
        words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
    } else {
        wordwidth = map_bankwidth(map);
        words_per_bus = 1;
    }

    chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
    chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

    /* First, determine what the bit-pattern should be for a single
       device, according to chip mode and endianness... */
    switch (chip_mode) {
    default: BUG();
    case 1:
        onecmd = cmd;
        break;
    case 2:
        onecmd = cpu_to_cfi16(map, cmd);
        break;
    case 4:
        onecmd = cpu_to_cfi32(map, cmd);
        break;
    }

    /* Now replicate it across the size of an unsigned long, or
       just to the bus width as appropriate */
    switch (chips_per_word) {
    default: BUG();
#if BITS_PER_LONG >= 64
    case 8:
        onecmd |= (onecmd << (chip_mode * 32));
        fallthrough;
#endif
    case 4:
        onecmd |= (onecmd << (chip_mode * 16));
        fallthrough;
    case 2:
        onecmd |= (onecmd << (chip_mode * 8));
        fallthrough;
    case 1:
        ;
    }

    /* And finally, for the multi-word case, replicate it
       in all words in the structure */
    for (i = 0; i < words_per_bus; i++) {
        val.x[i] = onecmd;
    }

    return val;
}
EXPORT_SYMBOL(cfi_build_cmd);
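
/*
 * Illustrative sketch, not part of the original file: for two x16 chips
 * interleaved on a 32-bit bus (bankwidth == 4, interleave == 2), a single
 * command byte gets replicated so that both chips see it:
 *
 *   chip_mode      = 4 / 2 = 2
 *   chips_per_word = 4 * 2 / 4 = 2
 *   cmd 0x98  ->  onecmd = cpu_to_cfi16(map, 0x98)
 *             ->  onecmd |= onecmd << (chip_mode * 8)
 *             ->  val.x[0] = 0x00980098   (assuming no byte-swapping)
 */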

unsigned long cfi_merge_status(map_word val, struct map_info *map,
                       struct cfi_private *cfi)
{
    int wordwidth, words_per_bus, chip_mode, chips_per_word;
    unsigned long onestat, res = 0;
    int i;

    /* We do it this way to give the compiler a fighting chance
       of optimising away all the crap for 'bankwidth' larger than
       an unsigned long, in the common case where that support is
       disabled */
    if (map_bankwidth_is_large(map)) {
        wordwidth = sizeof(unsigned long);
        words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
    } else {
        wordwidth = map_bankwidth(map);
        words_per_bus = 1;
    }

    chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
    chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

    onestat = val.x[0];
    /* Or all status words together */
    for (i = 1; i < words_per_bus; i++) {
        onestat |= val.x[i];
    }

    res = onestat;
    switch (chips_per_word) {
    default: BUG();
#if BITS_PER_LONG >= 64
    case 8:
        res |= (onestat >> (chip_mode * 32));
        fallthrough;
#endif
    case 4:
        res |= (onestat >> (chip_mode * 16));
        fallthrough;
    case 2:
        res |= (onestat >> (chip_mode * 8));
        fallthrough;
    case 1:
        ;
    }

    /* Last, determine what the bit-pattern should be for a single
       device, according to chip mode and endianness... */
    switch (chip_mode) {
    case 1:
        break;
    case 2:
        res = cfi16_to_cpu(map, res);
        break;
    case 4:
        res = cfi32_to_cpu(map, res);
        break;
    default: BUG();
    }
    return res;
}
EXPORT_SYMBOL(cfi_merge_status);
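
/*
 * Illustrative sketch, not part of the original file: the inverse of the
 * cfi_build_cmd() example above.  For the same 2-way interleave of x16
 * chips on a 32-bit bus, a status read of 0x00800080 (DQ7 reported by
 * both chips) folds down as
 *
 *   onestat = 0x00800080
 *   res     = onestat | (onestat >> (chip_mode * 8))  = 0x00800080
 *   res     = cfi16_to_cpu(map, res)                  = 0x0080
 *
 * (assuming no byte-swapping), and 0x00800000, where only one chip reports
 * DQ7, still merges to a value with bit 0x80 set, because the per-chip
 * status words are OR-ed together.
 */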

/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value that was at the
 * command address before the command was written.
 */
uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
                struct map_info *map, struct cfi_private *cfi,
                int type, map_word *prev_val)
{
    map_word val;
    uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
    val = cfi_build_cmd(cmd, map, cfi);

    if (prev_val)
        *prev_val = map_read(map, addr);

    map_write(map, val, addr);

    return addr - base;
}
EXPORT_SYMBOL(cfi_send_gen_cmd);
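
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * issues a JEDEC-style unlock/command sequence with cfi_send_gen_cmd() and
 * then writes the data word itself.  The 0x555/0x2AA unlock addresses are
 * assumed here; real drivers take them from cfi->addr_unlock1/addr_unlock2
 * and poll the chip for completion afterwards.
 */
static void __maybe_unused cfi_example_program_word(struct map_info *map,
        struct cfi_private *cfi, uint32_t base,
        unsigned long adr, map_word datum)
{
    cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0xA0, 0x555, base, map, cfi, cfi->device_type, NULL);
    map_write(map, datum, adr);
    /* status polling is omitted in this sketch */
}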

int __xipram cfi_qry_present(struct map_info *map, __u32 base,
                 struct cfi_private *cfi)
{
    int osf = cfi->interleave * cfi->device_type;   /* scale factor */
    map_word val[3];
    map_word qry[3];

    qry[0] = cfi_build_cmd('Q', map, cfi);
    qry[1] = cfi_build_cmd('R', map, cfi);
    qry[2] = cfi_build_cmd('Y', map, cfi);

    val[0] = map_read(map, base + osf*0x10);
    val[1] = map_read(map, base + osf*0x11);
    val[2] = map_read(map, base + osf*0x12);

    if (!map_word_equal(map, qry[0], val[0]))
        return 0;

    if (!map_word_equal(map, qry[1], val[1]))
        return 0;

    if (!map_word_equal(map, qry[2], val[2]))
        return 0;

    return 1;   /* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);

int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
                 struct cfi_private *cfi)
{
    cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
    if (cfi_qry_present(map, base, cfi))
        return 1;
    /* QRY not found; probably we are dealing with some odd CFI chips */
    /* Some revisions of some old Intel chips? */
    cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
    if (cfi_qry_present(map, base, cfi))
        return 1;
    /* ST M29DW chips */
    cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
    if (cfi_qry_present(map, base, cfi))
        return 1;
    /* some old SST chips, e.g. 39VF160x/39VF320x */
    cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
    if (cfi_qry_present(map, base, cfi))
        return 1;
    /* SST 39VF640xB */
    cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
    if (cfi_qry_present(map, base, cfi))
        return 1;
    /* QRY not found */
    return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);

void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
                   struct cfi_private *cfi)
{
    cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
    cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
    /* M29W128G flashes require an additional reset command
       when exiting QRY mode */
    if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
        cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);

struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char *name)
{
    struct cfi_private *cfi = map->fldrv_priv;
    __u32 base = 0; // cfi->chips[0].start;
    int ofs_factor = cfi->interleave * cfi->device_type;
    int i;
    struct cfi_extquery *extp = NULL;

    if (!adr)
        goto out;

    printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);

    extp = kmalloc(size, GFP_KERNEL);
    if (!extp)
        goto out;

#ifdef CONFIG_MTD_XIP
    local_irq_disable();
#endif

    /* Switch it into Query Mode */
    cfi_qry_mode_on(base, map, cfi);
    /* Read in the Extended Query Table */
    for (i = 0; i < size; i++) {
        ((unsigned char *)extp)[i] =
            cfi_read_query(map, base + ((adr + i) * ofs_factor));
    }

    /* Make sure it returns to read mode */
    cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
    (void) map_read(map, base);
    xip_iprefetch();
    local_irq_enable();
#endif

 out:   return extp;
}

EXPORT_SYMBOL(cfi_read_pri);
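
/*
 * Illustrative sketch, not part of the original file: a command-set driver
 * typically fetches its primary vendor extension table through the P_ADR
 * field of the parsed CFI ident data, roughly:
 *
 *     struct cfi_pri_amdstd *extp;
 *
 *     extp = (struct cfi_pri_amdstd *)cfi_read_pri(map, cfi->cfiq->P_ADR,
 *                                                  sizeof(*extp),
 *                                                  "Amd/Fujitsu");
 *     if (!extp)
 *             goto err;
 *
 * The table is kmalloc()ed here, so the caller must kfree() it when done.
 */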

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
    struct map_info *map = mtd->priv;
    struct cfi_private *cfi = map->fldrv_priv;
    struct cfi_fixup *f;

    for (f = fixups; f->fixup; f++) {
        if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
            ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
            f->fixup(mtd);
        }
    }
}

EXPORT_SYMBOL(cfi_fixup);
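
/*
 * Illustrative sketch, not part of the original file: a chip driver builds
 * a fixup table keyed by manufacturer/device ID, terminated by an entry
 * with a NULL fixup, and runs it once the chip has been identified.  The
 * handler and table names below are hypothetical.
 */
static void __maybe_unused fixup_example_quirk(struct mtd_info *mtd)
{
    /* e.g. adjust the parsed CFI data or mtd parameters for a quirky part */
}

static struct cfi_fixup example_fixup_table[] __maybe_unused = {
    { CFI_MFR_ANY, CFI_ID_ANY, fixup_example_quirk },
    { 0, 0, NULL }
};

/* ... and later:  cfi_fixup(mtd, example_fixup_table); */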

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
                     loff_t ofs, size_t len, void *thunk)
{
    struct map_info *map = mtd->priv;
    struct cfi_private *cfi = map->fldrv_priv;
    unsigned long adr;
    int chipnum, ret = 0;
    int i, first;
    struct mtd_erase_region_info *regions = mtd->eraseregions;

    /* Check that both start and end of the requested erase are
     * aligned with the erasesize at the appropriate addresses.
     */

    i = 0;

    /* Skip all erase regions which end before the start of the
       requested erase. Actually, to save on the calculations, we
       skip to the first erase region which starts after the start
       of the requested erase, and then go back one.
    */

    while (i < mtd->numeraseregions && ofs >= regions[i].offset)
           i++;
    i--;

    /* OK, now i is pointing at the erase region in which this
       erase request starts. Check that the start of the requested
       erase range is aligned with the erase size which is in
       effect here.
    */

    if (ofs & (regions[i].erasesize-1))
        return -EINVAL;

    /* Remember the erase region we start on */
    first = i;

    /* Next, check that the end of the requested erase is aligned
     * with the erase region at that address.
     */

    while (i < mtd->numeraseregions && (ofs + len) >= regions[i].offset)
        i++;

    /* As before, drop back one to point at the region in which
       the address actually falls
    */
    i--;

    if ((ofs + len) & (regions[i].erasesize-1))
        return -EINVAL;

    chipnum = ofs >> cfi->chipshift;
    adr = ofs - (chipnum << cfi->chipshift);

    i = first;

    while (len) {
        int size = regions[i].erasesize;

        ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

        if (ret)
            return ret;

        adr += size;
        ofs += size;
        len -= size;

        if (ofs == regions[i].offset + size * regions[i].numblocks)
            i++;

        if (adr >> cfi->chipshift) {
            adr = 0;
            chipnum++;

            if (chipnum >= cfi->numchips)
                break;
        }
    }

    return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);
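
/*
 * Illustrative sketch, not part of the original file: command-set drivers
 * wrap cfi_varsize_frob() with a per-block callback matching varsize_frob_t,
 * roughly (names hypothetical):
 *
 *     static int example_erase_oneblock(struct map_info *map,
 *                                       struct flchip *chip,
 *                                       unsigned long adr, int len,
 *                                       void *thunk)
 *     {
 *             ... issue and poll the erase sequence for one block at adr ...
 *     }
 *
 *     static int example_erase_varsize(struct mtd_info *mtd,
 *                                      struct erase_info *instr)
 *     {
 *             return cfi_varsize_frob(mtd, example_erase_oneblock,
 *                                     instr->addr, instr->len, NULL);
 *     }
 *
 * The callback is invoked once per erase block, with the offset already
 * translated to a (chip, address-within-chip) pair.
 */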

MODULE_LICENSE("GPL");