0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
0004  * Copyright © 2004 Micron Technology Inc.
0005  * Copyright © 2004 David Brownell
0006  */
0007 
0008 #include <linux/platform_device.h>
0009 #include <linux/dmaengine.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/delay.h>
0012 #include <linux/gpio/consumer.h>
0013 #include <linux/module.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/jiffies.h>
0016 #include <linux/sched.h>
0017 #include <linux/mtd/mtd.h>
0018 #include <linux/mtd/nand-ecc-sw-bch.h>
0019 #include <linux/mtd/rawnand.h>
0020 #include <linux/mtd/partitions.h>
0021 #include <linux/omap-dma.h>
0022 #include <linux/iopoll.h>
0023 #include <linux/slab.h>
0024 #include <linux/of.h>
0025 #include <linux/of_device.h>
0026 
0027 #include <linux/platform_data/elm.h>
0028 
0029 #include <linux/omap-gpmc.h>
0030 #include <linux/platform_data/mtd-nand-omap2.h>
0031 
0032 #define DRIVER_NAME "omap2-nand"
0033 #define OMAP_NAND_TIMEOUT_MS    5000
0034 
0035 #define NAND_Ecc_P1e        (1 << 0)
0036 #define NAND_Ecc_P2e        (1 << 1)
0037 #define NAND_Ecc_P4e        (1 << 2)
0038 #define NAND_Ecc_P8e        (1 << 3)
0039 #define NAND_Ecc_P16e       (1 << 4)
0040 #define NAND_Ecc_P32e       (1 << 5)
0041 #define NAND_Ecc_P64e       (1 << 6)
0042 #define NAND_Ecc_P128e      (1 << 7)
0043 #define NAND_Ecc_P256e      (1 << 8)
0044 #define NAND_Ecc_P512e      (1 << 9)
0045 #define NAND_Ecc_P1024e     (1 << 10)
0046 #define NAND_Ecc_P2048e     (1 << 11)
0047 
0048 #define NAND_Ecc_P1o        (1 << 16)
0049 #define NAND_Ecc_P2o        (1 << 17)
0050 #define NAND_Ecc_P4o        (1 << 18)
0051 #define NAND_Ecc_P8o        (1 << 19)
0052 #define NAND_Ecc_P16o       (1 << 20)
0053 #define NAND_Ecc_P32o       (1 << 21)
0054 #define NAND_Ecc_P64o       (1 << 22)
0055 #define NAND_Ecc_P128o      (1 << 23)
0056 #define NAND_Ecc_P256o      (1 << 24)
0057 #define NAND_Ecc_P512o      (1 << 25)
0058 #define NAND_Ecc_P1024o     (1 << 26)
0059 #define NAND_Ecc_P2048o     (1 << 27)
0060 
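/*
 * Helpers for the 1-bit Hamming ECC scheme: TF() collapses a masked parity
 * flag to 0/1, and the P*() macros below place each even/odd parity bit at
 * a fixed position. gen_true_ecc() further down uses them to rebuild the
 * three ECC bytes.
 */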
0061 #define TF(value)   (value ? 1 : 0)
0062 
0063 #define P2048e(a)   (TF(a & NAND_Ecc_P2048e)    << 0)
0064 #define P2048o(a)   (TF(a & NAND_Ecc_P2048o)    << 1)
0065 #define P1e(a)      (TF(a & NAND_Ecc_P1e)       << 2)
0066 #define P1o(a)      (TF(a & NAND_Ecc_P1o)       << 3)
0067 #define P2e(a)      (TF(a & NAND_Ecc_P2e)       << 4)
0068 #define P2o(a)      (TF(a & NAND_Ecc_P2o)       << 5)
0069 #define P4e(a)      (TF(a & NAND_Ecc_P4e)       << 6)
0070 #define P4o(a)      (TF(a & NAND_Ecc_P4o)       << 7)
0071 
0072 #define P8e(a)      (TF(a & NAND_Ecc_P8e)       << 0)
0073 #define P8o(a)      (TF(a & NAND_Ecc_P8o)       << 1)
0074 #define P16e(a)     (TF(a & NAND_Ecc_P16e)      << 2)
0075 #define P16o(a)     (TF(a & NAND_Ecc_P16o)      << 3)
0076 #define P32e(a)     (TF(a & NAND_Ecc_P32e)      << 4)
0077 #define P32o(a)     (TF(a & NAND_Ecc_P32o)      << 5)
0078 #define P64e(a)     (TF(a & NAND_Ecc_P64e)      << 6)
0079 #define P64o(a)     (TF(a & NAND_Ecc_P64o)      << 7)
0080 
0081 #define P128e(a)    (TF(a & NAND_Ecc_P128e)     << 0)
0082 #define P128o(a)    (TF(a & NAND_Ecc_P128o)     << 1)
0083 #define P256e(a)    (TF(a & NAND_Ecc_P256e)     << 2)
0084 #define P256o(a)    (TF(a & NAND_Ecc_P256o)     << 3)
0085 #define P512e(a)    (TF(a & NAND_Ecc_P512e)     << 4)
0086 #define P512o(a)    (TF(a & NAND_Ecc_P512o)     << 5)
0087 #define P1024e(a)   (TF(a & NAND_Ecc_P1024e)    << 6)
0088 #define P1024o(a)   (TF(a & NAND_Ecc_P1024o)    << 7)
0089 
0090 #define P8e_s(a)    (TF(a & NAND_Ecc_P8e)       << 0)
0091 #define P8o_s(a)    (TF(a & NAND_Ecc_P8o)       << 1)
0092 #define P16e_s(a)   (TF(a & NAND_Ecc_P16e)      << 2)
0093 #define P16o_s(a)   (TF(a & NAND_Ecc_P16o)      << 3)
0094 #define P1e_s(a)    (TF(a & NAND_Ecc_P1e)       << 4)
0095 #define P1o_s(a)    (TF(a & NAND_Ecc_P1o)       << 5)
0096 #define P2e_s(a)    (TF(a & NAND_Ecc_P2e)       << 6)
0097 #define P2o_s(a)    (TF(a & NAND_Ecc_P2o)       << 7)
0098 
0099 #define P4e_s(a)    (TF(a & NAND_Ecc_P4e)       << 0)
0100 #define P4o_s(a)    (TF(a & NAND_Ecc_P4o)       << 1)
0101 
0102 #define PREFETCH_CONFIG1_CS_SHIFT   24
0103 #define ECC_CONFIG_CS_SHIFT     1
0104 #define CS_MASK             0x7
0105 #define ENABLE_PREFETCH         (0x1 << 7)
0106 #define DMA_MPU_MODE_SHIFT      2
0107 #define ECCSIZE0_SHIFT          12
0108 #define ECCSIZE1_SHIFT          22
0109 #define ECC1RESULTSIZE          0x1
0110 #define ECCCLEAR            0x100
0111 #define ECC1                0x1
0112 #define PREFETCH_FIFOTHRESHOLD_MAX  0x40
0113 #define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
0114 #define PREFETCH_STATUS_COUNT(val)  (val & 0x00003fff)
0115 #define PREFETCH_STATUS_FIFO_CNT(val)   ((val >> 24) & 0x7F)
0116 #define STATUS_BUFF_EMPTY       0x00000001
0117 
0118 #define SECTOR_BYTES        512
0119 /* 4 bit padding to make byte aligned, 56 = 52 + 4 */
0120 #define BCH4_BIT_PAD        4
0121 
0122 /* GPMC ecc engine settings for read */
0123 #define BCH_WRAPMODE_1      1   /* BCH wrap mode 1 */
0124 #define BCH8R_ECC_SIZE0     0x1a    /* ecc_size0 = 26 */
0125 #define BCH8R_ECC_SIZE1     0x2 /* ecc_size1 = 2 */
0126 #define BCH4R_ECC_SIZE0     0xd /* ecc_size0 = 13 */
0127 #define BCH4R_ECC_SIZE1     0x3 /* ecc_size1 = 3 */
0128 
0129 /* GPMC ecc engine settings for write */
0130 #define BCH_WRAPMODE_6      6   /* BCH wrap mode 6 */
0131 #define BCH_ECC_SIZE0       0x0 /* ecc_size0 = 0, no oob protection */
0132 #define BCH_ECC_SIZE1       0x20    /* ecc_size1 = 32 */
0133 
0134 #define BBM_LEN         2
0135 
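/*
 * ECC syndromes produced by the BCH engine for an erased (all-0xFF) page.
 * omap_elm_correct_data() compares the calculated ECC against these vectors
 * to tell blank pages apart from genuinely corrupted ones.
 */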
0136 static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
0137                 0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
0138                 0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
0139                 0x07, 0x0e};
0140 static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
0141     0xac, 0x6b, 0xff, 0x99, 0x7b};
0142 static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
0143 
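/* Driver-private state for one NAND chip attached to a GPMC chip-select */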
0144 struct omap_nand_info {
0145     struct nand_chip        nand;
0146     struct platform_device      *pdev;
0147 
0148     int             gpmc_cs;
0149     bool                dev_ready;
0150     enum nand_io            xfer_type;
0151     enum omap_ecc           ecc_opt;
0152     struct device_node      *elm_of_node;
0153 
0154     unsigned long           phys_base;
0155     struct completion       comp;
0156     struct dma_chan         *dma;
0157     int             gpmc_irq_fifo;
0158     int             gpmc_irq_count;
0159     enum {
0160         OMAP_NAND_IO_READ = 0,  /* read */
0161         OMAP_NAND_IO_WRITE, /* write */
0162     } iomode;
0163     u_char              *buf;
0164     int                 buf_len;
0165     /* Interface to GPMC */
0166     void __iomem            *fifo;
0167     struct gpmc_nand_regs       reg;
0168     struct gpmc_nand_ops        *ops;
0169     bool                flash_bbt;
0170     /* fields specific for BCHx_HW ECC scheme */
0171     struct device           *elm_dev;
0172     /* NAND ready gpio */
0173     struct gpio_desc        *ready_gpiod;
0174     unsigned int            neccpg;
0175     unsigned int            nsteps_per_eccpg;
0176     unsigned int            eccpg_size;
0177     unsigned int            eccpg_bytes;
0178     void (*data_in)(struct nand_chip *chip, void *buf,
0179             unsigned int len, bool force_8bit);
0180     void (*data_out)(struct nand_chip *chip,
0181              const void *buf, unsigned int len,
0182              bool force_8bit);
0183 };
0184 
0185 static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
0186 {
0187     return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
0188 }
0189 
0190 static void omap_nand_data_in(struct nand_chip *chip, void *buf,
0191                   unsigned int len, bool force_8bit);
0192 
0193 static void omap_nand_data_out(struct nand_chip *chip,
0194                    const void *buf, unsigned int len,
0195                    bool force_8bit);
0196 
0197 /**
0198  * omap_prefetch_enable - configures and starts prefetch transfer
0199  * @cs: cs (chip select) number
0200  * @fifo_th: fifo threshold to be used for read/ write
0201  * @dma_mode: dma mode enable (1) or disable (0)
0202  * @u32_count: number of bytes to be transferred
0203  * @is_write: prefetch read(0) or write post(1) mode
0204  * @info: NAND device structure containing platform data
0205  */
0206 static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
0207     unsigned int u32_count, int is_write, struct omap_nand_info *info)
0208 {
0209     u32 val;
0210 
0211     if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
0212         return -1;
0213 
0214     if (readl(info->reg.gpmc_prefetch_control))
0215         return -EBUSY;
0216 
0217     /* Set the amount of bytes to be prefetched */
0218     writel(u32_count, info->reg.gpmc_prefetch_config2);
0219 
0220     /* Set dma/mpu mode, the prefetch read / post write and
0221      * enable the engine. Set which CS has requested it.
0222      */
0223     val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
0224         PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
0225         (dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
0226     writel(val, info->reg.gpmc_prefetch_config1);
0227 
0228     /*  Start the prefetch engine */
0229     writel(0x1, info->reg.gpmc_prefetch_control);
0230 
0231     return 0;
0232 }
0233 
0234 /*
0235  * omap_prefetch_reset - disables and stops the prefetch engine
0236  */
0237 static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
0238 {
0239     u32 config1;
0240 
0241     /* check if the same module/cs is trying to reset */
0242     config1 = readl(info->reg.gpmc_prefetch_config1);
0243     if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
0244         return -EINVAL;
0245 
0246     /* Stop the PFPW engine */
0247     writel(0x0, info->reg.gpmc_prefetch_control);
0248 
0249     /* Reset/disable the PFPW engine */
0250     writel(0x0, info->reg.gpmc_prefetch_config1);
0251 
0252     return 0;
0253 }
0254 
0255 /**
0256  * omap_nand_data_in_pref - NAND data in using prefetch engine
0257  */
0258 static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
0259                    unsigned int len, bool force_8bit)
0260 {
0261     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
0262     uint32_t r_count = 0;
0263     int ret = 0;
0264     u32 *p = (u32 *)buf;
0265     unsigned int pref_len;
0266 
0267     if (force_8bit) {
0268         omap_nand_data_in(chip, buf, len, force_8bit);
0269         return;
0270     }
0271 
0272     /* read 32-bit words using prefetch and remaining bytes normally */
0273 
0274     /* configure and start prefetch transfer */
0275     pref_len = len - (len & 3);
0276     ret = omap_prefetch_enable(info->gpmc_cs,
0277             PREFETCH_FIFOTHRESHOLD_MAX, 0x0, pref_len, 0x0, info);
0278     if (ret) {
0279         /* prefetch engine is busy, use CPU copy method */
0280         omap_nand_data_in(chip, buf, len, false);
0281     } else {
0282         do {
0283             r_count = readl(info->reg.gpmc_prefetch_status);
0284             r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
0285             r_count = r_count >> 2;
0286             ioread32_rep(info->fifo, p, r_count);
0287             p += r_count;
0288             pref_len -= r_count << 2;
0289         } while (pref_len);
0290         /* disable and stop the Prefetch engine */
0291         omap_prefetch_reset(info->gpmc_cs, info);
0292         /* fetch any remaining bytes */
0293         if (len & 3)
0294             omap_nand_data_in(chip, p, len & 3, false);
0295     }
0296 }
0297 
0298 /**
0299  * omap_nand_data_out_pref - NAND data out using Write Posting engine
0300  */
0301 static void omap_nand_data_out_pref(struct nand_chip *chip,
0302                     const void *buf, unsigned int len,
0303                     bool force_8bit)
0304 {
0305     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
0306     uint32_t w_count = 0;
0307     int i = 0, ret = 0;
0308     u16 *p = (u16 *)buf;
0309     unsigned long tim, limit;
0310     u32 val;
0311 
0312     if (force_8bit) {
0313         omap_nand_data_out(chip, buf, len, force_8bit);
0314         return;
0315     }
0316 
0317     /* take care of subpage writes */
0318     if (len % 2 != 0) {
0319         writeb(*(u8 *)buf, info->fifo);
0320         p = (u16 *)(buf + 1);
0321         len--;
0322     }
0323 
0324     /*  configure and start prefetch transfer */
0325     ret = omap_prefetch_enable(info->gpmc_cs,
0326             PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
0327     if (ret) {
0328         /* write posting engine is busy, use CPU copy method */
0329         omap_nand_data_out(chip, buf, len, false);
0330     } else {
0331         while (len) {
0332             w_count = readl(info->reg.gpmc_prefetch_status);
0333             w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
0334             w_count = w_count >> 1;
0335             for (i = 0; (i < w_count) && len; i++, len -= 2)
0336                 iowrite16(*p++, info->fifo);
0337         }
0338         /* wait for the data to be flushed out before resetting the prefetch */
0339         tim = 0;
0340         limit = (loops_per_jiffy *
0341                     msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
0342         do {
0343             cpu_relax();
0344             val = readl(info->reg.gpmc_prefetch_status);
0345             val = PREFETCH_STATUS_COUNT(val);
0346         } while (val && (tim++ < limit));
0347 
0348         /* disable and stop the PFPW engine */
0349         omap_prefetch_reset(info->gpmc_cs, info);
0350     }
0351 }
0352 
0353 /*
0354  * omap_nand_dma_callback: callback on the completion of dma transfer
0355  * @data: pointer to completion data structure
0356  */
0357 static void omap_nand_dma_callback(void *data)
0358 {
0359     complete((struct completion *) data);
0360 }
0361 
0362 /*
0363  * omap_nand_dma_transfer: configure and start dma transfer
0364  * @chip: nand chip structure
0365  * @addr: virtual address in RAM of source/destination
0366  * @len: number of data bytes to be transferred
0367  * @is_write: flag for read/write operation
0368  */
0369 static inline int omap_nand_dma_transfer(struct nand_chip *chip,
0370                      const void *addr, unsigned int len,
0371                      int is_write)
0372 {
0373     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
0374     struct dma_async_tx_descriptor *tx;
0375     enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
0376                             DMA_FROM_DEVICE;
0377     struct scatterlist sg;
0378     unsigned long tim, limit;
0379     unsigned n;
0380     int ret;
0381     u32 val;
0382 
0383     if (!virt_addr_valid(addr))
0384         goto out_copy;
0385 
0386     sg_init_one(&sg, addr, len);
0387     n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
0388     if (n == 0) {
0389         dev_err(&info->pdev->dev,
0390             "Couldn't DMA map a %d byte buffer\n", len);
0391         goto out_copy;
0392     }
0393 
0394     tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
0395         is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
0396         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
0397     if (!tx)
0398         goto out_copy_unmap;
0399 
0400     tx->callback = omap_nand_dma_callback;
0401     tx->callback_param = &info->comp;
0402     dmaengine_submit(tx);
0403 
0404     init_completion(&info->comp);
0405 
0406     /* setup and start DMA using dma_addr */
0407     dma_async_issue_pending(info->dma);
0408 
0409     /*  configure and start prefetch transfer */
0410     ret = omap_prefetch_enable(info->gpmc_cs,
0411         PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
0412     if (ret)
0413         /* PFPW engine is busy, use cpu copy method */
0414         goto out_copy_unmap;
0415 
0416     wait_for_completion(&info->comp);
0417     tim = 0;
0418     limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
0419 
0420     do {
0421         cpu_relax();
0422         val = readl(info->reg.gpmc_prefetch_status);
0423         val = PREFETCH_STATUS_COUNT(val);
0424     } while (val && (tim++ < limit));
0425 
0426     /* disable and stop the PFPW engine */
0427     omap_prefetch_reset(info->gpmc_cs, info);
0428 
0429     dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
0430     return 0;
0431 
0432 out_copy_unmap:
0433     dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
0434 out_copy:
0435     is_write == 0 ? omap_nand_data_in(chip, (void *)addr, len, false)
0436               : omap_nand_data_out(chip, addr, len, false);
0437 
0438     return 0;
0439 }
0440 
0441 /**
0442  * omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
0443  */
0444 static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
0445                        unsigned int len, bool force_8bit)
0446 {
0447     struct mtd_info *mtd = nand_to_mtd(chip);
0448 
0449     if (force_8bit) {
0450         omap_nand_data_in(chip, buf, len, force_8bit);
0451         return;
0452     }
0453 
0454     if (len <= mtd->oobsize)
0455         omap_nand_data_in_pref(chip, buf, len, false);
0456     else
0457         /* start transfer in DMA mode */
0458         omap_nand_dma_transfer(chip, buf, len, 0x0);
0459 }
0460 
0461 /**
0462  * omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
0463  */
0464 static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
0465                     const void *buf, unsigned int len,
0466                     bool force_8bit)
0467 {
0468     struct mtd_info *mtd = nand_to_mtd(chip);
0469 
0470     if (force_8bit) {
0471         omap_nand_data_out(chip, buf, len, force_8bit);
0472         return;
0473     }
0474 
0475     if (len <= mtd->oobsize)
0476         omap_nand_data_out_pref(chip, buf, len, false);
0477     else
0478         /* start transfer in DMA mode */
0479         omap_nand_dma_transfer(chip, buf, len, 0x1);
0480 }
0481 
0482 /*
0483  * omap_nand_irq - GPMC irq handler
0484  * @this_irq: gpmc irq number
0485  * @dev: omap_nand_info structure pointer is passed here
0486  */
0487 static irqreturn_t omap_nand_irq(int this_irq, void *dev)
0488 {
0489     struct omap_nand_info *info = (struct omap_nand_info *) dev;
0490     u32 bytes;
0491 
0492     bytes = readl(info->reg.gpmc_prefetch_status);
0493     bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
0494     bytes = bytes  & 0xFFFC; /* io in multiple of 4 bytes */
0495     if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
0496         if (this_irq == info->gpmc_irq_count)
0497             goto done;
0498 
0499         if (info->buf_len && (info->buf_len < bytes))
0500             bytes = info->buf_len;
0501         else if (!info->buf_len)
0502             bytes = 0;
0503         iowrite32_rep(info->fifo, (u32 *)info->buf,
0504                   bytes >> 2);
0505         info->buf = info->buf + bytes;
0506         info->buf_len -= bytes;
0507 
0508     } else {
0509         ioread32_rep(info->fifo, (u32 *)info->buf,
0510                  bytes >> 2);
0511         info->buf = info->buf + bytes;
0512 
0513         if (this_irq == info->gpmc_irq_count)
0514             goto done;
0515     }
0516 
0517     return IRQ_HANDLED;
0518 
0519 done:
0520     complete(&info->comp);
0521 
0522     disable_irq_nosync(info->gpmc_irq_fifo);
0523     disable_irq_nosync(info->gpmc_irq_count);
0524 
0525     return IRQ_HANDLED;
0526 }
0527 
0528 /*
0529  * omap_nand_data_in_irq_pref - NAND data in using Prefetch and IRQ
0530  */
0531 static void omap_nand_data_in_irq_pref(struct nand_chip *chip, void *buf,
0532                        unsigned int len, bool force_8bit)
0533 {
0534     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
0535     struct mtd_info *mtd = nand_to_mtd(&info->nand);
0536     int ret = 0;
0537 
0538     if (len <= mtd->oobsize || force_8bit) {
0539         omap_nand_data_in(chip, buf, len, force_8bit);
0540         return;
0541     }
0542 
0543     info->iomode = OMAP_NAND_IO_READ;
0544     info->buf = buf;
0545     init_completion(&info->comp);
0546 
0547     /*  configure and start prefetch transfer */
0548     ret = omap_prefetch_enable(info->gpmc_cs,
0549             PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
0550     if (ret) {
0551         /* PFPW engine is busy, use cpu copy method */
0552         omap_nand_data_in(chip, buf, len, false);
0553         return;
0554     }
0555 
0556     info->buf_len = len;
0557 
0558     enable_irq(info->gpmc_irq_count);
0559     enable_irq(info->gpmc_irq_fifo);
0560 
0561     /* waiting for read to complete */
0562     wait_for_completion(&info->comp);
0563 
0564     /* disable and stop the PFPW engine */
0565     omap_prefetch_reset(info->gpmc_cs, info);
0566     return;
0567 }
0568 
0569 /*
0570  * omap_nand_data_out_irq_pref - NAND out using write posting and IRQ
0571  */
0572 static void omap_nand_data_out_irq_pref(struct nand_chip *chip,
0573                     const void *buf, unsigned int len,
0574                     bool force_8bit)
0575 {
0576     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
0577     struct mtd_info *mtd = nand_to_mtd(&info->nand);
0578     int ret = 0;
0579     unsigned long tim, limit;
0580     u32 val;
0581 
0582     if (len <= mtd->oobsize || force_8bit) {
0583         omap_nand_data_out(chip, buf, len, force_8bit);
0584         return;
0585     }
0586 
0587     info->iomode = OMAP_NAND_IO_WRITE;
0588     info->buf = (u_char *) buf;
0589     init_completion(&info->comp);
0590 
0591     /* configure and start the write-posting transfer, fifo_th = (64 * 3) / 8 = 24 */
0592     ret = omap_prefetch_enable(info->gpmc_cs,
0593         (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
0594     if (ret) {
0595         /* PFPW engine is busy, use cpu copy method */
0596         omap_nand_data_out(chip, buf, len, false);
0597         return;
0598     }
0599 
0600     info->buf_len = len;
0601 
0602     enable_irq(info->gpmc_irq_count);
0603     enable_irq(info->gpmc_irq_fifo);
0604 
0605     /* waiting for write to complete */
0606     wait_for_completion(&info->comp);
0607 
0608     /* wait for the data to be flushed out before resetting the prefetch */
0609     tim = 0;
0610     limit = (loops_per_jiffy *  msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
0611     do {
0612         val = readl(info->reg.gpmc_prefetch_status);
0613         val = PREFETCH_STATUS_COUNT(val);
0614         cpu_relax();
0615     } while (val && (tim++ < limit));
0616 
0617     /* disable and stop the PFPW engine */
0618     omap_prefetch_reset(info->gpmc_cs, info);
0619     return;
0620 }
0621 
0622 /**
0623  * gen_true_ecc - This function will generate true ECC value
0624  * @ecc_buf: buffer to store ecc code
0625  *
0626  * This generated true ECC value can be used when correcting
0627  * data read from NAND flash memory core
0628  */
0629 static void gen_true_ecc(u8 *ecc_buf)
0630 {
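    /*
     * Reassemble the raw ECC result register layout from the three stored
     * ECC bytes; this is the inverse of the packing done in
     * omap_calculate_ecc().
     */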
0631     u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
0632         ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
0633 
0634     ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
0635             P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
0636     ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
0637             P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
0638     ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
0639             P1e(tmp) | P2048o(tmp) | P2048e(tmp));
0640 }
0641 
0642 /**
0643  * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
0644  * @ecc_data1:  ecc code from nand spare area
0645  * @ecc_data2:  ecc code from hardware register obtained from hardware ecc
0646  * @page_data:  page data
0647  *
0648  * This function compares two ECCs and indicates if there is an error.
0649  * If the error can be corrected, it will be corrected in the buffer.
0650  * If there is no error, %0 is returned. If there is an error but it
0651  * was corrected, %1 is returned. Otherwise, %-1 is returned.
0652  */
0653 static int omap_compare_ecc(u8 *ecc_data1,  /* read from NAND memory */
0654                 u8 *ecc_data2,  /* read from register */
0655                 u8 *page_data)
0656 {
0657     uint    i;
0658     u8  tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
0659     u8  comp0_bit[8], comp1_bit[8], comp2_bit[8];
0660     u8  ecc_bit[24];
0661     u8  ecc_sum = 0;
0662     u8  find_bit = 0;
0663     uint    find_byte = 0;
0664     int isEccFF;
0665 
0666     isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
0667 
0668     gen_true_ecc(ecc_data1);
0669     gen_true_ecc(ecc_data2);
0670 
0671     for (i = 0; i <= 2; i++) {
0672         *(ecc_data1 + i) = ~(*(ecc_data1 + i));
0673         *(ecc_data2 + i) = ~(*(ecc_data2 + i));
0674     }
0675 
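    /* Split the three bytes of each ECC vector into individual bits, LSB first */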
0676     for (i = 0; i < 8; i++) {
0677         tmp0_bit[i]     = *ecc_data1 % 2;
0678         *ecc_data1  = *ecc_data1 / 2;
0679     }
0680 
0681     for (i = 0; i < 8; i++) {
0682         tmp1_bit[i]  = *(ecc_data1 + 1) % 2;
0683         *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
0684     }
0685 
0686     for (i = 0; i < 8; i++) {
0687         tmp2_bit[i]  = *(ecc_data1 + 2) % 2;
0688         *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
0689     }
0690 
0691     for (i = 0; i < 8; i++) {
0692         comp0_bit[i]     = *ecc_data2 % 2;
0693         *ecc_data2       = *ecc_data2 / 2;
0694     }
0695 
0696     for (i = 0; i < 8; i++) {
0697         comp1_bit[i]     = *(ecc_data2 + 1) % 2;
0698         *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
0699     }
0700 
0701     for (i = 0; i < 8; i++) {
0702         comp2_bit[i]     = *(ecc_data2 + 2) % 2;
0703         *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
0704     }
0705 
0706     for (i = 0; i < 6; i++)
0707         ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
0708 
0709     for (i = 0; i < 8; i++)
0710         ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
0711 
0712     for (i = 0; i < 8; i++)
0713         ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
0714 
0715     ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
0716     ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
0717 
0718     for (i = 0; i < 24; i++)
0719         ecc_sum += ecc_bit[i];
0720 
0721     switch (ecc_sum) {
0722     case 0:
0723         /* Not reached because this function is not called if
0724          *  ECC values are equal
0725          */
0726         return 0;
0727 
0728     case 1:
0729         /* Uncorrectable error */
0730         pr_debug("ECC UNCORRECTED_ERROR 1\n");
0731         return -EBADMSG;
0732 
0733     case 11:
0734         /* Uncorrectable error */
0735         pr_debug("ECC UNCORRECTED_ERROR B\n");
0736         return -EBADMSG;
0737 
0738     case 12:
0739         /* Correctable error */
0740         find_byte = (ecc_bit[23] << 8) +
0741                 (ecc_bit[21] << 7) +
0742                 (ecc_bit[19] << 6) +
0743                 (ecc_bit[17] << 5) +
0744                 (ecc_bit[15] << 4) +
0745                 (ecc_bit[13] << 3) +
0746                 (ecc_bit[11] << 2) +
0747                 (ecc_bit[9]  << 1) +
0748                 ecc_bit[7];
0749 
0750         find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
0751 
0752         pr_debug("Correcting single bit ECC error at offset: "
0753                 "%d, bit: %d\n", find_byte, find_bit);
0754 
0755         page_data[find_byte] ^= (1 << find_bit);
0756 
0757         return 1;
0758     default:
0759         if (isEccFF) {
0760             if (ecc_data2[0] == 0 &&
0761                 ecc_data2[1] == 0 &&
0762                 ecc_data2[2] == 0)
0763                 return 0;
0764         }
0765         pr_debug("UNCORRECTED_ERROR default\n");
0766         return -EBADMSG;
0767     }
0768 }
0769 
0770 /**
0771  * omap_correct_data - Compares the ECC read with HW generated ECC
0772  * @chip: NAND chip object
0773  * @dat: page data
0774  * @read_ecc: ecc read from nand flash
0775  * @calc_ecc: ecc read from HW ECC registers
0776  *
0777  * Compares the ECC read from the NAND spare area with the ECC register
0778  * values and, if they mismatch, calls 'omap_compare_ecc' for error
0779  * detection and correction. If there are no errors, %0 is returned. If
0780  * there were errors and all of the errors were corrected, the number of
0781  * corrected errors is returned. If uncorrectable errors exist, %-1 is
0782  * returned.
0783  */
0784 static int omap_correct_data(struct nand_chip *chip, u_char *dat,
0785                  u_char *read_ecc, u_char *calc_ecc)
0786 {
0787     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
0788     int blockCnt = 0, i = 0, ret = 0;
0789     int stat = 0;
0790 
0791     /* Ex NAND_ECC_HW12_2048 */
0792     if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
0793         info->nand.ecc.size == 2048)
0794         blockCnt = 4;
0795     else
0796         blockCnt = 1;
0797 
0798     for (i = 0; i < blockCnt; i++) {
0799         if (memcmp(read_ecc, calc_ecc, 3) != 0) {
0800             ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
0801             if (ret < 0)
0802                 return ret;
0803             /* keep track of the number of corrected errors */
0804             stat += ret;
0805         }
0806         read_ecc += 3;
0807         calc_ecc += 3;
0808         dat      += 512;
0809     }
0810     return stat;
0811 }
0812 
0813 /**
0814  * omap_calculate_ecc - Generate non-inverted ECC bytes.
0815  * @chip: NAND chip object
0816  * @dat: The pointer to data on which ecc is computed
0817  * @ecc_code: The ecc_code buffer
0818  *
0819  * Using non-inverted ECC can be considered ugly since writing a blank
0820  * page, i.e. padding, will clear the ECC bytes. This is no problem as long
0821  * as nobody is trying to write data to the seemingly unused page. Reading
0822  * an erased page will produce an ECC mismatch between generated and read
0823  * ECC bytes that has to be dealt with separately.
0824  */
0825 static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
0826                   u_char *ecc_code)
0827 {
0828     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
0829     u32 val;
0830 
0831     val = readl(info->reg.gpmc_ecc_config);
0832     if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
0833         return -EINVAL;
0834 
0835     /* read ecc result */
0836     val = readl(info->reg.gpmc_ecc1_result);
0837     *ecc_code++ = val;          /* P128e, ..., P1e */
0838     *ecc_code++ = val >> 16;    /* P128o, ..., P1o */
0839     /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
0840     *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
0841 
0842     return 0;
0843 }
0844 
0845 /**
0846  * omap_enable_hwecc - This function enables the hardware ecc functionality
0847  * @chip: NAND chip object
0848  * @mode: Read/Write mode
0849  */
0850 static void omap_enable_hwecc(struct nand_chip *chip, int mode)
0851 {
0852     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
0853     unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
0854     u32 val;
0855 
0856     /* clear ecc and enable bits */
0857     val = ECCCLEAR | ECC1;
0858     writel(val, info->reg.gpmc_ecc_control);
0859 
0860     /* program ecc and result sizes */
0861     val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
0862              ECC1RESULTSIZE);
0863     writel(val, info->reg.gpmc_ecc_size_config);
0864 
0865     switch (mode) {
0866     case NAND_ECC_READ:
0867     case NAND_ECC_WRITE:
0868         writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
0869         break;
0870     case NAND_ECC_READSYN:
0871         writel(ECCCLEAR, info->reg.gpmc_ecc_control);
0872         break;
0873     default:
0874         dev_info(&info->pdev->dev,
0875             "error: unrecognized Mode[%d]!\n", mode);
0876         break;
0877     }
0878 
0879     /* (ECC 16 or 8 bit col) | ( CS  )  | ECC Enable */
0880     val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
0881     writel(val, info->reg.gpmc_ecc_config);
0882 }
0883 
0884 /**
0885  * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
0886  * @chip: NAND chip object
0887  * @mode: Read/Write mode
0888  *
0889  * When using BCH with SW correction (i.e. no ELM), sector size is set
0890  * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
0891  * for both reading and writing with:
0892  * eccsize0 = 0  (no additional protected byte in spare area)
0893  * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
0894  */
0895 static void __maybe_unused omap_enable_hwecc_bch(struct nand_chip *chip,
0896                          int mode)
0897 {
0898     unsigned int bch_type;
0899     unsigned int dev_width, nsectors;
0900     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
0901     enum omap_ecc ecc_opt = info->ecc_opt;
0902     u32 val, wr_mode;
0903     unsigned int ecc_size1, ecc_size0;
0904 
0905     /* GPMC configurations for calculating ECC */
0906     switch (ecc_opt) {
0907     case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
0908         bch_type = 0;
0909         nsectors = 1;
0910         wr_mode   = BCH_WRAPMODE_6;
0911         ecc_size0 = BCH_ECC_SIZE0;
0912         ecc_size1 = BCH_ECC_SIZE1;
0913         break;
0914     case OMAP_ECC_BCH4_CODE_HW:
0915         bch_type = 0;
0916         nsectors = chip->ecc.steps;
0917         if (mode == NAND_ECC_READ) {
0918             wr_mode   = BCH_WRAPMODE_1;
0919             ecc_size0 = BCH4R_ECC_SIZE0;
0920             ecc_size1 = BCH4R_ECC_SIZE1;
0921         } else {
0922             wr_mode   = BCH_WRAPMODE_6;
0923             ecc_size0 = BCH_ECC_SIZE0;
0924             ecc_size1 = BCH_ECC_SIZE1;
0925         }
0926         break;
0927     case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
0928         bch_type = 1;
0929         nsectors = 1;
0930         wr_mode   = BCH_WRAPMODE_6;
0931         ecc_size0 = BCH_ECC_SIZE0;
0932         ecc_size1 = BCH_ECC_SIZE1;
0933         break;
0934     case OMAP_ECC_BCH8_CODE_HW:
0935         bch_type = 1;
0936         nsectors = chip->ecc.steps;
0937         if (mode == NAND_ECC_READ) {
0938             wr_mode   = BCH_WRAPMODE_1;
0939             ecc_size0 = BCH8R_ECC_SIZE0;
0940             ecc_size1 = BCH8R_ECC_SIZE1;
0941         } else {
0942             wr_mode   = BCH_WRAPMODE_6;
0943             ecc_size0 = BCH_ECC_SIZE0;
0944             ecc_size1 = BCH_ECC_SIZE1;
0945         }
0946         break;
0947     case OMAP_ECC_BCH16_CODE_HW:
0948         bch_type = 0x2;
0949         nsectors = chip->ecc.steps;
0950         if (mode == NAND_ECC_READ) {
0951             wr_mode   = 0x01;
0952             ecc_size0 = 52; /* ECC bits in nibbles per sector */
0953             ecc_size1 = 0;  /* non-ECC bits in nibbles per sector */
0954         } else {
0955             wr_mode   = 0x01;
0956             ecc_size0 = 0;  /* extra bits in nibbles per sector */
0957             ecc_size1 = 52; /* OOB bits in nibbles per sector */
0958         }
0959         break;
0960     default:
0961         return;
0962     }
0963 
0964     writel(ECC1, info->reg.gpmc_ecc_control);
0965 
0966     /* Configure ecc size for BCH */
0967     val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
0968     writel(val, info->reg.gpmc_ecc_size_config);
0969 
0970     dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
0971 
0972     /* BCH configuration */
0973     val = ((1                        << 16) | /* enable BCH */
0974            (bch_type         << 12) | /* BCH4/BCH8/BCH16 */
0975            (wr_mode                  <<  8) | /* wrap mode */
0976            (dev_width                <<  7) | /* bus width */
0977            (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */
0978            (info->gpmc_cs            <<  1) | /* ECC CS */
0979            (0x1));                            /* enable ECC */
0980 
0981     writel(val, info->reg.gpmc_ecc_config);
0982 
0983     /* Clear ecc and enable bits */
0984     writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
0985 }
0986 
0987 static u8  bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
0988 static u8  bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0989                 0x97, 0x79, 0xe5, 0x24, 0xb5};
0990 
0991 /**
0992  * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
0993  * @mtd:    MTD device structure
0994  * @dat:    The pointer to data on which ecc is computed
0995  * @ecc_calc:   The ecc_code buffer
0996  * @i:      The sector number (for a multi sector page)
0997  *
0998  * Supports calculating BCH4/8/16 ECC vectors for one sector
0999  * within a page. Sector number is in @i.
1000  */
1001 static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
1002                    const u_char *dat, u_char *ecc_calc, int i)
1003 {
1004     struct omap_nand_info *info = mtd_to_omap(mtd);
1005     int eccbytes    = info->nand.ecc.bytes;
1006     struct gpmc_nand_regs   *gpmc_regs = &info->reg;
1007     u8 *ecc_code;
1008     unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
1009     u32 val;
1010     int j;
1011 
1012     ecc_code = ecc_calc;
1013     switch (info->ecc_opt) {
1014     case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1015     case OMAP_ECC_BCH8_CODE_HW:
1016         bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1017         bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1018         bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
1019         bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
1020         *ecc_code++ = (bch_val4 & 0xFF);
1021         *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
1022         *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
1023         *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
1024         *ecc_code++ = (bch_val3 & 0xFF);
1025         *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
1026         *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
1027         *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
1028         *ecc_code++ = (bch_val2 & 0xFF);
1029         *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
1030         *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
1031         *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
1032         *ecc_code++ = (bch_val1 & 0xFF);
1033         break;
1034     case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1035     case OMAP_ECC_BCH4_CODE_HW:
1036         bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1037         bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1038         *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
1039         *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
1040         *ecc_code++ = ((bch_val2 & 0xF) << 4) |
1041             ((bch_val1 >> 28) & 0xF);
1042         *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
1043         *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
1044         *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1045         *ecc_code++ = ((bch_val1 & 0xF) << 4);
1046         break;
1047     case OMAP_ECC_BCH16_CODE_HW:
1048         val = readl(gpmc_regs->gpmc_bch_result6[i]);
1049         ecc_code[0]  = ((val >>  8) & 0xFF);
1050         ecc_code[1]  = ((val >>  0) & 0xFF);
1051         val = readl(gpmc_regs->gpmc_bch_result5[i]);
1052         ecc_code[2]  = ((val >> 24) & 0xFF);
1053         ecc_code[3]  = ((val >> 16) & 0xFF);
1054         ecc_code[4]  = ((val >>  8) & 0xFF);
1055         ecc_code[5]  = ((val >>  0) & 0xFF);
1056         val = readl(gpmc_regs->gpmc_bch_result4[i]);
1057         ecc_code[6]  = ((val >> 24) & 0xFF);
1058         ecc_code[7]  = ((val >> 16) & 0xFF);
1059         ecc_code[8]  = ((val >>  8) & 0xFF);
1060         ecc_code[9]  = ((val >>  0) & 0xFF);
1061         val = readl(gpmc_regs->gpmc_bch_result3[i]);
1062         ecc_code[10] = ((val >> 24) & 0xFF);
1063         ecc_code[11] = ((val >> 16) & 0xFF);
1064         ecc_code[12] = ((val >>  8) & 0xFF);
1065         ecc_code[13] = ((val >>  0) & 0xFF);
1066         val = readl(gpmc_regs->gpmc_bch_result2[i]);
1067         ecc_code[14] = ((val >> 24) & 0xFF);
1068         ecc_code[15] = ((val >> 16) & 0xFF);
1069         ecc_code[16] = ((val >>  8) & 0xFF);
1070         ecc_code[17] = ((val >>  0) & 0xFF);
1071         val = readl(gpmc_regs->gpmc_bch_result1[i]);
1072         ecc_code[18] = ((val >> 24) & 0xFF);
1073         ecc_code[19] = ((val >> 16) & 0xFF);
1074         ecc_code[20] = ((val >>  8) & 0xFF);
1075         ecc_code[21] = ((val >>  0) & 0xFF);
1076         val = readl(gpmc_regs->gpmc_bch_result0[i]);
1077         ecc_code[22] = ((val >> 24) & 0xFF);
1078         ecc_code[23] = ((val >> 16) & 0xFF);
1079         ecc_code[24] = ((val >>  8) & 0xFF);
1080         ecc_code[25] = ((val >>  0) & 0xFF);
1081         break;
1082     default:
1083         return -EINVAL;
1084     }
1085 
1086     /* ECC scheme specific syndrome customizations */
1087     switch (info->ecc_opt) {
1088     case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1089         /* Add constant polynomial to remainder, so that
1090          * ECC of blank pages results in 0x0 on reading back
1091          */
1092         for (j = 0; j < eccbytes; j++)
1093             ecc_calc[j] ^= bch4_polynomial[j];
1094         break;
1095     case OMAP_ECC_BCH4_CODE_HW:
1096         /* Set  8th ECC byte as 0x0 for ROM compatibility */
1097         ecc_calc[eccbytes - 1] = 0x0;
1098         break;
1099     case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1100         /* Add constant polynomial to remainder, so that
1101          * ECC of blank pages results in 0x0 on reading back
1102          */
1103         for (j = 0; j < eccbytes; j++)
1104             ecc_calc[j] ^= bch8_polynomial[j];
1105         break;
1106     case OMAP_ECC_BCH8_CODE_HW:
1107         /* Set 14th ECC byte as 0x0 for ROM compatibility */
1108         ecc_calc[eccbytes - 1] = 0x0;
1109         break;
1110     case OMAP_ECC_BCH16_CODE_HW:
1111         break;
1112     default:
1113         return -EINVAL;
1114     }
1115 
1116     return 0;
1117 }
1118 
1119 /**
1120  * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
1121  * @chip:   NAND chip object
1122  * @dat:    The pointer to data on which ecc is computed
1123  * @ecc_calc:   Buffer storing the calculated ECC bytes
1124  *
1125  * Supports calculating BCH4/8/16 ECC vectors for one sector. This is used
1126  * when SW-based correction is required, as the ECC is needed for one sector
1127  * at a time.
1128  */
1129 static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
1130                      const u_char *dat, u_char *ecc_calc)
1131 {
1132     return _omap_calculate_ecc_bch(nand_to_mtd(chip), dat, ecc_calc, 0);
1133 }
1134 
1135 /**
1136  * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
1137  * @mtd:    MTD device structure
1138  * @dat:    The pointer to data on which ecc is computed
1139  * @ecc_calc:   Buffer storing the calculated ECC bytes
1140  *
1141  * Supports calculating BCH4/8/16 ECC vectors for the entire page in one go.
1142  */
1143 static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
1144                     const u_char *dat, u_char *ecc_calc)
1145 {
1146     struct omap_nand_info *info = mtd_to_omap(mtd);
1147     int eccbytes = info->nand.ecc.bytes;
1148     unsigned long nsectors;
1149     int i, ret;
1150 
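    /*
     * The number of sectors per page is read back from the GPMC ECC config
     * register, where omap_enable_hwecc_bch() programmed it as (nsectors - 1)
     * in bits [6:4].
     */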
1151     nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1152     for (i = 0; i < nsectors; i++) {
1153         ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
1154         if (ret)
1155             return ret;
1156 
1157         ecc_calc += eccbytes;
1158     }
1159 
1160     return 0;
1161 }
1162 
1163 /**
1164  * erased_sector_bitflips - count bit flips
1165  * @data:   data sector buffer
1166  * @oob:    oob buffer
1167  * @info:   omap_nand_info
1168  *
1169  * Check whether the number of bit flips in an erased page falls below the
1170  * correctable level. If so, report the page as erased with correctable bit
1171  * flips, else report the page as uncorrectable.
1172  */
1173 static int erased_sector_bitflips(u_char *data, u_char *oob,
1174         struct omap_nand_info *info)
1175 {
1176     int flip_bits = 0, i;
1177 
1178     for (i = 0; i < info->nand.ecc.size; i++) {
1179         flip_bits += hweight8(~data[i]);
1180         if (flip_bits > info->nand.ecc.strength)
1181             return 0;
1182     }
1183 
1184     for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
1185         flip_bits += hweight8(~oob[i]);
1186         if (flip_bits > info->nand.ecc.strength)
1187             return 0;
1188     }
1189 
1190     /*
1191      * The bit flips fall within the correctable level.
1192      * Fill the data area with 0xFF.
1193      */
1194     if (flip_bits) {
1195         memset(data, 0xFF, info->nand.ecc.size);
1196         memset(oob, 0xFF, info->nand.ecc.bytes);
1197     }
1198 
1199     return flip_bits;
1200 }
1201 
1202 /**
1203  * omap_elm_correct_data - corrects page data area in case error reported
1204  * @chip:   NAND chip object
1205  * @data:   page data
1206  * @read_ecc:   ecc read from nand flash
1207  * @calc_ecc:   ecc read from HW ECC registers
1208  *
1209  * The calculated ECC vector is reported as zero for error-free pages.
1210  * For a non-zero ECC vector, first filter out erased pages, and
1211  * then process the data via the ELM to detect bit-flips.
1212  */
1213 static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
1214                  u_char *read_ecc, u_char *calc_ecc)
1215 {
1216     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
1217     struct nand_ecc_ctrl *ecc = &info->nand.ecc;
1218     int eccsteps = info->nsteps_per_eccpg;
1219     int i, j, stat = 0;
1220     int eccflag, actual_eccbytes;
1221     struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
1222     u_char *ecc_vec = calc_ecc;
1223     u_char *spare_ecc = read_ecc;
1224     u_char *erased_ecc_vec;
1225     u_char *buf;
1226     int bitflip_count;
1227     bool is_error_reported = false;
1228     u32 bit_pos, byte_pos, error_max, pos;
1229     int err;
1230 
1231     switch (info->ecc_opt) {
1232     case OMAP_ECC_BCH4_CODE_HW:
1233         /* omit  7th ECC byte reserved for ROM code compatibility */
1234         actual_eccbytes = ecc->bytes - 1;
1235         erased_ecc_vec = bch4_vector;
1236         break;
1237     case OMAP_ECC_BCH8_CODE_HW:
1238         /* omit 14th ECC byte reserved for ROM code compatibility */
1239         actual_eccbytes = ecc->bytes - 1;
1240         erased_ecc_vec = bch8_vector;
1241         break;
1242     case OMAP_ECC_BCH16_CODE_HW:
1243         actual_eccbytes = ecc->bytes;
1244         erased_ecc_vec = bch16_vector;
1245         break;
1246     default:
1247         dev_err(&info->pdev->dev, "invalid driver configuration\n");
1248         return -EINVAL;
1249     }
1250 
1251     /* Initialize elm error vector to zero */
1252     memset(err_vec, 0, sizeof(err_vec));
1253 
1254     for (i = 0; i < eccsteps ; i++) {
1255         eccflag = 0;    /* initialize eccflag */
1256 
1257         /*
1258          * Check whether any error is reported;
1259          * in case of an error, a non-zero ECC is reported.
1260          */
1261         for (j = 0; j < actual_eccbytes; j++) {
1262             if (calc_ecc[j] != 0) {
1263                 eccflag = 1; /* non zero ecc, error present */
1264                 break;
1265             }
1266         }
1267 
1268         if (eccflag == 1) {
1269             if (memcmp(calc_ecc, erased_ecc_vec,
1270                         actual_eccbytes) == 0) {
1271                 /*
1272                  * calc_ecc[] matches pattern for ECC(all 0xff)
1273                  * so this is definitely an erased-page
1274                  */
1275             } else {
1276                 buf = &data[info->nand.ecc.size * i];
1277                 /*
1278                  * count number of 0-bits in read_buf.
1279                  * This check can be removed once a similar
1280                  * check is introduced in generic NAND driver
1281                  */
1282                 bitflip_count = erased_sector_bitflips(
1283                         buf, read_ecc, info);
1284                 if (bitflip_count) {
1285                     /*
1286                      * number of 0-bits within ECC limits
1287                      * So this may be an erased-page
1288                      */
1289                     stat += bitflip_count;
1290                 } else {
1291                     /*
1292                      * Too many 0-bits. It may be a
1293                      * - programmed-page, OR
1294                      * - erased-page with many bit-flips
1295                      * So this page requires check by ELM
1296                      */
1297                     err_vec[i].error_reported = true;
1298                     is_error_reported = true;
1299                 }
1300             }
1301         }
1302 
1303         /* Update the ecc vector */
1304         calc_ecc += ecc->bytes;
1305         read_ecc += ecc->bytes;
1306     }
1307 
1308     /* Check if any error reported */
1309     if (!is_error_reported)
1310         return stat;
1311 
1312     /* Decode BCH error using ELM module */
1313     elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
1314 
1315     err = 0;
1316     for (i = 0; i < eccsteps; i++) {
1317         if (err_vec[i].error_uncorrectable) {
1318             dev_err(&info->pdev->dev,
1319                 "uncorrectable bit-flips found\n");
1320             err = -EBADMSG;
1321         } else if (err_vec[i].error_reported) {
1322             for (j = 0; j < err_vec[i].error_count; j++) {
1323                 switch (info->ecc_opt) {
1324                 case OMAP_ECC_BCH4_CODE_HW:
1325                     /* Add 4 bits to take care of padding */
1326                     pos = err_vec[i].error_loc[j] +
1327                         BCH4_BIT_PAD;
1328                     break;
1329                 case OMAP_ECC_BCH8_CODE_HW:
1330                 case OMAP_ECC_BCH16_CODE_HW:
1331                     pos = err_vec[i].error_loc[j];
1332                     break;
1333                 default:
1334                     return -EINVAL;
1335                 }
1336                 error_max = (ecc->size + actual_eccbytes) * 8;
1337                 /* Calculate bit position of error */
1338                 bit_pos = pos % 8;
1339 
1340                 /* Calculate byte position of error */
1341                 byte_pos = (error_max - pos - 1) / 8;
1342 
1343                 if (pos < error_max) {
1344                     if (byte_pos < 512) {
1345                         pr_debug("bitflip@dat[%d]=%x\n",
1346                              byte_pos, data[byte_pos]);
1347                         data[byte_pos] ^= 1 << bit_pos;
1348                     } else {
1349                         pr_debug("bitflip@oob[%d]=%x\n",
1350                             (byte_pos - 512),
1351                              spare_ecc[byte_pos - 512]);
1352                         spare_ecc[byte_pos - 512] ^=
1353                             1 << bit_pos;
1354                     }
1355                 } else {
1356                     dev_err(&info->pdev->dev,
1357                         "invalid bit-flip @ %d:%d\n",
1358                         byte_pos, bit_pos);
1359                     err = -EBADMSG;
1360                 }
1361             }
1362         }
1363 
1364         /* Update number of correctable errors */
1365         stat = max_t(unsigned int, stat, err_vec[i].error_count);
1366 
1367         /* Update page data with sector size */
1368         data += ecc->size;
1369         spare_ecc += ecc->bytes;
1370     }
1371 
1372     return (err) ? err : stat;
1373 }
1374 
1375 /**
1376  * omap_write_page_bch - BCH ecc based write page function for entire page
1377  * @chip:       nand chip info structure
1378  * @buf:        data buffer
1379  * @oob_required:   must write chip->oob_poi to OOB
1380  * @page:       page number to write
1381  *
1382  * Custom write page method evolved to support multi sector writing in one shot
1383  */
1384 static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
1385                    int oob_required, int page)
1386 {
1387     struct mtd_info *mtd = nand_to_mtd(chip);
1388     struct omap_nand_info *info = mtd_to_omap(mtd);
1389     uint8_t *ecc_calc = chip->ecc.calc_buf;
1390     unsigned int eccpg;
1391     int ret;
1392 
1393     ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1394     if (ret)
1395         return ret;
1396 
1397     for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
1398         /* Enable GPMC ecc engine */
1399         chip->ecc.hwctl(chip, NAND_ECC_WRITE);
1400 
1401         /* Write data */
1402         info->data_out(chip, buf + (eccpg * info->eccpg_size),
1403                    info->eccpg_size, false);
1404 
1405         /* Update ecc vector from GPMC result registers */
1406         ret = omap_calculate_ecc_bch_multi(mtd,
1407                            buf + (eccpg * info->eccpg_size),
1408                            ecc_calc);
1409         if (ret)
1410             return ret;
1411 
1412         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc,
1413                          chip->oob_poi,
1414                          eccpg * info->eccpg_bytes,
1415                          info->eccpg_bytes);
1416         if (ret)
1417             return ret;
1418     }
1419 
1420     /* Write ecc vector to OOB area */
1421     info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
1422 
1423     return nand_prog_page_end_op(chip);
1424 }
1425 
1426 /**
1427  * omap_write_subpage_bch - BCH hardware ECC based subpage write
1428  * @chip:   nand chip info structure
1429  * @offset: column address of subpage within the page
1430  * @data_len:   data length
1431  * @buf:    data buffer
1432  * @oob_required: must write chip->oob_poi to OOB
1433  * @page: page number to write
1434  *
1435  * OMAP optimized subpage write method.
1436  */
1437 static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
1438                   u32 data_len, const u8 *buf,
1439                   int oob_required, int page)
1440 {
1441     struct mtd_info *mtd = nand_to_mtd(chip);
1442     struct omap_nand_info *info = mtd_to_omap(mtd);
1443     u8 *ecc_calc = chip->ecc.calc_buf;
1444     int ecc_size      = chip->ecc.size;
1445     int ecc_bytes     = chip->ecc.bytes;
1446     u32 start_step = offset / ecc_size;
1447     u32 end_step   = (offset + data_len - 1) / ecc_size;
1448     unsigned int eccpg;
1449     int step, ret = 0;
1450 
1451     /*
1452      * Write the entire page in one go, which is optimal since
1453      * ECC is calculated by hardware.
1454      * ECC is calculated for all subpages, but we keep
1455      * only the bytes we need.
1456      */
1457     ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1458     if (ret)
1459         return ret;
1460 
1461     for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
1462         /* Enable GPMC ECC engine */
1463         chip->ecc.hwctl(chip, NAND_ECC_WRITE);
1464 
1465         /* Write data */
1466         info->data_out(chip, buf + (eccpg * info->eccpg_size),
1467                    info->eccpg_size, false);
1468 
1469         for (step = 0; step < info->nsteps_per_eccpg; step++) {
1470             unsigned int base_step = eccpg * info->nsteps_per_eccpg;
1471             const u8 *bufoffs = buf + (eccpg * info->eccpg_size);
1472 
1473             /* Mask ECC of un-touched subpages with 0xFFs */
1474             if ((step + base_step) < start_step ||
1475                 (step + base_step) > end_step)
1476                 memset(ecc_calc + (step * ecc_bytes), 0xff,
1477                        ecc_bytes);
1478             else
1479                 ret = _omap_calculate_ecc_bch(mtd,
1480                                   bufoffs + (step * ecc_size),
1481                                   ecc_calc + (step * ecc_bytes),
1482                                   step);
1483 
1484             if (ret)
1485                 return ret;
1486         }
1487 
1488         /*
1489          * Copy the calculated ECC for the whole page including the
1490          * masked values (0xFF) corresponding to unwritten subpages.
1491          */
1492         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi,
1493                          eccpg * info->eccpg_bytes,
1494                          info->eccpg_bytes);
1495         if (ret)
1496             return ret;
1497     }
1498 
1499     /* write OOB buffer to NAND device */
1500     info->data_out(chip, chip->oob_poi, mtd->oobsize, false);
1501 
1502     return nand_prog_page_end_op(chip);
1503 }
1504 
1505 /**
1506  * omap_read_page_bch - BCH ecc based page read function for entire page
1507  * @chip:       nand chip info structure
1508  * @buf:        buffer to store read data
1509  * @oob_required:   caller requires OOB data read to chip->oob_poi
1510  * @page:       page number to read
1511  *
1512  * For the BCH ECC scheme, the GPMC is used for syndrome calculation and
1513  * the ELM module for error correction.
1514  * This custom method supports ELM error correction and multi-sector
1515  * reads: the page data area is read together with the OOB data while the
1516  * ECC engine is enabled, and the ECC vector is updated once the OOB data
1517  * has been read. For error-free pages the ECC vector reads back as zero.
1518  */
1519 static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
1520                   int oob_required, int page)
1521 {
1522     struct mtd_info *mtd = nand_to_mtd(chip);
1523     struct omap_nand_info *info = mtd_to_omap(mtd);
1524     uint8_t *ecc_calc = chip->ecc.calc_buf;
1525     uint8_t *ecc_code = chip->ecc.code_buf;
1526     unsigned int max_bitflips = 0, eccpg;
1527     int stat, ret;
1528 
1529     ret = nand_read_page_op(chip, page, 0, NULL, 0);
1530     if (ret)
1531         return ret;
1532 
1533     for (eccpg = 0; eccpg < info->neccpg; eccpg++) {
1534         /* Enable GPMC ECC engine */
1535         chip->ecc.hwctl(chip, NAND_ECC_READ);
1536 
1537         /* Read data */
1538         ret = nand_change_read_column_op(chip, eccpg * info->eccpg_size,
1539                          buf + (eccpg * info->eccpg_size),
1540                          info->eccpg_size, false);
1541         if (ret)
1542             return ret;
1543 
1544         /* Read oob bytes */
1545         ret = nand_change_read_column_op(chip,
1546                          mtd->writesize + BBM_LEN +
1547                          (eccpg * info->eccpg_bytes),
1548                          chip->oob_poi + BBM_LEN +
1549                          (eccpg * info->eccpg_bytes),
1550                          info->eccpg_bytes, false);
1551         if (ret)
1552             return ret;
1553 
1554         /* Calculate ecc bytes */
1555         ret = omap_calculate_ecc_bch_multi(mtd,
1556                            buf + (eccpg * info->eccpg_size),
1557                            ecc_calc);
1558         if (ret)
1559             return ret;
1560 
1561         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code,
1562                          chip->oob_poi,
1563                          eccpg * info->eccpg_bytes,
1564                          info->eccpg_bytes);
1565         if (ret)
1566             return ret;
1567 
1568         stat = chip->ecc.correct(chip,
1569                      buf + (eccpg * info->eccpg_size),
1570                      ecc_code, ecc_calc);
1571         if (stat < 0) {
1572             mtd->ecc_stats.failed++;
1573         } else {
1574             mtd->ecc_stats.corrected += stat;
1575             max_bitflips = max_t(unsigned int, max_bitflips, stat);
1576         }
1577     }
1578 
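    /*
     * Per the raw NAND convention, read_page() returns the maximum number
     * of bitflips seen in any ECC step so the MTD core can report -EUCLEAN
     * when the count crosses the bitflip threshold.
     */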
1579     return max_bitflips;
1580 }
1581 
1582 /**
1583  * is_elm_present - checks for presence of ELM module by scanning DT nodes
1584  * @info: NAND device structure containing platform data
1585  * @elm_node: ELM's DT node
1586  */
1587 static bool is_elm_present(struct omap_nand_info *info,
1588                struct device_node *elm_node)
1589 {
1590     struct platform_device *pdev;
1591 
1592     /* check whether elm-id is passed via DT */
1593     if (!elm_node) {
1594         dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
1595         return false;
1596     }
1597     pdev = of_find_device_by_node(elm_node);
1598     /* check whether ELM device is registered */
1599     if (!pdev) {
1600         dev_err(&info->pdev->dev, "ELM device not found\n");
1601         return false;
1602     }
1603     /* ELM module available, now configure it */
1604     info->elm_dev = &pdev->dev;
1605     return true;
1606 }
1607 
1608 static bool omap2_nand_ecc_check(struct omap_nand_info *info)
1609 {
1610     bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
1611 
1612     switch (info->ecc_opt) {
1613     case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1614     case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1615         ecc_needs_omap_bch = false;
1616         ecc_needs_bch = true;
1617         ecc_needs_elm = false;
1618         break;
1619     case OMAP_ECC_BCH4_CODE_HW:
1620     case OMAP_ECC_BCH8_CODE_HW:
1621     case OMAP_ECC_BCH16_CODE_HW:
1622         ecc_needs_omap_bch = true;
1623         ecc_needs_bch = false;
1624         ecc_needs_elm = true;
1625         break;
1626     default:
1627         ecc_needs_omap_bch = false;
1628         ecc_needs_bch = false;
1629         ecc_needs_elm = false;
1630         break;
1631     }
1632 
1633     if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
1634         dev_err(&info->pdev->dev,
1635             "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
1636         return false;
1637     }
1638     if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
1639         dev_err(&info->pdev->dev,
1640             "CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
1641         return false;
1642     }
1643     if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
1644         dev_err(&info->pdev->dev, "ELM not available\n");
1645         return false;
1646     }
1647 
1648     return true;
1649 }
1650 
1651 static const char * const nand_xfer_types[] = {
1652     [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
1653     [NAND_OMAP_POLLED] = "polled",
1654     [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
1655     [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
1656 };
1657 
1658 static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
1659 {
1660     struct device_node *child = dev->of_node;
1661     int i;
1662     const char *s;
1663     u32 cs;
1664 
1665     if (of_property_read_u32(child, "reg", &cs) < 0) {
1666         dev_err(dev, "reg not found in DT\n");
1667         return -EINVAL;
1668     }
1669 
1670     info->gpmc_cs = cs;
1671 
1672     /* Detect availability of the ELM module; it is not present before OMAP4 */
1673     info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
1674     if (!info->elm_of_node) {
1675         info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
1676         if (!info->elm_of_node)
1677             dev_dbg(dev, "ti,elm-id not in DT\n");
1678     }
1679 
1680     /* select ecc-scheme for NAND */
1681     if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
1682         dev_err(dev, "ti,nand-ecc-opt not found\n");
1683         return -EINVAL;
1684     }
1685 
1686     if (!strcmp(s, "sw")) {
1687         info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
1688     } else if (!strcmp(s, "ham1") ||
1689            !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
1690         info->ecc_opt = OMAP_ECC_HAM1_CODE_HW;
1691     } else if (!strcmp(s, "bch4")) {
1692         if (info->elm_of_node)
1693             info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
1694         else
1695             info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
1696     } else if (!strcmp(s, "bch8")) {
1697         if (info->elm_of_node)
1698             info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
1699         else
1700             info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
1701     } else if (!strcmp(s, "bch16")) {
1702         info->ecc_opt = OMAP_ECC_BCH16_CODE_HW;
1703     } else {
1704         dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
1705         return -EINVAL;
1706     }
1707 
1708     /* select data transfer mode */
1709     if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
1710         for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
1711             if (!strcasecmp(s, nand_xfer_types[i])) {
1712                 info->xfer_type = i;
1713                 return 0;
1714             }
1715         }
1716 
1717         dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
1718         return -EINVAL;
1719     }
1720 
1721     return 0;
1722 }
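
/*
 * For reference, a minimal sketch of the device tree properties parsed by
 * omap_get_dt_info() above; the node name, phandle label and cell values
 * are illustrative (the first "reg" cell is the GPMC chip-select):
 *
 *	nand@0,0 {
 *		reg = <0 0 4>;
 *		ti,nand-ecc-opt = "bch8";
 *		ti,elm-id = <&elm>;
 *		ti,nand-xfer-type = "prefetch-dma";
 *	};
 */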
1723 
1724 static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
1725                   struct mtd_oob_region *oobregion)
1726 {
1727     struct omap_nand_info *info = mtd_to_omap(mtd);
1728     struct nand_chip *chip = &info->nand;
1729     int off = BBM_LEN;
1730 
1731     if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
1732         !(chip->options & NAND_BUSWIDTH_16))
1733         off = 1;
1734 
1735     if (section)
1736         return -ERANGE;
1737 
1738     oobregion->offset = off;
1739     oobregion->length = chip->ecc.total;
1740 
1741     return 0;
1742 }
1743 
1744 static int omap_ooblayout_free(struct mtd_info *mtd, int section,
1745                    struct mtd_oob_region *oobregion)
1746 {
1747     struct omap_nand_info *info = mtd_to_omap(mtd);
1748     struct nand_chip *chip = &info->nand;
1749     int off = BBM_LEN;
1750 
1751     if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
1752         !(chip->options & NAND_BUSWIDTH_16))
1753         off = 1;
1754 
1755     if (section)
1756         return -ERANGE;
1757 
1758     off += chip->ecc.total;
1759     if (off >= mtd->oobsize)
1760         return -ERANGE;
1761 
1762     oobregion->offset = off;
1763     oobregion->length = mtd->oobsize - off;
1764 
1765     return 0;
1766 }
1767 
1768 static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
1769     .ecc = omap_ooblayout_ecc,
1770     .free = omap_ooblayout_free,
1771 };
1772 
1773 static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
1774                  struct mtd_oob_region *oobregion)
1775 {
1776     struct nand_device *nand = mtd_to_nanddev(mtd);
1777     unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
1778     unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
1779     int off = BBM_LEN;
1780 
1781     if (section >= nsteps)
1782         return -ERANGE;
1783 
1784     /*
1785      * When SW correction is employed, one OMAP-specific marker byte is
1786      * reserved after each ECC step.
1787      */
1788     oobregion->offset = off + (section * (ecc_bytes + 1));
1789     oobregion->length = ecc_bytes;
1790 
1791     return 0;
1792 }
1793 
1794 static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
1795                   struct mtd_oob_region *oobregion)
1796 {
1797     struct nand_device *nand = mtd_to_nanddev(mtd);
1798     unsigned int nsteps = nanddev_get_ecc_nsteps(nand);
1799     unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand);
1800     int off = BBM_LEN;
1801 
1802     if (section)
1803         return -ERANGE;
1804 
1805     /*
1806      * When SW correction is employed, one OMAP-specific marker byte is
1807      * reserved after each ECC step.
1808      */
1809     off += ((ecc_bytes + 1) * nsteps);
1810     if (off >= mtd->oobsize)
1811         return -ERANGE;
1812 
1813     oobregion->offset = off;
1814     oobregion->length = mtd->oobsize - off;
1815 
1816     return 0;
1817 }
1818 
1819 static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
1820     .ecc = omap_sw_ooblayout_ecc,
1821     .free = omap_sw_ooblayout_free,
1822 };
1823 
1824 static int omap_nand_attach_chip(struct nand_chip *chip)
1825 {
1826     struct mtd_info *mtd = nand_to_mtd(chip);
1827     struct omap_nand_info *info = mtd_to_omap(mtd);
1828     struct device *dev = &info->pdev->dev;
1829     int min_oobbytes = BBM_LEN;
1830     int elm_bch_strength = -1;
1831     int oobbytes_per_step;
1832     dma_cap_mask_t mask;
1833     int err;
1834 
1835     if (chip->bbt_options & NAND_BBT_USE_FLASH)
1836         chip->bbt_options |= NAND_BBT_NO_OOB;
1837     else
1838         chip->options |= NAND_SKIP_BBTSCAN;
1839 
1840     /* Re-populate low-level callbacks based on xfer modes */
1841     switch (info->xfer_type) {
1842     case NAND_OMAP_PREFETCH_POLLED:
1843         info->data_in = omap_nand_data_in_pref;
1844         info->data_out = omap_nand_data_out_pref;
1845         break;
1846 
1847     case NAND_OMAP_POLLED:
1848         /* Use nand_base defaults for {read,write}_buf */
1849         break;
1850 
1851     case NAND_OMAP_PREFETCH_DMA:
1852         dma_cap_zero(mask);
1853         dma_cap_set(DMA_SLAVE, mask);
1854         info->dma = dma_request_chan(dev->parent, "rxtx");
1855 
1856         if (IS_ERR(info->dma)) {
1857             dev_err(dev, "DMA engine request failed\n");
1858             return PTR_ERR(info->dma);
1859         } else {
1860             struct dma_slave_config cfg;
1861 
1862             memset(&cfg, 0, sizeof(cfg));
1863             cfg.src_addr = info->phys_base;
1864             cfg.dst_addr = info->phys_base;
1865             cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1866             cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1867             cfg.src_maxburst = 16;
1868             cfg.dst_maxburst = 16;
1869             err = dmaengine_slave_config(info->dma, &cfg);
1870             if (err) {
1871                 dev_err(dev,
1872                     "DMA engine slave config failed: %d\n",
1873                     err);
1874                 return err;
1875             }
1876 
1877             info->data_in = omap_nand_data_in_dma_pref;
1878             info->data_out = omap_nand_data_out_dma_pref;
1879         }
1880         break;
1881 
1882     case NAND_OMAP_PREFETCH_IRQ:
1883         info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
1884         if (info->gpmc_irq_fifo <= 0)
1885             return -ENODEV;
1886         err = devm_request_irq(dev, info->gpmc_irq_fifo,
1887                        omap_nand_irq, IRQF_SHARED,
1888                        "gpmc-nand-fifo", info);
1889         if (err) {
1890             dev_err(dev, "Requesting IRQ %d, error %d\n",
1891                 info->gpmc_irq_fifo, err);
1892             info->gpmc_irq_fifo = 0;
1893             return err;
1894         }
1895 
1896         info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
1897         if (info->gpmc_irq_count <= 0)
1898             return -ENODEV;
1899         err = devm_request_irq(dev, info->gpmc_irq_count,
1900                        omap_nand_irq, IRQF_SHARED,
1901                        "gpmc-nand-count", info);
1902         if (err) {
1903             dev_err(dev, "Requesting IRQ %d, error %d\n",
1904                 info->gpmc_irq_count, err);
1905             info->gpmc_irq_count = 0;
1906             return err;
1907         }
1908 
1909         info->data_in = omap_nand_data_in_irq_pref;
1910         info->data_out = omap_nand_data_out_irq_pref;
1911         break;
1912 
1913     default:
1914         dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
1915         return -EINVAL;
1916     }
1917 
1918     if (!omap2_nand_ecc_check(info))
1919         return -EINVAL;
1920 
1921     /*
1922      * Return early so that the NAND_ECC_ENGINE_TYPE_SOFT code creates its
1923      * own ooblayout instead of using ours.
1924      */
1925     if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
1926         chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
1927         chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
1928         return 0;
1929     }
1930 
1931     /* Populate MTD interface based on ECC scheme */
1932     switch (info->ecc_opt) {
1933     case OMAP_ECC_HAM1_CODE_HW:
1934         dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
1935         chip->ecc.engine_type   = NAND_ECC_ENGINE_TYPE_ON_HOST;
1936         chip->ecc.bytes     = 3;
1937         chip->ecc.size      = 512;
1938         chip->ecc.strength  = 1;
1939         chip->ecc.calculate = omap_calculate_ecc;
1940         chip->ecc.hwctl     = omap_enable_hwecc;
1941         chip->ecc.correct   = omap_correct_data;
1942         mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1943         oobbytes_per_step   = chip->ecc.bytes;
1944 
1945         if (!(chip->options & NAND_BUSWIDTH_16))
1946             min_oobbytes    = 1;
1947 
1948         break;
1949 
1950     case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1951         pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
1952         chip->ecc.engine_type   = NAND_ECC_ENGINE_TYPE_ON_HOST;
1953         chip->ecc.size      = 512;
1954         chip->ecc.bytes     = 7;
1955         chip->ecc.strength  = 4;
1956         chip->ecc.hwctl     = omap_enable_hwecc_bch;
1957         chip->ecc.correct   = rawnand_sw_bch_correct;
1958         chip->ecc.calculate = omap_calculate_ecc_bch_sw;
1959         mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
1960         /* Reserve one byte for the OMAP marker */
1961         oobbytes_per_step   = chip->ecc.bytes + 1;
1962         /* Software BCH library is used for locating errors */
1963         err = rawnand_sw_bch_init(chip);
1964         if (err) {
1965             dev_err(dev, "Unable to use BCH library\n");
1966             return err;
1967         }
1968         break;
1969 
1970     case OMAP_ECC_BCH4_CODE_HW:
1971         pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
1972         chip->ecc.engine_type   = NAND_ECC_ENGINE_TYPE_ON_HOST;
1973         chip->ecc.size      = 512;
1974         /* One extra byte is kept reserved for ROM-code compatibility */
1975         chip->ecc.bytes     = 7 + 1;
1976         chip->ecc.strength  = 4;
1977         chip->ecc.hwctl     = omap_enable_hwecc_bch;
1978         chip->ecc.correct   = omap_elm_correct_data;
1979         chip->ecc.read_page = omap_read_page_bch;
1980         chip->ecc.write_page    = omap_write_page_bch;
1981         chip->ecc.write_subpage = omap_write_subpage_bch;
1982         mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1983         oobbytes_per_step   = chip->ecc.bytes;
1984         elm_bch_strength = BCH4_ECC;
1985         break;
1986 
1987     case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1988         pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
1989         chip->ecc.engine_type   = NAND_ECC_ENGINE_TYPE_ON_HOST;
1990         chip->ecc.size      = 512;
1991         chip->ecc.bytes     = 13;
1992         chip->ecc.strength  = 8;
1993         chip->ecc.hwctl     = omap_enable_hwecc_bch;
1994         chip->ecc.correct   = rawnand_sw_bch_correct;
1995         chip->ecc.calculate = omap_calculate_ecc_bch_sw;
1996         mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
1997         /* Reserve one byte for the OMAP marker */
1998         oobbytes_per_step   = chip->ecc.bytes + 1;
1999         /* Software BCH library is used for locating errors */
2000         err = rawnand_sw_bch_init(chip);
2001         if (err) {
2002             dev_err(dev, "unable to use BCH library\n");
2003             return err;
2004         }
2005         break;
2006 
2007     case OMAP_ECC_BCH8_CODE_HW:
2008         pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
2009         chip->ecc.engine_type   = NAND_ECC_ENGINE_TYPE_ON_HOST;
2010         chip->ecc.size      = 512;
2011         /* The 14th byte is kept reserved for ROM-code compatibility */
2012         chip->ecc.bytes     = 13 + 1;
2013         chip->ecc.strength  = 8;
2014         chip->ecc.hwctl     = omap_enable_hwecc_bch;
2015         chip->ecc.correct   = omap_elm_correct_data;
2016         chip->ecc.read_page = omap_read_page_bch;
2017         chip->ecc.write_page    = omap_write_page_bch;
2018         chip->ecc.write_subpage = omap_write_subpage_bch;
2019         mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2020         oobbytes_per_step   = chip->ecc.bytes;
2021         elm_bch_strength = BCH8_ECC;
2022         break;
2023 
2024     case OMAP_ECC_BCH16_CODE_HW:
2025         pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
2026         chip->ecc.engine_type   = NAND_ECC_ENGINE_TYPE_ON_HOST;
2027         chip->ecc.size      = 512;
2028         chip->ecc.bytes     = 26;
2029         chip->ecc.strength  = 16;
2030         chip->ecc.hwctl     = omap_enable_hwecc_bch;
2031         chip->ecc.correct   = omap_elm_correct_data;
2032         chip->ecc.read_page = omap_read_page_bch;
2033         chip->ecc.write_page    = omap_write_page_bch;
2034         chip->ecc.write_subpage = omap_write_subpage_bch;
2035         mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2036         oobbytes_per_step   = chip->ecc.bytes;
2037         elm_bch_strength = BCH16_ECC;
2038         break;
2039     default:
2040         dev_err(dev, "Invalid or unsupported ECC scheme\n");
2041         return -EINVAL;
2042     }
2043 
2044     if (elm_bch_strength >= 0) {
2045         chip->ecc.steps = mtd->writesize / chip->ecc.size;
2046         info->neccpg = chip->ecc.steps / ERROR_VECTOR_MAX;
2047         if (info->neccpg) {
2048             info->nsteps_per_eccpg = ERROR_VECTOR_MAX;
2049         } else {
2050             info->neccpg = 1;
2051             info->nsteps_per_eccpg = chip->ecc.steps;
2052         }
2053         info->eccpg_size = info->nsteps_per_eccpg * chip->ecc.size;
2054         info->eccpg_bytes = info->nsteps_per_eccpg * chip->ecc.bytes;
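        /*
         * Rough worked example, assuming ERROR_VECTOR_MAX is 8 in the ELM
         * driver: a 4KiB page with 512-byte ECC steps has 8 steps, so
         * neccpg = 1 and the whole page fits in one ELM pass, while an
         * 8KiB page has 16 steps and is split into two ECC "pages" that
         * the BCH read/write/subpage-write paths above iterate over.
         */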
2055 
2056         err = elm_config(info->elm_dev, elm_bch_strength,
2057                  info->nsteps_per_eccpg, chip->ecc.size,
2058                  chip->ecc.bytes);
2059         if (err < 0)
2060             return err;
2061     }
2062 
2063     /* Check if NAND device's OOB is enough to store ECC signatures */
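    /*
     * For example, assuming BBM_LEN is 2 as defined earlier in this file:
     * BCH8 on a 2KiB page needs 4 steps * 14 ECC bytes + 2 = 58 bytes,
     * which fits in a standard 64-byte OOB area.
     */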
2064     min_oobbytes += (oobbytes_per_step *
2065              (mtd->writesize / chip->ecc.size));
2066     if (mtd->oobsize < min_oobbytes) {
2067         dev_err(dev,
2068             "Not enough OOB bytes: required = %d, available=%d\n",
2069             min_oobbytes, mtd->oobsize);
2070         return -EINVAL;
2071     }
2072 
2073     return 0;
2074 }
2075 
2076 static void omap_nand_data_in(struct nand_chip *chip, void *buf,
2077                   unsigned int len, bool force_8bit)
2078 {
2079     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
2080     u32 alignment = ((uintptr_t)buf | len) & 3;
2081 
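    /*
     * Pick the widest FIFO access that both the buffer address and the
     * length allow: byte accesses when 8-bit access is forced or either
     * is odd, halfword accesses when only 2-byte aligned, otherwise
     * 32-bit word accesses.
     */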
2082     if (force_8bit || (alignment & 1))
2083         ioread8_rep(info->fifo, buf, len);
2084     else if (alignment & 3)
2085         ioread16_rep(info->fifo, buf, len >> 1);
2086     else
2087         ioread32_rep(info->fifo, buf, len >> 2);
2088 }
2089 
2090 static void omap_nand_data_out(struct nand_chip *chip,
2091                    const void *buf, unsigned int len,
2092                    bool force_8bit)
2093 {
2094     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
2095     u32 alignment = ((uintptr_t)buf | len) & 3;
2096 
2097     if (force_8bit || (alignment & 1))
2098         iowrite8_rep(info->fifo, buf, len);
2099     else if (alignment & 3)
2100         iowrite16_rep(info->fifo, buf, len >> 1);
2101     else
2102         iowrite32_rep(info->fifo, buf, len >> 2);
2103 }
2104 
2105 static int omap_nand_exec_instr(struct nand_chip *chip,
2106                 const struct nand_op_instr *instr)
2107 {
2108     struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
2109     unsigned int i;
2110     int ret;
2111 
2112     switch (instr->type) {
2113     case NAND_OP_CMD_INSTR:
2114         iowrite8(instr->ctx.cmd.opcode,
2115              info->reg.gpmc_nand_command);
2116         break;
2117 
2118     case NAND_OP_ADDR_INSTR:
2119         for (i = 0; i < instr->ctx.addr.naddrs; i++) {
2120             iowrite8(instr->ctx.addr.addrs[i],
2121                  info->reg.gpmc_nand_address);
2122         }
2123         break;
2124 
2125     case NAND_OP_DATA_IN_INSTR:
2126         info->data_in(chip, instr->ctx.data.buf.in,
2127                   instr->ctx.data.len,
2128                   instr->ctx.data.force_8bit);
2129         break;
2130 
2131     case NAND_OP_DATA_OUT_INSTR:
2132         info->data_out(chip, instr->ctx.data.buf.out,
2133                    instr->ctx.data.len,
2134                    instr->ctx.data.force_8bit);
2135         break;
2136 
2137     case NAND_OP_WAITRDY_INSTR:
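        /*
         * Wait for R/B# via the dedicated GPIO when one was provided
         * ("rb-gpios" in the DT), otherwise fall back to
         * nand_soft_waitrdy(), which polls the chip's status register.
         */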
2138         ret = info->ready_gpiod ?
2139             nand_gpio_waitrdy(chip, info->ready_gpiod, instr->ctx.waitrdy.timeout_ms) :
2140             nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
2141         if (ret)
2142             return ret;
2143         break;
2144     }
2145 
2146     if (instr->delay_ns)
2147         ndelay(instr->delay_ns);
2148 
2149     return 0;
2150 }
2151 
2152 static int omap_nand_exec_op(struct nand_chip *chip,
2153                  const struct nand_operation *op,
2154                  bool check_only)
2155 {
2156     unsigned int i;
2157 
2158     if (check_only)
2159         return 0;
2160 
2161     for (i = 0; i < op->ninstrs; i++) {
2162         int ret;
2163 
2164         ret = omap_nand_exec_instr(chip, &op->instrs[i]);
2165         if (ret)
2166             return ret;
2167     }
2168 
2169     return 0;
2170 }
2171 
2172 static const struct nand_controller_ops omap_nand_controller_ops = {
2173     .attach_chip = omap_nand_attach_chip,
2174     .exec_op = omap_nand_exec_op,
2175 };
2176 
2177 /* Shared among all NAND instances to synchronize access to the ECC Engine */
2178 static struct nand_controller omap_gpmc_controller;
2179 static bool omap_gpmc_controller_initialized;
2180 
2181 static int omap_nand_probe(struct platform_device *pdev)
2182 {
2183     struct omap_nand_info       *info;
2184     struct mtd_info         *mtd;
2185     struct nand_chip        *nand_chip;
2186     int             err;
2187     struct resource         *res;
2188     struct device           *dev = &pdev->dev;
2189     void __iomem *vaddr;
2190 
2191     info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
2192                 GFP_KERNEL);
2193     if (!info)
2194         return -ENOMEM;
2195 
2196     info->pdev = pdev;
2197 
2198     err = omap_get_dt_info(dev, info);
2199     if (err)
2200         return err;
2201 
2202     info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
2203     if (!info->ops) {
2204         dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
2205         return -ENODEV;
2206     }
2207 
2208     nand_chip       = &info->nand;
2209     mtd         = nand_to_mtd(nand_chip);
2210     mtd->dev.parent     = &pdev->dev;
2211     nand_set_flash_node(nand_chip, dev->of_node);
2212 
2213     if (!mtd->name) {
2214         mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
2215                        "omap2-nand.%d", info->gpmc_cs);
2216         if (!mtd->name) {
2217             dev_err(&pdev->dev, "Failed to set MTD name\n");
2218             return -ENOMEM;
2219         }
2220     }
2221 
2222     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2223     vaddr = devm_ioremap_resource(&pdev->dev, res);
2224     if (IS_ERR(vaddr))
2225         return PTR_ERR(vaddr);
2226 
2227     info->fifo = vaddr;
2228     info->phys_base = res->start;
2229 
2230     if (!omap_gpmc_controller_initialized) {
2231         omap_gpmc_controller.ops = &omap_nand_controller_ops;
2232         nand_controller_init(&omap_gpmc_controller);
2233         omap_gpmc_controller_initialized = true;
2234     }
2235 
2236     nand_chip->controller = &omap_gpmc_controller;
2237 
2238     info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
2239                             GPIOD_IN);
2240     if (IS_ERR(info->ready_gpiod)) {
2241         dev_err(dev, "failed to get ready gpio\n");
2242         return PTR_ERR(info->ready_gpiod);
2243     }
2244 
2245     if (info->flash_bbt)
2246         nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
2247 
2248     /* default operations */
2249     info->data_in = omap_nand_data_in;
2250     info->data_out = omap_nand_data_out;
2251 
2252     err = nand_scan(nand_chip, 1);
2253     if (err)
2254         goto return_error;
2255 
2256     err = mtd_device_register(mtd, NULL, 0);
2257     if (err)
2258         goto cleanup_nand;
2259 
2260     platform_set_drvdata(pdev, mtd);
2261 
2262     return 0;
2263 
2264 cleanup_nand:
2265     nand_cleanup(nand_chip);
2266 
2267 return_error:
2268     if (!IS_ERR_OR_NULL(info->dma))
2269         dma_release_channel(info->dma);
2270 
2271     rawnand_sw_bch_cleanup(nand_chip);
2272 
2273     return err;
2274 }
2275 
2276 static int omap_nand_remove(struct platform_device *pdev)
2277 {
2278     struct mtd_info *mtd = platform_get_drvdata(pdev);
2279     struct nand_chip *nand_chip = mtd_to_nand(mtd);
2280     struct omap_nand_info *info = mtd_to_omap(mtd);
2281 
2282     rawnand_sw_bch_cleanup(nand_chip);
2283 
2284     if (info->dma)
2285         dma_release_channel(info->dma);
2286     WARN_ON(mtd_device_unregister(mtd));
2287     nand_cleanup(nand_chip);
2288     return 0;
2289 }
2290 
2291 /* omap_nand_ids defined in linux/platform_data/mtd-nand-omap2.h */
2292 MODULE_DEVICE_TABLE(of, omap_nand_ids);
2293 
2294 static struct platform_driver omap_nand_driver = {
2295     .probe      = omap_nand_probe,
2296     .remove     = omap_nand_remove,
2297     .driver     = {
2298         .name   = DRIVER_NAME,
2299         .of_match_table = omap_nand_ids,
2300     },
2301 };
2302 
2303 module_platform_driver(omap_nand_driver);
2304 
2305 MODULE_ALIAS("platform:" DRIVER_NAME);
2306 MODULE_LICENSE("GPL");
2307 MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");