0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/device.h>
0012 #include <linux/module.h>
0013 #include <linux/mtd/mtd.h>
0014 #include <linux/mtd/onenand.h>
0015 #include <linux/mtd/partitions.h>
0016 #include <linux/of_device.h>
0017 #include <linux/omap-gpmc.h>
0018 #include <linux/platform_device.h>
0019 #include <linux/interrupt.h>
0020 #include <linux/delay.h>
0021 #include <linux/dma-mapping.h>
0022 #include <linux/dmaengine.h>
0023 #include <linux/io.h>
0024 #include <linux/slab.h>
0025 #include <linux/gpio/consumer.h>
0026
0027 #include <asm/mach/flash.h>
0028
#define DRIVER_NAME "omap2-onenand"

/* Size of the BufferRAM window cleared in omap2_onenand_shutdown(). */
#define ONENAND_BUFRAM_SIZE (1024 * 5)

/* Per-device state for the OMAP2/3 GPMC-attached OneNAND glue driver. */
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;			/* GPMC chip-select number (DT "reg") */
	unsigned long phys_base;	/* physical base of the GPMC memory window */
	struct gpio_desc *int_gpiod;	/* optional INT gpio; enables IRQ-driven waits */
	struct mtd_info mtd;
	struct onenand_chip onenand;
	struct completion irq_done;	/* completed by the INT gpio interrupt handler */
	struct completion dma_done;	/* completed by the DMA memcpy callback */
	struct dma_chan *dma_chan;	/* optional DMA_MEMCPY channel for bufferram I/O */
};
0044
/* DMA completion callback: wake the waiter in omap2_onenand_dma_transfer(). */
static void omap2_onenand_dma_complete_func(void *completion)
{
	struct completion *done = completion;

	complete(done);
}
0049
0050 static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
0051 {
0052 struct omap2_onenand *c = dev_id;
0053
0054 complete(&c->irq_done);
0055
0056 return IRQ_HANDLED;
0057 }
0058
0059 static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
0060 {
0061 return readw(c->onenand.base + reg);
0062 }
0063
0064 static inline void write_reg(struct omap2_onenand *c, unsigned short value,
0065 int reg)
0066 {
0067 writew(value, c->onenand.base + reg);
0068 }
0069
0070 static int omap2_onenand_set_cfg(struct omap2_onenand *c,
0071 bool sr, bool sw,
0072 int latency, int burst_len)
0073 {
0074 unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
0075
0076 reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
0077
0078 switch (burst_len) {
0079 case 0:
0080 break;
0081 case 4:
0082 reg |= ONENAND_SYS_CFG1_BL_4;
0083 break;
0084 case 8:
0085 reg |= ONENAND_SYS_CFG1_BL_8;
0086 break;
0087 case 16:
0088 reg |= ONENAND_SYS_CFG1_BL_16;
0089 break;
0090 case 32:
0091 reg |= ONENAND_SYS_CFG1_BL_32;
0092 break;
0093 default:
0094 return -EINVAL;
0095 }
0096
0097 if (latency > 5)
0098 reg |= ONENAND_SYS_CFG1_HF;
0099 if (latency > 7)
0100 reg |= ONENAND_SYS_CFG1_VHF;
0101 if (sr)
0102 reg |= ONENAND_SYS_CFG1_SYNC_READ;
0103 if (sw)
0104 reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
0105
0106 write_reg(c, reg, ONENAND_REG_SYS_CFG1);
0107
0108 return 0;
0109 }
0110
/*
 * Map the frequency field (bits 7:4) of the OneNAND version ID to the
 * chip's synchronous clock rate in MHz, or -EINVAL if unrecognized.
 */
static int omap2_onenand_get_freq(int ver)
{
	static const int freq_mhz[] = { 40, 54, 66, 83, 104 };
	unsigned int idx = (ver >> 4) & 0xf;

	if (idx < sizeof(freq_mhz) / sizeof(freq_mhz[0]))
		return freq_mhz[idx];

	return -EINVAL;
}
0128
/* Log a fatal wait condition together with the raw status registers. */
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	pr_err("onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}
0134
/* Log a non-fatal wait anomaly together with the raw status registers. */
static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	pr_warn("onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
		msg, state, ctrl, intr);
}
0141
/*
 * Wait for the current OneNAND operation to finish.  Three strategies,
 * chosen by @state:
 *  - reset / erase-prepare / erase-verify: a short udelay() polling loop,
 *    falling through to the interrupt path if the poll window expires;
 *  - any other non-read state: sleep on the INT gpio interrupt;
 *  - FL_READING: poll the interrupt register with chip interrupts off.
 * Returns 0 on success or a negative error code.
 */
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	unsigned int intr = 0;
	unsigned int ctrl, ctrl_mask;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETTING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETTING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			/* Erase-verify gets a longer poll window (~100us). */
			i = 101;
			break;
		}

		/* Poll in 1us steps until the master interrupt bit is set. */
		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) == intr_flags)
			return 0;
		/* Continue in the wait-for-interrupt branch below. */
	}

	if (state != FL_READING) {
		int result;

		/* Turn chip interrupts on (IOBE = interrupt output buffer enable). */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			/* The read-back acts as a delay to let the GPIO settle. */
			syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		reinit_completion(&c->irq_done);
		result = gpiod_get_value(c->int_gpiod);
		if (result < 0) {
			ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			wait_err("gpio error", state, ctrl, intr);
			return result;
		} else if (result == 0) {
			/* INT line still low: the operation has not finished yet. */
			int retry_cnt = 0;
retry:
			if (!wait_for_completion_io_timeout(&c->irq_done,
						    msecs_to_jiffies(20))) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO &&
				    !this->ongoing) {
					/*
					 * The operation seems to be still
					 * going, so give it some more time
					 * (up to 3 x 20ms in total).
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn chip interrupts off and poll the interrupt register. */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still
					 * going, so extend the deadline
					 * (up to 3 x 20ms in total).
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				/* Uncorrectable: report and count the failure. */
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				/* Single-bit errors were corrected by the chip. */
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	/*
	 * Warn about unexpected status bits, but don't warn about the ONGO
	 * bit (0x8000) when the caller marked the operation as ongoing.
	 */
	ctrl_mask = 0xFE9F;
	if (this->ongoing)
		ctrl_mask &= ~0x8000;

	if (ctrl & ctrl_mask)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
0309
0310 static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
0311 {
0312 struct onenand_chip *this = mtd->priv;
0313
0314 if (ONENAND_CURRENT_BUFFERRAM(this)) {
0315 if (area == ONENAND_DATARAM)
0316 return this->writesize;
0317 if (area == ONENAND_SPARERAM)
0318 return mtd->oobsize;
0319 }
0320
0321 return 0;
0322 }
0323
0324 static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
0325 dma_addr_t src, dma_addr_t dst,
0326 size_t count)
0327 {
0328 struct dma_async_tx_descriptor *tx;
0329 dma_cookie_t cookie;
0330
0331 tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
0332 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
0333 if (!tx) {
0334 dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
0335 return -EIO;
0336 }
0337
0338 reinit_completion(&c->dma_done);
0339
0340 tx->callback = omap2_onenand_dma_complete_func;
0341 tx->callback_param = &c->dma_done;
0342
0343 cookie = tx->tx_submit(tx);
0344 if (dma_submit_error(cookie)) {
0345 dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
0346 return -EIO;
0347 }
0348
0349 dma_async_issue_pending(c->dma_chan);
0350
0351 if (!wait_for_completion_io_timeout(&c->dma_done,
0352 msecs_to_jiffies(20))) {
0353 dmaengine_terminate_sync(c->dma_chan);
0354 return -ETIMEDOUT;
0355 }
0356
0357 return 0;
0358 }
0359
0360 static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
0361 unsigned char *buffer, int offset,
0362 size_t count)
0363 {
0364 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
0365 struct onenand_chip *this = mtd->priv;
0366 struct device *dev = &c->pdev->dev;
0367 void *buf = (void *)buffer;
0368 dma_addr_t dma_src, dma_dst;
0369 int bram_offset, err;
0370 size_t xtra;
0371
0372 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
0373
0374
0375
0376
0377
0378 if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
0379 count < 384 || mtd->oops_panic_write)
0380 goto out_copy;
0381
0382 xtra = count & 3;
0383 if (xtra) {
0384 count -= xtra;
0385 memcpy(buf + count, this->base + bram_offset + count, xtra);
0386 }
0387
0388 dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
0389 dma_src = c->phys_base + bram_offset;
0390
0391 if (dma_mapping_error(dev, dma_dst)) {
0392 dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
0393 goto out_copy;
0394 }
0395
0396 err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
0397 dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
0398 if (!err)
0399 return 0;
0400
0401 dev_err(dev, "timeout waiting for DMA\n");
0402
0403 out_copy:
0404 memcpy(buf, this->base + bram_offset, count);
0405 return 0;
0406 }
0407
0408 static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
0409 const unsigned char *buffer,
0410 int offset, size_t count)
0411 {
0412 struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
0413 struct onenand_chip *this = mtd->priv;
0414 struct device *dev = &c->pdev->dev;
0415 void *buf = (void *)buffer;
0416 dma_addr_t dma_src, dma_dst;
0417 int bram_offset, err;
0418
0419 bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
0420
0421
0422
0423
0424
0425 if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
0426 count < 384 || mtd->oops_panic_write)
0427 goto out_copy;
0428
0429 dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
0430 dma_dst = c->phys_base + bram_offset;
0431 if (dma_mapping_error(dev, dma_src)) {
0432 dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
0433 goto out_copy;
0434 }
0435
0436 err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
0437 dma_unmap_page(dev, dma_src, count, DMA_TO_DEVICE);
0438 if (!err)
0439 return 0;
0440
0441 dev_err(dev, "timeout waiting for DMA\n");
0442
0443 out_copy:
0444 memcpy(this->base + bram_offset, buf, count);
0445 return 0;
0446 }
0447
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/*
	 * Wipe the whole BufferRAM window before handing the system over.
	 * NOTE(review): presumably stale BufferRAM contents can confuse
	 * subsequent boot code on these boards — confirm against the
	 * platform documentation.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
0458
/*
 * Probe: map the GPMC chip-select window, set up the optional INT gpio
 * (interrupt-driven waits) and optional DMA memcpy channel (bufferram
 * transfers), scan the OneNAND, and program optimized GPMC timings when
 * the chip reports a known synchronous frequency.
 */
static int omap2_onenand_probe(struct platform_device *pdev)
{
	u32 val;
	dma_cap_mask_t mask;
	int freq, latency, r;
	struct resource *res;
	struct omap2_onenand *c;
	struct gpmc_onenand_info info;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "error getting memory resource\n");
		return -EINVAL;
	}

	/* The DT "reg" property holds the GPMC chip-select number. */
	r = of_property_read_u32(np, "reg", &val);
	if (r) {
		dev_err(dev, "reg not found in DT\n");
		return r;
	}

	c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = val;
	c->phys_base = res->start;

	c->onenand.base = devm_ioremap_resource(dev, res);
	if (IS_ERR(c->onenand.base))
		return PTR_ERR(c->onenand.base);

	c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
	if (IS_ERR(c->int_gpiod)) {
		/* dev_err_probe() stays silent on -EPROBE_DEFER */
		return dev_err_probe(dev, PTR_ERR(c->int_gpiod), "error getting gpio\n");
	}

	if (c->int_gpiod) {
		r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING, "onenand", c);
		if (r)
			return r;

		/* With an INT gpio we can use the interrupt-driven wait. */
		c->onenand.wait = omap2_onenand_wait;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* DMA is optional; without a channel, bufferram access stays PIO. */
	c->dma_chan = dma_request_channel(mask, NULL, NULL);
	if (c->dma_chan) {
		c->onenand.read_bufferram = omap2_onenand_read_bufferram;
		c->onenand.write_bufferram = omap2_onenand_write_bufferram;
	}

	c->pdev = pdev;
	c->mtd.priv = &c->onenand;
	c->mtd.dev.parent = dev;
	mtd_set_of_node(&c->mtd, dev->of_node);

	dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
		 c->gpmc_cs, c->phys_base, c->onenand.base,
		 c->dma_chan ? "DMA" : "PIO");

	r = onenand_scan(&c->mtd, 1);
	if (r < 0)
		goto err_release_dma;

	freq = omap2_onenand_get_freq(c->onenand.version_id);
	if (freq > 0) {
		/*
		 * NOTE(review): omap2_onenand_get_freq() can return 54,
		 * but only "case 56" exists below, so 54 MHz parts fall
		 * through to the default latency of 3 — confirm intended.
		 */
		switch (freq) {
		case 104:
			latency = 7;
			break;
		case 83:
			latency = 6;
			break;
		case 66:
			latency = 5;
			break;
		case 56:
			latency = 4;
			break;
		default:
			latency = 3;
			break;
		}

		r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
						  freq, latency, &info);
		if (r)
			goto err_release_onenand;

		r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
					  latency, info.burst_len);
		if (r)
			goto err_release_onenand;

		if (info.sync_read || info.sync_write)
			dev_info(dev, "optimized timings for %d MHz\n", freq);
	}

	r = mtd_device_register(&c->mtd, NULL, 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_chan)
		dma_release_channel(c->dma_chan);

	return r;
}
0583
0584 static int omap2_onenand_remove(struct platform_device *pdev)
0585 {
0586 struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
0587
0588 onenand_release(&c->mtd);
0589 if (c->dma_chan)
0590 dma_release_channel(c->dma_chan);
0591 omap2_onenand_shutdown(pdev);
0592
0593 return 0;
0594 }
0595
/* Device-tree match table. */
static const struct of_device_id omap2_onenand_id_table[] = {
	{ .compatible = "ti,omap2-onenand", },
	{},
};
MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);

static struct platform_driver omap2_onenand_driver = {
	.probe = omap2_onenand_probe,
	.remove = omap2_onenand_remove,
	.shutdown = omap2_onenand_shutdown,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = omap2_onenand_id_table,
	},
};

module_platform_driver(omap2_onenand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");