// SPDX-License-Identifier: GPL-2.0
//
// Driver for the SPI-NAND mode of the MediaTek NAND Flash Interface
//
// Copyright (c) 2022 Chuanhong Guo <gch981213@gmail.com>
//
// The controller stores a page on the flash as interleaved sectors:
//   | sector 0 | FDM 0 | ECC 0 | sector 1 | FDM 1 | ECC 1 | ...
// where sizeof(FDM + ECC) == nfi_cfg.spare_size. With auto-format enabled,
// DMA transfers only the sector data; the FDM bytes are exchanged through
// the NFI_FDMxL/NFI_FDMxM registers and the ECC parity is generated and
// consumed by the ECC engine.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/of_platform.h>
#include <linux/mtd/nand-ecc-mtk.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/mtd/nand.h>

// NFI registers
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020
#define NFI_CMD_DUMMY_READ 0x00
#define NFI_CMD_DUMMY_WRITE 0x80

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM GENMASK(28, 24)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)

// SNFI registers
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_MAX_DUMMY 0xf
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

#define SNFI_POLL_INTERVAL 1000000

static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };

struct mtk_snand_caps {
	u16 sector_size;
	u16 max_sectors;
	u16 fdm_size;
	u16 fdm_ecc_size;
	u16 fifo_size;

	bool bbm_swap;
	bool empty_page_check;
	u32 mastersta_mask;

	const u8 *spare_sizes;
	u32 num_spare_size;
};

static const struct mtk_snand_caps mt7622_snand_caps = {
	.sector_size = 512,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 32,
	.bbm_swap = false,
	.empty_page_check = false,
	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
	.spare_sizes = mt7622_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

static const struct mtk_snand_caps mt7629_snand_caps = {
	.sector_size = 512,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 32,
	.bbm_swap = true,
	.empty_page_check = false,
	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
	.spare_sizes = mt7622_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

struct mtk_snand_conf {
	size_t page_size;
	size_t oob_size;
	u8 nsectors;
	u8 spare_size;
};

struct mtk_snand {
	struct spi_controller *ctlr;
	struct device *dev;
	struct clk *nfi_clk;
	struct clk *pad_clk;
	void __iomem *nfi_base;
	int irq;
	struct completion op_done;
	const struct mtk_snand_caps *caps;
	struct mtk_ecc_config *ecc_cfg;
	struct mtk_ecc *ecc;
	struct mtk_snand_conf nfi_cfg;
	struct mtk_ecc_stats ecc_stats;
	struct nand_ecc_engine ecc_eng;
	bool autofmt;
	u8 *buf;
	size_t buf_len;
};

static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand)
{
	struct nand_ecc_engine *eng = nand->ecc.engine;

	return container_of(eng, struct mtk_snand, ecc_eng);
}

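/*
 * Make sure the bounce buffer can hold at least @size bytes: it is only ever
 * grown, and a freshly allocated buffer is filled with 0xff so that bytes the
 * hardware doesn't touch read back like erased flash.
 */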
static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
{
	if (snf->buf_len >= size)
		return 0;
	kfree(snf->buf);
	snf->buf = kmalloc(size, GFP_KERNEL);
	if (!snf->buf)
		return -ENOMEM;
	snf->buf_len = size;
	memset(snf->buf, 0xff, snf->buf_len);
	return 0;
}

static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
{
	u32 val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

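/*
 * Copy a byte stream out of 32-bit-wide registers starting at @reg, handling
 * an unaligned start offset and a partial trailing word.
 */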
static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
{
	u32 i, val = 0, es = sizeof(u32);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (u8)(val >> (8 * (i % es)));
	}
}

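/*
 * Flush the FIFO and reset the NFI state machine, then wait for the bus
 * master, the FSMs and both FIFOs to report idle.
 */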
static int mtk_nfi_reset(struct mtk_snand *snf)
{
	u32 val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				 !(val & snf->caps->mastersta_mask), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "NFI master is still busy after reset\n");
		return ret;
	}

	ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
				 !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				 !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

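/*
 * Reset the SNFI MAC (the PIO engine used for generic SPI ops) and reprogram
 * the FIFO read latency and minimum CS deselect cycles.
 */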
static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	u32 val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				 !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL,
		    (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S));

	return ret;
}

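/*
 * Run the op bytes already staged in the GPRAM: program the output/input
 * lengths, pulse SF_TRIG, then wait for the MAC to signal ready and for the
 * WIP bit to clear before releasing the MAC.
 */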
static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
{
	int ret;
	u32 val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				 val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
				 0, SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Timed out waiting for WIP cleared\n");

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

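/*
 * Execute a generic SPI op in PIO mode: serialize opcode, address, dummy and
 * TX bytes into the GPRAM, trigger the MAC, then read any RX bytes back from
 * the GPRAM behind the TX portion.
 */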
static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
{
	u32 rx_len = 0;
	u32 reg_offs = 0;
	u32 val = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	int i, ret;
	u8 b;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		rx_len = op->data.nbytes;
		rx_buf = op->data.buf.in;
	} else {
		tx_buf = op->data.buf.out;
	}

	mtk_snand_mac_reset(snf);

	for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) {
		b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
		val |= b << (8 * (reg_offs % 4));
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	for (i = 0; i < op->addr.nbytes; i++, reg_offs++) {
		b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
		val |= b << (8 * (reg_offs % 4));
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) {
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->data.nbytes; i++, reg_offs++) {
			val |= tx_buf[i] << (8 * (reg_offs % 4));
			if (reg_offs % 4 == 3) {
				nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
				val = 0;
			}
		}
	}

	if (reg_offs % 4)
		nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);

	for (i = 0; i < reg_offs; i += 4)
		dev_dbg(snf->dev, "%d: %08X", i,
			nfi_read32(snf, SNF_GPRAM + i));

	dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);

	ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
	if (ret)
		return ret;

	if (!rx_len)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
	return 0;
}

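/*
 * Configure NFI_PAGEFMT for the given page/OOB size: pick the page size
 * class, the per-sector spare size index and the FDM layout, then resize the
 * bounce buffer accordingly. The result is cached so repeated calls with the
 * same geometry are no-ops.
 */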
static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
				   u32 oob_size)
{
	int spare_idx = -1;
	u32 spare_size, spare_size_shift, pagesize_idx;
	u32 sector_size_512;
	u8 nsectors;
	int i;

	// skip if it's already configured
	if (snf->nfi_cfg.page_size == page_size &&
	    snf->nfi_cfg.oob_size == oob_size)
		return 0;

	nsectors = page_size / snf->caps->sector_size;
	if (nsectors > snf->caps->max_sectors) {
		dev_err(snf->dev, "too many sectors required.\n");
		goto err;
	}

	if (snf->caps->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (page_size) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		dev_err(snf->dev, "unsupported page size.\n");
		goto err;
	}

	spare_size = oob_size / nsectors;
	// With a 1024-byte sector the hardware counts the spare size double,
	// so halve it here before matching against the spare size table.
	if (snf->caps->sector_size == 1024)
		spare_size /= 2;

	for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
		if (snf->caps->spare_sizes[i] <= spare_size) {
			spare_size = snf->caps->spare_sizes[i];
			if (snf->caps->sector_size == 1024)
				spare_size *= 2;
			spare_idx = i;
			break;
		}
	}

	if (spare_idx < 0) {
		dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
		goto err;
	}

	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->caps->fdm_size << NFI_FDM_NUM_S) |
		    (spare_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	snf->nfi_cfg.page_size = page_size;
	snf->nfi_cfg.oob_size = oob_size;
	snf->nfi_cfg.nsectors = nsectors;
	snf->nfi_cfg.spare_size = spare_size;

	dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
		snf->caps->sector_size, spare_size, nsectors);
	return snand_prepare_bouncebuf(snf, page_size + oob_size);
err:
	dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
		oob_size);
	return -EOPNOTSUPP;
}

static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobecc)
{
	// ECC parity is not accessible through the OOB layout
	return -ERANGE;
}

static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobfree)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mtk_snand *ms = nand_to_mtk_snand(nand);

	if (section >= ms->nfi_cfg.nsectors)
		return -ERANGE;

	// the first FDM byte of each sector is reserved for the bad block
	// marker
	oobfree->length = ms->caps->fdm_size - 1;
	oobfree->offset = section * ms->caps->fdm_size + 1;
	return 0;
}

static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
	.ecc = mtk_snand_ooblayout_ecc,
	.free = mtk_snand_ooblayout_free,
};

static int mtk_snand_ecc_init_ctx(struct nand_device *nand)
{
	struct mtk_snand *snf = nand_to_mtk_snand(nand);
	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
	struct nand_ecc_props *reqs = &nand->ecc.requirements;
	struct nand_ecc_props *user = &nand->ecc.user_conf;
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int step_size = 0, strength = 0, desired_correction = 0, steps;
	bool ecc_user = false;
	int ret;
	u32 parity_bits, max_ecc_bytes;
	struct mtk_ecc_config *ecc_cfg;

	ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
				      nand->memorg.oobsize);
	if (ret)
		return ret;

	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
	if (!ecc_cfg)
		return -ENOMEM;

	nand->ecc.ctx.priv = ecc_cfg;

	if (user->step_size && user->strength) {
		step_size = user->step_size;
		strength = user->strength;
		ecc_user = true;
	} else if (reqs->step_size && reqs->strength) {
		step_size = reqs->step_size;
		strength = reqs->strength;
	}

	// scale the requested strength to this controller's step size
	if (step_size && strength) {
		steps = mtd->writesize / step_size;
		desired_correction = steps * strength;
		strength = desired_correction / snf->nfi_cfg.nsectors;
	}

	ecc_cfg->mode = ECC_NFI_MODE;
	ecc_cfg->sectors = snf->nfi_cfg.nsectors;
	ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;

	// calculate the maximum possible strength under the current page
	// format
	parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
	max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
	ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
	mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);

	// If the user requested a specific strength, step downwards to the
	// smallest supported strength that still satisfies it. Otherwise keep
	// the maximum strength.
	if (ecc_user && strength) {
		u32 s_next = ecc_cfg->strength - 1;

		while (1) {
			mtk_ecc_adjust_strength(snf->ecc, &s_next);
			if (s_next >= ecc_cfg->strength)
				break;
			if (s_next < strength)
				break;
			ecc_cfg->strength = s_next;
			s_next--;
		}
	}

	mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);

	conf->step_size = snf->caps->sector_size;
	conf->strength = ecc_cfg->strength;

	if (ecc_cfg->strength < strength)
		dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
			 strength);
	dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
		 ecc_cfg->strength, snf->caps->sector_size);

	return 0;
}

static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
{
	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);

	kfree(ecc_cfg);
}

static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
					struct nand_page_io_req *req)
{
	struct mtk_snand *snf = nand_to_mtk_snand(nand);
	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
	int ret;

	ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
				      nand->memorg.oobsize);
	if (ret)
		return ret;
	snf->autofmt = true;
	snf->ecc_cfg = ecc_cfg;
	return 0;
}

static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
				       struct nand_page_io_req *req)
{
	struct mtk_snand *snf = nand_to_mtk_snand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	snf->ecc_cfg = NULL;
	snf->autofmt = false;
	if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
		return 0;

	if (snf->ecc_stats.failed)
		mtd->ecc_stats.failed += snf->ecc_stats.failed;
	mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
	return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
}

static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
	.init_ctx = mtk_snand_ecc_init_ctx,
	.cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
	.prepare_io_req = mtk_snand_ecc_prepare_io_req,
	.finish_io_req = mtk_snand_ecc_finish_io_req,
};

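/*
 * Read the per-sector FDM (OOB free data) bytes out of the NFI_FDMxL/M
 * registers into @buf after an auto-format read; mtk_snand_write_fdm() below
 * does the reverse for program operations, padding beyond fdm_size with 0xff.
 */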
static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
{
	u32 vall, valm;
	u8 *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->caps->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->caps->fdm_size;
	}
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
{
	u32 fdm_size = snf->caps->fdm_size;
	const u8 *oobptr = buf;
	u32 vall, valm;
	int i, j;

	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
					<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
					<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

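/*
 * With bbm_swap set, these helpers keep the factory bad block marker usable:
 * mtk_snand_bm_swap() exchanges the data byte that lands at the raw page's
 * BBM position with the first FDM byte of the last sector, and
 * mtk_snand_fdm_bm_swap() swaps the first FDM byte of the first and last
 * sectors so the marker shows up at OOB offset 0.
 */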
static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
{
	u32 buf_bbm_pos, fdm_bbm_pos;

	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
		return;

	// swap the data byte which lands at the BBM position of the raw page
	// with the first FDM byte of the last sector
	buf_bbm_pos = snf->nfi_cfg.page_size -
		      (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
	fdm_bbm_pos = snf->nfi_cfg.page_size +
		      (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;

	swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	u32 fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
		return;

	// swap the first FDM byte of the first and the last sector
	fdm_bbm_pos1 = snf->nfi_cfg.page_size;
	fdm_bbm_pos2 = snf->nfi_cfg.page_size +
		       (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
	swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
}

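/*
 * Execute a Read From Cache op in the controller's custom read mode: program
 * the read command/address/mode, DMA the sector data into the bounce buffer
 * (or straight into the caller's buffer when possible), collect FDM bytes
 * and ECC statistics, then copy the requested window back to the caller.
 */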
static int mtk_snand_read_page_cache(struct mtk_snand *snf,
				     const struct spi_mem_op *op)
{
	u8 *buf = snf->buf;
	u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
	// the address part to be sent by the controller
	u32 op_addr = op->addr.val;
	// where to start copying data from the bounce buffer
	u32 rd_offset = 0;
	u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
	u32 op_mode = 0;
	u32 dma_len = snf->buf_len;
	int ret = 0;
	u32 rd_mode, rd_bytes, val;
	dma_addr_t buf_dma;

	if (snf->autofmt) {
		u32 last_bit;
		u32 mask;

		dma_len = snf->nfi_cfg.page_size;
		op_mode = CNFG_AUTO_FMT_EN;
		if (op->data.ecc)
			op_mode |= CNFG_HW_ECC_EN;

		// Split the address at the highest bit set in
		// (page_size + oob_size): higher bits are sent over SPI as-is,
		// lower bits become an offset into the page buffer.
		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
		mask = (1 << last_bit) - 1;
		rd_offset = op_addr & mask;
		op_addr &= ~mask;

		// check if we can DMA directly into the caller's buffer
		if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
			buf = op->data.buf.in;
	}
	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	// command and dummy cycles
	nfi_write32(snf, SNF_RD_CTL2,
		    (dummy_clk << DATA_READ_DUMMY_S) |
		    (op->cmd.opcode << DATA_READ_CMD_S));

	// read address
	nfi_write32(snf, SNF_RD_CTL3, op_addr);

	// Set read mode
	if (op->data.buswidth == 4)
		rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
						   DATA_READ_MODE_X4;
	else if (op->data.buswidth == 2)
		rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
						   DATA_READ_MODE_X2;
	else
		rd_mode = DATA_READ_MODE_X1;
	rd_mode <<= DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
		  rd_mode | DATARD_CUSTOM_EN);

	// Set bytes to read
	rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
		   snf->nfi_cfg.nsectors;
	nfi_write32(snf, SNF_MISC_CTL2,
		    (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);

	// NFI read prepare
	nfi_write16(snf, NFI_CNFG,
		    (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
		    CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);

	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));

	buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(snf->dev, buf_dma);
	if (ret) {
		dev_err(snf->dev, "DMA mapping failed.\n");
		goto cleanup;
	}
	nfi_write32(snf, NFI_STRADDR, buf_dma);
	if (op->data.ecc) {
		snf->ecc_cfg->op = ECC_DECODE;
		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
		if (ret)
			goto cleanup_dma;
	}
	// Prepare for custom read interrupt
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	reinit_completion(&snf->op_done);

	// Trigger NFI into custom mode
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	// Start DMA read
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	if (!wait_for_completion_timeout(
		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
		dev_err(snf->dev, "DMA timed out for reading from cache.\n");
		ret = -ETIMEDOUT;
		goto cleanup2;
	}

	// Wait for BUS_SEC_CNTR returning expected value
	ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				 BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup2;
	}

	// Wait for the bus to become idle
	ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				 !(val & snf->caps->mastersta_mask), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
		goto cleanup2;
	}

	if (op->data.ecc) {
		ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
		if (ret) {
			dev_err(snf->dev, "wait ecc done timeout\n");
			goto cleanup2;
		}
		// save the decoding statistics for finish_io_req
		mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
				  snf->nfi_cfg.nsectors);
	}

	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);

	if (snf->autofmt) {
		mtk_snand_read_fdm(snf, buf_fdm);
		if (snf->caps->bbm_swap) {
			mtk_snand_bm_swap(snf, buf);
			mtk_snand_fdm_bm_swap(snf);
		}
	}

	// copy data back
	if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
		memset(op->data.buf.in, 0xff, op->data.nbytes);
		snf->ecc_stats.bitflips = 0;
		snf->ecc_stats.failed = 0;
		snf->ecc_stats.corrected = 0;
	} else {
		if (buf == op->data.buf.in) {
			u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
			u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;

			if (req_left)
				memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
				       buf_fdm,
				       cap_len < req_left ? cap_len : req_left);
		} else if (rd_offset < snf->buf_len) {
			u32 cap_len = snf->buf_len - rd_offset;

			if (op->data.nbytes < cap_len)
				cap_len = op->data.nbytes;
			memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
		}
	}
cleanup2:
	if (op->data.ecc)
		mtk_ecc_disable(snf->ecc);
cleanup_dma:
	// unmap DMA only if an error happened (on success it's already done
	// before the data copy above)
	if (ret)
		dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
cleanup:
	// Stop read
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	// Clear SNF done flag
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	// Disable interrupt
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
	return ret;
}

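/*
 * Execute a Program Load op in the controller's custom program mode: fill
 * the bounce buffer (padding unwritten leading bytes with 0xff), load the
 * FDM registers, then DMA the sector data to the chip's cache while the ECC
 * engine appends parity when requested.
 */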
static int mtk_snand_write_page_cache(struct mtk_snand *snf,
				      const struct spi_mem_op *op)
{
	// the address part to be sent by the controller
	u32 op_addr = op->addr.val;
	// where to start copying data into the bounce buffer
	u32 wr_offset = 0;
	u32 op_mode = 0;
	int ret = 0;
	u32 wr_mode = 0;
	u32 dma_len = snf->buf_len;
	u32 wr_bytes, val;
	size_t cap_len;
	dma_addr_t buf_dma;

	if (snf->autofmt) {
		u32 last_bit;
		u32 mask;

		dma_len = snf->nfi_cfg.page_size;
		op_mode = CNFG_AUTO_FMT_EN;
		if (op->data.ecc)
			op_mode |= CNFG_HW_ECC_EN;

		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
		mask = (1 << last_bit) - 1;
		wr_offset = op_addr & mask;
		op_addr &= ~mask;
	}
	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	if (wr_offset)
		memset(snf->buf, 0xff, wr_offset);

	cap_len = snf->buf_len - wr_offset;
	if (op->data.nbytes < cap_len)
		cap_len = op->data.nbytes;
	memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
	if (snf->autofmt) {
		if (snf->caps->bbm_swap) {
			mtk_snand_fdm_bm_swap(snf);
			mtk_snand_bm_swap(snf, snf->buf);
		}
		mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
	}

	// command
	nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));

	// write address
	nfi_write32(snf, SNF_PG_CTL2, op_addr);

	// Set write mode
	if (op->data.buswidth == 4)
		wr_mode = PG_LOAD_X4_EN;

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
		  wr_mode | PG_LOAD_CUSTOM_EN);

	// Set bytes to write
	wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
		   snf->nfi_cfg.nsectors;
	nfi_write32(snf, SNF_MISC_CTL2,
		    (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);

	// NFI write prepare
	nfi_write16(snf, NFI_CNFG,
		    (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);

	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
	buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(snf->dev, buf_dma);
	if (ret) {
		dev_err(snf->dev, "DMA mapping failed.\n");
		goto cleanup;
	}
	nfi_write32(snf, NFI_STRADDR, buf_dma);
	if (op->data.ecc) {
		snf->ecc_cfg->op = ECC_ENCODE;
		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
		if (ret)
			goto cleanup_dma;
	}
	// Prepare for custom program interrupt
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	reinit_completion(&snf->op_done);

	// Trigger NFI into custom mode
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	// Start DMA write
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	if (!wait_for_completion_timeout(
		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
		dev_err(snf->dev, "DMA timed out for program load.\n");
		ret = -ETIMEDOUT;
		goto cleanup_ecc;
	}

	// Wait for NFI_SEC_CNTR returning expected value
	ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
				 NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
				 SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");

cleanup_ecc:
	if (op->data.ecc)
		mtk_ecc_disable(snf->ecc);
cleanup_dma:
	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
cleanup:
	// Stop write
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	// Clear SNF done flag
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	// Disable interrupt
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

/**
 * mtk_snand_is_page_ops() - check if the op is a controller supported page op.
 * @op: spi-mem op to check
 *
 * Check whether op can be executed with read_from_cache or program_load
 * mode in the controller.
 * This controller can execute the typical Read From Cache and Program Load
 * instructions found on SPI-NAND with 2-byte addresses.
 * DTR and cmd buswidth & nbytes should be checked before calling this.
 *
 * Return: true if the op matches the instruction template
 */
static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
{
	if (op->addr.nbytes != 2)
		return false;

	if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
	    op->addr.buswidth != 4)
		return false;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		// check dummy cycle first
		if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
		    DATA_READ_MAX_DUMMY)
			return false;
		// quad io / quad out
		if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
		    op->data.buswidth == 4)
			return true;

		// dual io / dual out
		if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
		    op->data.buswidth == 2)
			return true;

		// standard spi
		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
			return true;
	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
		// custom program load doesn't support dummy cycles
		if (op->dummy.nbytes)
			return false;
		// program load x4
		if (op->addr.buswidth == 1 && op->data.buswidth == 4)
			return true;
		// standard spi
		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
			return true;
	}
	return false;
}

static bool mtk_snand_supports_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;
	if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
		return false;
	if (mtk_snand_is_page_ops(op))
		return true;
	return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
		(op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
		(op->data.nbytes == 0 || op->data.buswidth == 1));
}

static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);

	// The page op transfer size is exactly ((sector_size + spare_size) *
	// nsectors). Limit the op size if the caller requests more than that.
	// exec_op will read more than needed and discard the leftover if the
	// caller requests less data.
	if (mtk_snand_is_page_ops(op)) {
		size_t l;

		if (ms->autofmt)
			return 0;
		l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
		l *= ms->nfi_cfg.nsectors;
		if (op->data.nbytes > l)
			op->data.nbytes = l;
	} else {
		size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (hl >= SNF_GPRAM_SIZE)
			return -EOPNOTSUPP;
		if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
			op->data.nbytes = SNF_GPRAM_SIZE - hl;
	}
	return 0;
}

static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);

	dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
		op->addr.val, op->addr.buswidth, op->addr.nbytes,
		op->data.buswidth, op->data.nbytes);
	if (mtk_snand_is_page_ops(op)) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			return mtk_snand_read_page_cache(ms, op);
		else
			return mtk_snand_write_page_cache(ms, op);
	} else {
		return mtk_snand_mac_io(ms, op);
	}
}

static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
	.adjust_op_size = mtk_snand_adjust_op_size,
	.supports_op = mtk_snand_supports_op,
	.exec_op = mtk_snand_exec_op,
};

static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
	.ecc = true,
};

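/*
 * Completion interrupt for custom read/program operations: mask further
 * interrupts and wake up the waiter in the page read/write paths.
 */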
static irqreturn_t mtk_snand_irq(int irq, void *id)
{
	struct mtk_snand *snf = id;
	u32 sta, ien;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return IRQ_NONE;

	nfi_write32(snf, NFI_INTR_EN, 0);
	complete(&snf->op_done);
	return IRQ_HANDLED;
}

static const struct of_device_id mtk_snand_ids[] = {
	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
	{},
};

MODULE_DEVICE_TABLE(of, mtk_snand_ids);

static int mtk_snand_enable_clk(struct mtk_snand *ms)
{
	int ret;

	ret = clk_prepare_enable(ms->nfi_clk);
	if (ret) {
		dev_err(ms->dev, "unable to enable nfi clk\n");
		return ret;
	}
	ret = clk_prepare_enable(ms->pad_clk);
	if (ret) {
		dev_err(ms->dev, "unable to enable pad clk\n");
		goto err1;
	}
	return 0;
err1:
	clk_disable_unprepare(ms->nfi_clk);
	return ret;
}

static void mtk_snand_disable_clk(struct mtk_snand *ms)
{
	clk_disable_unprepare(ms->pad_clk);
	clk_disable_unprepare(ms->nfi_clk);
}

static int mtk_snand_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *dev_id;
	struct spi_controller *ctlr;
	struct mtk_snand *ms;
	int ret;

	dev_id = of_match_node(mtk_snand_ids, np);
	if (!dev_id)
		return -EINVAL;

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms));
	if (!ctlr)
		return -ENOMEM;
	platform_set_drvdata(pdev, ctlr);

	ms = spi_controller_get_devdata(ctlr);

	ms->ctlr = ctlr;
	ms->caps = dev_id->data;

	ms->ecc = of_mtk_ecc_get(np);
	if (IS_ERR(ms->ecc))
		return PTR_ERR(ms->ecc);
	else if (!ms->ecc)
		return -ENODEV;

	ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ms->nfi_base)) {
		ret = PTR_ERR(ms->nfi_base);
		goto release_ecc;
	}

	ms->dev = &pdev->dev;

	ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk");
	if (IS_ERR(ms->nfi_clk)) {
		ret = PTR_ERR(ms->nfi_clk);
		dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
		goto release_ecc;
	}

	ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk");
	if (IS_ERR(ms->pad_clk)) {
		ret = PTR_ERR(ms->pad_clk);
		dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
		goto release_ecc;
	}

	ret = mtk_snand_enable_clk(ms);
	if (ret)
		goto release_ecc;

	init_completion(&ms->op_done);

	ms->irq = platform_get_irq(pdev, 0);
	if (ms->irq < 0) {
		ret = ms->irq;
		goto disable_clk;
	}
	ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
			       "mtk-snand", ms);
	if (ret) {
		dev_err(ms->dev, "failed to request snfi irq\n");
		goto disable_clk;
	}

	ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(ms->dev, "failed to set dma mask\n");
		goto disable_clk;
	}

	// switch to SNFI mode
	nfi_write32(ms, SNF_CFG, SPI_MODE);

	// setup an initial page format for ops matching the page_cache_op
	// template before the ECC engine is initialized
	ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size,
				      ms->caps->spare_sizes[0]);
	if (ret) {
		dev_err(ms->dev, "failed to set initial page format\n");
		goto disable_clk;
	}

	// setup ECC engine
	ms->ecc_eng.dev = &pdev->dev;
	ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
	ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
	ms->ecc_eng.priv = ms;

	ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
	if (ret) {
		dev_err(&pdev->dev, "failed to register ecc engine.\n");
		goto disable_clk;
	}

	ctlr->num_chipselect = 1;
	ctlr->mem_ops = &mtk_snand_mem_ops;
	ctlr->mem_caps = &mtk_snand_mem_caps;
	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->dev.of_node = pdev->dev.of_node;
	ret = spi_register_controller(ctlr);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_controller failed.\n");
		goto disable_clk;
	}

	return 0;
disable_clk:
	mtk_snand_disable_clk(ms);
release_ecc:
	mtk_ecc_release(ms->ecc);
	return ret;
}

static int mtk_snand_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct mtk_snand *ms = spi_controller_get_devdata(ctlr);

	spi_unregister_controller(ctlr);
	mtk_snand_disable_clk(ms);
	mtk_ecc_release(ms->ecc);
	kfree(ms->buf);
	return 0;
}

static struct platform_driver mtk_snand_driver = {
	.probe = mtk_snand_probe,
	.remove = mtk_snand_remove,
	.driver = {
		.name = "mtk-snand",
		.of_match_table = mtk_snand_ids,
	},
};

module_platform_driver(mtk_snand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");