0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/amba/bus.h>
0014 #include <linux/err.h>
0015 #include <linux/delay.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/io.h>
0018 #include <linux/ioport.h>
0019 #include <linux/iopoll.h>
0020 #include <linux/irq.h>
0021 #include <linux/module.h>
0022 #include <linux/moduleparam.h>
0023 #include <linux/mtd/mtd.h>
0024 #include <linux/mtd/rawnand.h>
0025 #include <linux/mtd/partitions.h>
0026 #include <linux/of_address.h>
0027 #include <linux/of_device.h>
0028 #include <linux/of_platform.h>
0029 #include <linux/platform_device.h>
0030 #include <linux/slab.h>
0031 #include <linux/clk.h>
0032
0033 #define PL35X_NANDC_DRIVER_NAME "pl35x-nand-controller"
0034
0035
0036 #define PL35X_SMC_MEMC_STATUS 0x0
0037 #define PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1 BIT(6)
0038
0039 #define PL35X_SMC_MEMC_CFG_CLR 0xC
0040 #define PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1 BIT(1)
0041 #define PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 BIT(4)
0042 #define PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 BIT(6)
0043
0044 #define PL35X_SMC_DIRECT_CMD 0x10
0045 #define PL35X_SMC_DIRECT_CMD_NAND_CS (0x4 << 23)
0046 #define PL35X_SMC_DIRECT_CMD_UPD_REGS (0x2 << 21)
0047
0048 #define PL35X_SMC_CYCLES 0x14
0049 #define PL35X_SMC_NAND_TRC_CYCLES(x) ((x) << 0)
0050 #define PL35X_SMC_NAND_TWC_CYCLES(x) ((x) << 4)
0051 #define PL35X_SMC_NAND_TREA_CYCLES(x) ((x) << 8)
0052 #define PL35X_SMC_NAND_TWP_CYCLES(x) ((x) << 11)
0053 #define PL35X_SMC_NAND_TCLR_CYCLES(x) ((x) << 14)
0054 #define PL35X_SMC_NAND_TAR_CYCLES(x) ((x) << 17)
0055 #define PL35X_SMC_NAND_TRR_CYCLES(x) ((x) << 20)
0056
0057 #define PL35X_SMC_OPMODE 0x18
0058 #define PL35X_SMC_OPMODE_BW_8 0
0059 #define PL35X_SMC_OPMODE_BW_16 1
0060
0061 #define PL35X_SMC_ECC_STATUS 0x400
0062 #define PL35X_SMC_ECC_STATUS_ECC_BUSY BIT(6)
0063
0064 #define PL35X_SMC_ECC_CFG 0x404
0065 #define PL35X_SMC_ECC_CFG_MODE_MASK 0xC
0066 #define PL35X_SMC_ECC_CFG_MODE_BYPASS 0
0067 #define PL35X_SMC_ECC_CFG_MODE_APB BIT(2)
0068 #define PL35X_SMC_ECC_CFG_MODE_MEM BIT(3)
0069 #define PL35X_SMC_ECC_CFG_PGSIZE_MASK 0x3
0070
0071 #define PL35X_SMC_ECC_CMD1 0x408
0072 #define PL35X_SMC_ECC_CMD1_WRITE(x) ((x) << 0)
0073 #define PL35X_SMC_ECC_CMD1_READ(x) ((x) << 8)
0074 #define PL35X_SMC_ECC_CMD1_READ_END(x) ((x) << 16)
0075 #define PL35X_SMC_ECC_CMD1_READ_END_VALID(x) ((x) << 24)
0076
0077 #define PL35X_SMC_ECC_CMD2 0x40C
0078 #define PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(x) ((x) << 0)
0079 #define PL35X_SMC_ECC_CMD2_READ_COL_CHG(x) ((x) << 8)
0080 #define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(x) ((x) << 16)
0081 #define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(x) ((x) << 24)
0082
0083 #define PL35X_SMC_ECC_VALUE(x) (0x418 + (4 * (x)))
0084 #define PL35X_SMC_ECC_VALUE_IS_CORRECTABLE(x) ((x) & BIT(27))
0085 #define PL35X_SMC_ECC_VALUE_HAS_FAILED(x) ((x) & BIT(28))
0086 #define PL35X_SMC_ECC_VALUE_IS_VALID(x) ((x) & BIT(30))
0087
0088
0089 #define PL35X_SMC_CMD_PHASE 0
0090 #define PL35X_SMC_CMD_PHASE_CMD0(x) ((x) << 3)
0091 #define PL35X_SMC_CMD_PHASE_CMD1(x) ((x) << 11)
0092 #define PL35X_SMC_CMD_PHASE_CMD1_VALID BIT(20)
0093 #define PL35X_SMC_CMD_PHASE_ADDR(pos, x) ((x) << (8 * (pos)))
0094 #define PL35X_SMC_CMD_PHASE_NADDRS(x) ((x) << 21)
0095 #define PL35X_SMC_DATA_PHASE BIT(19)
0096 #define PL35X_SMC_DATA_PHASE_ECC_LAST BIT(10)
0097 #define PL35X_SMC_DATA_PHASE_CLEAR_CS BIT(21)
0098
0099 #define PL35X_NAND_MAX_CS 1
0100 #define PL35X_NAND_LAST_XFER_SZ 4
0101 #define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, period_ns))
0102
0103 #define PL35X_NAND_ECC_BITS_MASK 0xFFF
0104 #define PL35X_NAND_ECC_BYTE_OFF_MASK 0x1FF
0105 #define PL35X_NAND_ECC_BIT_OFF_MASK 0x7
0106
/*
 * NAND cycle counts, packed to mirror the bitfield layout of the
 * PL35X_SMC_CYCLES register (see the PL35X_SMC_NAND_T*_CYCLES() macros).
 */
struct pl35x_nand_timings {
	unsigned int t_rc:4;	/* Read cycle time */
	unsigned int t_wc:4;	/* Write cycle time */
	unsigned int t_rea:3;	/* RE# access time */
	unsigned int t_wp:3;	/* WE# pulse width */
	unsigned int t_clr:3;	/* CLE to RE# delay */
	unsigned int t_ar:3;	/* ALE to RE# delay */
	unsigned int t_rr:4;	/* Ready to RE# delay */
	unsigned int rsvd:8;	/* Padding up to 32 bits */
};
0117
/*
 * Per-chip private data.
 * @node: Entry in the controller's list of chips
 * @chip: Core NAND chip structure
 * @cs: Chip select line wired to this chip
 * @addr_cycles: Number of address cycles used by page accesses
 * @ecc_cfg: Cached ECC_CFG register value for this chip
 * @timings: Cached SMC_CYCLES register value for this chip
 */
struct pl35x_nand {
	struct list_head node;
	struct nand_chip chip;
	unsigned int cs;
	unsigned int addr_cycles;
	u32 ecc_cfg;
	u32 timings;
};
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
/*
 * NAND controller private data.
 * @dev: Related device
 * @conf_regs: SMC configuration register region (AMBA parent resource)
 * @io_regs: NAND command/data phase register region
 * @controller: Core NAND controller structure
 * @chips: List of connected NAND chips
 * @selected_chip: Currently selected chip (timings/ECC cfg applied)
 * @assigned_cs: Bitmap of chip selects already claimed
 * @ecc_buf: Scratch buffer for the ECC bytes of one page
 */
struct pl35x_nandc {
	struct device *dev;
	void __iomem *conf_regs;
	void __iomem *io_regs;
	struct nand_controller controller;
	struct list_head chips;
	struct nand_chip *selected_chip;
	unsigned long assigned_cs;
	u8 *ecc_buf;
};
0148
/* Retrieve the driver's controller structure from the core one */
static inline struct pl35x_nandc *to_pl35x_nandc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct pl35x_nandc, controller);
}
0153
/* Retrieve the driver's per-chip structure from the core NAND chip */
static inline struct pl35x_nand *to_pl35x_nand(struct nand_chip *chip)
{
	return container_of(chip, struct pl35x_nand, chip);
}
0158
0159 static int pl35x_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section,
0160 struct mtd_oob_region *oobregion)
0161 {
0162 struct nand_chip *chip = mtd_to_nand(mtd);
0163
0164 if (section >= chip->ecc.steps)
0165 return -ERANGE;
0166
0167 oobregion->offset = (section * chip->ecc.bytes);
0168 oobregion->length = chip->ecc.bytes;
0169
0170 return 0;
0171 }
0172
0173 static int pl35x_ecc_ooblayout16_free(struct mtd_info *mtd, int section,
0174 struct mtd_oob_region *oobregion)
0175 {
0176 struct nand_chip *chip = mtd_to_nand(mtd);
0177
0178 if (section >= chip->ecc.steps)
0179 return -ERANGE;
0180
0181 oobregion->offset = (section * chip->ecc.bytes) + 8;
0182 oobregion->length = 8;
0183
0184 return 0;
0185 }
0186
/* OOB layout operations for devices with a 16-byte OOB area */
static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = {
	.ecc = pl35x_ecc_ooblayout16_ecc,
	.free = pl35x_ecc_ooblayout16_free,
};
0191
0192
/*
 * On-flash bad block table descriptors: the "Bbt0"/"1tbB" patterns and
 * their version bytes live in the OOB area (offset 4/20) of the last
 * blocks of each chip. Used for on-die ECC chips (see attach_chip).
 */
static u8 bbt_pattern[] = { 'B', 'b', 't', '0' };
static u8 mirror_pattern[] = { '1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 4,
	.len = 4,
	.veroffs = 20,
	.maxblocks = 4,
	.pattern = mirror_pattern
};
0215
/* Commit the timing/opmode registers for the NAND chip select */
static void pl35x_smc_update_regs(struct pl35x_nandc *nfc)
{
	writel(PL35X_SMC_DIRECT_CMD_NAND_CS |
	       PL35X_SMC_DIRECT_CMD_UPD_REGS,
	       nfc->conf_regs + PL35X_SMC_DIRECT_CMD);
}
0222
0223 static int pl35x_smc_set_buswidth(struct pl35x_nandc *nfc, unsigned int bw)
0224 {
0225 if (bw != PL35X_SMC_OPMODE_BW_8 && bw != PL35X_SMC_OPMODE_BW_16)
0226 return -EINVAL;
0227
0228 writel(bw, nfc->conf_regs + PL35X_SMC_OPMODE);
0229 pl35x_smc_update_regs(nfc);
0230
0231 return 0;
0232 }
0233
/* Acknowledge the interface 1 interrupt */
static void pl35x_smc_clear_irq(struct pl35x_nandc *nfc)
{
	writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1,
	       nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);
}
0239
/*
 * Poll (up to 1s) for the raw interrupt status bit of interface 1 to
 * assert, then acknowledge it. Returns 0 on success, a negative error
 * code (timeout) otherwise.
 */
static int pl35x_smc_wait_for_irq(struct pl35x_nandc *nfc)
{
	u32 reg;
	int ret;

	ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_MEMC_STATUS, reg,
				 reg & PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1,
				 10, 1000000);
	if (ret)
		dev_err(nfc->dev,
			"Timeout polling on NAND controller interrupt (0x%x)\n",
			reg);

	/* Always clear the status, even after a timeout */
	pl35x_smc_clear_irq(nfc);

	return ret;
}
0257
/*
 * Poll (up to 1s) for the ECC engine to become idle. Returns 0 on
 * success, a negative error code (timeout) otherwise.
 */
static int pl35x_smc_wait_for_ecc_done(struct pl35x_nandc *nfc)
{
	u32 reg;
	int ret;

	ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_ECC_STATUS, reg,
				 !(reg & PL35X_SMC_ECC_STATUS_ECC_BUSY),
				 10, 1000000);
	if (ret)
		dev_err(nfc->dev,
			"Timeout polling on ECC controller interrupt\n");

	return ret;
}
0272
/*
 * Change the ECC engine mode (bypass/APB/memory) while preserving the
 * other ECC_CFG bits. When @chip is non-NULL the new register value is
 * cached in its private data so it can be restored on chip selection.
 * Unless the engine is being bypassed, wait for it to become idle.
 */
static int pl35x_smc_set_ecc_mode(struct pl35x_nandc *nfc,
				  struct nand_chip *chip,
				  unsigned int mode)
{
	struct pl35x_nand *plnand;
	u32 ecc_cfg;

	ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
	ecc_cfg &= ~PL35X_SMC_ECC_CFG_MODE_MASK;
	ecc_cfg |= mode;
	writel(ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);

	if (chip) {
		plnand = to_pl35x_nand(chip);
		plnand->ecc_cfg = ecc_cfg;
	}

	if (mode != PL35X_SMC_ECC_CFG_MODE_BYPASS)
		return pl35x_smc_wait_for_ecc_done(nfc);

	return 0;
}
0295
0296 static void pl35x_smc_force_byte_access(struct nand_chip *chip,
0297 bool force_8bit)
0298 {
0299 struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
0300 int ret;
0301
0302 if (!(chip->options & NAND_BUSWIDTH_16))
0303 return;
0304
0305 if (force_8bit)
0306 ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
0307 else
0308 ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_16);
0309
0310 if (ret)
0311 dev_err(nfc->dev, "Error in Buswidth\n");
0312 }
0313
/*
 * Apply the per-chip timing and ECC configuration before operating on
 * @chip. A no-op when the chip is already the selected one.
 */
static void pl35x_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);

	if (chip == nfc->selected_chip)
		return;

	/* Setup the timings cached by setup_interface() */
	writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES);
	pl35x_smc_update_regs(nfc);

	/* Restore the ECC configuration cached for this chip */
	writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);

	nfc->selected_chip = chip;
}
0332
/*
 * Read @len bytes from the data phase register into @in.
 *
 * Data is moved as 32-bit words; @flags is OR-ed into the data phase
 * address for every word but the last one, which uses @last_flags
 * instead (e.g. to flag the last ECC transfer or to deassert CS).
 * A trailing remainder (len not a multiple of 4) is read byte by byte
 * with no flags. When @force_8bit is set the bus is switched to 8-bit
 * accesses for the duration of the transfer.
 */
static void pl35x_nand_read_data_op(struct nand_chip *chip, u8 *in,
				    unsigned int len, bool force_8bit,
				    unsigned int flags, unsigned int last_flags)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	unsigned int buf_end = len / 4;
	unsigned int in_start = round_down(len, 4);
	unsigned int data_phase_addr;
	u32 *buf32 = (u32 *)in;
	u8 *buf8 = (u8 *)in;
	int i;

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, true);

	for (i = 0; i < buf_end; i++) {
		data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
		if (i + 1 == buf_end)
			data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;

		buf32[i] = readl(nfc->io_regs + data_phase_addr);
	}

	/* Read the trailing bytes that do not fill a 32-bit word */
	for (i = in_start; i < len; i++)
		buf8[i] = readb(nfc->io_regs + PL35X_SMC_DATA_PHASE);

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, false);
}
0363
/*
 * Write @len bytes from @out to the data phase register. Mirror of
 * pl35x_nand_read_data_op(): 32-bit word transfers with @flags, the
 * last word using @last_flags, then a byte-wise tail with no flags.
 */
static void pl35x_nand_write_data_op(struct nand_chip *chip, const u8 *out,
				     int len, bool force_8bit,
				     unsigned int flags,
				     unsigned int last_flags)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	unsigned int buf_end = len / 4;
	unsigned int in_start = round_down(len, 4);
	const u32 *buf32 = (const u32 *)out;
	const u8 *buf8 = (const u8 *)out;
	unsigned int data_phase_addr;
	int i;

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, true);

	for (i = 0; i < buf_end; i++) {
		data_phase_addr = PL35X_SMC_DATA_PHASE + flags;
		if (i + 1 == buf_end)
			data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags;

		writel(buf32[i], nfc->io_regs + data_phase_addr);
	}

	/* Write the trailing bytes that do not fill a 32-bit word */
	for (i = in_start; i < len; i++)
		writeb(buf8[i], nfc->io_regs + PL35X_SMC_DATA_PHASE);

	if (force_8bit)
		pl35x_smc_force_byte_access(chip, false);
}
0395
0396 static int pl35x_nand_correct_data(struct pl35x_nandc *nfc, unsigned char *buf,
0397 unsigned char *read_ecc,
0398 unsigned char *calc_ecc)
0399 {
0400 unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper;
0401 unsigned short calc_ecc_lower, calc_ecc_upper;
0402 unsigned short byte_addr, bit_addr;
0403
0404 read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) &
0405 PL35X_NAND_ECC_BITS_MASK;
0406 read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) &
0407 PL35X_NAND_ECC_BITS_MASK;
0408
0409 calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) &
0410 PL35X_NAND_ECC_BITS_MASK;
0411 calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) &
0412 PL35X_NAND_ECC_BITS_MASK;
0413
0414 ecc_odd = read_ecc_lower ^ calc_ecc_lower;
0415 ecc_even = read_ecc_upper ^ calc_ecc_upper;
0416
0417
0418 if (likely(!ecc_odd && !ecc_even))
0419 return 0;
0420
0421
0422 if (ecc_odd == (~ecc_even & PL35X_NAND_ECC_BITS_MASK)) {
0423
0424 byte_addr = (ecc_odd >> 3) & PL35X_NAND_ECC_BYTE_OFF_MASK;
0425
0426 bit_addr = ecc_odd & PL35X_NAND_ECC_BIT_OFF_MASK;
0427
0428 buf[byte_addr] ^= (BIT(bit_addr));
0429
0430 return 1;
0431 }
0432
0433
0434 if (hweight32(ecc_odd | ecc_even) == 1)
0435 return 1;
0436
0437 return -EBADMSG;
0438 }
0439
0440 static void pl35x_nand_ecc_reg_to_array(struct nand_chip *chip, u32 ecc_reg,
0441 u8 *ecc_array)
0442 {
0443 u32 ecc_value = ~ecc_reg;
0444 unsigned int ecc_byte;
0445
0446 for (ecc_byte = 0; ecc_byte < chip->ecc.bytes; ecc_byte++)
0447 ecc_array[ecc_byte] = ecc_value >> (8 * ecc_byte);
0448 }
0449
0450 static int pl35x_nand_read_eccbytes(struct pl35x_nandc *nfc,
0451 struct nand_chip *chip, u8 *read_ecc)
0452 {
0453 u32 ecc_value;
0454 int chunk;
0455
0456 for (chunk = 0; chunk < chip->ecc.steps;
0457 chunk++, read_ecc += chip->ecc.bytes) {
0458 ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));
0459 if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
0460 return -EINVAL;
0461
0462 pl35x_nand_ecc_reg_to_array(chip, ecc_value, read_ecc);
0463 }
0464
0465 return 0;
0466 }
0467
/*
 * Compare the ECC stored in the OOB area (@read_ecc) with the values
 * computed by the engine while the page was read, correcting @data
 * chunk by chunk and updating the MTD ECC statistics.
 *
 * Returns the maximum number of bitflips seen in a single chunk, or
 * -EINVAL if the engine did not produce a valid ECC value.
 */
static int pl35x_nand_recover_data_hwecc(struct pl35x_nandc *nfc,
					 struct nand_chip *chip, u8 *data,
					 u8 *read_ecc)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0, chunk;
	u8 calc_ecc[3];
	u32 ecc_value;
	int stats;

	for (chunk = 0; chunk < chip->ecc.steps;
	     chunk++, data += chip->ecc.size, read_ecc += chip->ecc.bytes) {
		/* Read the hardware calculated ECC value for this chunk */
		ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk));

		if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value))
			return -EINVAL;

		/* The engine itself flagged this chunk as failed */
		if (PL35X_SMC_ECC_VALUE_HAS_FAILED(ecc_value)) {
			mtd->ecc_stats.failed++;
			continue;
		}

		pl35x_nand_ecc_reg_to_array(chip, ecc_value, calc_ecc);
		stats = pl35x_nand_correct_data(nfc, data, read_ecc, calc_ecc);
		if (stats < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stats;
			max_bitflips = max_t(unsigned int, max_bitflips, stats);
		}
	}

	return max_bitflips;
}
0503
0504 static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
0505 const u8 *buf, int oob_required,
0506 int page)
0507 {
0508 struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
0509 struct pl35x_nand *plnand = to_pl35x_nand(chip);
0510 struct mtd_info *mtd = nand_to_mtd(chip);
0511 unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
0512 unsigned int nrows = plnand->addr_cycles;
0513 u32 addr1 = 0, addr2 = 0, row;
0514 u32 cmd_addr;
0515 int i, ret;
0516
0517 ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
0518 if (ret)
0519 return ret;
0520
0521 cmd_addr = PL35X_SMC_CMD_PHASE |
0522 PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
0523 PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_SEQIN);
0524
0525 for (i = 0, row = first_row; row < nrows; i++, row++) {
0526 u8 addr = page >> ((i * 8) & 0xFF);
0527
0528 if (row < 4)
0529 addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
0530 else
0531 addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
0532 }
0533
0534
0535 writel(addr1, nfc->io_regs + cmd_addr);
0536 if (plnand->addr_cycles > 4)
0537 writel(addr2, nfc->io_regs + cmd_addr);
0538
0539
0540 pl35x_nand_write_data_op(chip, buf, mtd->writesize, false,
0541 0, PL35X_SMC_DATA_PHASE_ECC_LAST);
0542 ret = pl35x_smc_wait_for_ecc_done(nfc);
0543 if (ret)
0544 goto disable_ecc_engine;
0545
0546
0547 ret = pl35x_nand_read_eccbytes(nfc, chip, nfc->ecc_buf);
0548 if (ret)
0549 goto disable_ecc_engine;
0550
0551 if (!oob_required)
0552 memset(chip->oob_poi, 0xFF, mtd->oobsize);
0553
0554 ret = mtd_ooblayout_set_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi,
0555 0, chip->ecc.total);
0556 if (ret)
0557 goto disable_ecc_engine;
0558
0559
0560 pl35x_nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false, 0,
0561 PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_PAGEPROG) |
0562 PL35X_SMC_CMD_PHASE_CMD1_VALID |
0563 PL35X_SMC_DATA_PHASE_CLEAR_CS);
0564 ret = pl35x_smc_wait_for_irq(nfc);
0565 if (ret)
0566 goto disable_ecc_engine;
0567
0568 disable_ecc_engine:
0569 pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
0570
0571 return ret;
0572 }
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585 static int pl35x_nand_read_page_hwecc(struct nand_chip *chip,
0586 u8 *buf, int oob_required, int page)
0587 {
0588 const struct nand_sdr_timings *sdr =
0589 nand_get_sdr_timings(nand_get_interface_config(chip));
0590 struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
0591 struct pl35x_nand *plnand = to_pl35x_nand(chip);
0592 struct mtd_info *mtd = nand_to_mtd(chip);
0593 unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2;
0594 unsigned int nrows = plnand->addr_cycles;
0595 unsigned int addr1 = 0, addr2 = 0, row;
0596 u32 cmd_addr;
0597 int i, ret;
0598
0599 ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
0600 if (ret)
0601 return ret;
0602
0603 cmd_addr = PL35X_SMC_CMD_PHASE |
0604 PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) |
0605 PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_READ0) |
0606 PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_READSTART) |
0607 PL35X_SMC_CMD_PHASE_CMD1_VALID;
0608
0609 for (i = 0, row = first_row; row < nrows; i++, row++) {
0610 u8 addr = page >> ((i * 8) & 0xFF);
0611
0612 if (row < 4)
0613 addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr);
0614 else
0615 addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr);
0616 }
0617
0618
0619 writel(addr1, nfc->io_regs + cmd_addr);
0620 if (plnand->addr_cycles > 4)
0621 writel(addr2, nfc->io_regs + cmd_addr);
0622
0623
0624 ndelay(PSEC_TO_NSEC(sdr->tRR_min));
0625 ret = pl35x_smc_wait_for_irq(nfc);
0626 if (ret)
0627 goto disable_ecc_engine;
0628
0629
0630 pl35x_nand_read_data_op(chip, buf, mtd->writesize, false,
0631 0, PL35X_SMC_DATA_PHASE_ECC_LAST);
0632 ret = pl35x_smc_wait_for_ecc_done(nfc);
0633 if (ret)
0634 goto disable_ecc_engine;
0635
0636
0637 pl35x_nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
0638 0, PL35X_SMC_DATA_PHASE_CLEAR_CS);
0639 ret = mtd_ooblayout_get_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi, 0,
0640 chip->ecc.total);
0641 if (ret)
0642 goto disable_ecc_engine;
0643
0644 pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
0645
0646
0647 return pl35x_nand_recover_data_hwecc(nfc, chip, buf, nfc->ecc_buf);
0648
0649 disable_ecc_engine:
0650 pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
0651
0652 return ret;
0653 }
0654
/*
 * Execute a parsed sub-operation: gather the command, address, data and
 * waitrdy instructions, encode them into a single command-phase access,
 * then perform the data-phase transfer(s) if any.
 *
 * NOTE(review): the address loop uses 'offset' both as array index and
 * as address-cycle position, which assumes the sub-operation always
 * starts at address cycle 0 - this holds for the patterns declared in
 * pl35x_nandc_op_parser but should be kept in mind if they change.
 */
static int pl35x_nand_exec_op(struct nand_chip *chip,
			      const struct nand_subop *subop)
{
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	const struct nand_op_instr *instr, *data_instr = NULL;
	unsigned int rdy_tim_ms = 0, naddrs = 0, cmds = 0, last_flags = 0;
	u32 addr1 = 0, addr2 = 0, cmd0 = 0, cmd1 = 0, cmd_addr = 0;
	unsigned int op_id, len, offset, rdy_del_ns;
	int last_instr_type = -1;
	bool cmd1_valid = false;
	const u8 *addrs;
	int i, ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			/*
			 * The first opcode goes into CMD0, the second into
			 * CMD1. CMD1 is only flagged valid here when it does
			 * not follow a data-out phase (in that case it is
			 * sent with the last data transfer instead).
			 */
			if (!cmds) {
				cmd0 = PL35X_SMC_CMD_PHASE_CMD0(instr->ctx.cmd.opcode);
			} else {
				cmd1 = PL35X_SMC_CMD_PHASE_CMD1(instr->ctx.cmd.opcode);
				if (last_instr_type != NAND_OP_DATA_OUT_INSTR)
					cmd1_valid = true;
			}
			cmds++;
			break;

		case NAND_OP_ADDR_INSTR:
			/* Pack up to 8 address cycles into addr1/addr2 */
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];
			cmd_addr |= PL35X_SMC_CMD_PHASE_NADDRS(naddrs);

			for (i = offset; i < naddrs; i++) {
				if (i < 4)
					addr1 |= PL35X_SMC_CMD_PHASE_ADDR(i, addrs[i]);
				else
					addr2 |= PL35X_SMC_CMD_PHASE_ADDR(i - 4, addrs[i]);
			}
			break;

		case NAND_OP_DATA_IN_INSTR:
		case NAND_OP_DATA_OUT_INSTR:
			data_instr = instr;
			len = nand_subop_get_data_len(subop, op_id);
			break;

		case NAND_OP_WAITRDY_INSTR:
			rdy_tim_ms = instr->ctx.waitrdy.timeout_ms;
			rdy_del_ns = instr->delay_ns;
			break;
		}

		last_instr_type = instr->type;
	}

	/* Send the command and address cycles */
	cmd_addr |= PL35X_SMC_CMD_PHASE | cmd0 | cmd1 |
		    (cmd1_valid ? PL35X_SMC_CMD_PHASE_CMD1_VALID : 0);
	writel(addr1, nfc->io_regs + cmd_addr);
	if (naddrs > 4)
		writel(addr2, nfc->io_regs + cmd_addr);

	/* A data-out phase carries CMD1 (if any) with its last transfer */
	if (data_instr && data_instr->type == NAND_OP_DATA_OUT_INSTR) {
		last_flags = PL35X_SMC_DATA_PHASE_CLEAR_CS;
		if (cmds == 2)
			last_flags |= cmd1 | PL35X_SMC_CMD_PHASE_CMD1_VALID;

		pl35x_nand_write_data_op(chip, data_instr->ctx.data.buf.out,
					 len, data_instr->ctx.data.force_8bit,
					 0, last_flags);
	}

	if (rdy_tim_ms) {
		ndelay(rdy_del_ns);
		ret = pl35x_smc_wait_for_irq(nfc);
		if (ret)
			return ret;
	}

	if (data_instr && data_instr->type == NAND_OP_DATA_IN_INSTR)
		pl35x_nand_read_data_op(chip, data_instr->ctx.data.buf.in,
					len, data_instr->ctx.data.force_8bit,
					0, PL35X_SMC_DATA_PHASE_CLEAR_CS);

	return 0;
}
0744
/*
 * Operation layouts this controller can translate:
 * - [CMD] [+ ADDR] [+ CMD] [+ WAITRDY] [+ DATA_IN]  (reads, status, ...)
 * - CMD + ADDR + DATA_OUT + CMD [+ WAITRDY]         (program operations)
 */
static const struct nand_op_parser pl35x_nandc_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2112)),
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7),
			       NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	);
0765
/*
 * Controller ->exec_op() hook: apply the target chip configuration
 * (unless only checking) and run the operation through the parser.
 */
static int pl35x_nfc_exec_op(struct nand_chip *chip,
			     const struct nand_operation *op,
			     bool check_only)
{
	if (!check_only)
		pl35x_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &pl35x_nandc_op_parser,
				      op, check_only);
}
0776
0777 static int pl35x_nfc_setup_interface(struct nand_chip *chip, int cs,
0778 const struct nand_interface_config *conf)
0779 {
0780 struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
0781 struct pl35x_nand *plnand = to_pl35x_nand(chip);
0782 struct pl35x_nand_timings tmgs = {};
0783 const struct nand_sdr_timings *sdr;
0784 unsigned int period_ns, val;
0785 struct clk *mclk;
0786
0787 sdr = nand_get_sdr_timings(conf);
0788 if (IS_ERR(sdr))
0789 return PTR_ERR(sdr);
0790
0791 mclk = of_clk_get_by_name(nfc->dev->parent->of_node, "memclk");
0792 if (IS_ERR(mclk)) {
0793 dev_err(nfc->dev, "Failed to retrieve SMC memclk\n");
0794 return PTR_ERR(mclk);
0795 }
0796
0797
0798
0799
0800
0801
0802 period_ns = NSEC_PER_SEC / clk_get_rate(mclk);
0803
0804
0805
0806
0807
0808 val = TO_CYCLES(sdr->tRC_min, period_ns);
0809 if (sdr->tRC_min <= 20000)
0810 val++;
0811
0812 tmgs.t_rc = val;
0813 if (tmgs.t_rc != val || tmgs.t_rc < 2)
0814 return -EINVAL;
0815
0816 val = TO_CYCLES(sdr->tWC_min, period_ns);
0817 tmgs.t_wc = val;
0818 if (tmgs.t_wc != val || tmgs.t_wc < 2)
0819 return -EINVAL;
0820
0821
0822
0823
0824
0825 tmgs.t_rea = 1;
0826
0827 val = TO_CYCLES(sdr->tWP_min, period_ns);
0828 tmgs.t_wp = val;
0829 if (tmgs.t_wp != val || tmgs.t_wp < 1)
0830 return -EINVAL;
0831
0832 val = TO_CYCLES(sdr->tCLR_min, period_ns);
0833 tmgs.t_clr = val;
0834 if (tmgs.t_clr != val)
0835 return -EINVAL;
0836
0837 val = TO_CYCLES(sdr->tAR_min, period_ns);
0838 tmgs.t_ar = val;
0839 if (tmgs.t_ar != val)
0840 return -EINVAL;
0841
0842 val = TO_CYCLES(sdr->tRR_min, period_ns);
0843 tmgs.t_rr = val;
0844 if (tmgs.t_rr != val)
0845 return -EINVAL;
0846
0847 if (cs == NAND_DATA_IFACE_CHECK_ONLY)
0848 return 0;
0849
0850 plnand->timings = PL35X_SMC_NAND_TRC_CYCLES(tmgs.t_rc) |
0851 PL35X_SMC_NAND_TWC_CYCLES(tmgs.t_wc) |
0852 PL35X_SMC_NAND_TREA_CYCLES(tmgs.t_rea) |
0853 PL35X_SMC_NAND_TWP_CYCLES(tmgs.t_wp) |
0854 PL35X_SMC_NAND_TCLR_CYCLES(tmgs.t_clr) |
0855 PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) |
0856 PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr);
0857
0858 return 0;
0859 }
0860
0861 static void pl35x_smc_set_ecc_pg_size(struct pl35x_nandc *nfc,
0862 struct nand_chip *chip,
0863 unsigned int pg_sz)
0864 {
0865 struct pl35x_nand *plnand = to_pl35x_nand(chip);
0866 u32 sz;
0867
0868 switch (pg_sz) {
0869 case SZ_512:
0870 sz = 1;
0871 break;
0872 case SZ_1K:
0873 sz = 2;
0874 break;
0875 case SZ_2K:
0876 sz = 3;
0877 break;
0878 default:
0879 sz = 0;
0880 break;
0881 }
0882
0883 plnand->ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG);
0884 plnand->ecc_cfg &= ~PL35X_SMC_ECC_CFG_PGSIZE_MASK;
0885 plnand->ecc_cfg |= sz;
0886 writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG);
0887 }
0888
/*
 * Configure the hardware ECC engine (1 bit correction per 512 bytes)
 * for @chip and install the matching page accessors and OOB layout.
 * Returns -EOPNOTSUPP for page/OOB geometries the engine cannot handle.
 */
static int pl35x_nand_init_hw_ecc_controller(struct pl35x_nandc *nfc,
					     struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;

	if (mtd->writesize < SZ_512 || mtd->writesize > SZ_2K) {
		dev_err(nfc->dev,
			"The hardware ECC engine is limited to pages up to 2kiB\n");
		return -EOPNOTSUPP;
	}

	chip->ecc.strength = 1;
	chip->ecc.bytes = 3;
	chip->ecc.size = SZ_512;
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	chip->ecc.read_page = pl35x_nand_read_page_hwecc;
	chip->ecc.write_page = pl35x_nand_write_page_hwecc;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
	pl35x_smc_set_ecc_pg_size(nfc, chip, mtd->writesize);

	/* Scratch buffer holding the ECC bytes of one full page */
	nfc->ecc_buf = devm_kmalloc(nfc->dev, chip->ecc.bytes * chip->ecc.steps,
				    GFP_KERNEL);
	if (!nfc->ecc_buf)
		return -ENOMEM;

	switch (mtd->oobsize) {
	case 16:
		/*
		 * The 16-byte layout places ECC bytes at the start of the
		 * OOB area, so the bad block marker cannot live in OOB.
		 */
		mtd_set_ooblayout(mtd, &pl35x_ecc_ooblayout16_ops);
		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
		break;
	case 64:
		mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
		break;
	default:
		dev_err(nfc->dev, "Unsupported OOB size\n");
		return -EOPNOTSUPP;
	}

	return ret;
}
0931
/*
 * Controller ->attach_chip() hook: derive the default ECC parameters,
 * compute the number of address cycles and finish the per-engine-type
 * setup once the chip has been identified.
 */
static int pl35x_nand_attach_chip(struct nand_chip *chip)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller);
	struct pl35x_nand *plnand = to_pl35x_nand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Fill missing ECC parameters from the chip requirements */
	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
	    (!chip->ecc.size || !chip->ecc.strength)) {
		if (requirements->step_size && requirements->strength) {
			chip->ecc.size = requirements->step_size;
			chip->ecc.strength = requirements->strength;
		} else {
			dev_info(nfc->dev,
				 "No minimum ECC strength, using 1b/512B\n");
			chip->ecc.size = 512;
			chip->ecc.strength = 1;
		}
	}

	/* 1 or 2 column cycles depending on the page size... */
	if (mtd->writesize <= SZ_512)
		plnand->addr_cycles = 1;
	else
		plnand->addr_cycles = 2;

	/* ...plus 2 or 3 row cycles */
	if (chip->options & NAND_ROW_ADDR_3)
		plnand->addr_cycles += 3;
	else
		plnand->addr_cycles += 2;

	switch (chip->ecc.engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		/* On-die ECC chips use the dedicated BBT descriptors */
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
		fallthrough;
	case NAND_ECC_ENGINE_TYPE_NONE:
	case NAND_ECC_ENGINE_TYPE_SOFT:
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = pl35x_nand_init_hw_ecc_controller(nfc, chip);
		if (ret)
			return ret;
		break;
	default:
		dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
			chip->ecc.engine_type);
		return -EINVAL;
	}

	return 0;
}
0986
/* Controller-level hooks registered with the raw NAND core */
static const struct nand_controller_ops pl35x_nandc_ops = {
	.attach_chip = pl35x_nand_attach_chip,
	.exec_op = pl35x_nfc_exec_op,
	.setup_interface = pl35x_nfc_setup_interface,
};
0992
/*
 * Bring the controller into a known state: interrupts disabled and
 * cleared, 8-bit bus, ECC engine bypassed, and the opcodes the ECC
 * engine must match when snooping the bus programmed.
 */
static int pl35x_nand_reset_state(struct pl35x_nandc *nfc)
{
	int ret;

	/* Disable and clear the interface 1 and ECC interrupts */
	writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 |
	       PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 |
	       PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1,
	       nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR);

	/* Start in 8-bit mode */
	ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8);
	if (ret)
		return ret;

	/* Keep the ECC engine bypassed until a page accessor needs it */
	ret = pl35x_smc_set_ecc_mode(nfc, NULL, PL35X_SMC_ECC_CFG_MODE_BYPASS);
	if (ret)
		return ret;

	/*
	 * Program the read/write opcodes the ECC engine should react to
	 * when it snoops the NAND bus.
	 */
	writel(PL35X_SMC_ECC_CMD1_WRITE(NAND_CMD_SEQIN) |
	       PL35X_SMC_ECC_CMD1_READ(NAND_CMD_READ0) |
	       PL35X_SMC_ECC_CMD1_READ_END(NAND_CMD_READSTART) |
	       PL35X_SMC_ECC_CMD1_READ_END_VALID(NAND_CMD_READ1),
	       nfc->conf_regs + PL35X_SMC_ECC_CMD1);
	writel(PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(NAND_CMD_RNDIN) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG(NAND_CMD_RNDOUT) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(NAND_CMD_RNDOUTSTART) |
	       PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(NAND_CMD_READ1),
	       nfc->conf_regs + PL35X_SMC_ECC_CMD2);

	return 0;
}
1030
1031 static int pl35x_nand_chip_init(struct pl35x_nandc *nfc,
1032 struct device_node *np)
1033 {
1034 struct pl35x_nand *plnand;
1035 struct nand_chip *chip;
1036 struct mtd_info *mtd;
1037 int cs, ret;
1038
1039 plnand = devm_kzalloc(nfc->dev, sizeof(*plnand), GFP_KERNEL);
1040 if (!plnand)
1041 return -ENOMEM;
1042
1043 ret = of_property_read_u32(np, "reg", &cs);
1044 if (ret)
1045 return ret;
1046
1047 if (cs >= PL35X_NAND_MAX_CS) {
1048 dev_err(nfc->dev, "Wrong CS %d\n", cs);
1049 return -EINVAL;
1050 }
1051
1052 if (test_and_set_bit(cs, &nfc->assigned_cs)) {
1053 dev_err(nfc->dev, "Already assigned CS %d\n", cs);
1054 return -EINVAL;
1055 }
1056
1057 plnand->cs = cs;
1058
1059 chip = &plnand->chip;
1060 chip->options = NAND_BUSWIDTH_AUTO | NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE;
1061 chip->bbt_options = NAND_BBT_USE_FLASH;
1062 chip->controller = &nfc->controller;
1063 mtd = nand_to_mtd(chip);
1064 mtd->dev.parent = nfc->dev;
1065 nand_set_flash_node(chip, np);
1066 if (!mtd->name) {
1067 mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
1068 "%s", PL35X_NANDC_DRIVER_NAME);
1069 if (!mtd->name) {
1070 dev_err(nfc->dev, "Failed to allocate mtd->name\n");
1071 return -ENOMEM;
1072 }
1073 }
1074
1075 ret = nand_scan(chip, 1);
1076 if (ret)
1077 return ret;
1078
1079 ret = mtd_device_register(mtd, NULL, 0);
1080 if (ret) {
1081 nand_cleanup(chip);
1082 return ret;
1083 }
1084
1085 list_add_tail(&plnand->node, &nfc->chips);
1086
1087 return ret;
1088 }
1089
1090 static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc)
1091 {
1092 struct pl35x_nand *plnand, *tmp;
1093 struct nand_chip *chip;
1094 int ret;
1095
1096 list_for_each_entry_safe(plnand, tmp, &nfc->chips, node) {
1097 chip = &plnand->chip;
1098 ret = mtd_device_unregister(nand_to_mtd(chip));
1099 WARN_ON(ret);
1100 nand_cleanup(chip);
1101 list_del(&plnand->node);
1102 }
1103 }
1104
/*
 * Instantiate one NAND chip per DT child node. On the first failure,
 * already-registered chips are torn down and the error is returned.
 */
static int pl35x_nand_chips_init(struct pl35x_nandc *nfc)
{
	struct device_node *np = nfc->dev->of_node, *nand_np;
	int nchips = of_get_child_count(np);
	int ret;

	/* Only PL35X_NAND_MAX_CS chip select(s) are available */
	if (!nchips || nchips > PL35X_NAND_MAX_CS) {
		dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
			nchips);
		return -EINVAL;
	}

	for_each_child_of_node(np, nand_np) {
		ret = pl35x_nand_chip_init(nfc, nand_np);
		if (ret) {
			/* Drop the reference held by the iterator */
			of_node_put(nand_np);
			pl35x_nand_chips_cleanup(nfc);
			break;
		}
	}

	return ret;
}
1128
1129 static int pl35x_nand_probe(struct platform_device *pdev)
1130 {
1131 struct device *smc_dev = pdev->dev.parent;
1132 struct amba_device *smc_amba = to_amba_device(smc_dev);
1133 struct pl35x_nandc *nfc;
1134 u32 ret;
1135
1136 nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
1137 if (!nfc)
1138 return -ENOMEM;
1139
1140 nfc->dev = &pdev->dev;
1141 nand_controller_init(&nfc->controller);
1142 nfc->controller.ops = &pl35x_nandc_ops;
1143 INIT_LIST_HEAD(&nfc->chips);
1144
1145 nfc->conf_regs = devm_ioremap_resource(&smc_amba->dev, &smc_amba->res);
1146 if (IS_ERR(nfc->conf_regs))
1147 return PTR_ERR(nfc->conf_regs);
1148
1149 nfc->io_regs = devm_platform_ioremap_resource(pdev, 0);
1150 if (IS_ERR(nfc->io_regs))
1151 return PTR_ERR(nfc->io_regs);
1152
1153 ret = pl35x_nand_reset_state(nfc);
1154 if (ret)
1155 return ret;
1156
1157 ret = pl35x_nand_chips_init(nfc);
1158 if (ret)
1159 return ret;
1160
1161 platform_set_drvdata(pdev, nfc);
1162
1163 return 0;
1164 }
1165
/* Remove: unregister and release every attached chip */
static int pl35x_nand_remove(struct platform_device *pdev)
{
	struct pl35x_nandc *nfc = platform_get_drvdata(pdev);

	pl35x_nand_chips_cleanup(nfc);

	return 0;
}
1174
/* Device tree match table */
static const struct of_device_id pl35x_nand_of_match[] = {
	{ .compatible = "arm,pl353-nand-r2p1" },
	{},
};
MODULE_DEVICE_TABLE(of, pl35x_nand_of_match);
1180
/* Platform driver glue */
static struct platform_driver pl35x_nandc_driver = {
	.probe = pl35x_nand_probe,
	.remove = pl35x_nand_remove,
	.driver = {
		.name = PL35X_NANDC_DRIVER_NAME,
		.of_match_table = pl35x_nand_of_match,
	},
};
module_platform_driver(pl35x_nandc_driver);
1190
1191 MODULE_AUTHOR("Xilinx, Inc.");
1192 MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME);
1193 MODULE_DESCRIPTION("ARM PL35X NAND controller driver");
1194 MODULE_LICENSE("GPL");