0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/bitfield.h>
0010 #include <linux/clk.h>
0011 #include <linux/dma-mapping.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/iopoll.h>
0014 #include <linux/module.h>
0015 #include <linux/mtd/mtd.h>
0016 #include <linux/mtd/rawnand.h>
0017 #include <linux/of.h>
0018 #include <linux/platform_device.h>
0019 #include <linux/pm_runtime.h>
0020 #include <linux/slab.h>
0021
/*
 * Register map of the NAND flash controller. Multi-bit fields are built
 * with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>.
 */

/* Command register: sequence selector, data path selection and opcodes */
#define COMMAND_REG 0x00
#define COMMAND_SEQ(x) FIELD_PREP(GENMASK(5, 0), (x))
#define COMMAND_SEQ_10 COMMAND_SEQ(0x2A)
#define COMMAND_SEQ_12 COMMAND_SEQ(0x0C)
#define COMMAND_SEQ_18 COMMAND_SEQ(0x32)
#define COMMAND_SEQ_19 COMMAND_SEQ(0x13)
#define COMMAND_SEQ_GEN_IN COMMAND_SEQ_18
#define COMMAND_SEQ_GEN_OUT COMMAND_SEQ_19
#define COMMAND_SEQ_READ_PAGE COMMAND_SEQ_10
#define COMMAND_SEQ_WRITE_PAGE COMMAND_SEQ_12
#define COMMAND_INPUT_SEL_AHBS 0
#define COMMAND_INPUT_SEL_DMA BIT(6)
#define COMMAND_FIFO_SEL 0
#define COMMAND_DATA_SEL BIT(7)
/* Up to three opcodes can be encoded in a single command word */
#define COMMAND_0(x) FIELD_PREP(GENMASK(15, 8), (x))
#define COMMAND_1(x) FIELD_PREP(GENMASK(23, 16), (x))
#define COMMAND_2(x) FIELD_PREP(GENMASK(31, 24), (x))

/* Global control: ECC chunk size, interrupt/ECC enables, pages per block */
#define CONTROL_REG 0x04
#define CONTROL_CHECK_RB_LINE 0
#define CONTROL_ECC_BLOCK_SIZE(x) FIELD_PREP(GENMASK(2, 1), (x))
#define CONTROL_ECC_BLOCK_SIZE_256 CONTROL_ECC_BLOCK_SIZE(0)
#define CONTROL_ECC_BLOCK_SIZE_512 CONTROL_ECC_BLOCK_SIZE(1)
#define CONTROL_ECC_BLOCK_SIZE_1024 CONTROL_ECC_BLOCK_SIZE(2)
#define CONTROL_INT_EN BIT(4)
#define CONTROL_ECC_EN BIT(5)
#define CONTROL_BLOCK_SIZE(x) FIELD_PREP(GENMASK(7, 6), (x))
#define CONTROL_BLOCK_SIZE_32P CONTROL_BLOCK_SIZE(0)
#define CONTROL_BLOCK_SIZE_64P CONTROL_BLOCK_SIZE(1)
#define CONTROL_BLOCK_SIZE_128P CONTROL_BLOCK_SIZE(2)
#define CONTROL_BLOCK_SIZE_256P CONTROL_BLOCK_SIZE(3)

/* Status: per-CS memory ready bits (3:0) and controller busy bit (8) */
#define STATUS_REG 0x8
#define MEM_RDY(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define CTRL_RDY(reg) (FIELD_GET(BIT(8), (reg)) == 0)

/* ECC engine configuration: correction capability and error threshold */
#define ECC_CTRL_REG 0x18
#define ECC_CTRL_CAP(x) FIELD_PREP(GENMASK(2, 0), (x))
#define ECC_CTRL_CAP_2B ECC_CTRL_CAP(0)
#define ECC_CTRL_CAP_4B ECC_CTRL_CAP(1)
#define ECC_CTRL_CAP_8B ECC_CTRL_CAP(2)
#define ECC_CTRL_CAP_16B ECC_CTRL_CAP(3)
#define ECC_CTRL_CAP_24B ECC_CTRL_CAP(4)
#define ECC_CTRL_CAP_32B ECC_CTRL_CAP(5)
#define ECC_CTRL_ERR_THRESHOLD(x) FIELD_PREP(GENMASK(13, 8), (x))

/* Interrupt mask/status; note INT_DMA_END and INT_DMA_ENDED alias BIT(3) */
#define INT_MASK_REG 0x10
#define INT_STATUS_REG 0x14
#define INT_CMD_END BIT(1)
#define INT_DMA_END BIT(3)
#define INT_MEM_RDY(cs) FIELD_PREP(GENMASK(11, 8), BIT(cs))
#define INT_DMA_ENDED BIT(3)
#define MEM_IS_RDY(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))
#define DMA_HAS_ENDED(reg) FIELD_GET(BIT(3), (reg))

/* Offset of the ECC bytes inside the OOB area */
#define ECC_OFFSET_REG 0x1C
#define ECC_OFFSET(x) FIELD_PREP(GENMASK(15, 0), (x))

/* Per-CS correctable (3:0) / uncorrectable (11:8) ECC status bits */
#define ECC_STAT_REG 0x20
#define ECC_STAT_CORRECTABLE(cs, reg) (FIELD_GET(GENMASK(3, 0), (reg)) & BIT(cs))
#define ECC_STAT_UNCORRECTABLE(cs, reg) (FIELD_GET(GENMASK(11, 8), (reg)) & BIT(cs))

/* First and second address cycles (column/row) */
#define ADDR0_COL_REG 0x24
#define ADDR0_COL(x) FIELD_PREP(GENMASK(15, 0), (x))

#define ADDR0_ROW_REG 0x28
#define ADDR0_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))

#define ADDR1_COL_REG 0x2C
#define ADDR1_COL(x) FIELD_PREP(GENMASK(15, 0), (x))

#define ADDR1_ROW_REG 0x30
#define ADDR1_ROW(x) FIELD_PREP(GENMASK(23, 0), (x))

/* PIO data FIFO access port */
#define FIFO_DATA_REG 0x38

#define DATA_REG 0x3C

#define DATA_REG_SIZE_REG 0x40

/* DMA engine programming: target address, count and control */
#define DMA_ADDR_LOW_REG 0x64

#define DMA_ADDR_HIGH_REG 0x68

#define DMA_CNT_REG 0x6C

#define DMA_CTRL_REG 0x70
#define DMA_CTRL_INCREMENT_BURST_4 0
#define DMA_CTRL_REGISTER_MANAGED_MODE 0
#define DMA_CTRL_START BIT(7)

/* Target CS selection and per-CS write-protect disable */
#define MEM_CTRL_REG 0x80
#define MEM_CTRL_CS(cs) FIELD_PREP(GENMASK(1, 0), (cs))
#define MEM_CTRL_DIS_WP(cs) FIELD_PREP(GENMASK(11, 8), BIT((cs)))

/* Transfer length in bytes */
#define DATA_SIZE_REG 0x84
#define DATA_SIZE(x) FIELD_PREP(GENMASK(14, 0), (x))

/*
 * Timing registers. Values are expressed in controller clock cycles minus
 * one, hence the max(x, 1) - 1 so that a zero request still programs 0.
 */
#define TIMINGS_ASYN_REG 0x88
#define TIMINGS_ASYN_TRWP(x) FIELD_PREP(GENMASK(3, 0), max((x), 1U) - 1)
#define TIMINGS_ASYN_TRWH(x) FIELD_PREP(GENMASK(7, 4), max((x), 1U) - 1)

#define TIM_SEQ0_REG 0x90
#define TIM_SEQ0_TCCS(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_SEQ0_TADL(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_SEQ0_TRHW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_SEQ0_TWHR(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define TIM_SEQ1_REG 0x94
#define TIM_SEQ1_TWB(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_SEQ1_TRR(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_SEQ1_TWW(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)

/* Generic sequencer per-delay timings D0..D12 */
#define TIM_GEN_SEQ0_REG 0x98
#define TIM_GEN_SEQ0_D0(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D1(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D2(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ0_D3(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define TIM_GEN_SEQ1_REG 0x9c
#define TIM_GEN_SEQ1_D4(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D5(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D6(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ1_D7(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

#define TIM_GEN_SEQ2_REG 0xA0
#define TIM_GEN_SEQ2_D8(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D9(x) FIELD_PREP(GENMASK(13, 8), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D10(x) FIELD_PREP(GENMASK(21, 16), max((x), 1U) - 1)
#define TIM_GEN_SEQ2_D11(x) FIELD_PREP(GENMASK(29, 24), max((x), 1U) - 1)

/* FIFO reset (write) and state (read) share the same offset */
#define FIFO_INIT_REG 0xB4
#define FIFO_INIT BIT(0)

#define FIFO_STATE_REG 0xB4
#define FIFO_STATE_R_EMPTY(reg) FIELD_GET(BIT(0), (reg))
#define FIFO_STATE_W_FULL(reg) FIELD_GET(BIT(1), (reg))
#define FIFO_STATE_C_EMPTY(reg) FIELD_GET(BIT(2), (reg))
#define FIFO_STATE_R_FULL(reg) FIELD_GET(BIT(6), (reg))
#define FIFO_STATE_W_EMPTY(reg) FIELD_GET(BIT(7), (reg))

/* Generic sequencer control: which command/address/data cycles to emit */
#define GEN_SEQ_CTRL_REG 0xB8
#define GEN_SEQ_CMD0_EN BIT(0)
#define GEN_SEQ_CMD1_EN BIT(1)
#define GEN_SEQ_CMD2_EN BIT(2)
#define GEN_SEQ_CMD3_EN BIT(3)
#define GEN_SEQ_COL_A0(x) FIELD_PREP(GENMASK(5, 4), min((x), 2U))
#define GEN_SEQ_COL_A1(x) FIELD_PREP(GENMASK(7, 6), min((x), 2U))
#define GEN_SEQ_ROW_A0(x) FIELD_PREP(GENMASK(9, 8), min((x), 3U))
#define GEN_SEQ_ROW_A1(x) FIELD_PREP(GENMASK(11, 10), min((x), 3U))
#define GEN_SEQ_DATA_EN BIT(12)
#define GEN_SEQ_DELAY_EN(x) FIELD_PREP(GENMASK(14, 13), (x))
#define GEN_SEQ_DELAY0_EN GEN_SEQ_DELAY_EN(1)
#define GEN_SEQ_DELAY1_EN GEN_SEQ_DELAY_EN(2)
#define GEN_SEQ_IMD_SEQ BIT(15)
#define GEN_SEQ_COMMAND_3(x) FIELD_PREP(GENMASK(26, 16), (x))

/* DMA FIFO trigger level */
#define DMA_TLVL_REG 0x114
#define DMA_TLVL(x) FIELD_PREP(GENMASK(7, 0), (x))
#define DMA_TLVL_MAX DMA_TLVL(0xFF)

#define TIM_GEN_SEQ3_REG 0x134
#define TIM_GEN_SEQ3_D12(x) FIELD_PREP(GENMASK(5, 0), max((x), 1U) - 1)

/* Per-CS corrected bitflip counters, 8 bits per CS, 6 significant bits */
#define ECC_CNT_REG 0x14C
#define ECC_CNT(cs, reg) FIELD_GET(GENMASK(5, 0), (reg) >> ((cs) * 8))

/* Number of chip select lines supported by the controller */
#define RNANDC_CS_NUM 4

/* Convert a picosecond duration into clock cycles of period_ns */
#define TO_CYCLES64(ps, period_ns) ((unsigned int)DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
							       period_ns))
0193
/**
 * struct rnand_chip_sel - describes one die of a NAND chip
 * @cs: chip select line driving this die (0 to RNANDC_CS_NUM - 1)
 */
struct rnand_chip_sel {
	unsigned int cs;
};
0197
/**
 * struct rnand_chip - per-chip controller configuration
 *
 * The register values below are computed once (at interface setup / ECC
 * init time) and written back to the controller on each target selection.
 *
 * @chip: base NAND chip structure
 * @node: list node in the controller's list of chips
 * @selected_die: die currently selected on this chip
 * @ctrl: NOTE(review): not referenced anywhere in this file chunk — looks
 *        redundant with @control; confirm before removing
 * @nsels: number of entries in @sels
 * @control: cached CONTROL_REG value (ECC/block size configuration)
 * @ecc_ctrl: cached ECC_CTRL_REG value (capability + error threshold)
 * @timings_asyn: cached TIMINGS_ASYN_REG value
 * @tim_seq0: cached TIM_SEQ0_REG value
 * @tim_seq1: cached TIM_SEQ1_REG value
 * @tim_gen_seq0: cached TIM_GEN_SEQ0_REG value
 * @tim_gen_seq1: cached TIM_GEN_SEQ1_REG value
 * @tim_gen_seq2: cached TIM_GEN_SEQ2_REG value
 * @tim_gen_seq3: cached TIM_GEN_SEQ3_REG value
 * @sels: flexible array of @nsels die descriptors
 */
struct rnand_chip {
	struct nand_chip chip;
	struct list_head node;
	int selected_die;
	u32 ctrl;
	unsigned int nsels;
	u32 control;
	u32 ecc_ctrl;
	u32 timings_asyn;
	u32 tim_seq0;
	u32 tim_seq1;
	u32 tim_gen_seq0;
	u32 tim_gen_seq1;
	u32 tim_gen_seq2;
	u32 tim_gen_seq3;
	struct rnand_chip_sel sels[];
};
0215
/**
 * struct rnandc - NAND controller instance
 * @controller: base NAND controller structure
 * @dev: associated device, used for DMA mappings and logging
 * @regs: base of the memory-mapped register region
 * @ext_clk_rate: external clock rate (Hz), used to derive timing cycles
 * @assigned_cs: bitmap of chip select lines already claimed by a chip
 * @chips: list of rnand_chip registered on this controller
 * @selected_chip: chip currently configured in the controller registers
 * @complete: completion signalled by the interrupt handler
 * @use_polling: true when no IRQ is available and status must be polled
 * @buf: bounce buffer used for DMA page accesses
 * @buf_sz: size of @buf in bytes
 */
struct rnandc {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	unsigned long ext_clk_rate;
	unsigned long assigned_cs;
	struct list_head chips;
	struct nand_chip *selected_chip;
	struct completion complete;
	bool use_polling;
	u8 *buf;
	unsigned int buf_sz;
};
0229
/**
 * struct rnandc_op - register-level description of a single operation
 * @command: COMMAND_REG value (sequence, data path, opcodes)
 * @addr0_col: first column address cycles
 * @addr0_row: first row address cycles
 * @addr1_col: second column address cycles
 * @addr1_row: second row address cycles
 * @data_size: DATA_SIZE_REG value (not written by rnandc_trigger_op(),
 *             which derives it from @len instead)
 * @ecc_offset: offset of the ECC bytes within the page
 * @gen_seq_ctrl: GEN_SEQ_CTRL_REG value for generic sequences
 * @buf: in/out data buffer for PIO transfers
 * @read: true when the data phase moves data from the device to the host
 * @len: data phase length in bytes
 */
struct rnandc_op {
	u32 command;
	u32 addr0_col;
	u32 addr0_row;
	u32 addr1_col;
	u32 addr1_row;
	u32 data_size;
	u32 ecc_offset;
	u32 gen_seq_ctrl;
	u8 *buf;
	bool read;
	unsigned int len;
};
0243
/* Retrieve the rnandc instance embedding @ctrl. */
static inline struct rnandc *to_rnandc(struct nand_controller *ctrl)
{
	return container_of(ctrl, struct rnandc, controller);
}
0248
/* Retrieve the rnand_chip instance embedding @chip. */
static inline struct rnand_chip *to_rnand(struct nand_chip *chip)
{
	return container_of(chip, struct rnand_chip, chip);
}
0253
/* Chip select line of the currently selected die of @nand. */
static inline unsigned int to_rnandc_cs(struct rnand_chip *nand)
{
	return nand->sels[nand->selected_die].cs;
}
0258
0259 static void rnandc_dis_correction(struct rnandc *rnandc)
0260 {
0261 u32 control;
0262
0263 control = readl_relaxed(rnandc->regs + CONTROL_REG);
0264 control &= ~CONTROL_ECC_EN;
0265 writel_relaxed(control, rnandc->regs + CONTROL_REG);
0266 }
0267
0268 static void rnandc_en_correction(struct rnandc *rnandc)
0269 {
0270 u32 control;
0271
0272 control = readl_relaxed(rnandc->regs + CONTROL_REG);
0273 control |= CONTROL_ECC_EN;
0274 writel_relaxed(control, rnandc->regs + CONTROL_REG);
0275 }
0276
/* Reset the interrupt status and both ECC status/counter registers. */
static void rnandc_clear_status(struct rnandc *rnandc)
{
	writel_relaxed(0, rnandc->regs + INT_STATUS_REG);
	writel_relaxed(0, rnandc->regs + ECC_STAT_REG);
	writel_relaxed(0, rnandc->regs + ECC_CNT_REG);
}
0283
/* Mask all controller interrupt sources. */
static void rnandc_dis_interrupts(struct rnandc *rnandc)
{
	writel_relaxed(0, rnandc->regs + INT_MASK_REG);
}
0288
/*
 * Unmask the interrupt sources in @val. No-op in polling mode, where the
 * status register is read directly instead.
 */
static void rnandc_en_interrupts(struct rnandc *rnandc, u32 val)
{
	if (!rnandc->use_polling)
		writel_relaxed(val, rnandc->regs + INT_MASK_REG);
}
0294
/* Flush any residual content out of the controller FIFO. */
static void rnandc_clear_fifo(struct rnandc *rnandc)
{
	writel_relaxed(FIFO_INIT, rnandc->regs + FIFO_INIT_REG);
}
0299
/*
 * Program the controller for a given chip/die pair: select the CS line,
 * disable write protection on it and restore the per-chip cached control,
 * ECC and timing registers. Skipped entirely when the requested target is
 * already the active one.
 */
static void rnandc_select_target(struct nand_chip *chip, int die_nr)
{
	struct rnand_chip *rnand = to_rnand(chip);
	struct rnandc *rnandc = to_rnandc(chip->controller);
	unsigned int cs = rnand->sels[die_nr].cs;

	if (chip == rnandc->selected_chip && die_nr == rnand->selected_die)
		return;

	rnandc_clear_status(rnandc);
	writel_relaxed(MEM_CTRL_CS(cs) | MEM_CTRL_DIS_WP(cs), rnandc->regs + MEM_CTRL_REG);
	writel_relaxed(rnand->control, rnandc->regs + CONTROL_REG);
	writel_relaxed(rnand->ecc_ctrl, rnandc->regs + ECC_CTRL_REG);
	writel_relaxed(rnand->timings_asyn, rnandc->regs + TIMINGS_ASYN_REG);
	writel_relaxed(rnand->tim_seq0, rnandc->regs + TIM_SEQ0_REG);
	writel_relaxed(rnand->tim_seq1, rnandc->regs + TIM_SEQ1_REG);
	writel_relaxed(rnand->tim_gen_seq0, rnandc->regs + TIM_GEN_SEQ0_REG);
	writel_relaxed(rnand->tim_gen_seq1, rnandc->regs + TIM_GEN_SEQ1_REG);
	writel_relaxed(rnand->tim_gen_seq2, rnandc->regs + TIM_GEN_SEQ2_REG);
	writel_relaxed(rnand->tim_gen_seq3, rnandc->regs + TIM_GEN_SEQ3_REG);

	rnandc->selected_chip = chip;
	rnand->selected_die = die_nr;
}
0324
/*
 * Program an operation and start it. The write to COMMAND_REG must come
 * last as it triggers the sequence; DATA_SIZE is derived from rop->len
 * (rop->data_size is not used here).
 */
static void rnandc_trigger_op(struct rnandc *rnandc, struct rnandc_op *rop)
{
	writel_relaxed(rop->addr0_col, rnandc->regs + ADDR0_COL_REG);
	writel_relaxed(rop->addr0_row, rnandc->regs + ADDR0_ROW_REG);
	writel_relaxed(rop->addr1_col, rnandc->regs + ADDR1_COL_REG);
	writel_relaxed(rop->addr1_row, rnandc->regs + ADDR1_ROW_REG);
	writel_relaxed(rop->ecc_offset, rnandc->regs + ECC_OFFSET_REG);
	writel_relaxed(rop->gen_seq_ctrl, rnandc->regs + GEN_SEQ_CTRL_REG);
	writel_relaxed(DATA_SIZE(rop->len), rnandc->regs + DATA_SIZE_REG);
	writel_relaxed(rop->command, rnandc->regs + COMMAND_REG);
}
0336
/* Kick the internal DMA engine (incrementing 4-beat bursts). */
static void rnandc_trigger_dma(struct rnandc *rnandc)
{
	writel_relaxed(DMA_CTRL_INCREMENT_BURST_4 |
		       DMA_CTRL_REGISTER_MANAGED_MODE |
		       DMA_CTRL_START, rnandc->regs + DMA_CTRL_REG);
}
0343
/*
 * Interrupt handler: mask further interrupts and wake the waiter. The
 * actual cause is read from the status registers by the waiting thread.
 */
static irqreturn_t rnandc_irq_handler(int irq, void *private)
{
	struct rnandc *rnandc = private;

	rnandc_dis_interrupts(rnandc);
	complete(&rnandc->complete);

	return IRQ_HANDLED;
}
0353
/*
 * Poll STATUS_REG until both the selected memory and the controller report
 * ready, or 100ms elapse. Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int rnandc_wait_end_of_op(struct rnandc *rnandc,
				 struct nand_chip *chip)
{
	struct rnand_chip *rnand = to_rnand(chip);
	unsigned int cs = to_rnandc_cs(rnand);
	u32 status;
	int ret;

	ret = readl_poll_timeout(rnandc->regs + STATUS_REG, status,
				 MEM_RDY(cs, status) && CTRL_RDY(status),
				 1, 100000);
	if (ret)
		dev_err(rnandc->dev, "Operation timed out, status: 0x%08x\n",
			status);

	return ret;
}
0371
0372 static int rnandc_wait_end_of_io(struct rnandc *rnandc,
0373 struct nand_chip *chip)
0374 {
0375 int timeout_ms = 1000;
0376 int ret;
0377
0378 if (rnandc->use_polling) {
0379 struct rnand_chip *rnand = to_rnand(chip);
0380 unsigned int cs = to_rnandc_cs(rnand);
0381 u32 status;
0382
0383 ret = readl_poll_timeout(rnandc->regs + INT_STATUS_REG, status,
0384 MEM_IS_RDY(cs, status) &
0385 DMA_HAS_ENDED(status),
0386 0, timeout_ms * 1000);
0387 } else {
0388 ret = wait_for_completion_timeout(&rnandc->complete,
0389 msecs_to_jiffies(timeout_ms));
0390 if (!ret)
0391 ret = -ETIMEDOUT;
0392 else
0393 ret = 0;
0394 }
0395
0396 return ret;
0397 }
0398
/*
 * Read a full page through the DMA bounce buffer with the hardware ECC
 * engine enabled.
 *
 * Correction results are derived from ECC_STAT_REG: on an uncorrectable
 * status each chunk is re-checked against the raw OOB bytes to tell truly
 * corrupted chunks from erased ones; on a correctable status the bitflip
 * count is read from ECC_CNT_REG.
 *
 * Returns 0 on success (bitflips are reported through mtd->ecc_stats),
 * a negative error code otherwise.
 */
static int rnandc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
				   int oob_required, int page)
{
	struct rnandc *rnandc = to_rnandc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct rnand_chip *rnand = to_rnand(chip);
	unsigned int cs = to_rnandc_cs(rnand);
	struct rnandc_op rop = {
		.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_READ0) |
			   COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
			   COMMAND_SEQ_READ_PAGE,
		.addr0_row = page,
		.len = mtd->writesize,
		/* ECC bytes live after the 2 bad-block-marker bytes in OOB */
		.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
	};
	unsigned int max_bitflips = 0;
	dma_addr_t dma_addr;
	u32 ecc_stat;
	int bf, ret, i;

	rnandc_select_target(chip, chip->cur_cs);
	rnandc_clear_status(rnandc);
	reinit_completion(&rnandc->complete);
	rnandc_en_interrupts(rnandc, INT_DMA_ENDED);
	rnandc_en_correction(rnandc);

	/*
	 * NOTE(review): the mapping result is not checked with
	 * dma_mapping_error() — confirm whether that is acceptable here.
	 */
	dma_addr = dma_map_single(rnandc->dev, rnandc->buf, mtd->writesize,
				  DMA_FROM_DEVICE);
	writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
	writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
	writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);

	rnandc_trigger_op(rnandc, &rop);
	rnandc_trigger_dma(rnandc);

	ret = rnandc_wait_end_of_io(rnandc, chip);
	dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_FROM_DEVICE);
	rnandc_dis_correction(rnandc);
	if (ret) {
		dev_err(rnandc->dev, "Read page operation never ending\n");
		return ret;
	}

	ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);

	/* OOB is also needed to re-check chunks flagged uncorrectable */
	if (oob_required || ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	}

	if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
		for (i = 0; i < chip->ecc.steps; i++) {
			unsigned int off = i * chip->ecc.size;
			unsigned int eccoff = i * chip->ecc.bytes;

			/* Distinguish erased chunks from real failures */
			bf = nand_check_erased_ecc_chunk(rnandc->buf + off,
							 chip->ecc.size,
							 chip->oob_poi + 2 + eccoff,
							 chip->ecc.bytes,
							 NULL, 0,
							 chip->ecc.strength);
			if (bf < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += bf;
				max_bitflips = max_t(unsigned int, max_bitflips, bf);
			}
		}
	} else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
		bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
		mtd->ecc_stats.corrected += bf;
	}

	memcpy(buf, rnandc->buf, mtd->writesize);

	return 0;
}
0486
0487 static int rnandc_read_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
0488 u32 req_len, u8 *bufpoi, int page)
0489 {
0490 struct rnandc *rnandc = to_rnandc(chip->controller);
0491 struct mtd_info *mtd = nand_to_mtd(chip);
0492 struct rnand_chip *rnand = to_rnand(chip);
0493 unsigned int cs = to_rnandc_cs(rnand);
0494 unsigned int page_off = round_down(req_offset, chip->ecc.size);
0495 unsigned int real_len = round_up(req_offset + req_len - page_off,
0496 chip->ecc.size);
0497 unsigned int start_chunk = page_off / chip->ecc.size;
0498 unsigned int nchunks = real_len / chip->ecc.size;
0499 unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
0500 struct rnandc_op rop = {
0501 .command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_READ0) |
0502 COMMAND_2(NAND_CMD_READSTART) | COMMAND_FIFO_SEL |
0503 COMMAND_SEQ_READ_PAGE,
0504 .addr0_row = page,
0505 .addr0_col = page_off,
0506 .len = real_len,
0507 .ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
0508 };
0509 unsigned int max_bitflips = 0, i;
0510 u32 ecc_stat;
0511 int bf, ret;
0512
0513
0514 rnandc_select_target(chip, chip->cur_cs);
0515 rnandc_clear_status(rnandc);
0516 rnandc_en_correction(rnandc);
0517 rnandc_trigger_op(rnandc, &rop);
0518
0519 while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
0520 cpu_relax();
0521
0522 while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
0523 cpu_relax();
0524
0525 ioread32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
0526 real_len / 4);
0527
0528 if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
0529 dev_err(rnandc->dev, "Clearing residual data in the read FIFO\n");
0530 rnandc_clear_fifo(rnandc);
0531 }
0532
0533 ret = rnandc_wait_end_of_op(rnandc, chip);
0534 rnandc_dis_correction(rnandc);
0535 if (ret) {
0536 dev_err(rnandc->dev, "Read subpage operation never ending\n");
0537 return ret;
0538 }
0539
0540 ecc_stat = readl_relaxed(rnandc->regs + ECC_STAT_REG);
0541
0542 if (ECC_STAT_UNCORRECTABLE(cs, ecc_stat)) {
0543 ret = nand_change_read_column_op(chip, mtd->writesize,
0544 chip->oob_poi, mtd->oobsize,
0545 false);
0546 if (ret)
0547 return ret;
0548
0549 for (i = start_chunk; i < nchunks; i++) {
0550 unsigned int dataoff = i * chip->ecc.size;
0551 unsigned int eccoff = 2 + (i * chip->ecc.bytes);
0552
0553 bf = nand_check_erased_ecc_chunk(bufpoi + dataoff,
0554 chip->ecc.size,
0555 chip->oob_poi + eccoff,
0556 chip->ecc.bytes,
0557 NULL, 0,
0558 chip->ecc.strength);
0559 if (bf < 0) {
0560 mtd->ecc_stats.failed++;
0561 } else {
0562 mtd->ecc_stats.corrected += bf;
0563 max_bitflips = max_t(unsigned int, max_bitflips, bf);
0564 }
0565 }
0566 } else if (ECC_STAT_CORRECTABLE(cs, ecc_stat)) {
0567 bf = ECC_CNT(cs, readl_relaxed(rnandc->regs + ECC_CNT_REG));
0568
0569
0570
0571
0572
0573 mtd->ecc_stats.corrected += bf;
0574 }
0575
0576 return 0;
0577 }
0578
/*
 * Write a full page through the DMA bounce buffer with the hardware ECC
 * engine enabled. OOB data, when requested, is appended with a separate
 * change-column operation.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int rnandc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
				    int oob_required, int page)
{
	struct rnandc *rnandc = to_rnandc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct rnand_chip *rnand = to_rnand(chip);
	unsigned int cs = to_rnandc_cs(rnand);
	struct rnandc_op rop = {
		.command = COMMAND_INPUT_SEL_DMA | COMMAND_0(NAND_CMD_SEQIN) |
			   COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
			   COMMAND_SEQ_WRITE_PAGE,
		.addr0_row = page,
		.len = mtd->writesize,
		/* ECC bytes live after the 2 bad-block-marker bytes in OOB */
		.ecc_offset = ECC_OFFSET(mtd->writesize + 2),
	};
	dma_addr_t dma_addr;
	int ret;

	/* Stage the payload in the DMA-safe bounce buffer */
	memcpy(rnandc->buf, buf, mtd->writesize);

	rnandc_select_target(chip, chip->cur_cs);
	rnandc_clear_status(rnandc);
	reinit_completion(&rnandc->complete);
	rnandc_en_interrupts(rnandc, INT_MEM_RDY(cs));
	rnandc_en_correction(rnandc);

	/*
	 * NOTE(review): the mapping result is not checked with
	 * dma_mapping_error() — confirm whether that is acceptable here.
	 */
	dma_addr = dma_map_single(rnandc->dev, (void *)rnandc->buf, mtd->writesize,
				  DMA_TO_DEVICE);
	writel(dma_addr, rnandc->regs + DMA_ADDR_LOW_REG);
	writel(mtd->writesize, rnandc->regs + DMA_CNT_REG);
	writel(DMA_TLVL_MAX, rnandc->regs + DMA_TLVL_REG);

	rnandc_trigger_op(rnandc, &rop);
	rnandc_trigger_dma(rnandc);

	ret = rnandc_wait_end_of_io(rnandc, chip);
	dma_unmap_single(rnandc->dev, dma_addr, mtd->writesize, DMA_TO_DEVICE);
	rnandc_dis_correction(rnandc);
	if (ret) {
		dev_err(rnandc->dev, "Write page operation never ending\n");
		return ret;
	}

	if (!oob_required)
		return 0;

	return nand_change_write_column_op(chip, mtd->writesize, chip->oob_poi,
					   mtd->oobsize, false);
}
0630
/*
 * Write a subpage in PIO mode with the hardware ECC engine enabled. The
 * requested window is widened to full ECC chunks and pushed into the FIFO
 * word by word.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int rnandc_write_subpage_hw_ecc(struct nand_chip *chip, u32 req_offset,
				       u32 req_len, const u8 *bufpoi,
				       int oob_required, int page)
{
	struct rnandc *rnandc = to_rnandc(chip->controller);
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* Align the request on full ECC chunks */
	unsigned int page_off = round_down(req_offset, chip->ecc.size);
	unsigned int real_len = round_up(req_offset + req_len - page_off,
					 chip->ecc.size);
	unsigned int start_chunk = page_off / chip->ecc.size;
	unsigned int ecc_off = 2 + (start_chunk * chip->ecc.bytes);
	struct rnandc_op rop = {
		.command = COMMAND_INPUT_SEL_AHBS | COMMAND_0(NAND_CMD_SEQIN) |
			   COMMAND_1(NAND_CMD_PAGEPROG) | COMMAND_FIFO_SEL |
			   COMMAND_SEQ_WRITE_PAGE,
		.addr0_row = page,
		.addr0_col = page_off,
		.len = real_len,
		.ecc_offset = ECC_OFFSET(mtd->writesize + ecc_off),
	};
	int ret;

	rnandc_select_target(chip, chip->cur_cs);
	rnandc_clear_status(rnandc);
	rnandc_en_correction(rnandc);
	rnandc_trigger_op(rnandc, &rop);

	while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
		cpu_relax();

	iowrite32_rep(rnandc->regs + FIFO_DATA_REG, bufpoi + page_off,
		      real_len / 4);

	/* Wait for the write FIFO to drain before checking completion */
	while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
		cpu_relax();

	ret = rnandc_wait_end_of_op(rnandc, chip);
	rnandc_dis_correction(rnandc);
	if (ret) {
		dev_err(rnandc->dev, "Write subpage operation never ending\n");
		return ret;
	}

	return 0;
}
0677
0678
0679
0680
0681
0682 static int rnandc_exec_op(struct nand_chip *chip,
0683 const struct nand_operation *op, bool check_only)
0684 {
0685 struct rnandc *rnandc = to_rnandc(chip->controller);
0686 const struct nand_op_instr *instr = NULL;
0687 struct rnandc_op rop = {
0688 .command = COMMAND_INPUT_SEL_AHBS,
0689 .gen_seq_ctrl = GEN_SEQ_IMD_SEQ,
0690 };
0691 unsigned int cmd_phase = 0, addr_phase = 0, data_phase = 0,
0692 delay_phase = 0, delays = 0;
0693 unsigned int op_id, col_addrs, row_addrs, naddrs, remainder, words, i;
0694 const u8 *addrs;
0695 u32 last_bytes;
0696 int ret;
0697
0698 if (!check_only)
0699 rnandc_select_target(chip, op->cs);
0700
0701 for (op_id = 0; op_id < op->ninstrs; op_id++) {
0702 instr = &op->instrs[op_id];
0703
0704 nand_op_trace(" ", instr);
0705
0706 switch (instr->type) {
0707 case NAND_OP_CMD_INSTR:
0708 switch (cmd_phase++) {
0709 case 0:
0710 rop.command |= COMMAND_0(instr->ctx.cmd.opcode);
0711 rop.gen_seq_ctrl |= GEN_SEQ_CMD0_EN;
0712 break;
0713 case 1:
0714 rop.gen_seq_ctrl |= GEN_SEQ_COMMAND_3(instr->ctx.cmd.opcode);
0715 rop.gen_seq_ctrl |= GEN_SEQ_CMD3_EN;
0716 if (addr_phase == 0)
0717 addr_phase = 1;
0718 break;
0719 case 2:
0720 rop.command |= COMMAND_2(instr->ctx.cmd.opcode);
0721 rop.gen_seq_ctrl |= GEN_SEQ_CMD2_EN;
0722 if (addr_phase <= 1)
0723 addr_phase = 2;
0724 break;
0725 case 3:
0726 rop.command |= COMMAND_1(instr->ctx.cmd.opcode);
0727 rop.gen_seq_ctrl |= GEN_SEQ_CMD1_EN;
0728 if (addr_phase <= 1)
0729 addr_phase = 2;
0730 if (delay_phase == 0)
0731 delay_phase = 1;
0732 if (data_phase == 0)
0733 data_phase = 1;
0734 break;
0735 default:
0736 return -EOPNOTSUPP;
0737 }
0738 break;
0739
0740 case NAND_OP_ADDR_INSTR:
0741 addrs = instr->ctx.addr.addrs;
0742 naddrs = instr->ctx.addr.naddrs;
0743 if (naddrs > 5)
0744 return -EOPNOTSUPP;
0745
0746 col_addrs = min(2U, naddrs);
0747 row_addrs = naddrs > 2 ? naddrs - col_addrs : 0;
0748
0749 switch (addr_phase++) {
0750 case 0:
0751 for (i = 0; i < col_addrs; i++)
0752 rop.addr0_col |= addrs[i] << (i * 8);
0753 rop.gen_seq_ctrl |= GEN_SEQ_COL_A0(col_addrs);
0754
0755 for (i = 0; i < row_addrs; i++)
0756 rop.addr0_row |= addrs[2 + i] << (i * 8);
0757 rop.gen_seq_ctrl |= GEN_SEQ_ROW_A0(row_addrs);
0758
0759 if (cmd_phase == 0)
0760 cmd_phase = 1;
0761 break;
0762 case 1:
0763 for (i = 0; i < col_addrs; i++)
0764 rop.addr1_col |= addrs[i] << (i * 8);
0765 rop.gen_seq_ctrl |= GEN_SEQ_COL_A1(col_addrs);
0766
0767 for (i = 0; i < row_addrs; i++)
0768 rop.addr1_row |= addrs[2 + i] << (i * 8);
0769 rop.gen_seq_ctrl |= GEN_SEQ_ROW_A1(row_addrs);
0770
0771 if (cmd_phase <= 1)
0772 cmd_phase = 2;
0773 break;
0774 default:
0775 return -EOPNOTSUPP;
0776 }
0777 break;
0778
0779 case NAND_OP_DATA_IN_INSTR:
0780 rop.read = true;
0781 fallthrough;
0782 case NAND_OP_DATA_OUT_INSTR:
0783 rop.gen_seq_ctrl |= GEN_SEQ_DATA_EN;
0784 rop.buf = instr->ctx.data.buf.in;
0785 rop.len = instr->ctx.data.len;
0786 rop.command |= COMMAND_FIFO_SEL;
0787
0788 switch (data_phase++) {
0789 case 0:
0790 if (cmd_phase <= 2)
0791 cmd_phase = 3;
0792 if (addr_phase <= 1)
0793 addr_phase = 2;
0794 if (delay_phase == 0)
0795 delay_phase = 1;
0796 break;
0797 default:
0798 return -EOPNOTSUPP;
0799 }
0800 break;
0801
0802 case NAND_OP_WAITRDY_INSTR:
0803 switch (delay_phase++) {
0804 case 0:
0805 rop.gen_seq_ctrl |= GEN_SEQ_DELAY0_EN;
0806
0807 if (cmd_phase <= 2)
0808 cmd_phase = 3;
0809 break;
0810 case 1:
0811 rop.gen_seq_ctrl |= GEN_SEQ_DELAY1_EN;
0812
0813 if (cmd_phase <= 3)
0814 cmd_phase = 4;
0815 if (data_phase == 0)
0816 data_phase = 1;
0817 break;
0818 default:
0819 return -EOPNOTSUPP;
0820 }
0821 break;
0822 }
0823 }
0824
0825
0826
0827
0828
0829 if (rop.buf && !rop.read)
0830 rop.command |= COMMAND_SEQ_GEN_OUT;
0831 else
0832 rop.command |= COMMAND_SEQ_GEN_IN;
0833
0834 if (delays > 1) {
0835 dev_err(rnandc->dev, "Cannot handle more than one wait delay\n");
0836 return -EOPNOTSUPP;
0837 }
0838
0839 if (check_only)
0840 return 0;
0841
0842 rnandc_trigger_op(rnandc, &rop);
0843
0844 words = rop.len / sizeof(u32);
0845 remainder = rop.len % sizeof(u32);
0846 if (rop.buf && rop.read) {
0847 while (!FIFO_STATE_C_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
0848 cpu_relax();
0849
0850 while (FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
0851 cpu_relax();
0852
0853 ioread32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf, words);
0854 if (remainder) {
0855 last_bytes = readl_relaxed(rnandc->regs + FIFO_DATA_REG);
0856 memcpy(rop.buf + (words * sizeof(u32)), &last_bytes,
0857 remainder);
0858 }
0859
0860 if (!FIFO_STATE_R_EMPTY(readl(rnandc->regs + FIFO_STATE_REG))) {
0861 dev_warn(rnandc->dev,
0862 "Clearing residual data in the read FIFO\n");
0863 rnandc_clear_fifo(rnandc);
0864 }
0865 } else if (rop.len && !rop.read) {
0866 while (FIFO_STATE_W_FULL(readl(rnandc->regs + FIFO_STATE_REG)))
0867 cpu_relax();
0868
0869 iowrite32_rep(rnandc->regs + FIFO_DATA_REG, rop.buf,
0870 DIV_ROUND_UP(rop.len, 4));
0871
0872 if (remainder) {
0873 last_bytes = 0;
0874 memcpy(&last_bytes, rop.buf + (words * sizeof(u32)), remainder);
0875 writel_relaxed(last_bytes, rnandc->regs + FIFO_DATA_REG);
0876 }
0877
0878 while (!FIFO_STATE_W_EMPTY(readl(rnandc->regs + FIFO_STATE_REG)))
0879 cpu_relax();
0880 }
0881
0882 ret = rnandc_wait_end_of_op(rnandc, chip);
0883 if (ret)
0884 return ret;
0885
0886 return 0;
0887 }
0888
/*
 * Derive the controller timing registers from the negotiated SDR timings.
 * The controller shares a single read/write pulse configuration, hence the
 * requirement that tRP == tWP and tREH == tWH. A negative @chipnr means
 * the call is only a capability check and nothing is cached.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int rnandc_setup_interface(struct nand_chip *chip, int chipnr,
				  const struct nand_interface_config *conf)
{
	struct rnand_chip *rnand = to_rnand(chip);
	struct rnandc *rnandc = to_rnandc(chip->controller);
	unsigned int period_ns = 1000000000 / rnandc->ext_clk_rate;
	const struct nand_sdr_timings *sdr;
	unsigned int cyc, cle, ale, bef_dly, ca_to_data;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	if (sdr->tRP_min != sdr->tWP_min || sdr->tREH_min != sdr->tWH_min) {
		dev_err(rnandc->dev, "Read and write hold times must be identical\n");
		return -EINVAL;
	}

	if (chipnr < 0)
		return 0;

	rnand->timings_asyn =
		TIMINGS_ASYN_TRWP(TO_CYCLES64(sdr->tRP_min, period_ns)) |
		TIMINGS_ASYN_TRWH(TO_CYCLES64(sdr->tREH_min, period_ns));
	rnand->tim_seq0 =
		TIM_SEQ0_TCCS(TO_CYCLES64(sdr->tCCS_min, period_ns)) |
		TIM_SEQ0_TADL(TO_CYCLES64(sdr->tADL_min, period_ns)) |
		TIM_SEQ0_TRHW(TO_CYCLES64(sdr->tRHW_min, period_ns)) |
		TIM_SEQ0_TWHR(TO_CYCLES64(sdr->tWHR_min, period_ns));
	rnand->tim_seq1 =
		TIM_SEQ1_TWB(TO_CYCLES64(sdr->tWB_max, period_ns)) |
		TIM_SEQ1_TRR(TO_CYCLES64(sdr->tRR_min, period_ns)) |
		TIM_SEQ1_TWW(TO_CYCLES64(sdr->tWW_min, period_ns));

	/* Intermediate terms (picoseconds) for the sequencer delays below */
	cyc = sdr->tDS_min + sdr->tDH_min;
	cle = sdr->tCLH_min + sdr->tCLS_min;
	ale = sdr->tALH_min + sdr->tALS_min;
	bef_dly = sdr->tWB_max - sdr->tDH_min;
	ca_to_data = sdr->tWHR_min + sdr->tREA_max - sdr->tDH_min;

	/* Delays around command latch cycles */
	rnand->tim_gen_seq0 =
		TIM_GEN_SEQ0_D0(TO_CYCLES64(cle - cyc, period_ns)) |
		TIM_GEN_SEQ0_D1(TO_CYCLES64(cle - cyc, period_ns)) |
		TIM_GEN_SEQ0_D2(TO_CYCLES64(bef_dly, period_ns)) |
		TIM_GEN_SEQ0_D3(TO_CYCLES64(ca_to_data, period_ns));

	/* Delays around address latch cycles */
	rnand->tim_gen_seq1 =
		TIM_GEN_SEQ1_D4(TO_CYCLES64(ale - cyc, period_ns)) |
		TIM_GEN_SEQ1_D5(TO_CYCLES64(ale - cyc, period_ns)) |
		TIM_GEN_SEQ1_D6(TO_CYCLES64(bef_dly, period_ns)) |
		TIM_GEN_SEQ1_D7(TO_CYCLES64(ca_to_data, period_ns));

	/* Delays around data cycles */
	rnand->tim_gen_seq2 =
		TIM_GEN_SEQ2_D8(TO_CYCLES64(sdr->tRR_min + sdr->tREA_max, period_ns)) |
		TIM_GEN_SEQ2_D9(TO_CYCLES64(sdr->tRR_min, period_ns)) |
		TIM_GEN_SEQ2_D10(TO_CYCLES64(cle - cyc, period_ns)) |
		TIM_GEN_SEQ2_D11(TO_CYCLES64(bef_dly, period_ns));

	rnand->tim_gen_seq3 =
		TIM_GEN_SEQ3_D12(TO_CYCLES64(sdr->tCLH_min - sdr->tDH_min, period_ns));

	return 0;
}
0971
0972 static int rnandc_ooblayout_ecc(struct mtd_info *mtd, int section,
0973 struct mtd_oob_region *oobregion)
0974 {
0975 struct nand_chip *chip = mtd_to_nand(mtd);
0976 unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
0977
0978 if (section)
0979 return -ERANGE;
0980
0981 oobregion->offset = 2;
0982 oobregion->length = eccbytes;
0983
0984 return 0;
0985 }
0986
0987 static int rnandc_ooblayout_free(struct mtd_info *mtd, int section,
0988 struct mtd_oob_region *oobregion)
0989 {
0990 struct nand_chip *chip = mtd_to_nand(mtd);
0991 unsigned int eccbytes = round_up(chip->ecc.bytes, 4) * chip->ecc.steps;
0992
0993 if (section)
0994 return -ERANGE;
0995
0996 oobregion->offset = 2 + eccbytes;
0997 oobregion->length = mtd->oobsize - oobregion->offset;
0998
0999 return 0;
1000 }
1001
/* OOB layout: BBM (2 bytes), then ECC bytes, then free bytes. */
static const struct mtd_ooblayout_ops rnandc_ooblayout_ops = {
	.ecc = rnandc_ooblayout_ecc,
	.free = rnandc_ooblayout_free,
};
1006
/*
 * Configure the on-die hardware ECC engine for @chip: translate the chunk
 * size and strength into CONTROL/ECC_CTRL register fields, derive the
 * number of ECC bytes per step, and install the page accessors and the OOB
 * layout.
 *
 * Returns 0 on success, -EINVAL for unsupported page size, chunk size or
 * strength.
 */
static int rnandc_hw_ecc_controller_init(struct nand_chip *chip)
{
	struct rnand_chip *rnand = to_rnand(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct rnandc *rnandc = to_rnandc(chip->controller);

	if (mtd->writesize > SZ_16K) {
		dev_err(rnandc->dev, "Unsupported page size\n");
		return -EINVAL;
	}

	switch (chip->ecc.size) {
	case SZ_256:
		rnand->control |= CONTROL_ECC_BLOCK_SIZE_256;
		break;
	case SZ_512:
		rnand->control |= CONTROL_ECC_BLOCK_SIZE_512;
		break;
	case SZ_1K:
		rnand->control |= CONTROL_ECC_BLOCK_SIZE_1024;
		break;
	default:
		dev_err(rnandc->dev, "Unsupported ECC chunk size\n");
		return -EINVAL;
	}

	/* ECC bytes per step follow from the BCH correction capability */
	switch (chip->ecc.strength) {
	case 2:
		chip->ecc.bytes = 4;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_2B;
		break;
	case 4:
		chip->ecc.bytes = 7;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_4B;
		break;
	case 8:
		chip->ecc.bytes = 14;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_8B;
		break;
	case 16:
		chip->ecc.bytes = 28;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_16B;
		break;
	case 24:
		chip->ecc.bytes = 42;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_24B;
		break;
	case 32:
		chip->ecc.bytes = 56;
		rnand->ecc_ctrl |= ECC_CTRL_CAP_32B;
		break;
	default:
		dev_err(rnandc->dev, "Unsupported ECC strength\n");
		return -EINVAL;
	}

	rnand->ecc_ctrl |= ECC_CTRL_ERR_THRESHOLD(chip->ecc.strength);

	mtd_set_ooblayout(mtd, &rnandc_ooblayout_ops);
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	chip->ecc.read_page = rnandc_read_page_hw_ecc;
	chip->ecc.read_subpage = rnandc_read_subpage_hw_ecc;
	chip->ecc.write_page = rnandc_write_page_hw_ecc;
	chip->ecc.write_subpage = rnandc_write_subpage_hw_ecc;

	return 0;
}
1074
1075 static int rnandc_ecc_init(struct nand_chip *chip)
1076 {
1077 struct nand_ecc_ctrl *ecc = &chip->ecc;
1078 const struct nand_ecc_props *requirements =
1079 nanddev_get_ecc_requirements(&chip->base);
1080 struct rnandc *rnandc = to_rnandc(chip->controller);
1081 int ret;
1082
1083 if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_NONE &&
1084 (!ecc->size || !ecc->strength)) {
1085 if (requirements->step_size && requirements->strength) {
1086 ecc->size = requirements->step_size;
1087 ecc->strength = requirements->strength;
1088 } else {
1089 dev_err(rnandc->dev, "No minimum ECC strength\n");
1090 return -EINVAL;
1091 }
1092 }
1093
1094 switch (ecc->engine_type) {
1095 case NAND_ECC_ENGINE_TYPE_ON_HOST:
1096 ret = rnandc_hw_ecc_controller_init(chip);
1097 if (ret)
1098 return ret;
1099 break;
1100 case NAND_ECC_ENGINE_TYPE_NONE:
1101 case NAND_ECC_ENGINE_TYPE_SOFT:
1102 case NAND_ECC_ENGINE_TYPE_ON_DIE:
1103 break;
1104 default:
1105 return -EINVAL;
1106 }
1107
1108 return 0;
1109 }
1110
1111 static int rnandc_attach_chip(struct nand_chip *chip)
1112 {
1113 struct rnand_chip *rnand = to_rnand(chip);
1114 struct rnandc *rnandc = to_rnandc(chip->controller);
1115 struct mtd_info *mtd = nand_to_mtd(chip);
1116 struct nand_memory_organization *memorg = nanddev_get_memorg(&chip->base);
1117 int ret;
1118
1119
1120 if (chip->bbt_options & NAND_BBT_USE_FLASH)
1121 chip->bbt_options |= NAND_BBT_NO_OOB;
1122
1123 if (mtd->writesize <= 512) {
1124 dev_err(rnandc->dev, "Small page devices not supported\n");
1125 return -EINVAL;
1126 }
1127
1128 rnand->control |= CONTROL_CHECK_RB_LINE | CONTROL_INT_EN;
1129
1130 switch (memorg->pages_per_eraseblock) {
1131 case 32:
1132 rnand->control |= CONTROL_BLOCK_SIZE_32P;
1133 break;
1134 case 64:
1135 rnand->control |= CONTROL_BLOCK_SIZE_64P;
1136 break;
1137 case 128:
1138 rnand->control |= CONTROL_BLOCK_SIZE_128P;
1139 break;
1140 case 256:
1141 rnand->control |= CONTROL_BLOCK_SIZE_256P;
1142 break;
1143 default:
1144 dev_err(rnandc->dev, "Unsupported memory organization\n");
1145 return -EINVAL;
1146 }
1147
1148 chip->options |= NAND_SUBPAGE_READ;
1149
1150 ret = rnandc_ecc_init(chip);
1151 if (ret) {
1152 dev_err(rnandc->dev, "ECC initialization failed (%d)\n", ret);
1153 return ret;
1154 }
1155
1156
1157 rnand->selected_die = -1;
1158
1159 return 0;
1160 }
1161
/* Controller operations registered with the raw NAND core */
static const struct nand_controller_ops rnandc_ops = {
	.attach_chip = rnandc_attach_chip,
	.exec_op = rnandc_exec_op,
	.setup_interface = rnandc_setup_interface,
};
1167
1168 static int rnandc_alloc_dma_buf(struct rnandc *rnandc,
1169 struct mtd_info *new_mtd)
1170 {
1171 unsigned int max_len = new_mtd->writesize + new_mtd->oobsize;
1172 struct rnand_chip *entry, *temp;
1173 struct nand_chip *chip;
1174 struct mtd_info *mtd;
1175
1176 list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
1177 chip = &entry->chip;
1178 mtd = nand_to_mtd(chip);
1179 max_len = max(max_len, mtd->writesize + mtd->oobsize);
1180 }
1181
1182 if (rnandc->buf && rnandc->buf_sz < max_len) {
1183 devm_kfree(rnandc->dev, rnandc->buf);
1184 rnandc->buf = NULL;
1185 }
1186
1187 if (!rnandc->buf) {
1188 rnandc->buf_sz = max_len;
1189 rnandc->buf = devm_kmalloc(rnandc->dev, max_len,
1190 GFP_KERNEL | GFP_DMA);
1191 if (!rnandc->buf)
1192 return -ENOMEM;
1193 }
1194
1195 return 0;
1196 }
1197
1198 static int rnandc_chip_init(struct rnandc *rnandc, struct device_node *np)
1199 {
1200 struct rnand_chip *rnand;
1201 struct mtd_info *mtd;
1202 struct nand_chip *chip;
1203 int nsels, ret, i;
1204 u32 cs;
1205
1206 nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
1207 if (nsels <= 0) {
1208 ret = (nsels < 0) ? nsels : -EINVAL;
1209 dev_err(rnandc->dev, "Invalid reg property (%d)\n", ret);
1210 return ret;
1211 }
1212
1213
1214 rnand = devm_kzalloc(rnandc->dev, struct_size(rnand, sels, nsels),
1215 GFP_KERNEL);
1216 if (!rnand)
1217 return -ENOMEM;
1218
1219 rnand->nsels = nsels;
1220 rnand->selected_die = -1;
1221
1222 for (i = 0; i < nsels; i++) {
1223 ret = of_property_read_u32_index(np, "reg", i, &cs);
1224 if (ret) {
1225 dev_err(rnandc->dev, "Incomplete reg property (%d)\n", ret);
1226 return ret;
1227 }
1228
1229 if (cs >= RNANDC_CS_NUM) {
1230 dev_err(rnandc->dev, "Invalid reg property (%d)\n", cs);
1231 return -EINVAL;
1232 }
1233
1234 if (test_and_set_bit(cs, &rnandc->assigned_cs)) {
1235 dev_err(rnandc->dev, "CS %d already assigned\n", cs);
1236 return -EINVAL;
1237 }
1238
1239
1240
1241
1242
1243 rnand->sels[i].cs = cs;
1244 }
1245
1246 chip = &rnand->chip;
1247 chip->controller = &rnandc->controller;
1248 nand_set_flash_node(chip, np);
1249
1250 mtd = nand_to_mtd(chip);
1251 mtd->dev.parent = rnandc->dev;
1252 if (!mtd->name) {
1253 dev_err(rnandc->dev, "Missing MTD label\n");
1254 return -EINVAL;
1255 }
1256
1257 ret = nand_scan(chip, rnand->nsels);
1258 if (ret) {
1259 dev_err(rnandc->dev, "Failed to scan the NAND chip (%d)\n", ret);
1260 return ret;
1261 }
1262
1263 ret = rnandc_alloc_dma_buf(rnandc, mtd);
1264 if (ret)
1265 goto cleanup_nand;
1266
1267 ret = mtd_device_register(mtd, NULL, 0);
1268 if (ret) {
1269 dev_err(rnandc->dev, "Failed to register MTD device (%d)\n", ret);
1270 goto cleanup_nand;
1271 }
1272
1273 list_add_tail(&rnand->node, &rnandc->chips);
1274
1275 return 0;
1276
1277 cleanup_nand:
1278 nand_cleanup(chip);
1279
1280 return ret;
1281 }
1282
1283 static void rnandc_chips_cleanup(struct rnandc *rnandc)
1284 {
1285 struct rnand_chip *entry, *temp;
1286 struct nand_chip *chip;
1287 int ret;
1288
1289 list_for_each_entry_safe(entry, temp, &rnandc->chips, node) {
1290 chip = &entry->chip;
1291 ret = mtd_device_unregister(nand_to_mtd(chip));
1292 WARN_ON(ret);
1293 nand_cleanup(chip);
1294 list_del(&entry->node);
1295 }
1296 }
1297
1298 static int rnandc_chips_init(struct rnandc *rnandc)
1299 {
1300 struct device_node *np;
1301 int ret;
1302
1303 for_each_child_of_node(rnandc->dev->of_node, np) {
1304 ret = rnandc_chip_init(rnandc, np);
1305 if (ret) {
1306 of_node_put(np);
1307 goto cleanup_chips;
1308 }
1309 }
1310
1311 return 0;
1312
1313 cleanup_chips:
1314 rnandc_chips_cleanup(rnandc);
1315
1316 return ret;
1317 }
1318
/*
 * Probe the NAND controller: map registers, power the block up through
 * runtime PM, read the external clock rate used for timing computation,
 * set up the interrupt (or fall back to polling), and instantiate all
 * chips described in the device tree.
 */
static int rnandc_probe(struct platform_device *pdev)
{
	struct rnandc *rnandc;
	struct clk *eclk;
	int irq, ret;

	rnandc = devm_kzalloc(&pdev->dev, sizeof(*rnandc), GFP_KERNEL);
	if (!rnandc)
		return -ENOMEM;

	rnandc->dev = &pdev->dev;
	nand_controller_init(&rnandc->controller);
	rnandc->controller.ops = &rnandc_ops;
	INIT_LIST_HEAD(&rnandc->chips);
	init_completion(&rnandc->complete);

	rnandc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rnandc->regs))
		return PTR_ERR(rnandc->regs);

	/* Runtime PM disable is devm-managed; only the ref needs unwinding */
	devm_pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	/* The external clock rate is only sampled once, so no need to keep
	 * a reference to the clock beyond this point.
	 */
	eclk = clk_get(&pdev->dev, "eclk");
	if (IS_ERR(eclk)) {
		ret = PTR_ERR(eclk);
		goto dis_runtime_pm;
	}

	rnandc->ext_clk_rate = clk_get_rate(eclk);
	clk_put(eclk);

	rnandc_dis_interrupts(rnandc);
	/* IRQ is optional: without one the driver polls the status bits */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq == -EPROBE_DEFER) {
		ret = irq;
		goto dis_runtime_pm;
	} else if (irq < 0) {
		dev_info(&pdev->dev, "No IRQ found, fallback to polling\n");
		rnandc->use_polling = true;
	} else {
		ret = devm_request_irq(&pdev->dev, irq, rnandc_irq_handler, 0,
				       "renesas-nand-controller", rnandc);
		if (ret < 0)
			goto dis_runtime_pm;
	}

	/* NOTE(review): 32-bit mask suggests the engine only does 32-bit
	 * DMA addressing — confirm against the hardware manual.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto dis_runtime_pm;

	rnandc_clear_fifo(rnandc);

	platform_set_drvdata(pdev, rnandc);

	ret = rnandc_chips_init(rnandc);
	if (ret)
		goto dis_runtime_pm;

	return 0;

dis_runtime_pm:
	pm_runtime_put(&pdev->dev);

	return ret;
}
1388
1389 static int rnandc_remove(struct platform_device *pdev)
1390 {
1391 struct rnandc *rnandc = platform_get_drvdata(pdev);
1392
1393 rnandc_chips_cleanup(rnandc);
1394
1395 pm_runtime_put(&pdev->dev);
1396
1397 return 0;
1398 }
1399
/* Device tree match table for the supported SoC integrations */
static const struct of_device_id rnandc_id_table[] = {
	{ .compatible = "renesas,rcar-gen3-nandc" },
	{ .compatible = "renesas,rzn1-nandc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, rnandc_id_table);
1406
/* Platform driver glue; probe/remove handle all setup and teardown */
static struct platform_driver rnandc_driver = {
	.driver = {
		.name = "renesas-nandc",
		.of_match_table = rnandc_id_table,
	},
	.probe = rnandc_probe,
	.remove = rnandc_remove,
};
module_platform_driver(rnandc_driver);

MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 NAND controller driver");
MODULE_LICENSE("GPL v2");