Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 2018 Stefan Agner <stefan@agner.ch>
0004  * Copyright (C) 2014-2015 Lucas Stach <dev@lynxeye.de>
0005  * Copyright (C) 2012 Avionic Design GmbH
0006  */
0007 
0008 #include <linux/clk.h>
0009 #include <linux/completion.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/err.h>
0012 #include <linux/gpio/consumer.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/io.h>
0015 #include <linux/module.h>
0016 #include <linux/mtd/partitions.h>
0017 #include <linux/mtd/rawnand.h>
0018 #include <linux/of.h>
0019 #include <linux/platform_device.h>
0020 #include <linux/pm_runtime.h>
0021 #include <linux/reset.h>
0022 
0023 #include <soc/tegra/common.h>
0024 
0025 #define COMMAND                 0x00
0026 #define   COMMAND_GO                BIT(31)
0027 #define   COMMAND_CLE               BIT(30)
0028 #define   COMMAND_ALE               BIT(29)
0029 #define   COMMAND_PIO               BIT(28)
0030 #define   COMMAND_TX                BIT(27)
0031 #define   COMMAND_RX                BIT(26)
0032 #define   COMMAND_SEC_CMD           BIT(25)
0033 #define   COMMAND_AFT_DAT           BIT(24)
0034 #define   COMMAND_TRANS_SIZE(size)      ((((size) - 1) & 0xf) << 20)
0035 #define   COMMAND_A_VALID           BIT(19)
0036 #define   COMMAND_B_VALID           BIT(18)
0037 #define   COMMAND_RD_STATUS_CHK         BIT(17)
0038 #define   COMMAND_RBSY_CHK          BIT(16)
0039 #define   COMMAND_CE(x)             BIT(8 + ((x) & 0x7))
0040 #define   COMMAND_CLE_SIZE(size)        ((((size) - 1) & 0x3) << 4)
0041 #define   COMMAND_ALE_SIZE(size)        ((((size) - 1) & 0xf) << 0)
0042 
0043 #define STATUS                  0x04
0044 
0045 #define ISR                 0x08
0046 #define   ISR_CORRFAIL_ERR          BIT(24)
0047 #define   ISR_UND               BIT(7)
0048 #define   ISR_OVR               BIT(6)
0049 #define   ISR_CMD_DONE              BIT(5)
0050 #define   ISR_ECC_ERR               BIT(4)
0051 
0052 #define IER                 0x0c
0053 #define   IER_ERR_TRIG_VAL(x)           (((x) & 0xf) << 16)
0054 #define   IER_UND               BIT(7)
0055 #define   IER_OVR               BIT(6)
0056 #define   IER_CMD_DONE              BIT(5)
0057 #define   IER_ECC_ERR               BIT(4)
0058 #define   IER_GIE               BIT(0)
0059 
0060 #define CONFIG                  0x10
0061 #define   CONFIG_HW_ECC             BIT(31)
0062 #define   CONFIG_ECC_SEL            BIT(30)
0063 #define   CONFIG_ERR_COR            BIT(29)
0064 #define   CONFIG_PIPE_EN            BIT(28)
0065 #define   CONFIG_TVAL_4             (0 << 24)
0066 #define   CONFIG_TVAL_6             (1 << 24)
0067 #define   CONFIG_TVAL_8             (2 << 24)
0068 #define   CONFIG_SKIP_SPARE         BIT(23)
0069 #define   CONFIG_BUS_WIDTH_16           BIT(21)
0070 #define   CONFIG_COM_BSY            BIT(20)
0071 #define   CONFIG_PS_256             (0 << 16)
0072 #define   CONFIG_PS_512             (1 << 16)
0073 #define   CONFIG_PS_1024            (2 << 16)
0074 #define   CONFIG_PS_2048            (3 << 16)
0075 #define   CONFIG_PS_4096            (4 << 16)
0076 #define   CONFIG_SKIP_SPARE_SIZE_4      (0 << 14)
0077 #define   CONFIG_SKIP_SPARE_SIZE_8      (1 << 14)
0078 #define   CONFIG_SKIP_SPARE_SIZE_12     (2 << 14)
0079 #define   CONFIG_SKIP_SPARE_SIZE_16     (3 << 14)
0080 #define   CONFIG_TAG_BYTE_SIZE(x)           ((x) & 0xff)
0081 
0082 #define TIMING_1                0x14
0083 #define   TIMING_TRP_RESP(x)            (((x) & 0xf) << 28)
0084 #define   TIMING_TWB(x)             (((x) & 0xf) << 24)
0085 #define   TIMING_TCR_TAR_TRR(x)         (((x) & 0xf) << 20)
0086 #define   TIMING_TWHR(x)            (((x) & 0xf) << 16)
0087 #define   TIMING_TCS(x)             (((x) & 0x3) << 14)
0088 #define   TIMING_TWH(x)             (((x) & 0x3) << 12)
0089 #define   TIMING_TWP(x)             (((x) & 0xf) <<  8)
0090 #define   TIMING_TRH(x)             (((x) & 0x3) <<  4)
0091 #define   TIMING_TRP(x)             (((x) & 0xf) <<  0)
0092 
0093 #define RESP                    0x18
0094 
0095 #define TIMING_2                0x1c
0096 #define   TIMING_TADL(x)            ((x) & 0xf)
0097 
0098 #define CMD_REG1                0x20
0099 #define CMD_REG2                0x24
0100 #define ADDR_REG1               0x28
0101 #define ADDR_REG2               0x2c
0102 
0103 #define DMA_MST_CTRL                0x30
0104 #define   DMA_MST_CTRL_GO           BIT(31)
0105 #define   DMA_MST_CTRL_IN           (0 << 30)
0106 #define   DMA_MST_CTRL_OUT          BIT(30)
0107 #define   DMA_MST_CTRL_PERF_EN          BIT(29)
0108 #define   DMA_MST_CTRL_IE_DONE          BIT(28)
0109 #define   DMA_MST_CTRL_REUSE            BIT(27)
0110 #define   DMA_MST_CTRL_BURST_1          (2 << 24)
0111 #define   DMA_MST_CTRL_BURST_4          (3 << 24)
0112 #define   DMA_MST_CTRL_BURST_8          (4 << 24)
0113 #define   DMA_MST_CTRL_BURST_16         (5 << 24)
0114 #define   DMA_MST_CTRL_IS_DONE          BIT(20)
0115 #define   DMA_MST_CTRL_EN_A         BIT(2)
0116 #define   DMA_MST_CTRL_EN_B         BIT(1)
0117 
0118 #define DMA_CFG_A               0x34
0119 #define DMA_CFG_B               0x38
0120 
0121 #define FIFO_CTRL               0x3c
0122 #define   FIFO_CTRL_CLR_ALL         BIT(3)
0123 
0124 #define DATA_PTR                0x40
0125 #define TAG_PTR                 0x44
0126 #define ECC_PTR                 0x48
0127 
0128 #define DEC_STATUS              0x4c
0129 #define   DEC_STATUS_A_ECC_FAIL         BIT(1)
0130 #define   DEC_STATUS_ERR_COUNT_MASK     0x00ff0000
0131 #define   DEC_STATUS_ERR_COUNT_SHIFT        16
0132 
0133 #define HWSTATUS_CMD                0x50
0134 #define HWSTATUS_MASK               0x54
0135 #define   HWSTATUS_RDSTATUS_MASK(x)     (((x) & 0xff) << 24)
0136 #define   HWSTATUS_RDSTATUS_VALUE(x)        (((x) & 0xff) << 16)
0137 #define   HWSTATUS_RBSY_MASK(x)         (((x) & 0xff) << 8)
0138 #define   HWSTATUS_RBSY_VALUE(x)        (((x) & 0xff) << 0)
0139 
0140 #define BCH_CONFIG              0xcc
0141 #define   BCH_ENABLE                BIT(0)
0142 #define   BCH_TVAL_4                (0 << 4)
0143 #define   BCH_TVAL_8                (1 << 4)
0144 #define   BCH_TVAL_14               (2 << 4)
0145 #define   BCH_TVAL_16               (3 << 4)
0146 
0147 #define DEC_STAT_RESULT             0xd0
0148 #define DEC_STAT_BUF                0xd4
0149 #define   DEC_STAT_BUF_FAIL_SEC_FLAG_MASK   0xff000000
0150 #define   DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT  24
0151 #define   DEC_STAT_BUF_CORR_SEC_FLAG_MASK   0x00ff0000
0152 #define   DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT  16
0153 #define   DEC_STAT_BUF_MAX_CORR_CNT_MASK    0x00001f00
0154 #define   DEC_STAT_BUF_MAX_CORR_CNT_SHIFT   8
0155 
0156 #define OFFSET(val, off)    ((val) < (off) ? 0 : (val) - (off))
0157 
0158 #define SKIP_SPARE_BYTES    4
0159 #define BITS_PER_STEP_RS    18
0160 #define BITS_PER_STEP_BCH   13
0161 
0162 #define INT_MASK        (IER_UND | IER_OVR | IER_CMD_DONE | IER_GIE)
0163 #define HWSTATUS_CMD_DEFAULT    NAND_STATUS_READY
0164 #define HWSTATUS_MASK_DEFAULT   (HWSTATUS_RDSTATUS_MASK(1) | \
0165                 HWSTATUS_RDSTATUS_VALUE(0) | \
0166                 HWSTATUS_RBSY_MASK(NAND_STATUS_READY) | \
0167                 HWSTATUS_RBSY_VALUE(NAND_STATUS_READY))
0168 
/*
 * struct tegra_nand_controller - Tegra NAND controller instance state
 * @controller:		base NAND controller object
 * @dev:		parent device, used for DMA mapping and logging
 * @regs:		mapped controller register window
 * @irq:		controller interrupt line
 * @clk:		controller clock (rate feeds timing calculation)
 * @command_complete:	completed by the IRQ handler on ISR_CMD_DONE
 * @dma_complete:	completed by the IRQ handler on DMA_MST_CTRL_IS_DONE
 * @last_read_error:	set by the IRQ handler when ISR_CORRFAIL_ERR fired
 *			during the last transfer; consumed by the ECC read path
 * @cur_cs:		chip select programmed into COMMAND_CE() for the
 *			currently selected target
 * @chip:		active NAND chip
 */
struct tegra_nand_controller {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	int irq;
	struct clk *clk;
	struct completion command_complete;
	struct completion dma_complete;
	bool last_read_error;
	int cur_cs;
	struct nand_chip *chip;
};
0181 
/*
 * struct tegra_nand_chip - per-chip state
 * @chip:	base NAND chip object
 * @wp_gpio:	write-protect GPIO (optional)
 * @ecc:	cached ECC OOB region (offset/length into the spare area)
 * @config:	CONFIG register value for raw (no-ECC) transfers
 * @config_ecc:	CONFIG register value with hardware ECC enabled
 * @bch_config:	BCH_CONFIG register value (0 when RS ECC is used)
 * @cs:		chip-select line(s); only a single CS is supported
 */
struct tegra_nand_chip {
	struct nand_chip chip;
	struct gpio_desc *wp_gpio;
	struct mtd_oob_region ecc;
	u32 config;
	u32 config_ecc;
	u32 bch_config;
	int cs[1];
};
0191 
/* Convert a generic nand_controller pointer to our controller instance. */
static inline struct tegra_nand_controller *
			to_tegra_ctrl(struct nand_controller *hw_ctrl)
{
	return container_of(hw_ctrl, struct tegra_nand_controller, controller);
}
0197 
/* Convert a generic nand_chip pointer to our per-chip state. */
static inline struct tegra_nand_chip *to_tegra_chip(struct nand_chip *chip)
{
	return container_of(chip, struct tegra_nand_chip, chip);
}
0202 
0203 static int tegra_nand_ooblayout_rs_ecc(struct mtd_info *mtd, int section,
0204                        struct mtd_oob_region *oobregion)
0205 {
0206     struct nand_chip *chip = mtd_to_nand(mtd);
0207     int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_RS * chip->ecc.strength,
0208                       BITS_PER_BYTE);
0209 
0210     if (section > 0)
0211         return -ERANGE;
0212 
0213     oobregion->offset = SKIP_SPARE_BYTES;
0214     oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
0215 
0216     return 0;
0217 }
0218 
/*
 * No free bytes are exposed in the OOB area: the hardware ECC layout
 * consumes everything after the skipped spare bytes.
 */
static int tegra_nand_ooblayout_no_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	return -ERANGE;
}
0224 
/* OOB layout operations used when the Reed-Solomon engine is selected */
static const struct mtd_ooblayout_ops tegra_nand_oob_rs_ops = {
	.ecc = tegra_nand_ooblayout_rs_ecc,
	.free = tegra_nand_ooblayout_no_free,
};
0229 
0230 static int tegra_nand_ooblayout_bch_ecc(struct mtd_info *mtd, int section,
0231                     struct mtd_oob_region *oobregion)
0232 {
0233     struct nand_chip *chip = mtd_to_nand(mtd);
0234     int bytes_per_step = DIV_ROUND_UP(BITS_PER_STEP_BCH * chip->ecc.strength,
0235                       BITS_PER_BYTE);
0236 
0237     if (section > 0)
0238         return -ERANGE;
0239 
0240     oobregion->offset = SKIP_SPARE_BYTES;
0241     oobregion->length = round_up(bytes_per_step * chip->ecc.steps, 4);
0242 
0243     return 0;
0244 }
0245 
/* OOB layout operations used when the BCH engine is selected */
static const struct mtd_ooblayout_ops tegra_nand_oob_bch_ops = {
	.ecc = tegra_nand_ooblayout_bch_ecc,
	.free = tegra_nand_ooblayout_no_free,
};
0250 
/*
 * Interrupt handler: latches ECC error state, signals command and DMA
 * completion, and acknowledges all pending interrupt sources.
 */
static irqreturn_t tegra_nand_irq(int irq, void *data)
{
	struct tegra_nand_controller *ctrl = data;
	u32 isr, dma;

	isr = readl_relaxed(ctrl->regs + ISR);
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	dev_dbg(ctrl->dev, "isr %08x\n", isr);

	/* Not our interrupt: no ISR bits and DMA not done */
	if (!isr && !(dma & DMA_MST_CTRL_IS_DONE))
		return IRQ_NONE;

	/*
	 * The bit name is somewhat missleading: This is also set when
	 * HW ECC was successful. The data sheet states:
	 * Correctable OR Un-correctable errors occurred in the DMA transfer...
	 */
	if (isr & ISR_CORRFAIL_ERR)
		ctrl->last_read_error = true;

	if (isr & ISR_CMD_DONE)
		complete(&ctrl->command_complete);

	if (isr & ISR_UND)
		dev_err(ctrl->dev, "FIFO underrun\n");

	if (isr & ISR_OVR)
		dev_err(ctrl->dev, "FIFO overrun\n");

	/* handle DMA interrupts */
	if (dma & DMA_MST_CTRL_IS_DONE) {
		/* IS_DONE is write-one-to-clear; writing back acks it */
		writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);
		complete(&ctrl->dma_complete);
	}

	/* clear interrupts */
	writel_relaxed(isr, ctrl->regs + ISR);

	return IRQ_HANDLED;
}
0291 
/*
 * Register names indexed by register offset / 4, used by the debug dump.
 * The NULL entry corresponds to the RESP data register, which is skipped
 * because reading it would pop data from the FIFO.
 */
static const char * const tegra_nand_reg_names[] = {
	"COMMAND",
	"STATUS",
	"ISR",
	"IER",
	"CONFIG",
	"TIMING",
	NULL,
	"TIMING2",
	"CMD_REG1",
	"CMD_REG2",
	"ADDR_REG1",
	"ADDR_REG2",
	"DMA_MST_CTRL",
	"DMA_CFG_A",
	"DMA_CFG_B",
	"FIFO_CTRL",
};
0310 
0311 static void tegra_nand_dump_reg(struct tegra_nand_controller *ctrl)
0312 {
0313     u32 reg;
0314     int i;
0315 
0316     dev_err(ctrl->dev, "Tegra NAND controller register dump\n");
0317     for (i = 0; i < ARRAY_SIZE(tegra_nand_reg_names); i++) {
0318         const char *reg_name = tegra_nand_reg_names[i];
0319 
0320         if (!reg_name)
0321             continue;
0322 
0323         reg = readl_relaxed(ctrl->regs + (i * 4));
0324         dev_err(ctrl->dev, "%s: 0x%08x\n", reg_name, reg);
0325     }
0326 }
0327 
/*
 * Bring the controller back to a sane state after a timeout: stop any
 * in-flight command/DMA, acknowledge all pending interrupts and reset the
 * completion objects. The IRQ is masked while we do this so the handler
 * cannot observe the intermediate state.
 */
static void tegra_nand_controller_abort(struct tegra_nand_controller *ctrl)
{
	u32 isr, dma;

	disable_irq(ctrl->irq);

	/* Abort current command/DMA operation */
	writel_relaxed(0, ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(0, ctrl->regs + COMMAND);

	/* clear interrupts */
	isr = readl_relaxed(ctrl->regs + ISR);
	writel_relaxed(isr, ctrl->regs + ISR);
	/* DMA status bits are write-one-to-clear as well */
	dma = readl_relaxed(ctrl->regs + DMA_MST_CTRL);
	writel_relaxed(dma, ctrl->regs + DMA_MST_CTRL);

	reinit_completion(&ctrl->command_complete);
	reinit_completion(&ctrl->dma_complete);

	enable_irq(ctrl->irq);
}
0349 
/*
 * Execute one parsed sub-operation in PIO mode. The instructions of the
 * subop are translated into the COMMAND register bits plus the CMD/ADDR
 * registers, then launched as a single controller command. Data-in/out is
 * limited to 4 bytes (the width of the RESP register), which the op parser
 * patterns below guarantee.
 *
 * Returns 0 on success or -ETIMEDOUT if the command did not complete.
 */
static int tegra_nand_cmd(struct nand_chip *chip,
			  const struct nand_subop *subop)
{
	const struct nand_op_instr *instr;
	const struct nand_op_instr *instr_data_in = NULL;
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	unsigned int op_id, size = 0, offset = 0;
	bool first_cmd = true;
	u32 reg, cmd = 0;
	int ret;

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int naddrs, i;
		const u8 *addrs;
		u32 addr1 = 0, addr2 = 0;

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			/*
			 * The controller supports two command cycles per
			 * operation: the first goes to CMD_REG1 (CLE), a
			 * second one to CMD_REG2 (SEC_CMD).
			 */
			if (first_cmd) {
				cmd |= COMMAND_CLE;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG1);
			} else {
				cmd |= COMMAND_SEC_CMD;
				writel_relaxed(instr->ctx.cmd.opcode,
					       ctrl->regs + CMD_REG2);
			}
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(naddrs);
			/* Up to 4 address bytes in ADDR_REG1, rest in REG2 */
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr1 |= *addrs++ << (BITS_PER_BYTE * i);
			naddrs -= i;
			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				addr2 |= *addrs++ << (BITS_PER_BYTE * i);

			writel_relaxed(addr1, ctrl->regs + ADDR_REG1);
			writel_relaxed(addr2, ctrl->regs + ADDR_REG2);
			break;

		case NAND_OP_DATA_IN_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
				COMMAND_RX | COMMAND_A_VALID;

			/* Data is fetched from RESP after completion */
			instr_data_in = instr;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			size = nand_subop_get_data_len(subop, op_id);
			offset = nand_subop_get_data_start_off(subop, op_id);

			cmd |= COMMAND_TRANS_SIZE(size) | COMMAND_PIO |
				COMMAND_TX | COMMAND_A_VALID;
			/* size <= 4 per the op parser pattern constraint */
			memcpy(&reg, instr->ctx.data.buf.out + offset, size);

			writel_relaxed(reg, ctrl->regs + RESP);
			break;

		case NAND_OP_WAITRDY_INSTR:
			cmd |= COMMAND_RBSY_CHK;
			break;
		}
	}

	/* Launch the assembled command on the current chip select */
	cmd |= COMMAND_GO | COMMAND_CE(ctrl->cur_cs);
	writel_relaxed(cmd, ctrl->regs + COMMAND);
	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		return -ETIMEDOUT;
	}

	if (instr_data_in) {
		reg = readl_relaxed(ctrl->regs + RESP);
		memcpy(instr_data_in->ctx.data.buf.in + offset, &reg, size);
	}

	return 0;
}
0443 
/*
 * Operation parser patterns for PIO command execution. Address cycles are
 * limited to 8 (two 32-bit address registers) and PIO data transfers to
 * 4 bytes (the width of the RESP register).
 */
static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 4)),
	NAND_OP_PARSER_PATTERN(tegra_nand_cmd,
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8),
		NAND_OP_PARSER_PAT_CMD_ELEM(true),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
	);
0459 
/*
 * Record the chip select for the given die; the actual CE bit is applied
 * when the next COMMAND register value is assembled.
 */
static void tegra_nand_select_target(struct nand_chip *chip,
				     unsigned int die_nr)
{
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);

	ctrl->cur_cs = nand->cs[die_nr];
}
0468 
/*
 * exec_op entry point: select the addressed die (unless only checking
 * whether the operation is supported) and hand the operation to the parser.
 */
static int tegra_nand_exec_op(struct nand_chip *chip,
			      const struct nand_operation *op,
			      bool check_only)
{
	if (!check_only)
		tegra_nand_select_target(chip, op->cs);

	return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
				      check_only);
}
0479 
0480 static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
0481                   struct nand_chip *chip, bool enable)
0482 {
0483     struct tegra_nand_chip *nand = to_tegra_chip(chip);
0484 
0485     if (chip->ecc.algo == NAND_ECC_ALGO_BCH && enable)
0486         writel_relaxed(nand->bch_config, ctrl->regs + BCH_CONFIG);
0487     else
0488         writel_relaxed(0, ctrl->regs + BCH_CONFIG);
0489 
0490     if (enable)
0491         writel_relaxed(nand->config_ecc, ctrl->regs + CONFIG);
0492     else
0493         writel_relaxed(nand->config, ctrl->regs + CONFIG);
0494 }
0495 
/*
 * Transfer a full page (and/or its OOB area) to or from the chip using the
 * controller's DMA engine.
 *
 * @buf:     page data buffer, or NULL to transfer OOB only (column is then
 *           set past the data area)
 * @oob_buf: OOB buffer, or NULL to skip the spare area
 * @oob_len: number of OOB bytes to transfer through DMA channel B
 * @page:    page (row) address
 * @read:    true for a read, false for a program
 *
 * Returns 0 on success, -EINVAL on DMA mapping failure or -ETIMEDOUT if
 * the command or DMA did not complete.
 */
static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
				void *buf, void *oob_buf, int oob_len, int page,
				bool read)
{
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t dma_addr = 0, dma_addr_oob = 0;
	u32 addr1, cmd, dma_ctrl;
	int ret;

	tegra_nand_select_target(chip, chip->cur_cs);

	if (read) {
		writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
	} else {
		writel_relaxed(NAND_CMD_SEQIN, ctrl->regs + CMD_REG1);
		writel_relaxed(NAND_CMD_PAGEPROG, ctrl->regs + CMD_REG2);
	}
	cmd = COMMAND_CLE | COMMAND_SEC_CMD;

	/* Lower 16-bits are column, by default 0 */
	addr1 = page << 16;

	/* OOB-only access: start the column at the spare area */
	if (!buf)
		addr1 |= mtd->writesize;
	writel_relaxed(addr1, ctrl->regs + ADDR_REG1);

	if (chip->options & NAND_ROW_ADDR_3) {
		/* 5 address cycles: 2 column + 3 row */
		writel_relaxed(page >> 16, ctrl->regs + ADDR_REG2);
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(5);
	} else {
		cmd |= COMMAND_ALE | COMMAND_ALE_SIZE(4);
	}

	if (buf) {
		dma_addr = dma_map_single(ctrl->dev, buf, mtd->writesize, dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			return -EINVAL;
		}

		/* DMA_CFG registers hold the transfer size minus one */
		writel_relaxed(mtd->writesize - 1, ctrl->regs + DMA_CFG_A);
		writel_relaxed(dma_addr, ctrl->regs + DATA_PTR);
	}

	if (oob_buf) {
		dma_addr_oob = dma_map_single(ctrl->dev, oob_buf, mtd->oobsize,
					      dir);
		ret = dma_mapping_error(ctrl->dev, dma_addr_oob);
		if (ret) {
			dev_err(ctrl->dev, "dma mapping error\n");
			ret = -EINVAL;
			goto err_unmap_dma_page;
		}

		writel_relaxed(oob_len - 1, ctrl->regs + DMA_CFG_B);
		writel_relaxed(dma_addr_oob, ctrl->regs + TAG_PTR);
	}

	dma_ctrl = DMA_MST_CTRL_GO | DMA_MST_CTRL_PERF_EN |
		   DMA_MST_CTRL_IE_DONE | DMA_MST_CTRL_IS_DONE |
		   DMA_MST_CTRL_BURST_16;

	/* Channel A moves page data, channel B moves OOB/tag data */
	if (buf)
		dma_ctrl |= DMA_MST_CTRL_EN_A;
	if (oob_buf)
		dma_ctrl |= DMA_MST_CTRL_EN_B;

	if (read)
		dma_ctrl |= DMA_MST_CTRL_IN | DMA_MST_CTRL_REUSE;
	else
		dma_ctrl |= DMA_MST_CTRL_OUT;

	writel_relaxed(dma_ctrl, ctrl->regs + DMA_MST_CTRL);

	cmd |= COMMAND_GO | COMMAND_RBSY_CHK | COMMAND_TRANS_SIZE(9) |
	       COMMAND_CE(ctrl->cur_cs);

	if (buf)
		cmd |= COMMAND_A_VALID;
	if (oob_buf)
		cmd |= COMMAND_B_VALID;

	if (read)
		cmd |= COMMAND_RX;
	else
		cmd |= COMMAND_TX | COMMAND_AFT_DAT;

	writel_relaxed(cmd, ctrl->regs + COMMAND);

	/* Wait for the NAND command first, then for the DMA to drain */
	ret = wait_for_completion_timeout(&ctrl->command_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "COMMAND timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}

	ret = wait_for_completion_timeout(&ctrl->dma_complete,
					  msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ctrl->dev, "DMA timeout\n");
		tegra_nand_dump_reg(ctrl);
		tegra_nand_controller_abort(ctrl);
		ret = -ETIMEDOUT;
		goto err_unmap_dma;
	}
	ret = 0;

err_unmap_dma:
	if (oob_buf)
		dma_unmap_single(ctrl->dev, dma_addr_oob, mtd->oobsize, dir);
err_unmap_dma_page:
	if (buf)
		dma_unmap_single(ctrl->dev, dma_addr, mtd->writesize, dir);

	return ret;
}
0618 
0619 static int tegra_nand_read_page_raw(struct nand_chip *chip, u8 *buf,
0620                     int oob_required, int page)
0621 {
0622     struct mtd_info *mtd = nand_to_mtd(chip);
0623     void *oob_buf = oob_required ? chip->oob_poi : NULL;
0624 
0625     return tegra_nand_page_xfer(mtd, chip, buf, oob_buf,
0626                     mtd->oobsize, page, true);
0627 }
0628 
0629 static int tegra_nand_write_page_raw(struct nand_chip *chip, const u8 *buf,
0630                      int oob_required, int page)
0631 {
0632     struct mtd_info *mtd = nand_to_mtd(chip);
0633     void *oob_buf = oob_required ? chip->oob_poi : NULL;
0634 
0635     return tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
0636                      mtd->oobsize, page, false);
0637 }
0638 
/* Read only the OOB area of a page (no data buffer, hence NULL). */
static int tegra_nand_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, true);
}
0646 
/* Program only the OOB area of a page (no data buffer, hence NULL). */
static int tegra_nand_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	return tegra_nand_page_xfer(mtd, chip, NULL, chip->oob_poi,
				    mtd->oobsize, page, false);
}
0654 
/*
 * Read a page with hardware ECC enabled and translate the controller's
 * decode status into MTD ECC statistics. Returns the maximum number of
 * bitflips seen in any ECC step, or a negative error code.
 */
static int tegra_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
				      int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
	struct tegra_nand_chip *nand = to_tegra_chip(chip);
	void *oob_buf = oob_required ? chip->oob_poi : NULL;
	u32 dec_stat, max_corr_cnt;
	unsigned long fail_sec_flag;
	int ret;

	tegra_nand_hw_ecc(ctrl, chip, true);
	ret = tegra_nand_page_xfer(mtd, chip, buf, oob_buf, 0, page, true);
	tegra_nand_hw_ecc(ctrl, chip, false);
	if (ret)
		return ret;

	/* No correctable or un-correctable errors, page must have 0 bitflips */
	if (!ctrl->last_read_error)
		return 0;

	/*
	 * Correctable or un-correctable errors occurred. Use DEC_STAT_BUF
	 * which contains information for all ECC selections.
	 *
	 * Note that since we do not use Command Queues DEC_RESULT does not
	 * state the number of pages we can read from the DEC_STAT_BUF. But
	 * since CORRFAIL_ERR did occur during page read we do have a valid
	 * result in DEC_STAT_BUF.
	 */
	ctrl->last_read_error = false;
	dec_stat = readl_relaxed(ctrl->regs + DEC_STAT_BUF);

	/* One flag bit per ECC sector: set when correction failed */
	fail_sec_flag = (dec_stat & DEC_STAT_BUF_FAIL_SEC_FLAG_MASK) >>
			DEC_STAT_BUF_FAIL_SEC_FLAG_SHIFT;

	max_corr_cnt = (dec_stat & DEC_STAT_BUF_MAX_CORR_CNT_MASK) >>
		       DEC_STAT_BUF_MAX_CORR_CNT_SHIFT;

	if (fail_sec_flag) {
		int bit, max_bitflips = 0;

		/*
		 * Since we do not support subpage writes, a complete page
		 * is either written or not. We can take a shortcut here by
		 * checking wheather any of the sector has been successful
		 * read. If at least one sectors has been read successfully,
		 * the page must have been a written previously. It cannot
		 * be an erased page.
		 *
		 * E.g. controller might return fail_sec_flag with 0x4, which
		 * would mean only the third sector failed to correct. The
		 * page must have been written and the third sector is really
		 * not correctable anymore.
		 */
		if (fail_sec_flag ^ GENMASK(chip->ecc.steps - 1, 0)) {
			mtd->ecc_stats.failed += hweight8(fail_sec_flag);
			return max_corr_cnt;
		}

		/*
		 * All sectors failed to correct, but the ECC isn't smart
		 * enough to figure out if a page is really just erased.
		 * Read OOB data and check whether data/OOB is completely
		 * erased or if error correction just failed for all sub-
		 * pages.
		 */
		ret = tegra_nand_read_oob(chip, page);
		if (ret < 0)
			return ret;

		for_each_set_bit(bit, &fail_sec_flag, chip->ecc.steps) {
			u8 *data = buf + (chip->ecc.size * bit);
			u8 *oob = chip->oob_poi + nand->ecc.offset +
				  (chip->ecc.bytes * bit);

			/* Tolerates up to ecc.strength bitflips in erased data */
			ret = nand_check_erased_ecc_chunk(data, chip->ecc.size,
							  oob, chip->ecc.bytes,
							  NULL, 0,
							  chip->ecc.strength);
			if (ret < 0) {
				mtd->ecc_stats.failed++;
			} else {
				mtd->ecc_stats.corrected += ret;
				max_bitflips = max(ret, max_bitflips);
			}
		}

		return max_t(unsigned int, max_corr_cnt, max_bitflips);
	} else {
		int corr_sec_flag;

		corr_sec_flag = (dec_stat & DEC_STAT_BUF_CORR_SEC_FLAG_MASK) >>
				DEC_STAT_BUF_CORR_SEC_FLAG_SHIFT;

		/*
		 * The value returned in the register is the maximum of
		 * bitflips encountered in any of the ECC regions. As there is
		 * no way to get the number of bitflips in a specific regions
		 * we are not able to deliver correct stats but instead
		 * overestimate the number of corrected bitflips by assuming
		 * that all regions where errors have been corrected
		 * encountered the maximum number of bitflips.
		 */
		mtd->ecc_stats.corrected += max_corr_cnt * hweight8(corr_sec_flag);

		return max_corr_cnt;
	}
}
0764 
0765 static int tegra_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
0766                        int oob_required, int page)
0767 {
0768     struct mtd_info *mtd = nand_to_mtd(chip);
0769     struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
0770     void *oob_buf = oob_required ? chip->oob_poi : NULL;
0771     int ret;
0772 
0773     tegra_nand_hw_ecc(ctrl, chip, true);
0774     ret = tegra_nand_page_xfer(mtd, chip, (void *)buf, oob_buf,
0775                    0, page, false);
0776     tegra_nand_hw_ecc(ctrl, chip, false);
0777 
0778     return ret;
0779 }
0780 
/*
 * Translate SDR timing requirements into the TIMING_1/TIMING_2 registers.
 * Register fields encode "cycles minus an offset" (the hardware adds a
 * fixed number of cycles per field), hence the OFFSET() subtraction after
 * rounding each timing up to whole clock periods.
 */
static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl,
				    const struct nand_sdr_timings *timings)
{
	/*
	 * The period (and all other timings in this function) is in ps,
	 * so need to take care here to avoid integer overflows.
	 */
	unsigned int rate = clk_get_rate(ctrl->clk) / 1000000;
	unsigned int period = DIV_ROUND_UP(1000000, rate);
	u32 val, reg = 0;

	val = DIV_ROUND_UP(max3(timings->tAR_min, timings->tRR_min,
				timings->tRC_min), period);
	reg |= TIMING_TCR_TAR_TRR(OFFSET(val, 3));

	val = DIV_ROUND_UP(max(max(timings->tCS_min, timings->tCH_min),
			       max(timings->tALS_min, timings->tALH_min)),
			   period);
	reg |= TIMING_TCS(OFFSET(val, 2));

	/* 6000 ps of margin on top of tRP/tREA — presumably board/pad delay */
	val = DIV_ROUND_UP(max(timings->tRP_min, timings->tREA_max) + 6000,
			   period);
	reg |= TIMING_TRP(OFFSET(val, 1)) | TIMING_TRP_RESP(OFFSET(val, 1));

	reg |= TIMING_TWB(OFFSET(DIV_ROUND_UP(timings->tWB_max, period), 1));
	reg |= TIMING_TWHR(OFFSET(DIV_ROUND_UP(timings->tWHR_min, period), 1));
	reg |= TIMING_TWH(OFFSET(DIV_ROUND_UP(timings->tWH_min, period), 1));
	reg |= TIMING_TWP(OFFSET(DIV_ROUND_UP(timings->tWP_min, period), 1));
	reg |= TIMING_TRH(OFFSET(DIV_ROUND_UP(timings->tREH_min, period), 1));

	writel_relaxed(reg, ctrl->regs + TIMING_1);

	val = DIV_ROUND_UP(timings->tADL_min, period);
	reg = TIMING_TADL(OFFSET(val, 3));

	writel_relaxed(reg, ctrl->regs + TIMING_2);
}
0818 
0819 static int tegra_nand_setup_interface(struct nand_chip *chip, int csline,
0820                       const struct nand_interface_config *conf)
0821 {
0822     struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
0823     const struct nand_sdr_timings *timings;
0824 
0825     timings = nand_get_sdr_timings(conf);
0826     if (IS_ERR(timings))
0827         return PTR_ERR(timings);
0828 
0829     if (csline == NAND_DATA_IFACE_CHECK_ONLY)
0830         return 0;
0831 
0832     tegra_nand_setup_timing(ctrl, timings);
0833 
0834     return 0;
0835 }
0836 
/*
 * Supported ECC strengths per algorithm. The "_bootable" tables are the
 * subsets usable when the chip is a boot medium (NAND_IS_BOOT_MEDIUM).
 */
static const int rs_strength_bootable[] = { 4 };
static const int rs_strength[] = { 4, 6, 8 };
static const int bch_strength_bootable[] = { 8, 16 };
static const int bch_strength[] = { 4, 8, 14, 16 };
0841 
0842 static int tegra_nand_get_strength(struct nand_chip *chip, const int *strength,
0843                    int strength_len, int bits_per_step,
0844                    int oobsize)
0845 {
0846     struct nand_device *base = mtd_to_nanddev(nand_to_mtd(chip));
0847     const struct nand_ecc_props *requirements =
0848         nanddev_get_ecc_requirements(base);
0849     bool maximize = base->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH;
0850     int i;
0851 
0852     /*
0853      * Loop through available strengths. Backwards in case we try to
0854      * maximize the BCH strength.
0855      */
0856     for (i = 0; i < strength_len; i++) {
0857         int strength_sel, bytes_per_step, bytes_per_page;
0858 
0859         if (maximize) {
0860             strength_sel = strength[strength_len - i - 1];
0861         } else {
0862             strength_sel = strength[i];
0863 
0864             if (strength_sel < requirements->strength)
0865                 continue;
0866         }
0867 
0868         bytes_per_step = DIV_ROUND_UP(bits_per_step * strength_sel,
0869                           BITS_PER_BYTE);
0870         bytes_per_page = round_up(bytes_per_step * chip->ecc.steps, 4);
0871 
0872         /* Check whether strength fits OOB */
0873         if (bytes_per_page < (oobsize - SKIP_SPARE_BYTES))
0874             return strength_sel;
0875     }
0876 
0877     return -EINVAL;
0878 }
0879 
0880 static int tegra_nand_select_strength(struct nand_chip *chip, int oobsize)
0881 {
0882     const int *strength;
0883     int strength_len, bits_per_step;
0884 
0885     switch (chip->ecc.algo) {
0886     case NAND_ECC_ALGO_RS:
0887         bits_per_step = BITS_PER_STEP_RS;
0888         if (chip->options & NAND_IS_BOOT_MEDIUM) {
0889             strength = rs_strength_bootable;
0890             strength_len = ARRAY_SIZE(rs_strength_bootable);
0891         } else {
0892             strength = rs_strength;
0893             strength_len = ARRAY_SIZE(rs_strength);
0894         }
0895         break;
0896     case NAND_ECC_ALGO_BCH:
0897         bits_per_step = BITS_PER_STEP_BCH;
0898         if (chip->options & NAND_IS_BOOT_MEDIUM) {
0899             strength = bch_strength_bootable;
0900             strength_len = ARRAY_SIZE(bch_strength_bootable);
0901         } else {
0902             strength = bch_strength;
0903             strength_len = ARRAY_SIZE(bch_strength);
0904         }
0905         break;
0906     default:
0907         return -EINVAL;
0908     }
0909 
0910     return tegra_nand_get_strength(chip, strength, strength_len,
0911                        bits_per_step, oobsize);
0912 }
0913 
/*
 * ->attach_chip() hook, called by nand_scan() after the chip has been
 * identified: validate the chip's geometry/ECC requirements against the
 * controller, install the ECC page/OOB accessors, pick algorithm and
 * strength, and derive the CONFIG/BCH register values cached in the
 * per-chip state (nand->config, nand->config_ecc, nand->bch_config).
 *
 * Returns 0 on success or a negative errno if the chip cannot be
 * supported.
 */
static int tegra_nand_attach_chip(struct nand_chip *chip)
{
    struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
    const struct nand_ecc_props *requirements =
        nanddev_get_ecc_requirements(&chip->base);
    struct tegra_nand_chip *nand = to_tegra_chip(chip);
    struct mtd_info *mtd = nand_to_mtd(chip);
    int bits_per_step;
    int ret;

    /* If a flash-based BBT is requested, keep its marker out of OOB */
    if (chip->bbt_options & NAND_BBT_USE_FLASH)
        chip->bbt_options |= NAND_BBT_NO_OOB;

    /* Host ECC engine; this controller only handles 512-byte ECC steps */
    chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
    chip->ecc.size = 512;
    chip->ecc.steps = mtd->writesize / chip->ecc.size;
    if (requirements->step_size != 512) {
        dev_err(ctrl->dev, "Unsupported step size %d\n",
            requirements->step_size);
        return -EINVAL;
    }

    chip->ecc.read_page = tegra_nand_read_page_hwecc;
    chip->ecc.write_page = tegra_nand_write_page_hwecc;
    chip->ecc.read_page_raw = tegra_nand_read_page_raw;
    chip->ecc.write_page_raw = tegra_nand_write_page_raw;
    chip->ecc.read_oob = tegra_nand_read_oob;
    chip->ecc.write_oob = tegra_nand_write_oob;

    if (chip->options & NAND_BUSWIDTH_16)
        nand->config |= CONFIG_BUS_WIDTH_16;

    /* No algorithm from DT/core: RS for small-page, BCH for large-page */
    if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) {
        if (mtd->writesize < 2048)
            chip->ecc.algo = NAND_ECC_ALGO_RS;
        else
            chip->ecc.algo = NAND_ECC_ALGO_BCH;
    }

    if (chip->ecc.algo == NAND_ECC_ALGO_BCH && mtd->writesize < 2048) {
        dev_err(ctrl->dev, "BCH supports 2K or 4K page size only\n");
        return -EINVAL;
    }

    /* No explicit strength requested: pick one that fits the OOB area */
    if (!chip->ecc.strength) {
        ret = tegra_nand_select_strength(chip, mtd->oobsize);
        if (ret < 0) {
            dev_err(ctrl->dev,
                "No valid strength found, minimum %d\n",
                requirements->strength);
            return ret;
        }

        chip->ecc.strength = ret;
    }

    nand->config_ecc = CONFIG_PIPE_EN | CONFIG_SKIP_SPARE |
               CONFIG_SKIP_SPARE_SIZE_4;

    /* Translate algorithm + strength into CONFIG/BCH register bits */
    switch (chip->ecc.algo) {
    case NAND_ECC_ALGO_RS:
        bits_per_step = BITS_PER_STEP_RS * chip->ecc.strength;
        mtd_set_ooblayout(mtd, &tegra_nand_oob_rs_ops);
        nand->config_ecc |= CONFIG_HW_ECC | CONFIG_ECC_SEL |
                    CONFIG_ERR_COR;
        switch (chip->ecc.strength) {
        case 4:
            nand->config_ecc |= CONFIG_TVAL_4;
            break;
        case 6:
            nand->config_ecc |= CONFIG_TVAL_6;
            break;
        case 8:
            nand->config_ecc |= CONFIG_TVAL_8;
            break;
        default:
            dev_err(ctrl->dev, "ECC strength %d not supported\n",
                chip->ecc.strength);
            return -EINVAL;
        }
        break;
    case NAND_ECC_ALGO_BCH:
        bits_per_step = BITS_PER_STEP_BCH * chip->ecc.strength;
        mtd_set_ooblayout(mtd, &tegra_nand_oob_bch_ops);
        nand->bch_config = BCH_ENABLE;
        switch (chip->ecc.strength) {
        case 4:
            nand->bch_config |= BCH_TVAL_4;
            break;
        case 8:
            nand->bch_config |= BCH_TVAL_8;
            break;
        case 14:
            nand->bch_config |= BCH_TVAL_14;
            break;
        case 16:
            nand->bch_config |= BCH_TVAL_16;
            break;
        default:
            dev_err(ctrl->dev, "ECC strength %d not supported\n",
                chip->ecc.strength);
            return -EINVAL;
        }
        break;
    default:
        dev_err(ctrl->dev, "ECC algorithm not supported\n");
        return -EINVAL;
    }

    dev_info(ctrl->dev, "Using %s with strength %d per 512 byte step\n",
         chip->ecc.algo == NAND_ECC_ALGO_BCH ? "BCH" : "RS",
         chip->ecc.strength);

    chip->ecc.bytes = DIV_ROUND_UP(bits_per_step, BITS_PER_BYTE);

    /* Encode the page size into the CONFIG register value */
    switch (mtd->writesize) {
    case 256:
        nand->config |= CONFIG_PS_256;
        break;
    case 512:
        nand->config |= CONFIG_PS_512;
        break;
    case 1024:
        nand->config |= CONFIG_PS_1024;
        break;
    case 2048:
        nand->config |= CONFIG_PS_2048;
        break;
    case 4096:
        nand->config |= CONFIG_PS_4096;
        break;
    default:
        dev_err(ctrl->dev, "Unsupported writesize %d\n",
            mtd->writesize);
        return -ENODEV;
    }

    /* Store complete configuration for HW ECC in config_ecc */
    nand->config_ecc |= nand->config;

    /* Non-HW ECC read/writes complete OOB */
    nand->config |= CONFIG_TAG_BYTE_SIZE(mtd->oobsize - 1);
    writel_relaxed(nand->config, ctrl->regs + CONFIG);

    return 0;
}
1060 
1061 static const struct nand_controller_ops tegra_nand_controller_ops = {
1062     .attach_chip = &tegra_nand_attach_chip,
1063     .exec_op = tegra_nand_exec_op,
1064     .setup_interface = tegra_nand_setup_interface,
1065 };
1066 
1067 static int tegra_nand_chips_init(struct device *dev,
1068                  struct tegra_nand_controller *ctrl)
1069 {
1070     struct device_node *np = dev->of_node;
1071     struct device_node *np_nand;
1072     int nsels, nchips = of_get_child_count(np);
1073     struct tegra_nand_chip *nand;
1074     struct mtd_info *mtd;
1075     struct nand_chip *chip;
1076     int ret;
1077     u32 cs;
1078 
1079     if (nchips != 1) {
1080         dev_err(dev, "Currently only one NAND chip supported\n");
1081         return -EINVAL;
1082     }
1083 
1084     np_nand = of_get_next_child(np, NULL);
1085 
1086     nsels = of_property_count_elems_of_size(np_nand, "reg", sizeof(u32));
1087     if (nsels != 1) {
1088         dev_err(dev, "Missing/invalid reg property\n");
1089         return -EINVAL;
1090     }
1091 
1092     /* Retrieve CS id, currently only single die NAND supported */
1093     ret = of_property_read_u32(np_nand, "reg", &cs);
1094     if (ret) {
1095         dev_err(dev, "could not retrieve reg property: %d\n", ret);
1096         return ret;
1097     }
1098 
1099     nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
1100     if (!nand)
1101         return -ENOMEM;
1102 
1103     nand->cs[0] = cs;
1104 
1105     nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
1106 
1107     if (IS_ERR(nand->wp_gpio)) {
1108         ret = PTR_ERR(nand->wp_gpio);
1109         dev_err(dev, "Failed to request WP GPIO: %d\n", ret);
1110         return ret;
1111     }
1112 
1113     chip = &nand->chip;
1114     chip->controller = &ctrl->controller;
1115 
1116     mtd = nand_to_mtd(chip);
1117 
1118     mtd->dev.parent = dev;
1119     mtd->owner = THIS_MODULE;
1120 
1121     nand_set_flash_node(chip, np_nand);
1122 
1123     if (!mtd->name)
1124         mtd->name = "tegra_nand";
1125 
1126     chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;
1127 
1128     ret = nand_scan(chip, 1);
1129     if (ret)
1130         return ret;
1131 
1132     mtd_ooblayout_ecc(mtd, 0, &nand->ecc);
1133 
1134     ret = mtd_device_register(mtd, NULL, 0);
1135     if (ret) {
1136         dev_err(dev, "Failed to register mtd device: %d\n", ret);
1137         nand_cleanup(chip);
1138         return ret;
1139     }
1140 
1141     ctrl->chip = chip;
1142 
1143     return 0;
1144 }
1145 
1146 static int tegra_nand_probe(struct platform_device *pdev)
1147 {
1148     struct reset_control *rst;
1149     struct tegra_nand_controller *ctrl;
1150     int err = 0;
1151 
1152     ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
1153     if (!ctrl)
1154         return -ENOMEM;
1155 
1156     ctrl->dev = &pdev->dev;
1157     platform_set_drvdata(pdev, ctrl);
1158     nand_controller_init(&ctrl->controller);
1159     ctrl->controller.ops = &tegra_nand_controller_ops;
1160 
1161     ctrl->regs = devm_platform_ioremap_resource(pdev, 0);
1162     if (IS_ERR(ctrl->regs))
1163         return PTR_ERR(ctrl->regs);
1164 
1165     rst = devm_reset_control_get(&pdev->dev, "nand");
1166     if (IS_ERR(rst))
1167         return PTR_ERR(rst);
1168 
1169     ctrl->clk = devm_clk_get(&pdev->dev, "nand");
1170     if (IS_ERR(ctrl->clk))
1171         return PTR_ERR(ctrl->clk);
1172 
1173     err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
1174     if (err)
1175         return err;
1176 
1177     /*
1178      * This driver doesn't support active power management yet,
1179      * so we will simply keep device resumed.
1180      */
1181     pm_runtime_enable(&pdev->dev);
1182     err = pm_runtime_resume_and_get(&pdev->dev);
1183     if (err)
1184         return err;
1185 
1186     err = reset_control_reset(rst);
1187     if (err) {
1188         dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
1189         goto err_put_pm;
1190     }
1191 
1192     writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
1193     writel_relaxed(HWSTATUS_MASK_DEFAULT, ctrl->regs + HWSTATUS_MASK);
1194     writel_relaxed(INT_MASK, ctrl->regs + IER);
1195 
1196     init_completion(&ctrl->command_complete);
1197     init_completion(&ctrl->dma_complete);
1198 
1199     ctrl->irq = platform_get_irq(pdev, 0);
1200     err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
1201                    dev_name(&pdev->dev), ctrl);
1202     if (err) {
1203         dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
1204         goto err_put_pm;
1205     }
1206 
1207     writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
1208 
1209     err = tegra_nand_chips_init(ctrl->dev, ctrl);
1210     if (err)
1211         goto err_put_pm;
1212 
1213     return 0;
1214 
1215 err_put_pm:
1216     pm_runtime_put_sync_suspend(ctrl->dev);
1217     pm_runtime_force_suspend(ctrl->dev);
1218     return err;
1219 }
1220 
1221 static int tegra_nand_remove(struct platform_device *pdev)
1222 {
1223     struct tegra_nand_controller *ctrl = platform_get_drvdata(pdev);
1224     struct nand_chip *chip = ctrl->chip;
1225     struct mtd_info *mtd = nand_to_mtd(chip);
1226 
1227     WARN_ON(mtd_device_unregister(mtd));
1228 
1229     nand_cleanup(chip);
1230 
1231     pm_runtime_put_sync_suspend(ctrl->dev);
1232     pm_runtime_force_suspend(ctrl->dev);
1233 
1234     return 0;
1235 }
1236 
1237 static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
1238 {
1239     struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
1240     int err;
1241 
1242     err = clk_prepare_enable(ctrl->clk);
1243     if (err) {
1244         dev_err(dev, "Failed to enable clock: %d\n", err);
1245         return err;
1246     }
1247 
1248     return 0;
1249 }
1250 
/* Runtime PM suspend: gate the controller clock; no state to save. */
static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
{
    struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);

    clk_disable_unprepare(ctrl->clk);

    return 0;
}
1259 
/*
 * Only runtime PM ops are provided (clock gating); probe keeps the
 * device resumed, so no system-sleep handlers are needed here.
 */
static const struct dev_pm_ops tegra_nand_pm = {
    SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
               NULL)
};
1264 
/* Device tree match table; Tegra20 is the only supported compatible */
static const struct of_device_id tegra_nand_of_match[] = {
    { .compatible = "nvidia,tegra20-nand" },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_nand_of_match);
1270 
/* Platform driver glue and module metadata */
static struct platform_driver tegra_nand_driver = {
    .driver = {
        .name = "tegra-nand",
        .of_match_table = tegra_nand_of_match,
        .pm = &tegra_nand_pm,
    },
    .probe = tegra_nand_probe,
    .remove = tegra_nand_remove,
};
module_platform_driver(tegra_nand_driver);

MODULE_DESCRIPTION("NVIDIA Tegra NAND driver");
MODULE_AUTHOR("Thierry Reding <thierry.reding@nvidia.com>");
MODULE_AUTHOR("Lucas Stach <dev@lynxeye.de>");
MODULE_AUTHOR("Stefan Agner <stefan@agner.ch>");
MODULE_LICENSE("GPL v2");