Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * NAND Flash Controller Device Driver
0004  * Copyright © 2009-2010, Intel Corporation and its suppliers.
0005  *
0006  * Copyright (c) 2017-2019 Socionext Inc.
0007  *   Reworked by Masahiro Yamada <yamada.masahiro@socionext.com>
0008  */
0009 
0010 #include <linux/bitfield.h>
0011 #include <linux/completion.h>
0012 #include <linux/dma-mapping.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/io.h>
0015 #include <linux/module.h>
0016 #include <linux/mtd/mtd.h>
0017 #include <linux/mtd/rawnand.h>
0018 #include <linux/slab.h>
0019 #include <linux/spinlock.h>
0020 
0021 #include "denali.h"
0022 
#define DENALI_NAND_NAME    "denali-nand"

/* for Indexed Addressing */
#define DENALI_INDEXED_CTRL 0x00
#define DENALI_INDEXED_DATA 0x10

/* MAP mode is carried in bits [27:26] of the slave address */
#define DENALI_MAP00        (0 << 26)   /* direct access to buffer */
#define DENALI_MAP01        (1 << 26)   /* read/write pages in PIO */
#define DENALI_MAP10        (2 << 26)   /* high-level control plane */
#define DENALI_MAP11        (3 << 26)   /* direct controller access */

/* MAP11 access cycle type (low two bits of the slave address) */
#define DENALI_MAP11_CMD    ((DENALI_MAP11) | 0)    /* command cycle */
#define DENALI_MAP11_ADDR   ((DENALI_MAP11) | 1)    /* address cycle */
#define DENALI_MAP11_DATA   ((DENALI_MAP11) | 2)    /* data cycle */

/* the bank (chip select) is carried in bits [25:24] of the slave address */
#define DENALI_BANK(denali) ((denali)->active_bank << 24)

#define DENALI_INVALID_BANK -1
0042 
/* Get the denali_chip wrapper that embeds the given nand_chip. */
static struct denali_chip *to_denali_chip(struct nand_chip *chip)
{
	return container_of(chip, struct denali_chip, chip);
}
0047 
/* Get the denali_controller that owns the given nand_chip's controller. */
static struct denali_controller *to_denali_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct denali_controller,
			    controller);
}
0053 
/*
 * Direct Addressing - the slave address forms the control information (command
 * type, bank, block, and page address).  The slave data is the actual data to
 * be transferred.  This mode requires 28 bits of address region allocated.
 */
static u32 denali_direct_read(struct denali_controller *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}
0063 
/* Direct Addressing write counterpart of denali_direct_read(). */
static void denali_direct_write(struct denali_controller *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}
0069 
/*
 * Indexed Addressing - address translation module intervenes in passing the
 * control information.  This mode reduces the required address range.  The
 * control information and transferred data are latched by the registers in
 * the translation module.
 */
static u32 denali_indexed_read(struct denali_controller *denali, u32 addr)
{
	/* latch the control word first, then read the latched data */
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}
0081 
/* Indexed Addressing write counterpart of denali_indexed_read(). */
static void denali_indexed_write(struct denali_controller *denali, u32 addr,
				 u32 data)
{
	/* the control word must be latched before the data register access */
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
0088 
0089 static void denali_enable_irq(struct denali_controller *denali)
0090 {
0091     int i;
0092 
0093     for (i = 0; i < denali->nbanks; i++)
0094         iowrite32(U32_MAX, denali->reg + INTR_EN(i));
0095     iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
0096 }
0097 
0098 static void denali_disable_irq(struct denali_controller *denali)
0099 {
0100     int i;
0101 
0102     for (i = 0; i < denali->nbanks; i++)
0103         iowrite32(0, denali->reg + INTR_EN(i));
0104     iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
0105 }
0106 
/* Acknowledge the given interrupt bits of one bank. */
static void denali_clear_irq(struct denali_controller *denali,
			     int bank, u32 irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
0113 
0114 static void denali_clear_irq_all(struct denali_controller *denali)
0115 {
0116     int i;
0117 
0118     for (i = 0; i < denali->nbanks; i++)
0119         denali_clear_irq(denali, i, U32_MAX);
0120 }
0121 
/*
 * Interrupt handler: acknowledge the interrupt sources of every bank and
 * accumulate the status bits of the currently active bank so that
 * denali_wait_for_irq() can observe them.
 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_controller *denali = dev_id;
	irqreturn_t ret = IRQ_NONE;
	u32 irq_status;
	int i;

	spin_lock(&denali->irq_lock);

	for (i = 0; i < denali->nbanks; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		if (irq_status)
			ret = IRQ_HANDLED;

		/* write-one-to-clear acknowledge, even for inactive banks */
		denali_clear_irq(denali, i, irq_status);

		/* only events of the active bank are of interest to waiters */
		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;

		/* wake the waiter once an awaited event has arrived */
		if (denali->irq_status & denali->irq_mask)
			complete(&denali->complete);
	}

	spin_unlock(&denali->irq_lock);

	return ret;
}
0151 
/*
 * Clear the accumulated IRQ status and the wake-up mask.  Must be called
 * before kicking an operation whose completion is awaited with
 * denali_wait_for_irq().
 */
static void denali_reset_irq(struct denali_controller *denali)
{
	unsigned long flags;

	spin_lock_irqsave(&denali->irq_lock, flags);
	denali->irq_status = 0;
	denali->irq_mask = 0;
	spin_unlock_irqrestore(&denali->irq_lock, flags);
}
0161 
/*
 * Wait until any interrupt in @irq_mask has been collected by denali_isr()
 * for the active bank.  Returns the accumulated IRQ status, or 0 on a
 * one-second timeout.
 */
static u32 denali_wait_for_irq(struct denali_controller *denali, u32 irq_mask)
{
	unsigned long time_left, flags;
	u32 irq_status;

	spin_lock_irqsave(&denali->irq_lock, flags);

	irq_status = denali->irq_status;

	if (irq_mask & irq_status) {
		/* return immediately if the IRQ has already happened. */
		spin_unlock_irqrestore(&denali->irq_lock, flags);
		return irq_status;
	}

	/* publish the mask under the lock so the ISR knows what to wake on */
	denali->irq_mask = irq_mask;
	reinit_completion(&denali->complete);
	spin_unlock_irqrestore(&denali->irq_lock, flags);

	time_left = wait_for_completion_timeout(&denali->complete,
						msecs_to_jiffies(1000));
	if (!time_left) {
		dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
			irq_mask);
		return 0;
	}

	return denali->irq_status;
}
0191 
/*
 * Make the chip select @cs of @chip the active target: record its bank
 * and program the controller's geometry, ECC, and (unless the core asked
 * to keep them) timing registers from the cached per-CS values.
 */
static void denali_select_target(struct nand_chip *chip, int cs)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct denali_chip_sel *sel = &to_denali_chip(chip)->sels[cs];
	struct mtd_info *mtd = nand_to_mtd(chip);

	denali->active_bank = sel->bank;

	/* device geometry */
	iowrite32(1 << (chip->phys_erase_shift - chip->page_shift),
		  denali->reg + PAGES_PER_BLOCK);
	iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
		  denali->reg + DEVICE_WIDTH);
	iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
	iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
	iowrite32(chip->options & NAND_ROW_ADDR_3 ?
		  0 : TWO_ROW_ADDR_CYCLES__FLAG,
		  denali->reg + TWO_ROW_ADDR_CYCLES);

	/* ECC configuration */
	iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
		  FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
		  denali->reg + ECC_CORRECTION);
	iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
	iowrite32(chip->ecc.steps, denali->reg + CFG_NUM_DATA_BLOCKS);

	if (chip->options & NAND_KEEP_TIMINGS)
		return;

	/* update timing registers unless NAND_KEEP_TIMINGS is set */
	iowrite32(sel->hwhr2_and_we_2_re, denali->reg + TWHR2_AND_WE_2_RE);
	iowrite32(sel->tcwaw_and_addr_2_data,
		  denali->reg + TCWAW_AND_ADDR_2_DATA);
	iowrite32(sel->re_2_we, denali->reg + RE_2_WE);
	iowrite32(sel->acc_clks, denali->reg + ACC_CLKS);
	iowrite32(sel->rdwr_en_lo_cnt, denali->reg + RDWR_EN_LO_CNT);
	iowrite32(sel->rdwr_en_hi_cnt, denali->reg + RDWR_EN_HI_CNT);
	iowrite32(sel->cs_setup_cnt, denali->reg + CS_SETUP_CNT);
	iowrite32(sel->re_2_re, denali->reg + RE_2_RE);
}
0230 
0231 static int denali_change_column(struct nand_chip *chip, unsigned int offset,
0232                 void *buf, unsigned int len, bool write)
0233 {
0234     if (write)
0235         return nand_change_write_column_op(chip, offset, buf, len,
0236                            false);
0237     else
0238         return nand_change_read_column_op(chip, offset, buf, len,
0239                           false);
0240 }
0241 
/*
 * Transfer the payload (main data) of a raw page.
 *
 * On the flash, the page is laid out as ecc->steps interleaved chunks of
 * (ecc->size data + ecc->bytes ECC), with the oob_skip_bytes BBM area
 * inserted at the start of the spare area (offset writesize).  This walks
 * the data chunks only, splitting any chunk that straddles the BBM area.
 */
static int denali_payload_xfer(struct nand_chip *chip, void *buf, bool write)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int writesize = mtd->writesize;
	int oob_skip = denali->oob_skip_bytes;
	int ret, i, pos, len;

	for (i = 0; i < ecc->steps; i++) {
		/* on-flash position of this data chunk */
		pos = i * (ecc->size + ecc->bytes);
		len = ecc->size;

		if (pos >= writesize) {
			/* the whole chunk sits past the BBM area */
			pos += oob_skip;
		} else if (pos + len > writesize) {
			/* This chunk overwraps the BBM area. Must be split */
			ret = denali_change_column(chip, pos, buf,
						   writesize - pos, write);
			if (ret)
				return ret;

			buf += writesize - pos;
			len -= writesize - pos;
			pos = writesize + oob_skip;
		}

		ret = denali_change_column(chip, pos, buf, len, write);
		if (ret)
			return ret;

		buf += len;
	}

	return 0;
}
0278 
/*
 * Transfer the OOB of a raw page: first the BBM area at the start of the
 * spare region, then each chunk's ECC bytes (the last chunk also carries
 * the OOB free space).  Chunks straddling the BBM area are split, as in
 * denali_payload_xfer().
 */
static int denali_oob_xfer(struct nand_chip *chip, void *buf, bool write)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int oob_skip = denali->oob_skip_bytes;
	int ret, i, pos, len;

	/* BBM at the beginning of the OOB area */
	ret = denali_change_column(chip, writesize, buf, oob_skip, write);
	if (ret)
		return ret;

	buf += oob_skip;

	for (i = 0; i < ecc->steps; i++) {
		/* on-flash position of this chunk's ECC bytes */
		pos = ecc->size + i * (ecc->size + ecc->bytes);

		if (i == ecc->steps - 1)
			/* The last chunk includes OOB free */
			len = writesize + oobsize - pos - oob_skip;
		else
			len = ecc->bytes;

		if (pos >= writesize) {
			pos += oob_skip;
		} else if (pos + len > writesize) {
			/* This chunk overwraps the BBM area. Must be split */
			ret = denali_change_column(chip, pos, buf,
						   writesize - pos, write);
			if (ret)
				return ret;

			buf += writesize - pos;
			len -= writesize - pos;
			pos = writesize + oob_skip;
		}

		ret = denali_change_column(chip, pos, buf, len, write);
		if (ret)
			return ret;

		buf += len;
	}

	return 0;
}
0328 
0329 static int denali_read_raw(struct nand_chip *chip, void *buf, void *oob_buf,
0330                int page)
0331 {
0332     int ret;
0333 
0334     if (!buf && !oob_buf)
0335         return -EINVAL;
0336 
0337     ret = nand_read_page_op(chip, page, 0, NULL, 0);
0338     if (ret)
0339         return ret;
0340 
0341     if (buf) {
0342         ret = denali_payload_xfer(chip, buf, false);
0343         if (ret)
0344             return ret;
0345     }
0346 
0347     if (oob_buf) {
0348         ret = denali_oob_xfer(chip, oob_buf, false);
0349         if (ret)
0350             return ret;
0351     }
0352 
0353     return 0;
0354 }
0355 
0356 static int denali_write_raw(struct nand_chip *chip, const void *buf,
0357                 const void *oob_buf, int page)
0358 {
0359     int ret;
0360 
0361     if (!buf && !oob_buf)
0362         return -EINVAL;
0363 
0364     ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
0365     if (ret)
0366         return ret;
0367 
0368     if (buf) {
0369         ret = denali_payload_xfer(chip, (void *)buf, true);
0370         if (ret)
0371             return ret;
0372     }
0373 
0374     if (oob_buf) {
0375         ret = denali_oob_xfer(chip, (void *)oob_buf, true);
0376         if (ret)
0377             return ret;
0378     }
0379 
0380     return nand_prog_page_end_op(chip);
0381 }
0382 
0383 static int denali_read_page_raw(struct nand_chip *chip, u8 *buf,
0384                 int oob_required, int page)
0385 {
0386     return denali_read_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
0387                    page);
0388 }
0389 
0390 static int denali_write_page_raw(struct nand_chip *chip, const u8 *buf,
0391                  int oob_required, int page)
0392 {
0393     return denali_write_raw(chip, buf, oob_required ? chip->oob_poi : NULL,
0394                 page);
0395 }
0396 
/* ->read_oob() hook: raw read of the OOB area only. */
static int denali_read_oob(struct nand_chip *chip, int page)
{
	return denali_read_raw(chip, NULL, chip->oob_poi, page);
}
0401 
/* ->write_oob() hook: raw write of the OOB area only. */
static int denali_write_oob(struct nand_chip *chip, int page)
{
	return denali_write_raw(chip, NULL, chip->oob_poi, page);
}
0406 
/*
 * Re-check sectors flagged as uncorrectable: an erased (all-0xff) page
 * also fails hardware ECC, so run nand_check_erased_ecc_chunk() on each
 * flagged sector, updating the ECC stats accordingly.
 *
 * Returns the (possibly increased) maximum bitflip count.
 */
static int denali_check_erased_page(struct nand_chip *chip, u8 *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	/* ECC bytes start right after the BBM area in oob_poi */
	u8 *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
	int i, stat;

	for (i = 0; i < ecc->steps; i++) {
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc->size, ecc_code,
						   ecc->bytes, NULL, 0,
						   ecc->strength);
		if (stat < 0) {
			/* genuinely uncorrectable, not an erased sector */
			ecc_stats->failed++;
		} else {
			ecc_stats->corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc->size;
		ecc_code += ecc->bytes;
	}

	return max_bitflips;
}
0437 
/*
 * Collect the result of the hardware ECC fixup engine for the active
 * bank.  On an uncorrectable error, flag every sector in
 * *uncor_ecc_flags for a later erased-page check and return 0;
 * otherwise return the per-sector maximum bitflip count reported by
 * the hardware.
 */
static int denali_hw_ecc_fixup(struct nand_chip *chip,
			       unsigned long *uncor_ecc_flags)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
	int bank = denali->active_bank;
	u32 ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector.  We can not know "how many sectors", or
		 * "which sector(s)".  We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page.  Increase the stats by max_bitflips. (compromised solution)
	 */
	ecc_stats->corrected += max_bitflips;

	return max_bitflips;
}
0472 
/*
 * Walk the controller's ECC error log and correct the reported bitflips
 * in @buf in software.  Sectors with uncorrectable errors are flagged in
 * *uncor_ecc_flags for a later erased-page check.
 *
 * Returns the maximum number of bitflips seen in any single ECC sector,
 * or -EIO if the controller never signals ECC_TRANSACTION_DONE.
 */
static int denali_sw_ecc_fixup(struct nand_chip *chip,
			       unsigned long *uncor_ecc_flags, u8 *buf)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_ecc_stats *ecc_stats = &nand_to_mtd(chip)->ecc_stats;
	unsigned int ecc_size = chip->ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	u32 err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	u8 err_cor_value;
	unsigned int prev_sector = 0;
	u32 irq_status;

	denali_reset_irq(denali);

	do {
		/* location of the error: sector index and byte offset */
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		/* correction byte, device index, and status flags */
		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * err_byte >= ecc_size means the error is in the
			 * OOB (ECC parity) bytes, which need no correction
			 * in the data buffer, so it is ignored.  err_device
			 * identifies which NAND device holds the error when
			 * more than one device is connected per chip select.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			ecc_stats->corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once handle all ECC errors, controller will trigger an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
0546 
/*
 * Program and kick a MAP10 DMA transfer on controllers with a 64-bit
 * DMA engine: one setup command followed by the low and high halves of
 * the buffer address.
 */
static void denali_setup_dma64(struct denali_controller *denali,
			       dma_addr_t dma_addr, int page, bool write)
{
	u32 mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) |
			   (write ? BIT(8) : 0) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
0571 
/*
 * Program and kick a MAP10 DMA transfer on controllers with a 32-bit
 * DMA engine.  The buffer address is passed 16 bits at a time, packed
 * into bits [23:8] of the command word.
 */
static void denali_setup_dma32(struct denali_controller *denali,
			       dma_addr_t dma_addr, int page, bool write)
{
	u32 mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write ? BIT(8) : 0) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
0595 
/*
 * Read one ECC-protected page in PIO mode through the MAP01 window,
 * 32 bits at a time.  Returns 0 on success, -EIO if the transfer never
 * completes, or -EBADMSG on an ECC error (uncorrectable errors only
 * when the hardware fixup engine is present, any ECC error otherwise).
 */
static int denali_pio_read(struct denali_controller *denali, u32 *buf,
			   size_t size, int page)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	u32 irq_status, ecc_err_mask;
	int i;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		buf[i] = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	/* the controller flags erased pages; present them as all 0xff */
	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
0622 
/*
 * Write one page in PIO mode through the MAP01 window, 32 bits at a
 * time.  Returns 0 on success or -EIO if the program operation does
 * not complete.
 */
static int denali_pio_write(struct denali_controller *denali, const u32 *buf,
			    size_t size, int page)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	u32 irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, buf[i]);

	irq_status = denali_wait_for_irq(denali,
					 INTR__PROGRAM_COMP |
					 INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}
0643 
0644 static int denali_pio_xfer(struct denali_controller *denali, void *buf,
0645                size_t size, int page, bool write)
0646 {
0647     if (write)
0648         return denali_pio_write(denali, buf, size, page);
0649     else
0650         return denali_pio_read(denali, buf, size, page);
0651 }
0652 
/*
 * Transfer one page by DMA, falling back to PIO if the buffer cannot be
 * DMA-mapped.  Returns 0 on success, -EIO if the DMA command does not
 * complete, or -EBADMSG on an ECC error during a read.
 */
static int denali_dma_xfer(struct denali_controller *denali, void *buf,
			   size_t size, int page, bool write)
{
	dma_addr_t dma_addr;
	u32 irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead.  This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
	/*
	 * The ->setup_dma() hook kicks DMA by using the data/command
	 * interface, which belongs to a different AXI port from the
	 * register interface.  Read back the register to avoid a race.
	 */
	ioread32(denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	/* the controller flags erased pages; present them as all 0xff */
	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
0709 
0710 static int denali_page_xfer(struct nand_chip *chip, void *buf, size_t size,
0711                 int page, bool write)
0712 {
0713     struct denali_controller *denali = to_denali_controller(chip);
0714 
0715     denali_select_target(chip, chip->cur_cs);
0716 
0717     if (denali->dma_avail)
0718         return denali_dma_xfer(denali, buf, size, page, write);
0719     else
0720         return denali_pio_xfer(denali, buf, size, page, write);
0721 }
0722 
/*
 * ->read_page() hook: ECC-protected page read.  Correctable errors are
 * fixed by hardware or by denali_sw_ecc_fixup(); sectors reported as
 * uncorrectable are re-checked for the erased-page case.  Returns the
 * maximum bitflip count, or a negative error code.
 */
static int denali_read_page(struct nand_chip *chip, u8 *buf,
			    int oob_required, int page)
{
	struct denali_controller *denali = to_denali_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	ret = denali_page_xfer(chip, buf, mtd->writesize, page, false);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(chip, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(chip, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/* fetch the OOB so the erased-page check can see the ECC bytes */
		ret = denali_read_oob(chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
0755 
0756 static int denali_write_page(struct nand_chip *chip, const u8 *buf,
0757                  int oob_required, int page)
0758 {
0759     struct mtd_info *mtd = nand_to_mtd(chip);
0760 
0761     return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true);
0762 }
0763 
/*
 * ->setup_interface() hook: translate the SDR timings requested by the
 * core into controller register values, clamped to each field's width.
 * The values are only cached in the per-CS denali_chip_sel here;
 * denali_select_target() writes them to the hardware.
 */
static int denali_setup_interface(struct nand_chip *chip, int chipnr,
				  const struct nand_interface_config *conf)
{
	/* extra data setup margin (ps) for board/SoC propagation delays */
	static const unsigned int data_setup_on_host = 10000;
	struct denali_controller *denali = to_denali_controller(chip);
	struct denali_chip_sel *sel;
	const struct nand_sdr_timings *timings;
	unsigned long t_x, mult_x;
	int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
	int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
	int addr_2_data_mask;
	u32 tmp;

	timings = nand_get_sdr_timings(conf);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	/* clk_x period in picoseconds */
	t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
	if (!t_x)
		return -EINVAL;

	/*
	 * The bus interface clock, clk_x, is phase aligned with the core clock.
	 * The clk_x is an integral multiple N of the core clk.  The value N is
	 * configured at IP delivery time, and its available value is 4, 5, 6.
	 */
	mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
	if (mult_x < 4 || mult_x > 6)
		return -EINVAL;

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;

	sel = &to_denali_chip(chip)->sels[chipnr];

	/* tRHW -> RE_2_WE */
	re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
	re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);

	tmp = ioread32(denali->reg + RE_2_WE);
	tmp &= ~RE_2_WE__VALUE;
	tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
	sel->re_2_we = tmp;

	/* tRHZ -> RE_2_RE */
	re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
	re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);

	tmp = ioread32(denali->reg + RE_2_RE);
	tmp &= ~RE_2_RE__VALUE;
	tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
	sel->re_2_re = tmp;

	/*
	 * tCCS, tWHR -> WE_2_RE
	 *
	 * With WE_2_RE properly set, the Denali controller automatically takes
	 * care of the delay; the driver need not set NAND_WAIT_TCCS.
	 */
	we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
	we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);

	tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
	tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
	tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
	sel->hwhr2_and_we_2_re = tmp;

	/* tADL -> ADDR_2_DATA */

	/* for older versions, ADDR_2_DATA is only 6 bit wide */
	addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	if (denali->revision < 0x0501)
		addr_2_data_mask >>= 1;

	addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
	addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);

	tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
	tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
	tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
	sel->tcwaw_and_addr_2_data = tmp;

	/* tREH, tWH -> RDWR_EN_HI_CNT */
	rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
				  t_x);
	rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);

	tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
	tmp &= ~RDWR_EN_HI_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
	sel->rdwr_en_hi_cnt = tmp;

	/*
	 * tREA -> ACC_CLKS
	 * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT
	 */

	/*
	 * Determine the minimum of acc_clks to meet the setup timing when
	 * capturing the incoming data.
	 *
	 * The delay on the chip side is well-defined as tREA, but we need to
	 * take additional delay into account. This includes a certain degree
	 * of uncertainty, such as signal propagation delays on the PCB and
	 * in the SoC, load capacity of the I/O pins, etc.
	 */
	acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x);

	/* Determine the minimum of rdwr_en_lo_cnt from RE#/WE# pulse width */
	rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);

	/* Extend rdwr_en_lo to meet the data hold timing */
	rdwr_en_lo = max_t(int, rdwr_en_lo,
			   acc_clks - timings->tRHOH_min / t_x);

	/* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */
	rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
				     t_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);

	/* Center the data latch timing for extra safety */
	acc_clks = (acc_clks + rdwr_en_lo +
		    DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2;
	acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);

	tmp = ioread32(denali->reg + ACC_CLKS);
	tmp &= ~ACC_CLKS__VALUE;
	tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
	sel->acc_clks = tmp;

	tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
	tmp &= ~RDWR_EN_LO_CNT__VALUE;
	tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
	sel->rdwr_en_lo_cnt = tmp;

	/* tCS, tCEA -> CS_SETUP_CNT */
	cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
			(int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
			0);
	cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);

	tmp = ioread32(denali->reg + CS_SETUP_CNT);
	tmp &= ~CS_SETUP_CNT__VALUE;
	tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
	sel->cs_setup_cnt = tmp;

	return 0;
}
0914 
/*
 * denali_calc_ecc_bytes() - ECC bytes needed per ECC step for a BCH code.
 * @step_size: ECC step size in bytes
 * @strength: number of correctable bits per step
 *
 * The Denali hardware requires ecc.bytes to be a multiple of 2, so the
 * bit total is rounded up to whole 16-bit words.
 */
int denali_calc_ecc_bytes(int step_size, int strength)
{
    /* each correctable bit costs fls(step_size * 8) parity bits */
    int ecc_bits = strength * fls(step_size * 8);

    return DIV_ROUND_UP(ecc_bits, 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
0921 
0922 static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
0923                 struct mtd_oob_region *oobregion)
0924 {
0925     struct nand_chip *chip = mtd_to_nand(mtd);
0926     struct denali_controller *denali = to_denali_controller(chip);
0927 
0928     if (section > 0)
0929         return -ERANGE;
0930 
0931     oobregion->offset = denali->oob_skip_bytes;
0932     oobregion->length = chip->ecc.total;
0933 
0934     return 0;
0935 }
0936 
0937 static int denali_ooblayout_free(struct mtd_info *mtd, int section,
0938                  struct mtd_oob_region *oobregion)
0939 {
0940     struct nand_chip *chip = mtd_to_nand(mtd);
0941     struct denali_controller *denali = to_denali_controller(chip);
0942 
0943     if (section > 0)
0944         return -ERANGE;
0945 
0946     oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
0947     oobregion->length = mtd->oobsize - oobregion->offset;
0948 
0949     return 0;
0950 }
0951 
/* OOB layout exposed to the MTD core: [skip bytes][ECC][free]. */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
    .ecc = denali_ooblayout_ecc,
    .free = denali_ooblayout_free,
};
0956 
0957 static int denali_multidev_fixup(struct nand_chip *chip)
0958 {
0959     struct denali_controller *denali = to_denali_controller(chip);
0960     struct mtd_info *mtd = nand_to_mtd(chip);
0961     struct nand_memory_organization *memorg;
0962 
0963     memorg = nanddev_get_memorg(&chip->base);
0964 
0965     /*
0966      * Support for multi device:
0967      * When the IP configuration is x16 capable and two x8 chips are
0968      * connected in parallel, DEVICES_CONNECTED should be set to 2.
0969      * In this case, the core framework knows nothing about this fact,
0970      * so we should tell it the _logical_ pagesize and anything necessary.
0971      */
0972     denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);
0973 
0974     /*
0975      * On some SoCs, DEVICES_CONNECTED is not auto-detected.
0976      * For those, DEVICES_CONNECTED is left to 0.  Set 1 if it is the case.
0977      */
0978     if (denali->devs_per_cs == 0) {
0979         denali->devs_per_cs = 1;
0980         iowrite32(1, denali->reg + DEVICES_CONNECTED);
0981     }
0982 
0983     if (denali->devs_per_cs == 1)
0984         return 0;
0985 
0986     if (denali->devs_per_cs != 2) {
0987         dev_err(denali->dev, "unsupported number of devices %d\n",
0988             denali->devs_per_cs);
0989         return -EINVAL;
0990     }
0991 
0992     /* 2 chips in parallel */
0993     memorg->pagesize <<= 1;
0994     memorg->oobsize <<= 1;
0995     mtd->size <<= 1;
0996     mtd->erasesize <<= 1;
0997     mtd->writesize <<= 1;
0998     mtd->oobsize <<= 1;
0999     chip->page_shift += 1;
1000     chip->phys_erase_shift += 1;
1001     chip->bbt_erase_shift += 1;
1002     chip->chip_shift += 1;
1003     chip->pagemask <<= 1;
1004     chip->ecc.size <<= 1;
1005     chip->ecc.bytes <<= 1;
1006     chip->ecc.strength <<= 1;
1007     denali->oob_skip_bytes <<= 1;
1008 
1009     return 0;
1010 }
1011 
1012 static int denali_attach_chip(struct nand_chip *chip)
1013 {
1014     struct denali_controller *denali = to_denali_controller(chip);
1015     struct mtd_info *mtd = nand_to_mtd(chip);
1016     int ret;
1017 
1018     ret = nand_ecc_choose_conf(chip, denali->ecc_caps,
1019                    mtd->oobsize - denali->oob_skip_bytes);
1020     if (ret) {
1021         dev_err(denali->dev, "Failed to setup ECC settings.\n");
1022         return ret;
1023     }
1024 
1025     dev_dbg(denali->dev,
1026         "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
1027         chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
1028 
1029     ret = denali_multidev_fixup(chip);
1030     if (ret)
1031         return ret;
1032 
1033     return 0;
1034 }
1035 
1036 static void denali_exec_in8(struct denali_controller *denali, u32 type,
1037                 u8 *buf, unsigned int len)
1038 {
1039     int i;
1040 
1041     for (i = 0; i < len; i++)
1042         buf[i] = denali->host_read(denali, type | DENALI_BANK(denali));
1043 }
1044 
1045 static void denali_exec_in16(struct denali_controller *denali, u32 type,
1046                  u8 *buf, unsigned int len)
1047 {
1048     u32 data;
1049     int i;
1050 
1051     for (i = 0; i < len; i += 2) {
1052         data = denali->host_read(denali, type | DENALI_BANK(denali));
1053         /* bit 31:24 and 15:8 are used for DDR */
1054         buf[i] = data;
1055         buf[i + 1] = data >> 16;
1056     }
1057 }
1058 
1059 static void denali_exec_in(struct denali_controller *denali, u32 type,
1060                u8 *buf, unsigned int len, bool width16)
1061 {
1062     if (width16)
1063         denali_exec_in16(denali, type, buf, len);
1064     else
1065         denali_exec_in8(denali, type, buf, len);
1066 }
1067 
1068 static void denali_exec_out8(struct denali_controller *denali, u32 type,
1069                  const u8 *buf, unsigned int len)
1070 {
1071     int i;
1072 
1073     for (i = 0; i < len; i++)
1074         denali->host_write(denali, type | DENALI_BANK(denali), buf[i]);
1075 }
1076 
1077 static void denali_exec_out16(struct denali_controller *denali, u32 type,
1078                   const u8 *buf, unsigned int len)
1079 {
1080     int i;
1081 
1082     for (i = 0; i < len; i += 2)
1083         denali->host_write(denali, type | DENALI_BANK(denali),
1084                    buf[i + 1] << 16 | buf[i]);
1085 }
1086 
1087 static void denali_exec_out(struct denali_controller *denali, u32 type,
1088                 const u8 *buf, unsigned int len, bool width16)
1089 {
1090     if (width16)
1091         denali_exec_out16(denali, type, buf, len);
1092     else
1093         denali_exec_out8(denali, type, buf, len);
1094 }
1095 
1096 static int denali_exec_waitrdy(struct denali_controller *denali)
1097 {
1098     u32 irq_stat;
1099 
1100     /* R/B# pin transitioned from low to high? */
1101     irq_stat = denali_wait_for_irq(denali, INTR__INT_ACT);
1102 
1103     /* Just in case nand_operation has multiple NAND_OP_WAITRDY_INSTR. */
1104     denali_reset_irq(denali);
1105 
1106     return irq_stat & INTR__INT_ACT ? 0 : -EIO;
1107 }
1108 
1109 static int denali_exec_instr(struct nand_chip *chip,
1110                  const struct nand_op_instr *instr)
1111 {
1112     struct denali_controller *denali = to_denali_controller(chip);
1113 
1114     switch (instr->type) {
1115     case NAND_OP_CMD_INSTR:
1116         denali_exec_out8(denali, DENALI_MAP11_CMD,
1117                  &instr->ctx.cmd.opcode, 1);
1118         return 0;
1119     case NAND_OP_ADDR_INSTR:
1120         denali_exec_out8(denali, DENALI_MAP11_ADDR,
1121                  instr->ctx.addr.addrs,
1122                  instr->ctx.addr.naddrs);
1123         return 0;
1124     case NAND_OP_DATA_IN_INSTR:
1125         denali_exec_in(denali, DENALI_MAP11_DATA,
1126                    instr->ctx.data.buf.in,
1127                    instr->ctx.data.len,
1128                    !instr->ctx.data.force_8bit &&
1129                    chip->options & NAND_BUSWIDTH_16);
1130         return 0;
1131     case NAND_OP_DATA_OUT_INSTR:
1132         denali_exec_out(denali, DENALI_MAP11_DATA,
1133                 instr->ctx.data.buf.out,
1134                 instr->ctx.data.len,
1135                 !instr->ctx.data.force_8bit &&
1136                 chip->options & NAND_BUSWIDTH_16);
1137         return 0;
1138     case NAND_OP_WAITRDY_INSTR:
1139         return denali_exec_waitrdy(denali);
1140     default:
1141         WARN_ONCE(1, "unsupported NAND instruction type: %d\n",
1142               instr->type);
1143 
1144         return -EINVAL;
1145     }
1146 }
1147 
1148 static int denali_exec_op(struct nand_chip *chip,
1149               const struct nand_operation *op, bool check_only)
1150 {
1151     int i, ret;
1152 
1153     if (check_only)
1154         return 0;
1155 
1156     denali_select_target(chip, op->cs);
1157 
1158     /*
1159      * Some commands contain NAND_OP_WAITRDY_INSTR.
1160      * irq must be cleared here to catch the R/B# interrupt there.
1161      */
1162     denali_reset_irq(to_denali_controller(chip));
1163 
1164     for (i = 0; i < op->ninstrs; i++) {
1165         ret = denali_exec_instr(chip, &op->instrs[i]);
1166         if (ret)
1167             return ret;
1168     }
1169 
1170     return 0;
1171 }
1172 
/* Hooks the Denali implementations into the raw NAND core. */
static const struct nand_controller_ops denali_controller_ops = {
    .attach_chip = denali_attach_chip,
    .exec_op = denali_exec_op,
    .setup_interface = denali_setup_interface,
};
1178 
1179 int denali_chip_init(struct denali_controller *denali,
1180              struct denali_chip *dchip)
1181 {
1182     struct nand_chip *chip = &dchip->chip;
1183     struct mtd_info *mtd = nand_to_mtd(chip);
1184     struct denali_chip *dchip2;
1185     int i, j, ret;
1186 
1187     chip->controller = &denali->controller;
1188 
1189     /* sanity checks for bank numbers */
1190     for (i = 0; i < dchip->nsels; i++) {
1191         unsigned int bank = dchip->sels[i].bank;
1192 
1193         if (bank >= denali->nbanks) {
1194             dev_err(denali->dev, "unsupported bank %d\n", bank);
1195             return -EINVAL;
1196         }
1197 
1198         for (j = 0; j < i; j++) {
1199             if (bank == dchip->sels[j].bank) {
1200                 dev_err(denali->dev,
1201                     "bank %d is assigned twice in the same chip\n",
1202                     bank);
1203                 return -EINVAL;
1204             }
1205         }
1206 
1207         list_for_each_entry(dchip2, &denali->chips, node) {
1208             for (j = 0; j < dchip2->nsels; j++) {
1209                 if (bank == dchip2->sels[j].bank) {
1210                     dev_err(denali->dev,
1211                         "bank %d is already used\n",
1212                         bank);
1213                     return -EINVAL;
1214                 }
1215             }
1216         }
1217     }
1218 
1219     mtd->dev.parent = denali->dev;
1220 
1221     /*
1222      * Fallback to the default name if DT did not give "label" property.
1223      * Use "label" property if multiple chips are connected.
1224      */
1225     if (!mtd->name && list_empty(&denali->chips))
1226         mtd->name = "denali-nand";
1227 
1228     if (denali->dma_avail) {
1229         chip->options |= NAND_USES_DMA;
1230         chip->buf_align = 16;
1231     }
1232 
1233     /* clk rate info is needed for setup_interface */
1234     if (!denali->clk_rate || !denali->clk_x_rate)
1235         chip->options |= NAND_KEEP_TIMINGS;
1236 
1237     chip->bbt_options |= NAND_BBT_USE_FLASH;
1238     chip->bbt_options |= NAND_BBT_NO_OOB;
1239     chip->options |= NAND_NO_SUBPAGE_WRITE;
1240     chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
1241     chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
1242     chip->ecc.read_page = denali_read_page;
1243     chip->ecc.write_page = denali_write_page;
1244     chip->ecc.read_page_raw = denali_read_page_raw;
1245     chip->ecc.write_page_raw = denali_write_page_raw;
1246     chip->ecc.read_oob = denali_read_oob;
1247     chip->ecc.write_oob = denali_write_oob;
1248 
1249     mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
1250 
1251     ret = nand_scan(chip, dchip->nsels);
1252     if (ret)
1253         return ret;
1254 
1255     ret = mtd_device_register(mtd, NULL, 0);
1256     if (ret) {
1257         dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
1258         goto cleanup_nand;
1259     }
1260 
1261     list_add_tail(&dchip->node, &denali->chips);
1262 
1263     return 0;
1264 
1265 cleanup_nand:
1266     nand_cleanup(chip);
1267 
1268     return ret;
1269 }
1270 EXPORT_SYMBOL_GPL(denali_chip_init);
1271 
/**
 * denali_init() - one-time bring-up of a Denali NAND controller
 * @denali: controller instance with reg/host iomem and dev already set
 *
 * Probes the FEATURES and REVISION registers, selects DMA vs PIO and
 * direct vs indexed host access, programs the static configuration
 * registers, and installs the interrupt handler.
 *
 * Return: 0 on success, or a negative error code from devm_request_irq().
 */
int denali_init(struct denali_controller *denali)
{
    u32 features = ioread32(denali->reg + FEATURES);
    int ret;

    nand_controller_init(&denali->controller);
    denali->controller.ops = &denali_controller_ops;
    init_completion(&denali->complete);
    spin_lock_init(&denali->irq_lock);
    INIT_LIST_HEAD(&denali->chips);
    denali->active_bank = DENALI_INVALID_BANK;

    /*
     * The REVISION register may not be reliable. Platforms are allowed to
     * override it.
     */
    if (!denali->revision)
        denali->revision = swab16(ioread32(denali->reg + REVISION));

    /* FEATURES__N_BANKS holds log2 of the bank count */
    denali->nbanks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

    /* the encoding changed from rev 5.0 to 5.1 */
    if (denali->revision < 0x0501)
        denali->nbanks <<= 1;

    if (features & FEATURES__DMA)
        denali->dma_avail = true;

    if (denali->dma_avail) {
        /* 64-bit DMA is a platform capability flag, not in FEATURES */
        int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 64 : 32;

        ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit));
        if (ret) {
            /* fall back to PIO rather than failing the probe */
            dev_info(denali->dev,
                 "Failed to set DMA mask. Disabling DMA.\n");
            denali->dma_avail = false;
        }
    }

    if (denali->dma_avail) {
        if (denali->caps & DENALI_CAP_DMA_64BIT)
            denali->setup_dma = denali_setup_dma64;
        else
            denali->setup_dma = denali_setup_dma32;
    }

    /* pick host accessors matching the IP's addressing mode */
    if (features & FEATURES__INDEX_ADDR) {
        denali->host_read = denali_indexed_read;
        denali->host_write = denali_indexed_write;
    } else {
        denali->host_read = denali_direct_read;
        denali->host_write = denali_direct_write;
    }

    /*
     * Set how many bytes should be skipped before writing data in OOB.
     * If a platform requests a non-zero value, set it to the register.
     * Otherwise, read the value out, expecting it has already been set up
     * by firmware.
     */
    if (denali->oob_skip_bytes)
        iowrite32(denali->oob_skip_bytes,
              denali->reg + SPARE_AREA_SKIP_BYTES);
    else
        denali->oob_skip_bytes = ioread32(denali->reg +
                          SPARE_AREA_SKIP_BYTES);

    /*
     * Static controller configuration: enable R/B# pins for all banks,
     * turn on hardware ECC, and program WRITE_PROTECT (presumably
     * de-asserting WP for normal operation — confirm against datasheet).
     */
    iowrite32(0, denali->reg + TRANSFER_SPARE_REG);
    iowrite32(GENMASK(denali->nbanks - 1, 0), denali->reg + RB_PIN_ENABLED);
    iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
    iowrite32(ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
    iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
    iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);

    /* drop any interrupt latched before the handler is installed */
    denali_clear_irq_all(denali);

    ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
                   IRQF_SHARED, DENALI_NAND_NAME, denali);
    if (ret) {
        dev_err(denali->dev, "Unable to request IRQ\n");
        return ret;
    }

    denali_enable_irq(denali);

    return 0;
}
EXPORT_SYMBOL(denali_init);
1360 
1361 void denali_remove(struct denali_controller *denali)
1362 {
1363     struct denali_chip *dchip, *tmp;
1364     struct nand_chip *chip;
1365     int ret;
1366 
1367     list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
1368         chip = &dchip->chip;
1369         ret = mtd_device_unregister(nand_to_mtd(chip));
1370         WARN_ON(ret);
1371         nand_cleanup(chip);
1372         list_del(&dchip->node);
1373     }
1374 
1375     denali_disable_irq(denali);
1376 }
1377 EXPORT_SYMBOL(denali_remove);
1378 
1379 MODULE_DESCRIPTION("Driver core for Denali NAND controller");
1380 MODULE_AUTHOR("Intel Corporation and its suppliers");
1381 MODULE_LICENSE("GPL v2");