/*
 * Qualcomm NAND controller driver.
 */
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/dma/qcom_adm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>

/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
#define NAND_ADDR0 0x04
#define NAND_ADDR1 0x08
#define NAND_FLASH_CHIP_SELECT 0x0c
#define NAND_EXEC_CMD 0x10
#define NAND_FLASH_STATUS 0x14
#define NAND_BUFFER_STATUS 0x18
#define NAND_DEV0_CFG0 0x20
#define NAND_DEV0_CFG1 0x24
#define NAND_DEV0_ECC_CFG 0x28
#define NAND_AUTO_STATUS_EN 0x2c
#define NAND_DEV1_CFG0 0x30
#define NAND_DEV1_CFG1 0x34
#define NAND_READ_ID 0x40
#define NAND_READ_STATUS 0x44
#define NAND_DEV_CMD0 0xa0
#define NAND_DEV_CMD1 0xa4
#define NAND_DEV_CMD2 0xa8
#define NAND_DEV_CMD_VLD 0xac
#define SFLASHC_BURST_CFG 0xe0
#define NAND_ERASED_CW_DETECT_CFG 0xe8
#define NAND_ERASED_CW_DETECT_STATUS 0xec
#define NAND_EBI2_ECC_BUF_CFG 0xf0
#define FLASH_BUF_ACC 0x100

#define NAND_CTRL 0xf00
#define NAND_VERSION 0xf08
#define NAND_READ_LOCATION_0 0xf20
#define NAND_READ_LOCATION_1 0xf24
#define NAND_READ_LOCATION_2 0xf28
#define NAND_READ_LOCATION_3 0xf2c
#define NAND_READ_LOCATION_LAST_CW_0 0xf40
#define NAND_READ_LOCATION_LAST_CW_1 0xf44
#define NAND_READ_LOCATION_LAST_CW_2 0xf48
#define NAND_READ_LOCATION_LAST_CW_3 0xf4c

/* dummy register offsets, used by write_reg_dma */
#define NAND_DEV_CMD1_RESTORE 0xdead
#define NAND_DEV_CMD_VLD_RESTORE 0xbeef

/* NAND_FLASH_CMD bits */
#define PAGE_ACC BIT(4)
#define LAST_PAGE BIT(5)

/* NAND_FLASH_CHIP_SELECT bits */
#define NAND_DEV_SEL 0
#define DM_EN BIT(2)

/* NAND_FLASH_STATUS bits */
#define FS_OP_ERR BIT(4)
#define FS_READY_BSY_N BIT(5)
#define FS_MPU_ERR BIT(8)
#define FS_DEVICE_STS_ERR BIT(16)
#define FS_DEVICE_WP BIT(23)

/* NAND_BUFFER_STATUS bits */
#define BS_UNCORRECTABLE_BIT BIT(8)
#define BS_CORRECTABLE_ERR_MSK 0x1f

/* NAND_DEVn_CFG0 bits */
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
#define ECC_PARITY_SIZE_BYTES_RS 19
#define SPARE_SIZE_BYTES 23
#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
#define NUM_ADDR_CYCLES 27
#define STATUS_BFR_READ 30
#define SET_RD_MODE_AFTER_STATUS 31

/* NAND_DEVn_CFG1 bits */
#define DEV0_CFG1_ECC_DISABLE 0
#define WIDE_FLASH 1
#define NAND_RECOVERY_CYCLES 2
#define CS_ACTIVE_BSY 5
#define BAD_BLOCK_BYTE_NUM 6
#define BAD_BLOCK_IN_SPARE_AREA 16
#define WR_RD_BSY_GAP 17
#define ENABLE_BCH_ECC 27

/* NAND_DEV0_ECC_CFG bits */
#define ECC_CFG_ECC_DISABLE 0
#define ECC_SW_RESET 1
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES_BCH 8
#define ECC_NUM_DATA_BYTES 16
#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
#define ECC_FORCE_CLK_OPEN 30

/* NAND_DEV_CMD1 bits */
#define READ_ADDR 0

/* NAND_DEV_CMD_VLD bits */
#define READ_START_VLD BIT(0)
#define READ_STOP_VLD BIT(1)
#define WRITE_START_VLD BIT(2)
#define ERASE_START_VLD BIT(3)
#define SEQ_READ_START_VLD BIT(4)

/* NAND_EBI2_ECC_BUF_CFG bits */
#define NUM_STEPS 0

/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK 1
#define AUTO_DETECT_RES 0
#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)

/* NAND_ERASED_CW_DETECT_STATUS bits */
#define PAGE_ALL_ERASED BIT(7)
#define CODEWORD_ALL_ERASED BIT(6)
#define PAGE_ERASED BIT(5)
#define CODEWORD_ERASED BIT(4)
#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)

/* NAND_READ_LOCATION_n bits */
#define READ_LOCATION_OFFSET 0
#define READ_LOCATION_SIZE 16
#define READ_LOCATION_LAST 31

/* NAND_VERSION bits */
#define NAND_VERSION_MAJOR_MASK 0xf0000000
#define NAND_VERSION_MAJOR_SHIFT 28
#define NAND_VERSION_MINOR_MASK 0x0fff0000
#define NAND_VERSION_MINOR_SHIFT 16

/* NAND OP_CMDs */
#define OP_PAGE_READ 0x2
#define OP_PAGE_READ_WITH_ECC 0x3
#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
#define OP_PAGE_READ_ONFI_READ 0x5
#define OP_PROGRAM_PAGE 0x6
#define OP_PAGE_PROGRAM_WITH_ECC 0x7
#define OP_PROGRAM_PAGE_SPARE 0x9
#define OP_BLOCK_ERASE 0xa
#define OP_FETCH_ID 0xb
#define OP_RESET_DEVICE 0xd

/* Default Value for NAND_DEV_CMD_VLD */
#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
			      ERASE_START_VLD | SEQ_READ_START_VLD)

/* NAND_CTRL bits */
#define BAM_MODE_EN BIT(0)

/*
 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
 * the driver calls the chunks 'step' or 'codeword' for both SLC and MLC
 */
#define NANDC_STEP_SIZE 512

/*
 * the largest page size we support is 8K, this will have 16 steps/codewords
 * of 512 bytes each
 */
#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)

/* we read at most 3 registers per codeword scan */
#define MAX_REG_RD (3 * MAX_NUM_STEPS)

/* ECC modes supported by the controller */
#define ECC_NONE BIT(0)
#define ECC_RS_4BIT BIT(1)
#define ECC_BCH_4BIT BIT(2)
#define ECC_BCH_8BIT BIT(3)
#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
nandc_set_reg(chip, reg, \
	      ((cw_offset) << READ_LOCATION_OFFSET) | \
	      ((read_size) << READ_LOCATION_SIZE) | \
	      ((is_last_read_loc) << READ_LOCATION_LAST))

#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
nandc_set_reg(chip, reg, \
	      ((cw_offset) << READ_LOCATION_OFFSET) | \
	      ((read_size) << READ_LOCATION_SIZE) | \
	      ((is_last_read_loc) << READ_LOCATION_LAST))

/*
 * Returns the actual register address for all NAND_DEV_ registers
 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
 */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))

/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))

/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
	((chip)->reg_read_dma + \
	((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))

#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8

#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)

/*
 * Flags used in DMA descriptor preparation of command descriptor
 * (i.e. read_reg_dma and write_reg_dma).
 */
/* Don't set the EOT in current tx BAM sgl */
#define NAND_BAM_NO_EOT BIT(0)
/* Set the NWD flag in current BAM sgl */
#define NAND_BAM_NWD BIT(1)
/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
#define NAND_BAM_NEXT_SGL BIT(2)
/*
 * Erased codeword status is being used two times in single transfer so this
 * flag will determine the current value of erased codeword status register
 */
#define NAND_ERASED_CW_SET BIT(4)

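/*
 * This data type corresponds to the BAM transaction which will be used for all
 * NAND transfers (field roles below are described from how the driver code
 * uses them).
 * @bam_ce - the array of BAM command elements
 * @cmd_sgl - sgl for NAND BAM command pipe
 * @data_sgl - sgl for NAND BAM consumer/producer pipe
 * @last_data_desc - last DMA desc in data channel (tx/rx)
 * @last_cmd_desc - last DMA desc in command channel
 * @txn_done - completion for NAND transfer
 * @bam_ce_pos - the index in bam_ce which is available for next sgl
 * @bam_ce_start - the index in bam_ce which marks the start position of the
 *		   current sgl; used for size calculation of the current sgl
 * @cmd_sgl_pos - current index in command sgl
 * @cmd_sgl_start - start index in command sgl
 * @tx_sgl_pos - current index in data sgl for tx
 * @tx_sgl_start - start index in data sgl for tx
 * @rx_sgl_pos - current index in data sgl for rx
 * @rx_sgl_start - start index in data sgl for rx
 * @wait_second_completion - wait for second DMA desc completion before making
 *			     the NAND transfer completion
 */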
struct bam_transaction {
	struct bam_cmd_element *bam_ce;
	struct scatterlist *cmd_sgl;
	struct scatterlist *data_sgl;
	struct dma_async_tx_descriptor *last_data_desc;
	struct dma_async_tx_descriptor *last_cmd_desc;
	struct completion txn_done;
	u32 bam_ce_pos;
	u32 bam_ce_start;
	u32 cmd_sgl_pos;
	u32 cmd_sgl_start;
	u32 tx_sgl_pos;
	u32 tx_sgl_start;
	u32 rx_sgl_pos;
	u32 rx_sgl_start;
	bool wait_second_completion;
};
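/*
 * This data type corresponds to the nand dma descriptor
 * @dma_desc - low level DMA engine descriptor
 * @node - list node for the controller's desc_list
 *
 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used
 *	      by ADM
 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
 * @dir - DMA transfer direction
 */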
struct desc_info {
	struct dma_async_tx_descriptor *dma_desc;
	struct list_head node;

	union {
		struct scatterlist adm_sgl;
		struct {
			struct scatterlist *bam_sgl;
			int sgl_cnt;
		};
	};
	enum dma_data_direction dir;
};
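/*
 * This structure holds the register values that the driver stages before
 * writing them to the NAND controller; it acts as a contiguous chunk of
 * memory which can be programmed into the controller registers via DMA.
 */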
struct nandc_regs {
	__le32 cmd;
	__le32 addr0;
	__le32 addr1;
	__le32 chip_sel;
	__le32 exec;

	__le32 cfg0;
	__le32 cfg1;
	__le32 ecc_bch_cfg;

	__le32 clrflashstatus;
	__le32 clrreadstatus;

	__le32 cmd1;
	__le32 vld;

	__le32 orig_cmd1;
	__le32 orig_vld;

	__le32 ecc_buf_cfg;
	__le32 read_location0;
	__le32 read_location1;
	__le32 read_location2;
	__le32 read_location3;
	__le32 read_location_last0;
	__le32 read_location_last1;
	__le32 read_location_last2;
	__le32 read_location_last3;

	__le32 erased_cw_detect_cfg_clr;
	__le32 erased_cw_detect_cfg_set;
};
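/*
 * NAND controller data struct (field roles described from how the driver
 * code uses them)
 *
 * @dev:			parent device
 * @base:			MMIO base
 *
 * @core_clk:			controller clock
 * @aon_clk:			another controller clock
 *
 * @regs:			a contiguous chunk of memory for DMA register
 *				writes. contains the register values to be
 *				written to the controller
 * @bam_txn:			contains the bam transaction buffer
 *
 * @props:			properties of the current NAND controller,
 *				initialized via DT match data
 *
 * @controller:			base controller structure
 * @host_list:			list containing all the chips attached to the
 *				controller
 *
 * @tx_chan/rx_chan/cmd_chan:	DMA channels, used only by QPIC (BAM DMA)
 * @chan/cmd_crci/data_crci:	DMA channel and flow-control CRCIs, used only
 *				by EBI2 (ADM DMA)
 *
 * @desc_list:			DMA descriptor list (list of desc_infos)
 *
 * @data_buffer:		our local DMA buffer for page read/writes,
 *				used when we can't use the buffer provided
 *				by upper layers directly
 * @reg_read_buf:		local buffer for reading back registers via DMA
 *
 * @base_phys:			physical base address of controller registers
 * @base_dma:			dma base address of controller registers
 * @reg_read_dma:		dma address of the register read buffer
 *
 * @buf_size/count/start:	markers for the legacy read_buf/write_buf
 *				helpers
 * @max_cwperpage:		maximum QPIC codewords required, calculated
 *				from the page size of all connected NAND
 *				devices
 *
 * @reg_read_pos:		marker for data read in reg_read_buf
 *
 * @cmd1/vld:			some fixed controller register values
 */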
struct qcom_nand_controller {
	struct device *dev;

	void __iomem *base;

	struct clk *core_clk;
	struct clk *aon_clk;

	struct nandc_regs *regs;
	struct bam_transaction *bam_txn;

	const struct qcom_nandc_props *props;

	struct nand_controller controller;
	struct list_head host_list;

	union {
		/* will be used only by QPIC for BAM DMA */
		struct {
			struct dma_chan *tx_chan;
			struct dma_chan *rx_chan;
			struct dma_chan *cmd_chan;
		};

		/* will be used only by EBI2 for ADM DMA */
		struct {
			struct dma_chan *chan;
			unsigned int cmd_crci;
			unsigned int data_crci;
		};
	};

	struct list_head desc_list;

	u8 *data_buffer;
	__le32 *reg_read_buf;

	phys_addr_t base_phys;
	dma_addr_t base_dma;
	dma_addr_t reg_read_dma;

	int buf_size;
	int buf_count;
	int buf_start;
	unsigned int max_cwperpage;

	int reg_read_pos;

	u32 cmd1, vld;
};
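/*
 * NAND special boot partitions
 *
 * @page_offset:		offset of the partition where spare data is not
 *				protected by ECC (value in pages)
 * @page_size:			size of the partition where spare data is not
 *				protected by ECC (value in pages)
 */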
struct qcom_nand_boot_partition {
	u32 page_offset;
	u32 page_size;
};
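/*
 * NAND chip structure (field roles described from how the driver code uses
 * them)
 *
 * @boot_partitions:		array of boot partitions where offset and size
 *				of the boot partitions are stored
 * @chip:			base NAND chip structure
 * @node:			list node to add itself to host_list in
 *				qcom_nand_controller
 * @nr_boot_partitions:		count of the boot partitions where spare data
 *				is not protected by ECC
 *
 * @cs:				chip select value for this chip
 * @cw_size:			the number of bytes in a single step/codeword
 *				of a page, consisting of all data, ecc, spare
 *				and reserved bytes
 * @cw_data:			the number of bytes within a codeword protected
 *				by ECC
 * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
 *				chip
 * @spare_bytes:		spare bytes per codeword
 * @bbm_size:			size of the bad block marker
 *
 * @last_command:		keeps track of the last command on this chip,
 *				used for reading correct status
 *
 * @cfg0 ... clrreadstatus:	NANDc register configurations needed for
 *				ecc/non-ecc mode for the current nand flash
 *				device
 *
 * @status:			value to be returned if NAND_CMD_STATUS is
 *				executed
 * @codeword_fixup:		keeps track of the current layout used by the
 *				driver for read/write operation
 * @use_ecc:			request the controller to use ECC for the
 *				upcoming read/write
 * @bch_enabled:		flag to tell whether BCH ECC mode is used
 */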
struct qcom_nand_host {
	struct qcom_nand_boot_partition *boot_partitions;

	struct nand_chip chip;
	struct list_head node;

	int nr_boot_partitions;

	int cs;
	int cw_size;
	int cw_data;
	int ecc_bytes_hw;
	int spare_bytes;
	int bbm_size;

	int last_command;

	u32 cfg0, cfg1;
	u32 cfg0_raw, cfg1_raw;
	u32 ecc_buf_cfg;
	u32 ecc_bch_cfg;
	u32 clrflashstatus;
	u32 clrreadstatus;

	u8 status;
	bool codeword_fixup;
	bool use_ecc;
	bool bch_enabled;
};
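/*
 * This data type corresponds to the NAND controller properties which vary
 * among different NAND controllers.
 * @ecc_modes - ecc mode for NAND
 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
 * @is_bam - whether NAND controller is using BAM
 * @is_qpic - whether NAND CTRL is part of qpic IP
 * @qpic_v2 - flag to indicate QPIC IP version 2
 * @use_codeword_fixup - whether NAND has different layout for boot partitions
 */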
struct qcom_nandc_props {
	u32 ecc_modes;
	u32 dev_cmd_reg_start;
	bool is_bam;
	bool is_qpic;
	bool qpic_v2;
	bool use_codeword_fixup;
};

/* Frees the BAM transaction memory */
static void free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	devm_kfree(nandc->dev, bam_txn);
}

/* Allocates and Initializes the BAM transaction */
static struct bam_transaction *
alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn_buf +=
		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;

	bam_txn->data_sgl = bam_txn_buf;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}

/* Clears the BAM transaction indexes */
static void clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->is_bam)
		return;

	bam_txn->bam_ce_pos = 0;
	bam_txn->bam_ce_start = 0;
	bam_txn->cmd_sgl_pos = 0;
	bam_txn->cmd_sgl_start = 0;
	bam_txn->tx_sgl_pos = 0;
	bam_txn->tx_sgl_start = 0;
	bam_txn->rx_sgl_pos = 0;
	bam_txn->rx_sgl_start = 0;
	bam_txn->last_data_desc = NULL;
	bam_txn->wait_second_completion = false;

	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_CMD_SGL);
	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
		      QPIC_PER_CW_DATA_SGL);

	reinit_completion(&bam_txn->txn_done);
}

/* Callback for DMA descriptor completion */
static void qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

	/*
	 * In case of data transfer with NAND, 2 callbacks will be generated.
	 * One for command channel and another one for data channel.
	 * If current transaction has data descriptors
	 * (i.e. wait_second_completion is true), then set this to false
	 * and wait for second DMA descriptor completion.
	 */
	if (bam_txn->wait_second_completion)
		bam_txn->wait_second_completion = false;
	else
		complete(&bam_txn->txn_done);
}

static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
	return container_of(chip, struct qcom_nand_host, chip);
}

static inline struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
	return container_of(chip->controller, struct qcom_nand_controller,
			    controller);
}

static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
	return ioread32(nandc->base + offset);
}

static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
			       u32 val)
{
	iowrite32(val, nandc->base + offset);
}

static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
					  bool is_cpu)
{
	if (!nandc->props->is_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}

static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
{
	switch (offset) {
	case NAND_FLASH_CMD:
		return &regs->cmd;
	case NAND_ADDR0:
		return &regs->addr0;
	case NAND_ADDR1:
		return &regs->addr1;
	case NAND_FLASH_CHIP_SELECT:
		return &regs->chip_sel;
	case NAND_EXEC_CMD:
		return &regs->exec;
	case NAND_FLASH_STATUS:
		return &regs->clrflashstatus;
	case NAND_DEV0_CFG0:
		return &regs->cfg0;
	case NAND_DEV0_CFG1:
		return &regs->cfg1;
	case NAND_DEV0_ECC_CFG:
		return &regs->ecc_bch_cfg;
	case NAND_READ_STATUS:
		return &regs->clrreadstatus;
	case NAND_DEV_CMD1:
		return &regs->cmd1;
	case NAND_DEV_CMD1_RESTORE:
		return &regs->orig_cmd1;
	case NAND_DEV_CMD_VLD:
		return &regs->vld;
	case NAND_DEV_CMD_VLD_RESTORE:
		return &regs->orig_vld;
	case NAND_EBI2_ECC_BUF_CFG:
		return &regs->ecc_buf_cfg;
	case NAND_READ_LOCATION_0:
		return &regs->read_location0;
	case NAND_READ_LOCATION_1:
		return &regs->read_location1;
	case NAND_READ_LOCATION_2:
		return &regs->read_location2;
	case NAND_READ_LOCATION_3:
		return &regs->read_location3;
	case NAND_READ_LOCATION_LAST_CW_0:
		return &regs->read_location_last0;
	case NAND_READ_LOCATION_LAST_CW_1:
		return &regs->read_location_last1;
	case NAND_READ_LOCATION_LAST_CW_2:
		return &regs->read_location_last2;
	case NAND_READ_LOCATION_LAST_CW_3:
		return &regs->read_location_last3;
	default:
		return NULL;
	}
}

static void nandc_set_reg(struct nand_chip *chip, int offset,
			  u32 val)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nandc_regs *regs = nandc->regs;
	__le32 *reg;

	reg = offset_to_nandc_reg(regs, offset);

	if (reg)
		*reg = cpu_to_le32(val);
}

/* Helper to check whether this is the last CW or not */
static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
{
	return cw == (ecc->steps - 1);
}

/* helper to configure location register values */
static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
			       int cw_offset, int read_size, int is_last_read_loc)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int reg_base = NAND_READ_LOCATION_0;

	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
		reg_base = NAND_READ_LOCATION_LAST_CW_0;

	reg_base += reg * 4;

	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
		return nandc_set_read_loc_last(chip, reg_base, cw_offset,
					       read_size, is_last_read_loc);
	else
		return nandc_set_read_loc_first(chip, reg_base, cw_offset,
						read_size, is_last_read_loc);
}

/* helper to configure address register values */
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
	struct nand_chip *chip = &host->chip;

	if (chip->options & NAND_BUSWIDTH_16)
		column >>= 1;

	nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
	nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
}
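/*
 * update_rw_regs:	set up read/write register values, these will be
 *			written to the NAND controller registers via DMA
 *
 * @num_cw:		number of steps for the read/write operation
 * @read:		read or write operation
 * @cw:			which codeword
 */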
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
{
	struct nand_chip *chip = &host->chip;
	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (read) {
		if (host->use_ecc)
			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
		else
			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
	} else {
		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
	}

	if (host->use_ecc) {
		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1;
		ecc_bch_cfg = host->ecc_bch_cfg;
	} else {
		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
				(num_cw - 1) << CW_PER_PAGE;

		cfg1 = host->cfg1_raw;
		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	}

	nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
	nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
	nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
	nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
	if (!nandc->props->qpic_v2)
		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
	nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	if (read)
		nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
				   host->cw_data : host->cw_size, 1);
}
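/*
 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
 * which will be submitted to the DMA engine.
 */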
static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				  struct dma_chan *chan,
				  unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* update last data/command descriptor */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
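/*
 * Prepares the command descriptor for BAM DMA which will be used for NAND
 * register reads and writes. The command descriptor requires the command
 * to be formed in command element type so this function uses the command
 * element from the bam transaction ce array and fills the same with the
 * required data. A single SGL can contain multiple command elements so
 * NAND_BAM_NEXT_SGL will be used for starting a separate SGL entry.
 */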
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
				 int reg_off, const void *vaddr,
				 int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    nandc_reg_phys(nandc, reg_off + 4 * i),
				    BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 nandc_reg_phys(nandc, reg_off + 4 * i),
					 BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
				bam_txn->bam_ce_start) *
				sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						     DMA_PREP_FENCE |
						     DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
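/*
 * Prepares the data descriptor for BAM DMA which will be used for NAND
 * data reads and writes.
 */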
static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				  const void *vaddr,
				  int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
						     DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			     int reg_off, const void *vaddr, int size,
			     bool flow_control)
{
	struct desc_info *desc;
	struct dma_async_tx_descriptor *dma_desc;
	struct scatterlist *sgl;
	struct dma_slave_config slave_conf;
	struct qcom_adm_peripheral_config periph_conf = {};
	enum dma_transfer_direction dir_eng;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (ret == 0) {
		ret = -ENOMEM;
		goto err;
	}

	memset(&slave_conf, 0x00, sizeof(slave_conf));

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		if (nandc->data_crci) {
			periph_conf.crci = nandc->data_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		if (nandc->cmd_crci) {
			periph_conf.crci = nandc->cmd_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
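/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 * @flags:		flags to control DMA descriptor preparation
 */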
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
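/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 * @flags:		flags to control DMA descriptor preparation
 */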
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	vaddr = offset_to_nandc_reg(regs, first);

	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
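/*
 * read_data_dma:	prepares a DMA descriptor to transfer data from the
 *			controller's internal buffer to the buffer 'vaddr'
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to write to
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */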
static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			 const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
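/*
 * write_data_dma:	prepares a DMA descriptor to transfer data from
 *			'vaddr' to the controller's internal buffer
 *
 * @reg_off:		offset within the controller's data buffer
 * @vaddr:		virtual address of the buffer we want to read from
 * @size:		DMA transaction size in bytes
 * @flags:		flags to control DMA descriptor preparation
 */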
static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			  const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->is_bam)
		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct nand_chip *chip)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	if (!nandc->props->qpic_v2)
		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void
config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	int reg = NAND_READ_LOCATION_0;

	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
		reg = NAND_READ_LOCATION_LAST_CW_0;

	if (nandc->props->is_bam)
		write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	if (use_ecc) {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
			     NAND_BAM_NEXT_SGL);
	} else {
		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	}
}

/*
 * Helper to prepare DMA descriptors to configure registers needed for reading
 * a single codeword in a page.
 */
static void
config_nand_single_cw_page_read(struct nand_chip *chip,
				bool use_ecc, int cw)
{
	config_nand_page_read(chip);
	config_nand_cw_read(chip, use_ecc, cw);
}

/*
 * Helper to prepare DMA descriptors used to configure registers needed for
 * writing a NAND page.
 */
static void config_nand_page_write(struct nand_chip *chip)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	if (!nandc->props->qpic_v2)
		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
			      NAND_BAM_NEXT_SGL);
}

/*
 * Helper to prepare DMA descriptors for configuring registers
 * after writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct nand_chip *chip)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
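/*
 * the following functions are used within chip->legacy.cmdfunc() to
 * perform different NAND_CMD_* commands
 */

/* sets up descriptors for NAND_CMD_PARAM */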
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	if (nandc->props->qpic_v2)
		nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ |
			      PAGE_ACC | LAST_PAGE);
	else
		nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ |
			      PAGE_ACC | LAST_PAGE);

	nandc_set_reg(chip, NAND_ADDR0, 0);
	nandc_set_reg(chip, NAND_ADDR1, 0);
	nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	if (!nandc->props->qpic_v2)
		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
	if (!nandc->props->qpic_v2) {
		nandc_set_reg(chip, NAND_DEV_CMD_VLD,
			      (nandc->vld & ~READ_START_VLD));
		nandc_set_reg(chip, NAND_DEV_CMD1,
			      (nandc->cmd1 & ~(0xFF << READ_ADDR))
			      | NAND_CMD_PARAM << READ_ADDR);
	}

	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	if (!nandc->props->qpic_v2) {
		nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
		nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	}

	nandc_set_read_loc(chip, 0, 0, 0, 512, 1);

	if (!nandc->props->qpic_v2) {
		write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
		write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
	}

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(chip, false, 0);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	if (!nandc->props->qpic_v2) {
		write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
		write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
	}

	return 0;
}

/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(chip, NAND_FLASH_CMD,
		      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(chip, NAND_ADDR0, page_addr);
	nandc_set_reg(chip, NAND_ADDR1, 0);
	nandc_set_reg(chip, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
	nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (column == -1)
		return 0;

	nandc_set_reg(chip, NAND_FLASH_CMD, OP_FETCH_ID);
	nandc_set_reg(chip, NAND_ADDR0, column);
	nandc_set_reg(chip, NAND_ADDR1, 0);
	nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(chip, NAND_FLASH_CMD, OP_RESET_DEVICE);
	nandc_set_reg(chip, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}

/* submits all queued DMA descriptors and waits for their completion */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;
		if (bam_txn->last_data_desc) {
			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
			bam_txn->last_data_desc->callback_param = bam_txn;
			bam_txn->wait_second_completion = true;
		}

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}

static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}

/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	nandc_read_buffer_sync(nandc, false);
}

static void pre_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc->buf_count = 0;
	nandc->buf_start = 0;
	host->use_ecc = false;
	host->last_command = command;

	clear_read_regs(nandc);

	if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
	    command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
		clear_bam_transaction(nandc);
}
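/*
 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
 * privately maintained status byte, this status byte can be read after
 * NAND_CMD_STATUS is called
 */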
static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int num_cw;
	int i;

	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < num_cw; i++) {
		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash_status & FS_MPU_ERR)
			host->status &= ~NAND_STATUS_WP;

		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
						 (flash_status &
						  FS_DEVICE_STS_ERR)))
			host->status |= NAND_STATUS_FAIL;
	}
}

static void post_command(struct qcom_nand_host *host, int command)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	switch (command) {
	case NAND_CMD_READID:
		nandc_read_buffer_sync(nandc, true);
		memcpy(nandc->data_buffer, nandc->reg_read_buf,
		       nandc->buf_count);
		break;
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
		parse_erase_write_errors(host, command);
		break;
	default:
		break;
	}
}
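/*
 * Implements chip->legacy.cmdfunc. It's only used for a limited set of
 * commands. The rest of the commands wouldn't be called by upper layers.
 * For example, NAND_CMD_READOOB would never be called because we have our own
 * versions of read_oob ops for nand_ecc_ctrl.
 */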
static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
			       int column, int page_addr)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true, 0);
		break;

	case NAND_CMD_SEQIN:
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
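/*
 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
 *
 * when using RS ECC, the HW reports the same errors when reading an erased
 * CW, but it notifies that it is an erased CW by placing special characters
 * at certain offsets in the buffer.
 *
 * verify if the page is erased or not, and fix up the page for RS ECC by
 * replacing the special characters with 0xff.
 */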
static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
{
	u8 empty1, empty2;

	/*
	 * an erased page flags an error in NAND_FLASH_STATUS, check if the
	 * page is erased by looking for 0x54s at offsets 3 and 175 from the
	 * beginning of each codeword
	 */
	empty1 = data_buf[3];
	empty2 = data_buf[175];

	/*
	 * if the erased codeword markers exist, override them with
	 * 0xffs
	 */
	if ((empty1 == 0x54 && empty2 == 0xff) ||
	    (empty1 == 0xff && empty2 == 0x54)) {
		data_buf[3] = 0xff;
		data_buf[175] = 0xff;
	}

	/*
	 * check if the entire chunk contains 0xffs or not. if it doesn't,
	 * then restore the original values at the special offsets
	 */
	if (memchr_inv(data_buf, 0xff, data_len)) {
		data_buf[3] = empty1;
		data_buf[175] = empty2;

		return false;
	}

	return true;
}

struct read_stats {
	__le32 flash;
	__le32 buffer;
	__le32 erased_cw;
};

/* reads back FLASH_STATUS register set by the controller */
static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int i;

	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < cw_cnt; i++) {
		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}

/* performs raw read for one codeword */
static int
qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
		       u8 *data_buf, u8 *oob_buf, int page, int cw)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
	int raw_cw = cw;

	nand_read_page_op(chip, page, 0, NULL, 0);
	host->use_ecc = false;

	if (nandc->props->qpic_v2)
		raw_cw = ecc->steps - 1;

	clear_bam_transaction(nandc);
	set_address(host, host->cw_size * cw, page);
	update_rw_regs(host, 1, true, raw_cw);
	config_nand_page_read(chip);

	data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
	oob_size1 = host->bbm_size;

	if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
		data_size2 = ecc->size - data_size1 -
			     ((ecc->steps - 1) * 4);
		oob_size2 = (ecc->steps * 4) + host->ecc_bytes_hw +
			    host->spare_bytes;
	} else {
		data_size2 = host->cw_data - data_size1;
		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
	}

	if (nandc->props->is_bam) {
		nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
		read_loc += data_size1;

		nandc_set_read_loc(chip, cw, 1, read_loc, oob_size1, 0);
		read_loc += oob_size1;

		nandc_set_read_loc(chip, cw, 2, read_loc, data_size2, 0);
		read_loc += data_size2;

		nandc_set_read_loc(chip, cw, 3, read_loc, oob_size2, 1);
	}

	config_nand_cw_read(chip, false, raw_cw);

	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = submit_descs(nandc);
	free_descs(nandc);
	if (ret) {
		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return check_flash_errors(host, 1);
}
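/*
 * Bitflips can happen in erased codewords also so this function counts the
 * number of 0 in each CW for which the ECC engine returns the uncorrectable
 * error. The page will be assumed as erased if this count is less than or
 * equal to the ecc->strength for each CW.
 *
 * 1. Both DATA and OOB need to be checked for number of 0. The
 *    top-level API can be called with only data buf or OOB buf so use
 *    chip->data_buf if data buf is null and chip->oob_poi if oob buf
 *    is null for copying the raw bytes.
 * 2. Perform raw read for all the CW which has uncorrectable errors.
 * 3. For each CW, check the number of 0 in cw_data and usable OOB bytes.
 *    The BBM and spare bytes bit flip won't affect the ECC so don't check
 *    the number of bitflips in this area.
 */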
static int
check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
		      u8 *oob_buf, unsigned long uncorrectable_cws,
		      int page, unsigned int max_bitflips)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *cw_data_buf, *cw_oob_buf;
	int cw, data_size, oob_size, ret = 0;

	if (!data_buf)
		data_buf = nand_get_data_buf(chip);

	if (!oob_buf) {
		nand_get_data_buf(chip);
		oob_buf = chip->oob_poi;
	}

	for_each_set_bit(cw, &uncorrectable_cws, ecc->steps) {
		if (qcom_nandc_is_last_cw(ecc, cw) && !host->codeword_fixup) {
			data_size = ecc->size - ((ecc->steps - 1) * 4);
			oob_size = (ecc->steps * 4) + host->ecc_bytes_hw;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw;
		}

		/* determine starting buffer address for current CW */
		cw_data_buf = data_buf + (cw * host->cw_data);
		cw_oob_buf = oob_buf + (cw * ecc->bytes);

		ret = qcom_nandc_read_cw_raw(mtd, chip, cw_data_buf,
					     cw_oob_buf, page, cw);
		if (ret)
			return ret;

		/*
		 * make sure it isn't an erased page reported
		 * with ECC failure
		 */
		ret = nand_check_erased_ecc_chunk(cw_data_buf, data_size,
						  cw_oob_buf + host->bbm_size,
						  oob_size, NULL,
						  0, ecc->strength);
		if (ret < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}
	}

	return max_bitflips;
}
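/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 */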
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0, uncorrectable_cws = 0;
	struct read_stats *buf;
	bool flash_op_err = false, erased;
	int i;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		if (qcom_nandc_is_last_cw(ecc, i)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		/*
		 * Check ECC failure for each codeword. ECC failure can
		 * happen in either of the following conditions
		 * 1. If number of bitflips are greater than ECC engine
		 *    capability.
		 * 2. If this codeword contains all 0xff for which erased
		 *    codeword detection check will be done.
		 */
		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			/*
			 * For BCH ECC, ignore erased codeword errors, if
			 * ERASED_CW bits are set.
			 */
			if (host->bch_enabled) {
				erased = (erased_cw & ERASED_CW) == ERASED_CW;
			/*
			 * For RS ECC, HW reports the erased CW by placing
			 * special characters at certain offsets in the buffer.
			 * These special characters will be valid only if
			 * complete page is read i.e. data_buf is not NULL.
			 */
			} else if (data_buf) {
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			} else {
				erased = false;
			}

			if (!erased)
				uncorrectable_cws |= BIT(i);

		/*
		 * Check if MPU or any other operational error (timeout,
		 * device failure, etc.) happened for this codeword and
		 * make flash_op_err true. If flash_op_err is set, then
		 * EIO will be returned for page read.
		 */
		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			flash_op_err = true;

		/*
		 * No ECC or operational errors happened. Check the number of
		 * bits corrected and update the ecc_stats.corrected.
		 */
		} else {
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		if (data_buf)
			data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		return max_bitflips;

	return check_for_erased_page(host, data_buf_start, oob_buf_start,
				     uncorrectable_cws, page,
				     max_bitflips);
}
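/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 */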
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
	int i, ret;

	config_nand_page_read(chip);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
				nandc_set_read_loc(chip, i, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(chip, i, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(chip, i, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(chip, true, i);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * fill the BBM positions with 0xff and read the remaining
		 * oob bytes from the controller's buffer.
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to read page/oob\n");
		return ret;
	}

	return parse_read_errors(host, data_buf_start, oob_buf_start, page);
}
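/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */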
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true, ecc->steps - 1);

	config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}

static bool qcom_nandc_is_boot_partition(struct qcom_nand_host *host, int page)
{
	struct qcom_nand_boot_partition *boot_partition;
	u32 start, end;
	int i;

	/*
	 * Since the frequent access will be to the non-boot partitions like
	 * rootfs, optimize the page check by:
	 *
	 * 1. Checking if the page lies after the last boot partition.
	 * 2. Checking from the boot partition end.
	 */

	/* First check the last boot partition */
	boot_partition = &host->boot_partitions[host->nr_boot_partitions - 1];
	start = boot_partition->page_offset;
	end = start + boot_partition->page_size;

	/* Page is after the last boot partition end. This is NOT a boot partition */
	if (page > end)
		return false;

	/* Actually check if it's a boot partition */
	if (page < end && page >= start)
		return true;

	/* Check the other boot partitions starting from the second-last partition */
	for (i = host->nr_boot_partitions - 2; i >= 0; i--) {
		boot_partition = &host->boot_partitions[i];
		start = boot_partition->page_offset;
		end = start + boot_partition->page_size;

		if (page < end && page >= start)
			return true;
	}

	return false;
}

static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
{
	bool codeword_fixup = qcom_nandc_is_boot_partition(host, page);

	/* Skip conf write if we are already in the correct mode */
	if (codeword_fixup == host->codeword_fixup)
		return;

	host->codeword_fixup = codeword_fixup;

	host->cw_data = codeword_fixup ? 512 : 516;
	host->spare_bytes = host->cw_size - host->ecc_bytes_hw -
			    host->bbm_size - host->cw_data;

	host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
	host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
		      host->cw_data << UD_SIZE_BYTES;

	host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
	host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
	host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
}

/* implements ecc->read_page() */
static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *data_buf, *oob_buf = NULL;

	if (host->nr_boot_partitions)
		qcom_nandc_codeword_fixup(host, page);

	nand_read_page_op(chip, page, 0, NULL, 0);
	data_buf = buf;
	oob_buf = oob_required ? chip->oob_poi : NULL;

	clear_bam_transaction(nandc);

	return read_page_ecc(host, data_buf, oob_buf, page);
}

/* implements ecc->read_page_raw() */
static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int cw, ret;
	u8 *data_buf = buf, *oob_buf = chip->oob_poi;

	if (host->nr_boot_partitions)
		qcom_nandc_codeword_fixup(host, page);

	for (cw = 0; cw < ecc->steps; cw++) {
		ret = qcom_nandc_read_cw_raw(mtd, chip, data_buf, oob_buf,
					     page, cw);
		if (ret)
			return ret;

		data_buf += host->cw_data;
		oob_buf += ecc->bytes;
	}

	return 0;
}

/* implements ecc->read_oob() */
static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (host->nr_boot_partitions)
		qcom_nandc_codeword_fixup(host, page);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	host->use_ecc = true;
	set_address(host, 0, page);
	update_rw_regs(host, ecc->steps, true, 0);

	return read_page_ecc(host, NULL, chip->oob_poi, page);
}

/* implements ecc->write_page() */
static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	if (host->nr_boot_partitions)
		qcom_nandc_codeword_fixup(host, page);

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = true;
	update_rw_regs(host, ecc->steps, false, 0);
	config_nand_page_write(chip);

	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = ecc->bytes;
		}

		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

		/*
		 * when ECC is enabled, we don't really need to write anything
		 * to oob for the first n - 1 codewords since these oob regions
		 * just contain ECC bytes that's written by the controller
		 * itself. For the last codeword, we skip the bbm positions and
		 * write to the free oob area.
		 */
		if (qcom_nandc_is_last_cw(ecc, i)) {
			oob_buf += host->bbm_size;

			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
				       oob_buf, oob_size, 0);
		}

		config_nand_cw_write(chip);

		data_buf += data_size;
		oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}

/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct nand_chip *chip,
				     const uint8_t *buf, int oob_required,
				     int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *data_buf, *oob_buf;
	int i, ret;

	if (host->nr_boot_partitions)
		qcom_nandc_codeword_fixup(host, page);

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	data_buf = (u8 *)buf;
	oob_buf = chip->oob_poi;

	host->use_ecc = false;
	update_rw_regs(host, ecc->steps, false, 0);
	config_nand_page_write(chip);

	for (i = 0; i < ecc->steps; i++) {
		int data_size1, data_size2, oob_size1, oob_size2;
		int reg_off = FLASH_BUF_ACC;

		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
		oob_size1 = host->bbm_size;

		if (qcom_nandc_is_last_cw(ecc, i) && !host->codeword_fixup) {
			data_size2 = ecc->size - data_size1 -
				     ((ecc->steps - 1) << 2);
			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
				    host->spare_bytes;
		} else {
			data_size2 = host->cw_data - data_size1;
			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
		}

		write_data_dma(nandc, reg_off, data_buf, data_size1,
			       NAND_BAM_NO_EOT);
		reg_off += data_size1;
		data_buf += data_size1;

		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
			       NAND_BAM_NO_EOT);
		reg_off += oob_size1;
		oob_buf += oob_size1;

		write_data_dma(nandc, reg_off, data_buf, data_size2,
			       NAND_BAM_NO_EOT);
		reg_off += data_size2;
		data_buf += data_size2;

		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
		oob_buf += oob_size2;

		config_nand_cw_write(chip);
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to write raw page\n");

	free_descs(nandc);

	if (!ret)
		ret = nand_prog_page_end_op(chip);

	return ret;
}
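/*
 * Implements ecc->write_oob(). The controller cannot update OOB bytes in
 * isolation, so the whole last codeword is reprogrammed: its data portion is
 * filled with 0xff and the caller's free OOB bytes are packed in behind it
 * before the codeword is written with ECC enabled.
 */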
static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	u8 *oob = chip->oob_poi;
	int data_size, oob_size;
	int ret;

	if (host->nr_boot_partitions)
		qcom_nandc_codeword_fixup(host, page);

	host->use_ecc = true;
	clear_bam_transaction(nandc);

	/* calculate the data and oob size for the last codeword/step */
	data_size = ecc->size - ((ecc->steps - 1) << 2);
	oob_size = mtd->oobavail;

	memset(nandc->data_buffer, 0xff, host->cw_data);
	/* override new oob content to last codeword */
	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
				    0, mtd->oobavail);

	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false, 0);

	config_nand_page_write(chip);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, data_size + oob_size, 0);
	config_nand_cw_write(chip);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to write oob\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}

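/*
 * Implements chip->legacy.block_bad(). Reads the BBM byte(s) from the last
 * codeword of the page with ECC disabled, since that is where the factory
 * bad block marker sits in this controller's layout.
 */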
static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret, bbpos, bad = 0;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/*
	 * configure registers for a raw sub page read, the address is set to
	 * the beginning of the last codeword, we don't care about reading the
	 * ecc portion of oob. we just want the first few bytes from this
	 * codeword, which contain the BBM
	 */
	host->use_ecc = false;

	clear_bam_transaction(nandc);
	ret = copy_last_cw(host, page);
	if (ret)
		goto err;

	if (check_flash_errors(host, 1)) {
		dev_warn(nandc->dev, "error when trying to read BBM\n");
		goto err;
	}

	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);

	bad = nandc->data_buffer[bbpos] != 0xff;

	if (chip->options & NAND_BUSWIDTH_16)
		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
err:
	return bad;
}

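/*
 * Implements chip->legacy.block_markbad(). Writes a zero-filled last codeword
 * with ECC disabled so the BBM byte(s) read back as non-0xff.
 */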
static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int page, ret;

	clear_read_regs(nandc);
	clear_bam_transaction(nandc);

	/*
	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
	 * we don't care about the rest of the content in the codeword since
	 * we aren't going to use this block again
	 */
	memset(nandc->data_buffer, 0x00, host->cw_size);

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	/* prepare write */
	host->use_ecc = false;
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, false, ecc->steps - 1);

	config_nand_page_write(chip);
	write_data_dma(nandc, FLASH_BUF_ACC,
		       nandc->data_buffer, host->cw_size, 0);
	config_nand_cw_write(chip);

	ret = submit_descs(nandc);

	free_descs(nandc);

	if (ret) {
		dev_err(nandc->dev, "failure to update BBM\n");
		return -EIO;
	}

	return nand_prog_page_end_op(chip);
}
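/*
 * The three functions below implement chip->legacy.read_byte(), read_buf()
 * and write_buf(). They are not used for page data; they only serve small
 * transfers such as chip ID and status, which are staged in the driver's
 * internal data buffer.
 */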
static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
{
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	u8 *buf = nandc->data_buffer;
	u8 ret = 0x0;

	if (host->last_command == NAND_CMD_STATUS) {
		ret = host->status;

		host->status = NAND_STATUS_READY | NAND_STATUS_WP;

		return ret;
	}

	if (nandc->buf_start < nandc->buf_count)
		ret = buf[nandc->buf_start++];

	return ret;
}

static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
	nandc->buf_start += real_len;
}

static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
				 int len)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);

	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);

	nandc->buf_start += real_len;
}

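/* we support only one external chip for now */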
static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
{
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	if (chipnr <= 0)
		return;

	dev_warn(nandc->dev, "invalid chip select\n");
}
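/*
 * NAND controller page layout (a summary; see the helpers below for the
 * exact math):
 *
 * The controller splits each page into cwperpage = writesize / 512 codewords.
 * A codeword on flash is host->cw_size bytes: 516 bytes of payload
 * (host->cw_data) followed by ecc->bytes of non-data content, where
 * ecc->bytes = ECC parity (host->ecc_bytes_hw) + unused spare bytes
 * (host->spare_bytes) + bad block marker bytes (host->bbm_size), i.e.
 * 12 bytes in 4 bit ECC mode and 16 bytes in 8 bit ECC mode.
 *
 * The first (cwperpage - 1) codewords each carry 516 bytes of user data. The
 * last codeword carries the remaining user data plus the 4 * cwperpage free
 * OOB bytes, so that data + free OOB again sum to 516 bytes.
 *
 * The OOB layout exposed to MTD therefore consists of two ECC regions (the
 * per-codeword parity/spare bytes) surrounding a single free region of
 * 4 * cwperpage bytes, as computed by qcom_nand_ooblayout_ecc() and
 * qcom_nand_ooblayout_free() below. The bad block marker is only accessible
 * through raw (ECC disabled) accesses to the last codeword.
 */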
static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section > 1)
		return -ERANGE;

	if (!section) {
		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
				    host->bbm_size;
		oobregion->offset = 0;
	} else {
		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
		oobregion->offset = mtd->oobsize - oobregion->length;
	}

	return 0;
}

static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = ecc->steps * 4;
	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;

	return 0;
}

static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
	.ecc = qcom_nand_ooblayout_ecc,
	.free = qcom_nand_ooblayout_free,
};

static int
qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
	return strength == 4 ? 12 : 16;
}

NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
		     NANDC_STEP_SIZE, 4, 8);

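/*
 * Called once per chip from nand_scan(). Chooses a supported ECC
 * configuration that fits the chip's requirement and the available OOB,
 * derives the codeword geometry from it, and precomputes the CFG0/CFG1 and
 * ECC configuration register values used for ECC and raw accesses.
 */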
static int qcom_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	int cwperpage, bad_block_byte, ret;
	bool wide_bus;
	int ecc_mode = 1;

	/* controller only supports 512 byte data steps */
	ecc->size = NANDC_STEP_SIZE;
	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
	cwperpage = mtd->writesize / NANDC_STEP_SIZE;

	/*
	 * Each codeword has 4 available OOB bytes which will be protected
	 * with ECC, so the remaining OOB bytes can be used for ECC parity.
	 */
	ret = nand_ecc_choose_conf(chip, &qcom_nandc_ecc_caps,
				   mtd->oobsize - (cwperpage * 4));
	if (ret) {
		dev_err(nandc->dev, "No valid ECC settings possible\n");
		return ret;
	}

	if (ecc->strength >= 8) {
		/* 8 bit ECC defaults to BCH ECC on all platforms */
		host->bch_enabled = true;
		ecc_mode = 1;

		if (wide_bus) {
			host->ecc_bytes_hw = 14;
			host->spare_bytes = 0;
			host->bbm_size = 2;
		} else {
			host->ecc_bytes_hw = 13;
			host->spare_bytes = 2;
			host->bbm_size = 1;
		}
	} else {
		/*
		 * if the controller supports BCH for 4 bit ECC, it uses fewer
		 * bytes for ECC. If RS is used, the ECC parity is always 10
		 * bytes.
		 */
		if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
			/* BCH */
			host->bch_enabled = true;
			ecc_mode = 0;

			if (wide_bus) {
				host->ecc_bytes_hw = 8;
				host->spare_bytes = 2;
				host->bbm_size = 2;
			} else {
				host->ecc_bytes_hw = 7;
				host->spare_bytes = 4;
				host->bbm_size = 1;
			}
		} else {
			/* RS */
			host->ecc_bytes_hw = 10;

			if (wide_bus) {
				host->spare_bytes = 0;
				host->bbm_size = 2;
			} else {
				host->spare_bytes = 1;
				host->bbm_size = 1;
			}
		}
	}

	/*
	 * we consider ecc->bytes as the sum of all the non-data content in a
	 * step. It gives us a clean representation of the oob area (even if
	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
	 * ECC and 12 bytes for 4 bit ECC.
	 */
	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;

	ecc->read_page = qcom_nandc_read_page;
	ecc->read_page_raw = qcom_nandc_read_page_raw;
	ecc->read_oob = qcom_nandc_read_oob;
	ecc->write_page = qcom_nandc_write_page;
	ecc->write_page_raw = qcom_nandc_write_page_raw;
	ecc->write_oob = qcom_nandc_write_oob;

	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);

	/* free the initial BAM transaction allocated for device detection */
	if (nandc->props->is_bam)
		free_bam_transaction(nandc);

	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
				     cwperpage);

	/* reallocate the BAM transaction based on the updated max_cwperpage */
	if (nandc->props->is_bam) {
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			return -ENOMEM;
		}
	}

	/*
	 * DATA_UD_BYTES varies based on whether the read/write command
	 * protects spare data with ECC too. We protect spare data by default,
	 * so we set it to main + spare data, which are 512 and 4 bytes
	 * respectively.
	 */
	host->cw_data = 516;

	/*
	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
	 * for 8 bit ECC
	 */
	host->cw_size = host->cw_data + ecc->bytes;
	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;

	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
			| host->cw_data << UD_SIZE_BYTES
			| 0 << DISABLE_STATUS_AFTER_WRITE
			| 5 << NUM_ADDR_CYCLES
			| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
			| 0 << STATUS_BFR_READ
			| 1 << SET_RD_MODE_AFTER_STATUS
			| host->spare_bytes << SPARE_SIZE_BYTES;

	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
			| 0 << CS_ACTIVE_BSY
			| bad_block_byte << BAD_BLOCK_BYTE_NUM
			| 0 << BAD_BLOCK_IN_SPARE_AREA
			| 2 << WR_RD_BSY_GAP
			| wide_bus << WIDE_FLASH
			| host->bch_enabled << ENABLE_BCH_ECC;

	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
			| host->cw_size << UD_SIZE_BYTES
			| 5 << NUM_ADDR_CYCLES
			| 0 << SPARE_SIZE_BYTES;

	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
			| 0 << CS_ACTIVE_BSY
			| 17 << BAD_BLOCK_BYTE_NUM
			| 1 << BAD_BLOCK_IN_SPARE_AREA
			| 2 << WR_RD_BSY_GAP
			| wide_bus << WIDE_FLASH
			| 1 << DEV0_CFG1_ECC_DISABLE;

	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
			| 0 << ECC_SW_RESET
			| host->cw_data << ECC_NUM_DATA_BYTES
			| 1 << ECC_FORCE_CLK_OPEN
			| ecc_mode << ECC_MODE
			| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;

	if (!nandc->props->qpic_v2)
		host->ecc_buf_cfg = 0x203 << NUM_STEPS;

	host->clrflashstatus = FS_READY_BSY_N;
	host->clrreadstatus = 0xc0;
	nandc->regs->erased_cw_detect_cfg_clr =
		cpu_to_le32(CLR_ERASED_PAGE_DET);
	nandc->regs->erased_cw_detect_cfg_set =
		cpu_to_le32(SET_ERASED_PAGE_DET);

	dev_dbg(nandc->dev,
		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
		cwperpage);

	return 0;
}

static const struct nand_controller_ops qcom_nandc_ops = {
	.attach_chip = qcom_nand_attach_chip,
};

static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->is_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}

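/*
 * Allocate the resources the controller needs up front: a 32-bit DMA mask,
 * the internal data/register buffers, and either the BAM tx/rx/cmd channels
 * or the single ADM rxtx channel, depending on the DMA engine in use.
 */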
static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
					  GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
				   GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev,
					   MAX_REG_RD, sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->is_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
		if (IS_ERR(nandc->tx_chan)) {
			ret = PTR_ERR(nandc->tx_chan);
			nandc->tx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "tx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
		if (IS_ERR(nandc->rx_chan)) {
			ret = PTR_ERR(nandc->rx_chan);
			nandc->rx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
		if (IS_ERR(nandc->cmd_chan)) {
			ret = PTR_ERR(nandc->cmd_chan);
			nandc->cmd_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "cmd DMA channel request failed\n");
			goto unalloc;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM transaction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			ret = -ENOMEM;
			goto unalloc;
		}
	} else {
		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
		if (IS_ERR(nandc->chan)) {
			ret = PTR_ERR(nandc->chan);
			nandc->chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rxtx DMA channel request failed\n");
			return ret;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	nand_controller_init(&nandc->controller);
	nandc->controller.ops = &qcom_nandc_ops;

	return 0;
unalloc:
	qcom_nandc_unalloc(nandc);
	return ret;
}

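/* one time setup of a few nand controller registers */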
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
	u32 nand_ctrl;

	/* kill onenand */
	if (!nandc->props->is_qpic)
		nandc_write(nandc, SFLASHC_BURST_CFG, 0);

	if (!nandc->props->qpic_v2)
		nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
			    NAND_DEV_CMD_VLD_VAL);

	/* enable ADM or BAM DMA */
	if (nandc->props->is_bam) {
		nand_ctrl = nandc_read(nandc, NAND_CTRL);

		/*
		 * NAND_CTRL is an operational register, and CPU
		 * access to operational registers is read only
		 * in BAM mode. So update the NAND_CTRL register
		 * only if it is not already in BAM mode. In most
		 * cases BAM mode will be enabled in the bootloader.
		 */
		if (!(nand_ctrl & BAM_MODE_EN))
			nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
	} else {
		nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
	}

	/* save the original values of these registers */
	if (!nandc->props->qpic_v2) {
		nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
		nandc->vld = NAND_DEV_CMD_VLD_VAL;
	}

	return 0;
}

static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL };

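/*
 * Parse the optional "qcom,boot-partitions" property: <offset size> pairs in
 * bytes, each a multiple of the page size, marking the regions that need the
 * boot-partition codeword layout applied by qcom_nandc_codeword_fixup().
 * Offsets and sizes are converted to page units.
 */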
static int qcom_nand_host_parse_boot_partitions(struct qcom_nand_controller *nandc,
						struct qcom_nand_host *host,
						struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct qcom_nand_boot_partition *boot_partition;
	struct device *dev = nandc->dev;
	int partitions_count, i, j, ret;

	if (!of_find_property(dn, "qcom,boot-partitions", NULL))
		return 0;

	partitions_count = of_property_count_u32_elems(dn, "qcom,boot-partitions");
	if (partitions_count <= 0) {
		dev_err(dev, "Error parsing boot partition\n");
		return partitions_count ? partitions_count : -EINVAL;
	}

	host->nr_boot_partitions = partitions_count / 2;
	host->boot_partitions = devm_kcalloc(dev, host->nr_boot_partitions,
					     sizeof(*host->boot_partitions), GFP_KERNEL);
	if (!host->boot_partitions) {
		host->nr_boot_partitions = 0;
		return -ENOMEM;
	}

	for (i = 0, j = 0; i < host->nr_boot_partitions; i++, j += 2) {
		boot_partition = &host->boot_partitions[i];

		ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j,
						 &boot_partition->page_offset);
		if (ret) {
			dev_err(dev, "Error parsing boot partition offset at index %d\n", i);
			host->nr_boot_partitions = 0;
			return ret;
		}

		if (boot_partition->page_offset % mtd->writesize) {
			dev_err(dev, "Boot partition offset not multiple of writesize at index %i\n",
				i);
			host->nr_boot_partitions = 0;
			return -EINVAL;
		}

		boot_partition->page_offset /= mtd->writesize;

		ret = of_property_read_u32_index(dn, "qcom,boot-partitions", j + 1,
						 &boot_partition->page_size);
		if (ret) {
			dev_err(dev, "Error parsing boot partition size at index %d\n", i);
			host->nr_boot_partitions = 0;
			return ret;
		}

		if (boot_partition->page_size % mtd->writesize) {
			dev_err(dev, "Boot partition size not multiple of writesize at index %i\n",
				i);
			host->nr_boot_partitions = 0;
			return -EINVAL;
		}

		boot_partition->page_size /= mtd->writesize;
	}

	return 0;
}

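/*
 * Set up one NAND chip: wire up the legacy hooks, scan it, register the MTD
 * device and, where required, parse the boot partition layout fixup.
 */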
static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
					    struct qcom_nand_host *host,
					    struct device_node *dn)
{
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct device *dev = nandc->dev;
	int ret;

	ret = of_property_read_u32(dn, "reg", &host->cs);
	if (ret) {
		dev_err(dev, "can't get chip-select\n");
		return -ENXIO;
	}

	nand_set_flash_node(chip, dn);
	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
	if (!mtd->name)
		return -ENOMEM;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;

	chip->legacy.cmdfunc = qcom_nandc_command;
	chip->legacy.select_chip = qcom_nandc_select_chip;
	chip->legacy.read_byte = qcom_nandc_read_byte;
	chip->legacy.read_buf = qcom_nandc_read_buf;
	chip->legacy.write_buf = qcom_nandc_write_buf;
	chip->legacy.set_features = nand_get_set_features_notsupp;
	chip->legacy.get_features = nand_get_set_features_notsupp;

	/*
	 * the bad block marker is readable only when we read the last codeword
	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
	 * helpers don't allow us to read the BBM from a nand chip with ECC
	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
	 * and block_markbad helpers until we permanently switch to using
	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
	 */
	chip->legacy.block_bad = qcom_nandc_block_bad;
	chip->legacy.block_markbad = qcom_nandc_block_markbad;

	chip->controller = &nandc->controller;
	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
			 NAND_SKIP_BBTSCAN;

	/* set up initial status value */
	host->status = NAND_STATUS_READY | NAND_STATUS_WP;

	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
	if (ret) {
		nand_cleanup(chip);
		return ret;
	}

	if (nandc->props->use_codeword_fixup) {
		ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn);
		if (ret) {
			nand_cleanup(chip);
			return ret;
		}
	}

	return ret;
}

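/* scan the DT child nodes and register one NAND host per chip-select */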
static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
{
	struct device *dev = nandc->dev;
	struct device_node *dn = dev->of_node, *child;
	struct qcom_nand_host *host;
	int ret = -ENODEV;

	for_each_available_child_of_node(dn, child) {
		host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
		if (!host) {
			of_node_put(child);
			return -ENOMEM;
		}

		ret = qcom_nand_host_init_and_register(nandc, host, child);
		if (ret) {
			devm_kfree(dev, host);
			continue;
		}

		list_add_tail(&host->node, &nandc->host_list);
	}

	return ret;
}
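/* parse custom DT properties here */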
static int qcom_nandc_parse_dt(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct device_node *np = nandc->dev->of_node;
	int ret;

	if (!nandc->props->is_bam) {
		ret = of_property_read_u32(np, "qcom,cmd-crci",
					   &nandc->cmd_crci);
		if (ret) {
			dev_err(nandc->dev, "command CRCI unspecified\n");
			return ret;
		}

		ret = of_property_read_u32(np, "qcom,data-crci",
					   &nandc->data_crci);
		if (ret) {
			dev_err(nandc->dev, "data CRCI unspecified\n");
			return ret;
		}
	}

	return 0;
}

static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_phys = res->start;
	nandc->base_dma = dma_map_resource(dev, res->start,
					   resource_size(res),
					   DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, nandc->base_dma))
		return -ENXIO;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_nandc_alloc;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

err_setup:
	qcom_nandc_unalloc(nandc);
err_nandc_alloc:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	/* unmap with the DMA address returned by dma_map_resource() */
	dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);
	return ret;
}

static int qcom_nandc_remove(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct qcom_nand_host *host;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry(host, &nandc->host_list, node) {
		chip = &host->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
	}

	qcom_nandc_unalloc(nandc);

	clk_disable_unprepare(nandc->aon_clk);
	clk_disable_unprepare(nandc->core_clk);

	dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
			   DMA_BIDIRECTIONAL, 0);

	return 0;
}

static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.use_codeword_fixup = true,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x0,
};

static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.dev_cmd_reg_start = 0x7000,
};

static const struct qcom_nandc_props sdx55_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.is_qpic = true,
	.qpic_v2 = true,
	.dev_cmd_reg_start = 0x7000,
};
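/*
 * The .data member of each match entry below points at the qcom_nandc_props
 * describing that SoC's flavour of the controller.
 */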
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq6018-nand",
		.data = &ipq8074_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{
		.compatible = "qcom,sdx55-nand",
		.data = &sdx55_nandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);

static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe = qcom_nandc_probe,
	.remove = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");