Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Intel PCH/PCU SPI flash driver.
0004  *
0005  * Copyright (C) 2016 - 2022, Intel Corporation
0006  * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
0007  */
0008 
0009 #include <linux/iopoll.h>
0010 #include <linux/module.h>
0011 
0012 #include <linux/mtd/partitions.h>
0013 #include <linux/mtd/spi-nor.h>
0014 
0015 #include <linux/spi/flash.h>
0016 #include <linux/spi/spi.h>
0017 #include <linux/spi/spi-mem.h>
0018 
0019 #include "spi-intel.h"
0020 
/* Offsets are from @ispi->base */
#define BFPREG              0x00

/* Hardware sequencer status and control register */
#define HSFSTS_CTL          0x04
#define HSFSTS_CTL_FSMIE        BIT(31)
#define HSFSTS_CTL_FDBC_SHIFT       24
#define HSFSTS_CTL_FDBC_MASK        (0x3f << HSFSTS_CTL_FDBC_SHIFT)

#define HSFSTS_CTL_FCYCLE_SHIFT     17
#define HSFSTS_CTL_FCYCLE_MASK      (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
/* HW sequencer opcodes */
#define HSFSTS_CTL_FCYCLE_READ      (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRITE     (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE     (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDID      (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_WRSR      (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
#define HSFSTS_CTL_FCYCLE_RDSR      (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)

#define HSFSTS_CTL_FGO          BIT(16)
#define HSFSTS_CTL_FLOCKDN      BIT(15)
#define HSFSTS_CTL_FDV          BIT(14)
#define HSFSTS_CTL_SCIP         BIT(5)
#define HSFSTS_CTL_AEL          BIT(2)
#define HSFSTS_CTL_FCERR        BIT(1)
#define HSFSTS_CTL_FDONE        BIT(0)

#define FADDR               0x08
#define DLOCK               0x0c
/* 16 x 32-bit data FIFO registers, see intel_spi_dump_regs() */
#define FDATA(n)            (0x10 + ((n) * 4))

#define FRACC               0x50

/* Flash region registers; count is per-SoC (see *_FREG_NUM below) */
#define FREG(n)             (0x54 + ((n) * 4))
#define FREG_BASE_MASK          0x3fff
#define FREG_LIMIT_SHIFT        16
#define FREG_LIMIT_MASK         (0x03fff << FREG_LIMIT_SHIFT)

/* Offset is from @ispi->pregs */
#define PR(n)               ((n) * 4)
#define PR_WPE              BIT(31)
#define PR_LIMIT_SHIFT          16
#define PR_LIMIT_MASK           (0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE              BIT(15)
#define PR_BASE_MASK            0x3fff

/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL          0x00
#define SSFSTS_CTL_FSMIE        BIT(23)
#define SSFSTS_CTL_DS           BIT(22)
#define SSFSTS_CTL_DBC_SHIFT        16
#define SSFSTS_CTL_SPOP         BIT(11)
#define SSFSTS_CTL_ACS          BIT(10)
#define SSFSTS_CTL_SCGO         BIT(9)
#define SSFSTS_CTL_COP_SHIFT        12
#define SSFSTS_CTL_FRS          BIT(7)
#define SSFSTS_CTL_DOFRS        BIT(6)
#define SSFSTS_CTL_AEL          BIT(4)
#define SSFSTS_CTL_FCERR        BIT(3)
#define SSFSTS_CTL_FDONE        BIT(2)
#define SSFSTS_CTL_SCIP         BIT(0)

#define PREOP_OPTYPE            0x04
#define OPMENU0             0x08
#define OPMENU1             0x0c

/* SW sequencer operation types programmed into PREOP_OPTYPE */
#define OPTYPE_READ_NO_ADDR     0
#define OPTYPE_WRITE_NO_ADDR        1
#define OPTYPE_READ_WITH_ADDR       2
#define OPTYPE_WRITE_WITH_ADDR      3
/* CPU specifics */
#define BYT_PR              0x74
#define BYT_SSFSTS_CTL          0x90
#define BYT_FREG_NUM            5
#define BYT_PR_NUM          5

#define LPT_PR              0x74
#define LPT_SSFSTS_CTL          0x90
#define LPT_FREG_NUM            5
#define LPT_PR_NUM          5

#define BXT_PR              0x84
#define BXT_SSFSTS_CTL          0xa0
#define BXT_FREG_NUM            12
#define BXT_PR_NUM          6

#define CNL_PR              0x84
#define CNL_FREG_NUM            6
#define CNL_PR_NUM          5

/* Vendor specific capability registers holding the erase opcodes */
#define LVSCC               0xc4
#define UVSCC               0xc8
#define ERASE_OPCODE_SHIFT      8
#define ERASE_OPCODE_MASK       (0xff << ERASE_OPCODE_SHIFT)
#define ERASE_64K_OPCODE_SHIFT      16
/*
 * Fixed: the 64K mask must use ERASE_64K_OPCODE_SHIFT (16), not
 * ERASE_OPCODE_SHIFT (8) — otherwise it selects the regular erase
 * opcode byte instead of the 64K erase opcode byte.
 */
#define ERASE_64K_OPCODE_MASK       (0xff << ERASE_64K_OPCODE_SHIFT)

#define INTEL_SPI_TIMEOUT       5000 /* ms */
#define INTEL_SPI_FIFO_SZ       64
/**
 * struct intel_spi - Driver private data
 * @dev: Device pointer
 * @info: Pointer to board specific info
 * @base: Beginning of MMIO space
 * @pregs: Start of protection registers
 * @sregs: Start of software sequencer registers
 * @master: Pointer to the SPI controller structure
 * @nregions: Maximum number of regions
 * @pr_num: Maximum number of protected range registers
 * @locked: Is SPI setting locked
 * @swseq_reg: Use SW sequencer in register reads/writes
 * @swseq_erase: Use SW sequencer in erase operation
 * @atomic_preopcode: Holds preopcode when atomic sequence is requested
 * @opcodes: Opcodes which are supported. These are programmed by BIOS
 *           before it locks down the controller.
 * @mem_ops: Pointer to SPI MEM ops supported by the controller
 */
struct intel_spi {
    struct device *dev;
    const struct intel_spi_boardinfo *info;
    void __iomem *base;
    void __iomem *pregs;
    void __iomem *sregs;
    struct spi_controller *master;
    size_t nregions;
    size_t pr_num;
    bool locked;
    bool swseq_reg;
    bool swseq_erase;
    u8 atomic_preopcode;
    u8 opcodes[8];
    const struct intel_spi_mem_op *mem_ops;
};
0156 
/**
 * struct intel_spi_mem_op - SPI MEM operation supported by the controller
 * @mem_op: Template describing the supported operation (cmd/addr/data layout)
 * @replacement_op: HW sequencer cycle bits OR'ed into HSFSTS_CTL instead of
 *                  sending the raw opcode (used by intel_spi_erase())
 * @exec_op: Handler that executes this operation on the controller
 */
struct intel_spi_mem_op {
    struct spi_mem_op mem_op;
    u32 replacement_op;
    int (*exec_op)(struct intel_spi *ispi,
               const struct intel_spi_mem_op *iop,
               const struct spi_mem_op *op);
};
0164 
/* Opt-in module parameter: flash writes stay disabled unless set at load time */
static bool writeable;
module_param(writeable, bool, 0);
MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
0168 
/*
 * Dump the controller register state with dev_dbg() for debugging:
 * raw register values, the decoded protected ranges (PR) and flash
 * regions (FREG), and which sequencer (SW/HW) the driver selected.
 * SW sequencer registers are dumped only when they exist (@sregs set).
 */
static void intel_spi_dump_regs(struct intel_spi *ispi)
{
    u32 value;
    int i;

    dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));

    value = readl(ispi->base + HSFSTS_CTL);
    dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
    if (value & HSFSTS_CTL_FLOCKDN)
        dev_dbg(ispi->dev, "-> Locked\n");

    dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
    dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));

    /* All 16 FIFO data registers regardless of transfer size */
    for (i = 0; i < 16; i++)
        dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
            i, readl(ispi->base + FDATA(i)));

    dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));

    for (i = 0; i < ispi->nregions; i++)
        dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
            readl(ispi->base + FREG(i)));
    for (i = 0; i < ispi->pr_num; i++)
        dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
            readl(ispi->pregs + PR(i)));

    if (ispi->sregs) {
        value = readl(ispi->sregs + SSFSTS_CTL);
        dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
        dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
            readl(ispi->sregs + PREOP_OPTYPE));
        dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
            readl(ispi->sregs + OPMENU0));
        dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
            readl(ispi->sregs + OPMENU1));
    }

    dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
    dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));

    dev_dbg(ispi->dev, "Protected regions:\n");
    for (i = 0; i < ispi->pr_num; i++) {
        u32 base, limit;

        value = readl(ispi->pregs + PR(i));
        /* Range is only active when write or read protection is set */
        if (!(value & (PR_WPE | PR_RPE)))
            continue;

        limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
        base = value & PR_BASE_MASK;

        /* base/limit fields are in 4K units, hence the << 12 */
        dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
            i, base << 12, (limit << 12) | 0xfff,
            value & PR_WPE ? 'W' : '.', value & PR_RPE ? 'R' : '.');
    }

    dev_dbg(ispi->dev, "Flash regions:\n");
    for (i = 0; i < ispi->nregions; i++) {
        u32 region, base, limit;

        region = readl(ispi->base + FREG(i));
        base = region & FREG_BASE_MASK;
        limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;

        if (base >= limit || (i > 0 && limit == 0))
            dev_dbg(ispi->dev, " %02d disabled\n", i);
        else
            dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
                i, base << 12, (limit << 12) | 0xfff);
    }

    dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
        ispi->swseq_reg ? 'S' : 'H');
    dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
        ispi->swseq_erase ? 'S' : 'H');
}
0247 
0248 /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
0249 static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size)
0250 {
0251     size_t bytes;
0252     int i = 0;
0253 
0254     if (size > INTEL_SPI_FIFO_SZ)
0255         return -EINVAL;
0256 
0257     while (size > 0) {
0258         bytes = min_t(size_t, size, 4);
0259         memcpy_fromio(buf, ispi->base + FDATA(i), bytes);
0260         size -= bytes;
0261         buf += bytes;
0262         i++;
0263     }
0264 
0265     return 0;
0266 }
0267 
0268 /* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
0269 static int intel_spi_write_block(struct intel_spi *ispi, const void *buf,
0270                  size_t size)
0271 {
0272     size_t bytes;
0273     int i = 0;
0274 
0275     if (size > INTEL_SPI_FIFO_SZ)
0276         return -EINVAL;
0277 
0278     while (size > 0) {
0279         bytes = min_t(size_t, size, 4);
0280         memcpy_toio(ispi->base + FDATA(i), buf, bytes);
0281         size -= bytes;
0282         buf += bytes;
0283         i++;
0284     }
0285 
0286     return 0;
0287 }
0288 
/*
 * Busy-wait until the HW sequencer clears SCIP (cycle in progress) in
 * HSFSTS_CTL. Returns 0 when idle, negative errno if INTEL_SPI_TIMEOUT
 * milliseconds elapse first.
 */
static int intel_spi_wait_hw_busy(struct intel_spi *ispi)
{
    u32 val;

    return readl_poll_timeout(ispi->base + HSFSTS_CTL, val,
                  !(val & HSFSTS_CTL_SCIP), 0,
                  INTEL_SPI_TIMEOUT * 1000);
}
0297 
/*
 * Busy-wait until the SW sequencer clears SCIP (cycle in progress) in
 * SSFSTS_CTL. Returns 0 when idle, negative errno if INTEL_SPI_TIMEOUT
 * milliseconds elapse first.
 */
static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
{
    u32 val;

    return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val,
                  !(val & SSFSTS_CTL_SCIP), 0,
                  INTEL_SPI_TIMEOUT * 1000);
}
0306 
0307 static bool intel_spi_set_writeable(struct intel_spi *ispi)
0308 {
0309     if (!ispi->info->set_writeable)
0310         return false;
0311 
0312     return ispi->info->set_writeable(ispi->base, ispi->info->data);
0313 }
0314 
/*
 * Find (or program) the OPMENU slot for @opcode. When the controller is
 * locked, only opcodes BIOS pre-programmed into @ispi->opcodes can be
 * used and their index is returned, or -EINVAL if not present. When
 * unlocked, slot 0 is reprogrammed with @opcode/@optype and 0 returned.
 */
static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
    int i;
    int preop;

    if (ispi->locked) {
        for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
            if (ispi->opcodes[i] == opcode)
                return i;

        return -EINVAL;
    }

    /* The lock is off, so just use index 0 */
    writel(opcode, ispi->sregs + OPMENU0);
    /* Preserve the preopcode bytes, update only the optype half */
    preop = readw(ispi->sregs + PREOP_OPTYPE);
    writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);

    return 0;
}
0335 
/*
 * Run a register-level flash cycle (@opcode, @len data bytes) through the
 * hardware sequencer. Only RDID/WRSR/RDSR map to HW sequencer cycles;
 * anything else returns -EINVAL. Returns 0 on success, -EIO on a flash
 * cycle error, -EACCES on an access error, or a wait timeout errno.
 */
static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len)
{
    u32 val, status;
    int ret;

    /* Start from current state, clearing cycle type and byte count */
    val = readl(ispi->base + HSFSTS_CTL);
    val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK);

    switch (opcode) {
    case SPINOR_OP_RDID:
        val |= HSFSTS_CTL_FCYCLE_RDID;
        break;
    case SPINOR_OP_WRSR:
        val |= HSFSTS_CTL_FCYCLE_WRSR;
        break;
    case SPINOR_OP_RDSR:
        val |= HSFSTS_CTL_FCYCLE_RDSR;
        break;
    default:
        return -EINVAL;
    }

    if (len > INTEL_SPI_FIFO_SZ)
        return -EINVAL;

    /* FDBC field is programmed as byte count minus one */
    val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
    /* NOTE(review): FCERR/FDONE appear write-1-to-clear; setting them
     * here clears stale status before starting - confirm vs datasheet */
    val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
    val |= HSFSTS_CTL_FGO;
    writel(val, ispi->base + HSFSTS_CTL);

    ret = intel_spi_wait_hw_busy(ispi);
    if (ret)
        return ret;

    status = readl(ispi->base + HSFSTS_CTL);
    if (status & HSFSTS_CTL_FCERR)
        return -EIO;
    else if (status & HSFSTS_CTL_AEL)
        return -EACCES;

    return 0;
}
0378 
/*
 * Run a flash cycle for @opcode (@len data bytes, @optype direction) via
 * the software sequencer. Consumes any pending @atomic_preopcode (set by
 * intel_spi_write_reg() for WREN) by enabling the atomic sequence bits.
 * Returns 0 on success, -EINVAL on unsupported parameters, -EIO on a
 * cycle error, -EACCES on an access error, or a wait timeout errno.
 */
static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
                  int optype)
{
    u32 val = 0, status;
    u8 atomic_preopcode;
    int ret;

    /* ret is the OPMENU slot index of @opcode on success */
    ret = intel_spi_opcode_index(ispi, opcode, optype);
    if (ret < 0)
        return ret;

    if (len > INTEL_SPI_FIFO_SZ)
        return -EINVAL;

    /*
     * Always clear it after each SW sequencer operation regardless
     * of whether it is successful or not.
     */
    atomic_preopcode = ispi->atomic_preopcode;
    ispi->atomic_preopcode = 0;

    /* Only mark 'Data Cycle' bit when there is data to be transferred */
    if (len > 0)
        val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
    val |= ret << SSFSTS_CTL_COP_SHIFT;
    val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
    val |= SSFSTS_CTL_SCGO;
    if (atomic_preopcode) {
        u16 preop;

        switch (optype) {
        case OPTYPE_WRITE_NO_ADDR:
        case OPTYPE_WRITE_WITH_ADDR:
            /* Pick matching preopcode for the atomic sequence */
            preop = readw(ispi->sregs + PREOP_OPTYPE);
            if ((preop & 0xff) == atomic_preopcode)
                ; /* Do nothing */
            else if ((preop >> 8) == atomic_preopcode)
                val |= SSFSTS_CTL_SPOP;
            else
                return -EINVAL;

            /* Enable atomic sequence */
            val |= SSFSTS_CTL_ACS;
            break;

        default:
            /* Atomic sequence only makes sense for write cycles */
            return -EINVAL;
        }
    }
    writel(val, ispi->sregs + SSFSTS_CTL);

    ret = intel_spi_wait_sw_busy(ispi);
    if (ret)
        return ret;

    status = readl(ispi->sregs + SSFSTS_CTL);
    if (status & SSFSTS_CTL_FCERR)
        return -EIO;
    else if (status & SSFSTS_CTL_AEL)
        return -EACCES;

    return 0;
}
0443 
0444 static int intel_spi_read_reg(struct intel_spi *ispi,
0445                   const struct intel_spi_mem_op *iop,
0446                   const struct spi_mem_op *op)
0447 {
0448     size_t nbytes = op->data.nbytes;
0449     u8 opcode = op->cmd.opcode;
0450     int ret;
0451 
0452     /* Address of the first chip */
0453     writel(0, ispi->base + FADDR);
0454 
0455     if (ispi->swseq_reg)
0456         ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
0457                      OPTYPE_READ_NO_ADDR);
0458     else
0459         ret = intel_spi_hw_cycle(ispi, opcode, nbytes);
0460 
0461     if (ret)
0462         return ret;
0463 
0464     return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
0465 }
0466 
/*
 * Execute a register write operation (WREN/WRDI/WRSR and friends).
 * WREN is latched as the atomic preopcode for the next SW cycle rather
 * than issued directly; WRDI is silently ignored. All other opcodes go
 * through the SW or HW sequencer with the data written beforehand.
 */
static int intel_spi_write_reg(struct intel_spi *ispi,
                   const struct intel_spi_mem_op *iop,
                   const struct spi_mem_op *op)
{
    size_t nbytes = op->data.nbytes;
    u8 opcode = op->cmd.opcode;
    int ret;

    /*
     * This is handled with atomic operation and preop code in Intel
     * controller so we only verify that it is available. If the
     * controller is not locked, program the opcode to the PREOP
     * register for later use.
     *
     * When hardware sequencer is used there is no need to program
     * any opcodes (it handles them automatically as part of a command).
     */
    if (opcode == SPINOR_OP_WREN) {
        u16 preop;

        if (!ispi->swseq_reg)
            return 0;

        /* Either preopcode byte may already hold WREN */
        preop = readw(ispi->sregs + PREOP_OPTYPE);
        if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
            if (ispi->locked)
                return -EINVAL;
            writel(opcode, ispi->sregs + PREOP_OPTYPE);
        }

        /*
         * This enables atomic sequence on next SW cycle. Will
         * be cleared after next operation.
         */
        ispi->atomic_preopcode = opcode;
        return 0;
    }

    /*
     * We hope that HW sequencer will do the right thing automatically and
     * with the SW sequencer we cannot use preopcode anyway, so just ignore
     * the Write Disable operation and pretend it was completed
     * successfully.
     */
    if (opcode == SPINOR_OP_WRDI)
        return 0;

    writel(0, ispi->base + FADDR);

    /* Write the value beforehand */
    ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
    if (ret)
        return ret;

    if (ispi->swseq_reg)
        return intel_spi_sw_cycle(ispi, opcode, nbytes,
                      OPTYPE_WRITE_NO_ADDR);
    return intel_spi_hw_cycle(ispi, opcode, nbytes);
}
0526 
/*
 * Read op->data.nbytes bytes of flash starting at op->addr.val using the
 * HW sequencer, in chunks limited by the FIFO size and never crossing a
 * 4K boundary. Returns 0 on success or a negative errno.
 */
static int intel_spi_read(struct intel_spi *ispi,
              const struct intel_spi_mem_op *iop,
              const struct spi_mem_op *op)
{
    void *read_buf = op->data.buf.in;
    size_t block_size, nbytes = op->data.nbytes;
    u32 addr = op->addr.val;
    u32 val, status;
    int ret;

    /*
     * Atomic sequence is not expected with HW sequencer reads. Make
     * sure it is cleared regardless.
     */
    if (WARN_ON_ONCE(ispi->atomic_preopcode))
        ispi->atomic_preopcode = 0;

    while (nbytes > 0) {
        block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

        /* Read cannot cross 4K boundary */
        block_size = min_t(loff_t, addr + block_size,
                   round_up(addr + 1, SZ_4K)) - addr;

        writel(addr, ispi->base + FADDR);

        /* Program cycle type READ, byte count, and clear stale status */
        val = readl(ispi->base + HSFSTS_CTL);
        val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
        val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
        val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
        val |= HSFSTS_CTL_FCYCLE_READ;
        val |= HSFSTS_CTL_FGO;
        writel(val, ispi->base + HSFSTS_CTL);

        ret = intel_spi_wait_hw_busy(ispi);
        if (ret)
            return ret;

        status = readl(ispi->base + HSFSTS_CTL);
        if (status & HSFSTS_CTL_FCERR)
            ret = -EIO;
        else if (status & HSFSTS_CTL_AEL)
            ret = -EACCES;

        if (ret < 0) {
            dev_err(ispi->dev, "read error: %x: %#x\n", addr, status);
            return ret;
        }

        ret = intel_spi_read_block(ispi, read_buf, block_size);
        if (ret)
            return ret;

        nbytes -= block_size;
        addr += block_size;
        read_buf += block_size;
    }

    return 0;
}
0587 
/*
 * Write op->data.nbytes bytes to flash starting at op->addr.val using the
 * HW sequencer, in chunks limited by the FIFO size and never crossing a
 * 4K boundary. Data is staged in the FIFO before FGO kicks the cycle.
 * Returns 0 on success or a negative errno.
 */
static int intel_spi_write(struct intel_spi *ispi,
               const struct intel_spi_mem_op *iop,
               const struct spi_mem_op *op)
{
    size_t block_size, nbytes = op->data.nbytes;
    const void *write_buf = op->data.buf.out;
    u32 addr = op->addr.val;
    u32 val, status;
    int ret;

    /* Not needed with HW sequencer write, make sure it is cleared */
    ispi->atomic_preopcode = 0;

    while (nbytes > 0) {
        block_size = min_t(size_t, nbytes, INTEL_SPI_FIFO_SZ);

        /* Write cannot cross 4K boundary */
        block_size = min_t(loff_t, addr + block_size,
                   round_up(addr + 1, SZ_4K)) - addr;

        writel(addr, ispi->base + FADDR);

        /* Program cycle type WRITE, byte count, clear stale status */
        val = readl(ispi->base + HSFSTS_CTL);
        val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
        val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
        val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
        val |= HSFSTS_CTL_FCYCLE_WRITE;

        /* Data must be in the FIFO before the cycle is started */
        ret = intel_spi_write_block(ispi, write_buf, block_size);
        if (ret) {
            dev_err(ispi->dev, "failed to write block\n");
            return ret;
        }

        /* Start the write now */
        val |= HSFSTS_CTL_FGO;
        writel(val, ispi->base + HSFSTS_CTL);

        ret = intel_spi_wait_hw_busy(ispi);
        if (ret) {
            dev_err(ispi->dev, "timeout\n");
            return ret;
        }

        status = readl(ispi->base + HSFSTS_CTL);
        if (status & HSFSTS_CTL_FCERR)
            ret = -EIO;
        else if (status & HSFSTS_CTL_AEL)
            ret = -EACCES;

        if (ret < 0) {
            dev_err(ispi->dev, "write error: %x: %#x\n", addr, status);
            return ret;
        }

        nbytes -= block_size;
        addr += block_size;
        write_buf += block_size;
    }

    return 0;
}
0650 
/*
 * Erase the block at op->addr.val. With the SW sequencer the raw erase
 * opcode is sent; with the HW sequencer the opcode is replaced by the
 * matching FCYCLE bits carried in iop->replacement_op.
 * Returns 0 on success or a negative errno.
 */
static int intel_spi_erase(struct intel_spi *ispi,
               const struct intel_spi_mem_op *iop,
               const struct spi_mem_op *op)
{
    u8 opcode = op->cmd.opcode;
    u32 addr = op->addr.val;
    u32 val, status;
    int ret;

    writel(addr, ispi->base + FADDR);

    if (ispi->swseq_erase)
        return intel_spi_sw_cycle(ispi, opcode, 0,
                      OPTYPE_WRITE_WITH_ADDR);

    /* Not needed with HW sequencer erase, make sure it is cleared */
    ispi->atomic_preopcode = 0;

    val = readl(ispi->base + HSFSTS_CTL);
    val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK);
    val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
    val |= HSFSTS_CTL_FGO;
    /* HW sequencer erase cycle bits supplied by the op table */
    val |= iop->replacement_op;
    writel(val, ispi->base + HSFSTS_CTL);

    ret = intel_spi_wait_hw_busy(ispi);
    if (ret)
        return ret;

    status = readl(ispi->base + HSFSTS_CTL);
    if (status & HSFSTS_CTL_FCERR)
        return -EIO;
    if (status & HSFSTS_CTL_AEL)
        return -EACCES;

    return 0;
}
0688 
0689 static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op *iop,
0690                  const struct spi_mem_op *op)
0691 {
0692     if (iop->mem_op.cmd.nbytes != op->cmd.nbytes ||
0693         iop->mem_op.cmd.buswidth != op->cmd.buswidth ||
0694         iop->mem_op.cmd.dtr != op->cmd.dtr ||
0695         iop->mem_op.cmd.opcode != op->cmd.opcode)
0696         return false;
0697 
0698     if (iop->mem_op.addr.nbytes != op->addr.nbytes ||
0699         iop->mem_op.addr.dtr != op->addr.dtr)
0700         return false;
0701 
0702     if (iop->mem_op.data.dir != op->data.dir ||
0703         iop->mem_op.data.dtr != op->data.dtr)
0704         return false;
0705 
0706     if (iop->mem_op.data.dir != SPI_MEM_NO_DATA) {
0707         if (iop->mem_op.data.buswidth != op->data.buswidth)
0708             return false;
0709     }
0710 
0711     return true;
0712 }
0713 
0714 static const struct intel_spi_mem_op *
0715 intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
0716 {
0717     const struct intel_spi_mem_op *iop;
0718 
0719     for (iop = ispi->mem_ops; iop->mem_op.cmd.opcode; iop++) {
0720         if (intel_spi_cmp_mem_op(iop, op))
0721             break;
0722     }
0723 
0724     return iop->mem_op.cmd.opcode ? iop : NULL;
0725 }
0726 
/*
 * spi-mem ->supports_op() callback: an op is supported when it matches
 * an entry in the controller op table and, for a locked SW sequencer,
 * its opcode is among those BIOS programmed into the OPMENU.
 */
static bool intel_spi_supports_mem_op(struct spi_mem *mem,
                      const struct spi_mem_op *op)
{
    struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
    const struct intel_spi_mem_op *iop;

    iop = intel_spi_match_mem_op(ispi, op);
    if (!iop) {
        dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
        return false;
    }

    /*
     * For software sequencer check that the opcode is actually
     * present in the opmenu if it is locked.
     */
    if (ispi->swseq_reg && ispi->locked) {
        int i;

        /* Check if it is in the locked opcodes list */
        for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) {
            if (ispi->opcodes[i] == op->cmd.opcode)
                return true;
        }

        dev_dbg(ispi->dev, "%#x not supported\n", op->cmd.opcode);
        return false;
    }

    return true;
}
0758 
0759 static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
0760 {
0761     struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
0762     const struct intel_spi_mem_op *iop;
0763 
0764     iop = intel_spi_match_mem_op(ispi, op);
0765     if (!iop)
0766         return -EOPNOTSUPP;
0767 
0768     return iop->exec_op(ispi, iop, op);
0769 }
0770 
/* spi-mem ->get_name() callback */
static const char *intel_spi_get_name(struct spi_mem *mem)
{
    const struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);

    /*
     * Return name of the flash controller device to be compatible
     * with the MTD version.
     */
    return dev_name(ispi->dev);
}
0781 
0782 static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
0783 {
0784     struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
0785     const struct intel_spi_mem_op *iop;
0786 
0787     iop = intel_spi_match_mem_op(ispi, &desc->info.op_tmpl);
0788     if (!iop)
0789         return -EOPNOTSUPP;
0790 
0791     desc->priv = (void *)iop;
0792     return 0;
0793 }
0794 
0795 static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
0796                      size_t len, void *buf)
0797 {
0798     struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
0799     const struct intel_spi_mem_op *iop = desc->priv;
0800     struct spi_mem_op op = desc->info.op_tmpl;
0801     int ret;
0802 
0803     /* Fill in the gaps */
0804     op.addr.val = offs;
0805     op.data.nbytes = len;
0806     op.data.buf.in = buf;
0807 
0808     ret = iop->exec_op(ispi, iop, &op);
0809     return ret ? ret : len;
0810 }
0811 
0812 static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs,
0813                       size_t len, const void *buf)
0814 {
0815     struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
0816     const struct intel_spi_mem_op *iop = desc->priv;
0817     struct spi_mem_op op = desc->info.op_tmpl;
0818     int ret;
0819 
0820     op.addr.val = offs;
0821     op.data.nbytes = len;
0822     op.data.buf.out = buf;
0823 
0824     ret = iop->exec_op(ispi, iop, &op);
0825     return ret ? ret : len;
0826 }
0827 
/* spi-mem operations implemented by this controller */
static const struct spi_controller_mem_ops intel_spi_mem_ops = {
    .supports_op = intel_spi_supports_mem_op,
    .exec_op = intel_spi_exec_mem_op,
    .get_name = intel_spi_get_name,
    .dirmap_create = intel_spi_dirmap_create,
    .dirmap_read = intel_spi_dirmap_read,
    .dirmap_write = intel_spi_dirmap_write,
};
0836 
/* Address phase with the given number of address bytes */
#define INTEL_SPI_OP_ADDR(__nbytes)                 \
    {                               \
        .nbytes = __nbytes,                 \
    }

/* Data phase placeholder for ops that transfer no data */
#define INTEL_SPI_OP_NO_DATA                        \
    {                               \
        .dir = SPI_MEM_NO_DATA,                 \
    }

/* Inbound data phase with the given bus width */
#define INTEL_SPI_OP_DATA_IN(__buswidth)                \
    {                               \
        .dir = SPI_MEM_DATA_IN,                 \
        .buswidth = __buswidth,                 \
    }

/* Outbound data phase with the given bus width */
#define INTEL_SPI_OP_DATA_OUT(__buswidth)               \
    {                               \
        .dir = SPI_MEM_DATA_OUT,                \
        .buswidth = __buswidth,                 \
    }

/* One struct intel_spi_mem_op table entry */
#define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op)      \
    {                               \
        .mem_op = {                     \
            .cmd = __cmd,                   \
            .addr = __addr,                 \
            .data = __data,                 \
        },                          \
        .exec_op = __exec_op,                   \
    }

/* Table entry that also carries HW sequencer replacement cycle bits */
#define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl) \
    {                               \
        .mem_op = {                     \
            .cmd = __cmd,                   \
            .addr = __addr,                 \
            .data = __data,                 \
        },                          \
        .exec_op = __exec_op,                   \
        .replacement_op = __repl,               \
    }
0879 
/*
 * The controller handles pretty much everything internally based on the
 * SFDP data but we want to make sure we only support the operations
 * actually possible. Only check buswidth and transfer direction, the
 * core validates data.
 *
 * Each entry below pairs a spi-mem operation template with the driver
 * callback that executes it (intel_spi_read/write/erase or the register
 * variants). The erase entries additionally carry the HW sequencer
 * cycle (HSFSTS_CTL_FCYCLE_*) that replaces the raw flash opcode.
 * Read/write data widths 1, 2 and 4 and both 3- and 4-byte addressing
 * are enumerated explicitly so spi-mem template matching succeeds.
 */
#define INTEL_SPI_GENERIC_OPS						\
	/* Status register operations */				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),		\
			 SPI_MEM_OP_NO_ADDR,				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read_reg),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),		\
			 SPI_MEM_OP_NO_ADDR,				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read_reg),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),		\
			 SPI_MEM_OP_NO_ADDR,				\
			 INTEL_SPI_OP_DATA_OUT(1),			\
			 intel_spi_write_reg),				\
	/* Normal read */						\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	/* Fast read */							\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	/* Read with 4-byte address opcode */				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	/* Fast read with 4-byte address opcode */			\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(1),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(2),			\
			 intel_spi_read),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1),	\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_IN(4),			\
			 intel_spi_read),				\
	/* Write operations */						\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),		\
			 INTEL_SPI_OP_ADDR(3),				\
			 INTEL_SPI_OP_DATA_OUT(1),			\
			 intel_spi_write),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_OUT(1),			\
			 intel_spi_write),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1),		\
			 INTEL_SPI_OP_ADDR(4),				\
			 INTEL_SPI_OP_DATA_OUT(1),			\
			 intel_spi_write),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),		\
			 SPI_MEM_OP_NO_ADDR,				\
			 SPI_MEM_OP_NO_DATA,				\
			 intel_spi_write_reg),				\
	INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),		\
			 SPI_MEM_OP_NO_ADDR,				\
			 SPI_MEM_OP_NO_DATA,				\
			 intel_spi_write_reg),				\
	/* Erase operations */						\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),	\
			      INTEL_SPI_OP_ADDR(3),			\
			      SPI_MEM_OP_NO_DATA,			\
			      intel_spi_erase,				\
			      HSFSTS_CTL_FCYCLE_ERASE),			\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1),	\
			      INTEL_SPI_OP_ADDR(4),			\
			      SPI_MEM_OP_NO_DATA,			\
			      intel_spi_erase,				\
			      HSFSTS_CTL_FCYCLE_ERASE),			\
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1),	\
			      INTEL_SPI_OP_ADDR(4),			\
			      SPI_MEM_OP_NO_DATA,			\
			      intel_spi_erase,				\
			      HSFSTS_CTL_FCYCLE_ERASE)
/*
 * Default operation table for controllers without 64k sector erase
 * support. Terminated by an all-zero sentinel entry.
 */
static const struct intel_spi_mem_op generic_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	{ },
};
1018 
/*
 * Operation table for controllers that additionally support the 64k
 * sector erase HW sequencer cycle (e.g. Intel BXT, see intel_spi_init()).
 * Terminated by an all-zero sentinel entry.
 */
static const struct intel_spi_mem_op erase_64k_mem_ops[] = {
	INTEL_SPI_GENERIC_OPS,
	/* 64k sector erase operations */
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(3),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B, 1),
			      INTEL_SPI_OP_ADDR(4),
			      SPI_MEM_OP_NO_DATA,
			      intel_spi_erase,
			      HSFSTS_CTL_FCYCLE_ERASE_64K),
	{ },
};
1039 
/*
 * intel_spi_init() - Detect controller capabilities and select mem ops
 * @ispi: Intel SPI flash controller
 *
 * Sets up the generation specific register layout, disables #SMI
 * generation, decides whether the software sequencer is needed for
 * register and erase operations, reads back the BIOS-locked opcode
 * menu, and picks the supported memory operation table.
 *
 * Returns %0 on success and negative errno otherwise.
 */
static int intel_spi_init(struct intel_spi *ispi)
{
	u32 opmenu0, opmenu1, lvscc, uvscc, val;
	bool erase_64k = false;
	int i;

	/* Per-generation register offsets and sequencer capabilities */
	switch (ispi->info->type) {
	case INTEL_SPI_BYT:
		ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BYT_PR;
		ispi->nregions = BYT_FREG_NUM;
		ispi->pr_num = BYT_PR_NUM;
		/* BYT/LPT must use the SW sequencer for register ops */
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_LPT:
		ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
		ispi->pregs = ispi->base + LPT_PR;
		ispi->nregions = LPT_FREG_NUM;
		ispi->pr_num = LPT_PR_NUM;
		ispi->swseq_reg = true;
		break;

	case INTEL_SPI_BXT:
		ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
		ispi->pregs = ispi->base + BXT_PR;
		ispi->nregions = BXT_FREG_NUM;
		ispi->pr_num = BXT_PR_NUM;
		/* Tentative; revoked below if LVSCC/UVSCC lack the opcode */
		erase_64k = true;
		break;

	case INTEL_SPI_CNL:
		/* CNL has no SW sequencer registers at all */
		ispi->sregs = NULL;
		ispi->pregs = ispi->base + CNL_PR;
		ispi->nregions = CNL_FREG_NUM;
		ispi->pr_num = CNL_PR_NUM;
		break;

	default:
		return -EINVAL;
	}

	/* Try to disable write protection if user asked to do so */
	if (writeable && !intel_spi_set_writeable(ispi)) {
		dev_warn(ispi->dev, "can't disable chip write protection\n");
		writeable = false;
	}

	/* Disable #SMI generation from HW sequencer */
	val = readl(ispi->base + HSFSTS_CTL);
	val &= ~HSFSTS_CTL_FSMIE;
	writel(val, ispi->base + HSFSTS_CTL);

	/*
	 * Determine whether erase operation should use HW or SW sequencer.
	 *
	 * The HW sequencer has a predefined list of opcodes, with only the
	 * erase opcode being programmable in LVSCC and UVSCC registers.
	 * If these registers don't contain a valid erase opcode, erase
	 * cannot be done using HW sequencer.
	 */
	lvscc = readl(ispi->base + LVSCC);
	uvscc = readl(ispi->base + UVSCC);
	if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
		ispi->swseq_erase = true;
	/* SPI controller on Intel BXT supports 64K erase opcode */
	if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
		if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
		    !(uvscc & ERASE_64K_OPCODE_MASK))
			erase_64k = false;

	/* SW sequencer was required but this generation has none */
	if (!ispi->sregs && (ispi->swseq_reg || ispi->swseq_erase)) {
		dev_err(ispi->dev, "software sequencer not supported, but required\n");
		return -EINVAL;
	}

	/*
	 * Some controllers can only do basic operations using hardware
	 * sequencer. All other operations are supposed to be carried out
	 * using software sequencer.
	 */
	if (ispi->swseq_reg) {
		/* Disable #SMI generation from SW sequencer */
		val = readl(ispi->sregs + SSFSTS_CTL);
		val &= ~SSFSTS_CTL_FSMIE;
		writel(val, ispi->sregs + SSFSTS_CTL);
	}

	/* Check controller's lock status */
	val = readl(ispi->base + HSFSTS_CTL);
	ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);

	if (ispi->locked && ispi->sregs) {
		/*
		 * BIOS programs allowed opcodes and then locks down the
		 * register. So read back what opcodes it decided to support.
		 * That's the set we are going to support as well.
		 */
		opmenu0 = readl(ispi->sregs + OPMENU0);
		opmenu1 = readl(ispi->sregs + OPMENU1);

		if (opmenu0 && opmenu1) {
			/*
			 * Unpack one byte per opcode: OPMENU0 fills the
			 * first half of opcodes[], OPMENU1 the second.
			 * NOTE(review): the "+ 4" assumes
			 * ARRAY_SIZE(ispi->opcodes) == 8 — confirm against
			 * the struct declaration in spi-intel.h.
			 */
			for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
				ispi->opcodes[i] = opmenu0 >> i * 8;
				ispi->opcodes[i + 4] = opmenu1 >> i * 8;
			}
		}
	}

	if (erase_64k) {
		dev_dbg(ispi->dev, "Using erase_64k memory operations");
		ispi->mem_ops = erase_64k_mem_ops;
	} else {
		dev_dbg(ispi->dev, "Using generic memory operations");
		ispi->mem_ops = generic_mem_ops;
	}

	intel_spi_dump_regs(ispi);
	return 0;
}
1160 
1161 static bool intel_spi_is_protected(const struct intel_spi *ispi,
1162                    unsigned int base, unsigned int limit)
1163 {
1164     int i;
1165 
1166     for (i = 0; i < ispi->pr_num; i++) {
1167         u32 pr_base, pr_limit, pr_value;
1168 
1169         pr_value = readl(ispi->pregs + PR(i));
1170         if (!(pr_value & (PR_WPE | PR_RPE)))
1171             continue;
1172 
1173         pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
1174         pr_base = pr_value & PR_BASE_MASK;
1175 
1176         if (pr_base >= base && pr_limit <= limit)
1177             return true;
1178     }
1179 
1180     return false;
1181 }
1182 
1183 /*
1184  * There will be a single partition holding all enabled flash regions. We
1185  * call this "BIOS".
1186  */
1187 static void intel_spi_fill_partition(struct intel_spi *ispi,
1188                      struct mtd_partition *part)
1189 {
1190     u64 end;
1191     int i;
1192 
1193     memset(part, 0, sizeof(*part));
1194 
1195     /* Start from the mandatory descriptor region */
1196     part->size = 4096;
1197     part->name = "BIOS";
1198 
1199     /*
1200      * Now try to find where this partition ends based on the flash
1201      * region registers.
1202      */
1203     for (i = 1; i < ispi->nregions; i++) {
1204         u32 region, base, limit;
1205 
1206         region = readl(ispi->base + FREG(i));
1207         base = region & FREG_BASE_MASK;
1208         limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
1209 
1210         if (base >= limit || limit == 0)
1211             continue;
1212 
1213         /*
1214          * If any of the regions have protection bits set, make the
1215          * whole partition read-only to be on the safe side.
1216          *
1217          * Also if the user did not ask the chip to be writeable
1218          * mask the bit too.
1219          */
1220         if (!writeable || intel_spi_is_protected(ispi, base, limit))
1221             part->mask_flags |= MTD_WRITEABLE;
1222 
1223         end = (limit << 12) + 4096;
1224         if (end > part->size)
1225             part->size = end;
1226     }
1227 }
1228 
1229 static int intel_spi_populate_chip(struct intel_spi *ispi)
1230 {
1231     struct flash_platform_data *pdata;
1232     struct spi_board_info chip;
1233 
1234     pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
1235     if (!pdata)
1236         return -ENOMEM;
1237 
1238     pdata->nr_parts = 1;
1239     pdata->parts = devm_kcalloc(ispi->dev, pdata->nr_parts,
1240                     sizeof(*pdata->parts), GFP_KERNEL);
1241     if (!pdata->parts)
1242         return -ENOMEM;
1243 
1244     intel_spi_fill_partition(ispi, pdata->parts);
1245 
1246     memset(&chip, 0, sizeof(chip));
1247     snprintf(chip.modalias, 8, "spi-nor");
1248     chip.platform_data = pdata;
1249 
1250     return spi_new_device(ispi->master, &chip) ? 0 : -ENODEV;
1251 }
1252 
/**
 * intel_spi_probe() - Probe the Intel SPI flash controller
 * @dev: Pointer to the parent device
 * @mem: MMIO resource
 * @info: Platform specific information
 *
 * Probes Intel SPI flash controller and creates the flash chip device.
 * Returns %0 on success and negative errno in case of failure.
 */
int intel_spi_probe(struct device *dev, struct resource *mem,
		    const struct intel_spi_boardinfo *info)
{
	struct spi_controller *master;
	struct intel_spi *ispi;
	int ret;

	/* Device-managed controller; freed automatically on detach */
	master = devm_spi_alloc_master(dev, sizeof(*ispi));
	if (!master)
		return -ENOMEM;

	master->mem_ops = &intel_spi_mem_ops;

	ispi = spi_master_get_devdata(master);

	/* Map the controller's MMIO register window */
	ispi->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ispi->base))
		return PTR_ERR(ispi->base);

	ispi->dev = dev;
	ispi->master = master;
	ispi->info = info;

	/* Detect capabilities and pick the memory operation table */
	ret = intel_spi_init(ispi);
	if (ret)
		return ret;

	ret = devm_spi_register_master(dev, master);
	if (ret)
		return ret;

	/* Finally create the spi-nor flash chip device on this bus */
	return intel_spi_populate_chip(ispi);
}
EXPORT_SYMBOL_GPL(intel_spi_probe);
1296 
1297 MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
1298 MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
1299 MODULE_LICENSE("GPL v2");