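/*
 * Xilinx Zynq Quad-SPI (QSPI) controller driver
 */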
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/spi/spi-mem.h>

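/*
 * QSPI controller register offsets
 */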
#define ZYNQ_QSPI_CONFIG_OFFSET 0x00
#define ZYNQ_QSPI_STATUS_OFFSET 0x04
#define ZYNQ_QSPI_IEN_OFFSET 0x08
#define ZYNQ_QSPI_IDIS_OFFSET 0x0C
#define ZYNQ_QSPI_IMASK_OFFSET 0x10
#define ZYNQ_QSPI_ENABLE_OFFSET 0x14
#define ZYNQ_QSPI_DELAY_OFFSET 0x18
#define ZYNQ_QSPI_TXD_00_00_OFFSET 0x1C
#define ZYNQ_QSPI_TXD_00_01_OFFSET 0x80
#define ZYNQ_QSPI_TXD_00_10_OFFSET 0x84
#define ZYNQ_QSPI_TXD_00_11_OFFSET 0x88
#define ZYNQ_QSPI_RXD_OFFSET 0x20
#define ZYNQ_QSPI_SIC_OFFSET 0x24
#define ZYNQ_QSPI_TX_THRESH_OFFSET 0x28
#define ZYNQ_QSPI_RX_THRESH_OFFSET 0x2C
#define ZYNQ_QSPI_GPIO_OFFSET 0x30
#define ZYNQ_QSPI_LINEAR_CFG_OFFSET 0xA0
#define ZYNQ_QSPI_MOD_ID_OFFSET 0xFC

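/*
 * QSPI Configuration Register bit masks
 */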
#define ZYNQ_QSPI_CONFIG_IFMODE_MASK BIT(31)
#define ZYNQ_QSPI_CONFIG_MANSRT_MASK BIT(16)
#define ZYNQ_QSPI_CONFIG_MANSRTEN_MASK BIT(15)
#define ZYNQ_QSPI_CONFIG_SSFORCE_MASK BIT(14)
#define ZYNQ_QSPI_CONFIG_BDRATE_MASK GENMASK(5, 3)
#define ZYNQ_QSPI_CONFIG_CPHA_MASK BIT(2)
#define ZYNQ_QSPI_CONFIG_CPOL_MASK BIT(1)
#define ZYNQ_QSPI_CONFIG_FWIDTH_MASK GENMASK(7, 6)
#define ZYNQ_QSPI_CONFIG_MSTREN_MASK BIT(0)

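/*
 * QSPI Configuration Register - baud rate divisor and peripheral chip select
 */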
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX GENMASK(2, 0)
#define ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT 3
#define ZYNQ_QSPI_CONFIG_PCS BIT(10)

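/*
 * QSPI interrupt bit masks, used for the interrupt status, enable and
 * disable registers below
 */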
#define ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK BIT(0)
#define ZYNQ_QSPI_IXR_TXNFULL_MASK BIT(2)
#define ZYNQ_QSPI_IXR_TXFULL_MASK BIT(3)
#define ZYNQ_QSPI_IXR_RXNEMTY_MASK BIT(4)
#define ZYNQ_QSPI_IXR_RXF_FULL_MASK BIT(5)
#define ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK BIT(6)
#define ZYNQ_QSPI_IXR_ALL_MASK (ZYNQ_QSPI_IXR_RX_OVERFLOW_MASK | \
				ZYNQ_QSPI_IXR_TXNFULL_MASK | \
				ZYNQ_QSPI_IXR_TXFULL_MASK | \
				ZYNQ_QSPI_IXR_RXNEMTY_MASK | \
				ZYNQ_QSPI_IXR_RXF_FULL_MASK | \
				ZYNQ_QSPI_IXR_TXF_UNDRFLOW_MASK)
#define ZYNQ_QSPI_IXR_RXTX_MASK (ZYNQ_QSPI_IXR_TXNFULL_MASK | \
				 ZYNQ_QSPI_IXR_RXNEMTY_MASK)

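/*
 * QSPI Enable Register bit mask - enables or disables the controller
 */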
#define ZYNQ_QSPI_ENABLE_ENABLE_MASK BIT(0)

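/*
 * QSPI Linear Configuration Register bit masks - also used outside linear
 * mode to select dual memory operation and the upper memory page
 */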
#define ZYNQ_QSPI_LCFG_TWO_MEM BIT(30)
#define ZYNQ_QSPI_LCFG_SEP_BUS BIT(29)
#define ZYNQ_QSPI_LCFG_U_PAGE BIT(28)

#define ZYNQ_QSPI_LCFG_DUMMY_SHIFT 8

#define ZYNQ_QSPI_FAST_READ_QOUT_CODE 0x6B
#define ZYNQ_QSPI_FIFO_DEPTH 63
#define ZYNQ_QSPI_RX_THRESHOLD 32
#define ZYNQ_QSPI_TX_THRESHOLD 1

#define ZYNQ_QSPI_MODEBITS (SPI_CPOL | SPI_CPHA)

#define ZYNQ_QSPI_MAX_NUM_CS 2

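/**
 * struct zynq_qspi - Defines qspi driver instance
 * @dev:		Pointer to this controller's device
 * @regs:		Virtual address of the QSPI controller registers
 * @refclk:		Pointer to the peripheral (device) clock
 * @pclk:		Pointer to the APB clock
 * @irq:		IRQ number
 * @txbuf:		Pointer to the TX buffer
 * @rxbuf:		Pointer to the RX buffer
 * @tx_bytes:		Number of bytes left to transmit
 * @rx_bytes:		Number of bytes left to receive
 * @data_completion:	Completion signalled when the current transfer
 *			phase is done
 */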
struct zynq_qspi {
	struct device *dev;
	void __iomem *regs;
	struct clk *refclk;
	struct clk *pclk;
	int irq;
	u8 *txbuf;
	u8 *rxbuf;
	int tx_bytes;
	int rx_bytes;
	struct completion data_completion;
};

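/* IO accessors for the QSPI controller registers */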
static inline u32 zynq_qspi_read(struct zynq_qspi *xqspi, u32 offset)
{
	return readl_relaxed(xqspi->regs + offset);
}

static inline void zynq_qspi_write(struct zynq_qspi *xqspi, u32 offset,
				   u32 val)
{
	writel_relaxed(val, xqspi->regs + offset);
}

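/**
 * zynq_qspi_init_hw - Initialize the hardware
 * @xqspi:	Pointer to the zynq_qspi structure
 * @num_cs:	Number of chip selects used by the controller
 *
 * Disable the controller and its interrupts, drain the RX FIFO and clear any
 * pending interrupts, then program the configuration register for master
 * mode, software-controlled chip select, 32-bit FIFO width and flash
 * interface mode. When more than one chip select is in use, dual memory mode
 * is selected in the linear configuration register. Finally set the FIFO
 * thresholds and re-enable the controller.
 */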
static void zynq_qspi_init_hw(struct zynq_qspi *xqspi, unsigned int num_cs)
{
	u32 config_reg;

	zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_IDIS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);

	config_reg = 0;
	if (num_cs > 1)
		config_reg |= ZYNQ_QSPI_LCFG_TWO_MEM;

	zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);

	while (zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET) &
	       ZYNQ_QSPI_IXR_RXNEMTY_MASK)
		zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);

	zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, ZYNQ_QSPI_IXR_ALL_MASK);
	config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
	config_reg &= ~(ZYNQ_QSPI_CONFIG_MSTREN_MASK |
			ZYNQ_QSPI_CONFIG_CPOL_MASK |
			ZYNQ_QSPI_CONFIG_CPHA_MASK |
			ZYNQ_QSPI_CONFIG_BDRATE_MASK |
			ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
			ZYNQ_QSPI_CONFIG_MANSRTEN_MASK |
			ZYNQ_QSPI_CONFIG_MANSRT_MASK);
	config_reg |= (ZYNQ_QSPI_CONFIG_MSTREN_MASK |
		       ZYNQ_QSPI_CONFIG_SSFORCE_MASK |
		       ZYNQ_QSPI_CONFIG_FWIDTH_MASK |
		       ZYNQ_QSPI_CONFIG_IFMODE_MASK);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);

	zynq_qspi_write(xqspi, ZYNQ_QSPI_RX_THRESH_OFFSET,
			ZYNQ_QSPI_RX_THRESHOLD);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_TX_THRESH_OFFSET,
			ZYNQ_QSPI_TX_THRESHOLD);

	zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET,
			ZYNQ_QSPI_ENABLE_ENABLE_MASK);
}

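/**
 * zynq_qspi_supports_op - Check if the operation is supported
 * @mem:	Pointer to the SPI memory device
 * @op:		Pointer to the memory operation to check
 *
 * On top of the generic spi-mem checks, reject any operation with more than
 * three address bytes, which is all the controller can handle.
 *
 * Return: true if the operation is supported, false otherwise
 */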
static bool zynq_qspi_supports_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->addr.nbytes > 3)
		return false;

	return true;
}

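/**
 * zynq_qspi_rxfifo_op - Read 1..4 bytes from the RX FIFO into the RX buffer
 * @xqspi:	Pointer to the zynq_qspi structure
 * @size:	Number of bytes to read (1..4)
 */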
static void zynq_qspi_rxfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
	u32 data;

	data = zynq_qspi_read(xqspi, ZYNQ_QSPI_RXD_OFFSET);

	if (xqspi->rxbuf) {
		memcpy(xqspi->rxbuf, ((u8 *)&data) + 4 - size, size);
		xqspi->rxbuf += size;
	}

	xqspi->rx_bytes -= size;
	if (xqspi->rx_bytes < 0)
		xqspi->rx_bytes = 0;
}

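/**
 * zynq_qspi_txfifo_op - Write 1..4 bytes from the TX buffer to the TX FIFO
 * @xqspi:	Pointer to the zynq_qspi structure
 * @size:	Number of bytes to write (1..4)
 *
 * The transfer is started by writing to the TXD register that matches the
 * transfer size (see the offset[] lookup below).
 */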
static void zynq_qspi_txfifo_op(struct zynq_qspi *xqspi, unsigned int size)
{
	static const unsigned int offset[4] = {
		ZYNQ_QSPI_TXD_00_01_OFFSET, ZYNQ_QSPI_TXD_00_10_OFFSET,
		ZYNQ_QSPI_TXD_00_11_OFFSET, ZYNQ_QSPI_TXD_00_00_OFFSET };
	u32 data;

	if (xqspi->txbuf) {
		data = 0xffffffff;
		memcpy(&data, xqspi->txbuf, size);
		xqspi->txbuf += size;
	} else {
		data = 0;
	}

	xqspi->tx_bytes -= size;
	zynq_qspi_write(xqspi, offset[size - 1], data);
}

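/**
 * zynq_qspi_chipselect - Select or deselect the chip select line
 * @spi:	Pointer to the spi_device structure
 * @assert:	true to select, false to deselect
 *
 * When two memories are attached, the upper/lower page bit in the linear
 * configuration register picks which device the chip select refers to.
 */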
static void zynq_qspi_chipselect(struct spi_device *spi, bool assert)
{
	struct spi_controller *ctlr = spi->master;
	struct zynq_qspi *xqspi = spi_controller_get_devdata(ctlr);
	u32 config_reg;

	if (ctlr->num_chipselect > 1) {
		config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET);
		if (!spi->chip_select)
			config_reg &= ~ZYNQ_QSPI_LCFG_U_PAGE;
		else
			config_reg |= ZYNQ_QSPI_LCFG_U_PAGE;

		zynq_qspi_write(xqspi, ZYNQ_QSPI_LINEAR_CFG_OFFSET, config_reg);
	}

	config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);
	if (assert)
		config_reg &= ~ZYNQ_QSPI_CONFIG_PCS;
	else
		config_reg |= ZYNQ_QSPI_CONFIG_PCS;

	zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);
}

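/**
 * zynq_qspi_config_op - Configure the controller for a device
 * @xqspi:	Pointer to the zynq_qspi structure
 * @spi:	Pointer to the spi_device structure
 *
 * Program the clock polarity and phase requested by the device, and walk the
 * baud rate divisor field upwards until ref_clk / (2 << divisor) no longer
 * exceeds the device's maximum speed.
 *
 * Return: 0 on success
 */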
static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
{
	u32 config_reg, baud_rate_val = 0;

	while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) &&
	       (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) >
	       spi->max_speed_hz)
		baud_rate_val++;

	config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET);

	config_reg &= (~ZYNQ_QSPI_CONFIG_CPHA_MASK) &
		      (~ZYNQ_QSPI_CONFIG_CPOL_MASK);
	if (spi->mode & SPI_CPHA)
		config_reg |= ZYNQ_QSPI_CONFIG_CPHA_MASK;
	if (spi->mode & SPI_CPOL)
		config_reg |= ZYNQ_QSPI_CONFIG_CPOL_MASK;

	config_reg &= ~ZYNQ_QSPI_CONFIG_BDRATE_MASK;
	config_reg |= (baud_rate_val << ZYNQ_QSPI_CONFIG_BAUD_DIV_SHIFT);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_CONFIG_OFFSET, config_reg);

	return 0;
}

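/**
 * zynq_qspi_setup_op - Configure the controller for an SPI device
 * @spi:	Pointer to the spi_device structure
 *
 * Enable the clocks and the controller. Setup is rejected while the
 * controller is busy with a transfer.
 *
 * Return: 0 on success, -EBUSY if the controller is busy
 */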
static int zynq_qspi_setup_op(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->master;
	struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr);

	if (ctlr->busy)
		return -EBUSY;

	clk_enable(qspi->refclk);
	clk_enable(qspi->pclk);
	zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET,
			ZYNQ_QSPI_ENABLE_ENABLE_MASK);

	return 0;
}

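/**
 * zynq_qspi_write_op - Fill the TX FIFO with as many bytes as possible
 * @xqspi:	Pointer to the zynq_qspi structure
 * @txcount:	Maximum number of FIFO words (4 bytes each) to write
 * @txempty:	Whether the TX FIFO is empty; a trailing transfer of fewer
 *		than 4 bytes is only started when it is
 */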
static void zynq_qspi_write_op(struct zynq_qspi *xqspi, int txcount,
			       bool txempty)
{
	int count, len, k;

	len = xqspi->tx_bytes;
	if (len && len < 4) {
		if (txempty)
			zynq_qspi_txfifo_op(xqspi, len);

		return;
	}

	count = len / 4;
	if (count > txcount)
		count = txcount;

	if (xqspi->txbuf) {
		iowrite32_rep(xqspi->regs + ZYNQ_QSPI_TXD_00_00_OFFSET,
			      xqspi->txbuf, count);
		xqspi->txbuf += count * 4;
	} else {
		for (k = 0; k < count; k++)
			writel_relaxed(0, xqspi->regs +
					  ZYNQ_QSPI_TXD_00_00_OFFSET);
	}

	xqspi->tx_bytes -= count * 4;
}

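/**
 * zynq_qspi_read_op - Drain as much data as possible from the RX FIFO
 * @xqspi:	Pointer to the zynq_qspi structure
 * @rxcount:	Maximum number of FIFO words (4 bytes each) to read
 */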
static void zynq_qspi_read_op(struct zynq_qspi *xqspi, int rxcount)
{
	int count, len, k;

	len = xqspi->rx_bytes - xqspi->tx_bytes;
	count = len / 4;
	if (count > rxcount)
		count = rxcount;
	if (xqspi->rxbuf) {
		ioread32_rep(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET,
			     xqspi->rxbuf, count);
		xqspi->rxbuf += count * 4;
	} else {
		for (k = 0; k < count; k++)
			readl_relaxed(xqspi->regs + ZYNQ_QSPI_RXD_OFFSET);
	}
	xqspi->rx_bytes -= count * 4;
	len -= count * 4;

	if (len && len < 4 && count < rxcount)
		zynq_qspi_rxfifo_op(xqspi, len);
}

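/**
 * zynq_qspi_irq - Interrupt service routine of the QSPI controller
 * @irq:	IRQ number
 * @dev_id:	Pointer to the xqspi structure
 *
 * Handles the TX-FIFO-not-full and RX-FIFO-not-empty interrupts: drain the
 * RX FIFO, refill the TX FIFO while data remains to be sent, and signal
 * data_completion once both directions are done.
 *
 * Return: IRQ_HANDLED when the interrupt was handled, IRQ_NONE otherwise
 */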
static irqreturn_t zynq_qspi_irq(int irq, void *dev_id)
{
	u32 intr_status;
	bool txempty;
	struct zynq_qspi *xqspi = (struct zynq_qspi *)dev_id;

	intr_status = zynq_qspi_read(xqspi, ZYNQ_QSPI_STATUS_OFFSET);
	zynq_qspi_write(xqspi, ZYNQ_QSPI_STATUS_OFFSET, intr_status);

	if ((intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK) ||
	    (intr_status & ZYNQ_QSPI_IXR_RXNEMTY_MASK)) {
		txempty = !!(intr_status & ZYNQ_QSPI_IXR_TXNFULL_MASK);

		zynq_qspi_read_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD);
		if (xqspi->tx_bytes) {
			zynq_qspi_write_op(xqspi, ZYNQ_QSPI_RX_THRESHOLD,
					   txempty);
		} else {
			if (!xqspi->rx_bytes) {
				zynq_qspi_write(xqspi,
						ZYNQ_QSPI_IDIS_OFFSET,
						ZYNQ_QSPI_IXR_RXTX_MASK);
				complete(&xqspi->data_completion);
			}
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

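/**
 * zynq_qspi_exec_mem_op - Initiate the memory operation
 * @mem:	The SPI memory
 * @op:		The memory operation to execute
 *
 * Sequence the command, address, dummy and data phases of the operation
 * through the FIFOs, waiting for each phase to complete (or time out after
 * one second) before starting the next one.
 *
 * Return: 0 on success, -ETIMEDOUT if a phase timed out, -ENOMEM if the
 * dummy buffer could not be allocated
 */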
static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
	int err = 0, i;
	u8 *tmpbuf;

	dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
		op->dummy.buswidth, op->data.buswidth);

	zynq_qspi_chipselect(mem->spi, true);
	zynq_qspi_config_op(xqspi, mem->spi);

	if (op->cmd.opcode) {
		reinit_completion(&xqspi->data_completion);
		xqspi->txbuf = (u8 *)&op->cmd.opcode;
		xqspi->rxbuf = NULL;
		xqspi->tx_bytes = op->cmd.nbytes;
		xqspi->rx_bytes = op->cmd.nbytes;
		zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
		zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
				ZYNQ_QSPI_IXR_RXTX_MASK);
		if (!wait_for_completion_timeout(&xqspi->data_completion,
						 msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;
	}

	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++) {
			xqspi->txbuf[i] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
		}

		reinit_completion(&xqspi->data_completion);
		xqspi->rxbuf = NULL;
		xqspi->tx_bytes = op->addr.nbytes;
		xqspi->rx_bytes = op->addr.nbytes;
		zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
		zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
				ZYNQ_QSPI_IXR_RXTX_MASK);
		if (!wait_for_completion_timeout(&xqspi->data_completion,
						 msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;
	}

	if (op->dummy.nbytes) {
		tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
		if (!tmpbuf)
			return -ENOMEM;

		memset(tmpbuf, 0xff, op->dummy.nbytes);
		reinit_completion(&xqspi->data_completion);
		xqspi->txbuf = tmpbuf;
		xqspi->rxbuf = NULL;
		xqspi->tx_bytes = op->dummy.nbytes;
		xqspi->rx_bytes = op->dummy.nbytes;
		zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
		zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
				ZYNQ_QSPI_IXR_RXTX_MASK);
		if (!wait_for_completion_timeout(&xqspi->data_completion,
						 msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;

		kfree(tmpbuf);
	}

	if (op->data.nbytes) {
		reinit_completion(&xqspi->data_completion);
		if (op->data.dir == SPI_MEM_DATA_OUT) {
			xqspi->txbuf = (u8 *)op->data.buf.out;
			xqspi->tx_bytes = op->data.nbytes;
			xqspi->rxbuf = NULL;
			xqspi->rx_bytes = op->data.nbytes;
		} else {
			xqspi->txbuf = NULL;
			xqspi->rxbuf = (u8 *)op->data.buf.in;
			xqspi->rx_bytes = op->data.nbytes;
			xqspi->tx_bytes = op->data.nbytes;
		}

		zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
		zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
				ZYNQ_QSPI_IXR_RXTX_MASK);
		if (!wait_for_completion_timeout(&xqspi->data_completion,
						 msecs_to_jiffies(1000)))
			err = -ETIMEDOUT;
	}
	zynq_qspi_chipselect(mem->spi, false);

	return err;
}

static const struct spi_controller_mem_ops zynq_qspi_mem_ops = {
	.supports_op = zynq_qspi_supports_op,
	.exec_op = zynq_qspi_exec_mem_op,
};

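/**
 * zynq_qspi_probe - Probe method for the QSPI driver
 * @pdev:	Pointer to the platform_device structure
 *
 * Map the registers, acquire and enable the APB and reference clocks,
 * request the interrupt, read the "num-cs" property and register the
 * spi-mem controller.
 *
 * Return: 0 on success, error value otherwise
 */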
static int zynq_qspi_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct spi_controller *ctlr;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct zynq_qspi *xqspi;
	u32 num_cs;

	ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
	if (!ctlr)
		return -ENOMEM;

	xqspi = spi_controller_get_devdata(ctlr);
	xqspi->dev = dev;
	platform_set_drvdata(pdev, xqspi);
	xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xqspi->regs)) {
		ret = PTR_ERR(xqspi->regs);
		goto remove_master;
	}

	xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(xqspi->pclk)) {
		dev_err(&pdev->dev, "pclk clock not found.\n");
		ret = PTR_ERR(xqspi->pclk);
		goto remove_master;
	}

	init_completion(&xqspi->data_completion);

	xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
	if (IS_ERR(xqspi->refclk)) {
		dev_err(&pdev->dev, "ref_clk clock not found.\n");
		ret = PTR_ERR(xqspi->refclk);
		goto remove_master;
	}

	ret = clk_prepare_enable(xqspi->pclk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable APB clock.\n");
		goto remove_master;
	}

	ret = clk_prepare_enable(xqspi->refclk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable device clock.\n");
		goto clk_dis_pclk;
	}

	xqspi->irq = platform_get_irq(pdev, 0);
	if (xqspi->irq <= 0) {
		ret = -ENXIO;
		goto clk_dis_all;
	}
	ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
			       0, pdev->name, xqspi);
	if (ret != 0) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "request_irq failed\n");
		goto clk_dis_all;
	}

	ret = of_property_read_u32(np, "num-cs", &num_cs);
	if (ret < 0) {
		ctlr->num_chipselect = 1;
	} else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "only 2 chip selects are available\n");
		goto clk_dis_all;
	} else {
		ctlr->num_chipselect = num_cs;
	}

	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
			  SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->mem_ops = &zynq_qspi_mem_ops;
	ctlr->setup = zynq_qspi_setup_op;
	ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
	ctlr->dev.of_node = np;

	zynq_qspi_init_hw(xqspi, ctlr->num_chipselect);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_master failed\n");
		goto clk_dis_all;
	}

	return ret;

clk_dis_all:
	clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
	clk_disable_unprepare(xqspi->pclk);
remove_master:
	spi_controller_put(ctlr);

	return ret;
}

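/**
 * zynq_qspi_remove - Remove method for the QSPI driver
 * @pdev:	Pointer to the platform_device structure
 *
 * Disable the controller and release the clocks.
 *
 * Return: 0 always
 */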
static int zynq_qspi_remove(struct platform_device *pdev)
{
	struct zynq_qspi *xqspi = platform_get_drvdata(pdev);

	zynq_qspi_write(xqspi, ZYNQ_QSPI_ENABLE_OFFSET, 0);

	clk_disable_unprepare(xqspi->refclk);
	clk_disable_unprepare(xqspi->pclk);

	return 0;
}

static const struct of_device_id zynq_qspi_of_match[] = {
	{ .compatible = "xlnx,zynq-qspi-1.0", },
	{ }
};
MODULE_DEVICE_TABLE(of, zynq_qspi_of_match);

static struct platform_driver zynq_qspi_driver = {
	.probe = zynq_qspi_probe,
	.remove = zynq_qspi_remove,
	.driver = {
		.name = "zynq-qspi",
		.of_match_table = zynq_qspi_of_match,
	},
};

module_platform_driver(zynq_qspi_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Zynq QSPI driver");
MODULE_LICENSE("GPL");