0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/clk.h>
0010 #include <linux/delay.h>
0011 #include <linux/dmaengine.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/io.h>
0014 #include <linux/irq.h>
0015 #include <linux/module.h>
0016 #include <linux/of.h>
0017 #include <linux/platform_device.h>
0018 #include <linux/pm_runtime.h>
0019 #include <linux/scatterlist.h>
0020 #include <linux/slab.h>
0021 #include <linux/spi/spi.h>
0022 #include <linux/spinlock.h>
0023
0024 #define SPFI_DEVICE_PARAMETER(x) (0x00 + 0x4 * (x))
0025 #define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT 24
0026 #define SPFI_DEVICE_PARAMETER_BITCLK_MASK 0xff
0027 #define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT 16
0028 #define SPFI_DEVICE_PARAMETER_CSSETUP_MASK 0xff
0029 #define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT 8
0030 #define SPFI_DEVICE_PARAMETER_CSHOLD_MASK 0xff
0031 #define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT 0
0032 #define SPFI_DEVICE_PARAMETER_CSDELAY_MASK 0xff
0033
0034 #define SPFI_CONTROL 0x14
0035 #define SPFI_CONTROL_CONTINUE BIT(12)
0036 #define SPFI_CONTROL_SOFT_RESET BIT(11)
0037 #define SPFI_CONTROL_SEND_DMA BIT(10)
0038 #define SPFI_CONTROL_GET_DMA BIT(9)
0039 #define SPFI_CONTROL_SE BIT(8)
0040 #define SPFI_CONTROL_TMODE_SHIFT 5
0041 #define SPFI_CONTROL_TMODE_MASK 0x7
0042 #define SPFI_CONTROL_TMODE_SINGLE 0
0043 #define SPFI_CONTROL_TMODE_DUAL 1
0044 #define SPFI_CONTROL_TMODE_QUAD 2
0045 #define SPFI_CONTROL_SPFI_EN BIT(0)
0046
0047 #define SPFI_TRANSACTION 0x18
0048 #define SPFI_TRANSACTION_TSIZE_SHIFT 16
0049 #define SPFI_TRANSACTION_TSIZE_MASK 0xffff
0050
0051 #define SPFI_PORT_STATE 0x1c
0052 #define SPFI_PORT_STATE_DEV_SEL_SHIFT 20
0053 #define SPFI_PORT_STATE_DEV_SEL_MASK 0x7
0054 #define SPFI_PORT_STATE_CK_POL(x) BIT(19 - (x))
0055 #define SPFI_PORT_STATE_CK_PHASE(x) BIT(14 - (x))
0056
0057 #define SPFI_TX_32BIT_VALID_DATA 0x20
0058 #define SPFI_TX_8BIT_VALID_DATA 0x24
0059 #define SPFI_RX_32BIT_VALID_DATA 0x28
0060 #define SPFI_RX_8BIT_VALID_DATA 0x2c
0061
0062 #define SPFI_INTERRUPT_STATUS 0x30
0063 #define SPFI_INTERRUPT_ENABLE 0x34
0064 #define SPFI_INTERRUPT_CLEAR 0x38
0065 #define SPFI_INTERRUPT_IACCESS BIT(12)
0066 #define SPFI_INTERRUPT_GDEX8BIT BIT(11)
0067 #define SPFI_INTERRUPT_ALLDONETRIG BIT(9)
0068 #define SPFI_INTERRUPT_GDFUL BIT(8)
0069 #define SPFI_INTERRUPT_GDHF BIT(7)
0070 #define SPFI_INTERRUPT_GDEX32BIT BIT(6)
0071 #define SPFI_INTERRUPT_GDTRIG BIT(5)
0072 #define SPFI_INTERRUPT_SDFUL BIT(3)
0073 #define SPFI_INTERRUPT_SDHF BIT(2)
0074 #define SPFI_INTERRUPT_SDE BIT(1)
0075 #define SPFI_INTERRUPT_SDTRIG BIT(0)
0076
0077
0078
0079
0080
0081
0082
0083
0084 #define SPFI_32BIT_FIFO_SIZE 64
0085 #define SPFI_8BIT_FIFO_SIZE 16
0086
/* Per-controller driver state, stored as the spi_master's devdata. */
struct img_spfi {
	struct device *dev;		/* underlying platform device */
	struct spi_master *master;	/* SPI core handle for this controller */
	spinlock_t lock;		/* protects tx_dma_busy/rx_dma_busy */

	void __iomem *regs;		/* ioremapped register base */
	phys_addr_t phys;		/* physical register base (DMA slave addresses) */
	int irq;
	struct clk *spfi_clk;		/* clock the bit clock is divided from */
	struct clk *sys_clk;		/* bus/system clock */

	struct dma_chan *rx_ch;		/* NULL when running PIO-only */
	struct dma_chan *tx_ch;		/* NULL when running PIO-only */
	bool tx_dma_busy;		/* TX DMA in flight; cleared in its callback */
	bool rx_dma_busy;		/* RX DMA in flight; cleared in its callback */
};
0103
/* Read a 32-bit SPFI register at byte offset @reg. */
static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
{
	return readl(spfi->regs + reg);
}
0108
/* Write @val to the 32-bit SPFI register at byte offset @reg. */
static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
{
	writel(val, spfi->regs + reg);
}
0113
0114 static inline void spfi_start(struct img_spfi *spfi)
0115 {
0116 u32 val;
0117
0118 val = spfi_readl(spfi, SPFI_CONTROL);
0119 val |= SPFI_CONTROL_SPFI_EN;
0120 spfi_writel(spfi, val, SPFI_CONTROL);
0121 }
0122
/*
 * Pulse the soft-reset bit, then clear the control register, returning
 * the controller to a known idle (disabled) state.
 */
static inline void spfi_reset(struct img_spfi *spfi)
{
	spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
	spfi_writel(spfi, 0, SPFI_CONTROL);
}
0128
0129 static int spfi_wait_all_done(struct img_spfi *spfi)
0130 {
0131 unsigned long timeout = jiffies + msecs_to_jiffies(50);
0132
0133 while (time_before(jiffies, timeout)) {
0134 u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
0135
0136 if (status & SPFI_INTERRUPT_ALLDONETRIG) {
0137 spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
0138 SPFI_INTERRUPT_CLEAR);
0139 return 0;
0140 }
0141 cpu_relax();
0142 }
0143
0144 dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
0145 spfi_reset(spfi);
0146
0147 return -ETIMEDOUT;
0148 }
0149
0150 static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
0151 unsigned int max)
0152 {
0153 unsigned int count = 0;
0154 u32 status;
0155
0156 while (count < max / 4) {
0157 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
0158 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
0159 if (status & SPFI_INTERRUPT_SDFUL)
0160 break;
0161 spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
0162 count++;
0163 }
0164
0165 return count * 4;
0166 }
0167
0168 static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
0169 unsigned int max)
0170 {
0171 unsigned int count = 0;
0172 u32 status;
0173
0174 while (count < max) {
0175 spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
0176 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
0177 if (status & SPFI_INTERRUPT_SDFUL)
0178 break;
0179 spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
0180 count++;
0181 }
0182
0183 return count;
0184 }
0185
0186 static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
0187 unsigned int max)
0188 {
0189 unsigned int count = 0;
0190 u32 status;
0191
0192 while (count < max / 4) {
0193 spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
0194 SPFI_INTERRUPT_CLEAR);
0195 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
0196 if (!(status & SPFI_INTERRUPT_GDEX32BIT))
0197 break;
0198 buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
0199 count++;
0200 }
0201
0202 return count * 4;
0203 }
0204
0205 static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
0206 unsigned int max)
0207 {
0208 unsigned int count = 0;
0209 u32 status;
0210
0211 while (count < max) {
0212 spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
0213 SPFI_INTERRUPT_CLEAR);
0214 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
0215 if (!(status & SPFI_INTERRUPT_GDEX8BIT))
0216 break;
0217 buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
0218 count++;
0219 }
0220
0221 return count;
0222 }
0223
0224 static int img_spfi_start_pio(struct spi_master *master,
0225 struct spi_device *spi,
0226 struct spi_transfer *xfer)
0227 {
0228 struct img_spfi *spfi = spi_master_get_devdata(spi->master);
0229 unsigned int tx_bytes = 0, rx_bytes = 0;
0230 const void *tx_buf = xfer->tx_buf;
0231 void *rx_buf = xfer->rx_buf;
0232 unsigned long timeout;
0233 int ret;
0234
0235 if (tx_buf)
0236 tx_bytes = xfer->len;
0237 if (rx_buf)
0238 rx_bytes = xfer->len;
0239
0240 spfi_start(spfi);
0241
0242 timeout = jiffies +
0243 msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
0244 while ((tx_bytes > 0 || rx_bytes > 0) &&
0245 time_before(jiffies, timeout)) {
0246 unsigned int tx_count, rx_count;
0247
0248 if (tx_bytes >= 4)
0249 tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
0250 else
0251 tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
0252
0253 if (rx_bytes >= 4)
0254 rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
0255 else
0256 rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
0257
0258 tx_buf += tx_count;
0259 rx_buf += rx_count;
0260 tx_bytes -= tx_count;
0261 rx_bytes -= rx_count;
0262
0263 cpu_relax();
0264 }
0265
0266 if (rx_bytes > 0 || tx_bytes > 0) {
0267 dev_err(spfi->dev, "PIO transfer timed out\n");
0268 return -ETIMEDOUT;
0269 }
0270
0271 ret = spfi_wait_all_done(spfi);
0272 if (ret < 0)
0273 return ret;
0274
0275 return 0;
0276 }
0277
/*
 * DMA completion callback for the RX channel.  Waits for the controller
 * to report ALLDONE, then finalizes the transfer if the TX side (when
 * present) has also finished.
 */
static void img_spfi_dma_rx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->rx_dma_busy = false;
	/* Only the last direction to complete reports back to the core. */
	if (!spfi->tx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}
0291
/*
 * DMA completion callback for the TX channel.  Waits for the controller
 * to report ALLDONE, then finalizes the transfer if the RX side (when
 * present) has also finished.
 */
static void img_spfi_dma_tx_cb(void *data)
{
	struct img_spfi *spfi = data;
	unsigned long flags;

	spfi_wait_all_done(spfi);

	spin_lock_irqsave(&spfi->lock, flags);
	spfi->tx_dma_busy = false;
	/* Only the last direction to complete reports back to the core. */
	if (!spfi->rx_dma_busy)
		spi_finalize_current_transfer(spfi->master);
	spin_unlock_irqrestore(&spfi->lock, flags);
}
0305
0306 static int img_spfi_start_dma(struct spi_master *master,
0307 struct spi_device *spi,
0308 struct spi_transfer *xfer)
0309 {
0310 struct img_spfi *spfi = spi_master_get_devdata(spi->master);
0311 struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
0312 struct dma_slave_config rxconf, txconf;
0313
0314 spfi->rx_dma_busy = false;
0315 spfi->tx_dma_busy = false;
0316
0317 if (xfer->rx_buf) {
0318 rxconf.direction = DMA_DEV_TO_MEM;
0319 if (xfer->len % 4 == 0) {
0320 rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
0321 rxconf.src_addr_width = 4;
0322 rxconf.src_maxburst = 4;
0323 } else {
0324 rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
0325 rxconf.src_addr_width = 1;
0326 rxconf.src_maxburst = 4;
0327 }
0328 dmaengine_slave_config(spfi->rx_ch, &rxconf);
0329
0330 rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
0331 xfer->rx_sg.nents,
0332 DMA_DEV_TO_MEM,
0333 DMA_PREP_INTERRUPT);
0334 if (!rxdesc)
0335 goto stop_dma;
0336
0337 rxdesc->callback = img_spfi_dma_rx_cb;
0338 rxdesc->callback_param = spfi;
0339 }
0340
0341 if (xfer->tx_buf) {
0342 txconf.direction = DMA_MEM_TO_DEV;
0343 if (xfer->len % 4 == 0) {
0344 txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
0345 txconf.dst_addr_width = 4;
0346 txconf.dst_maxburst = 4;
0347 } else {
0348 txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
0349 txconf.dst_addr_width = 1;
0350 txconf.dst_maxburst = 4;
0351 }
0352 dmaengine_slave_config(spfi->tx_ch, &txconf);
0353
0354 txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
0355 xfer->tx_sg.nents,
0356 DMA_MEM_TO_DEV,
0357 DMA_PREP_INTERRUPT);
0358 if (!txdesc)
0359 goto stop_dma;
0360
0361 txdesc->callback = img_spfi_dma_tx_cb;
0362 txdesc->callback_param = spfi;
0363 }
0364
0365 if (xfer->rx_buf) {
0366 spfi->rx_dma_busy = true;
0367 dmaengine_submit(rxdesc);
0368 dma_async_issue_pending(spfi->rx_ch);
0369 }
0370
0371 spfi_start(spfi);
0372
0373 if (xfer->tx_buf) {
0374 spfi->tx_dma_busy = true;
0375 dmaengine_submit(txdesc);
0376 dma_async_issue_pending(spfi->tx_ch);
0377 }
0378
0379 return 1;
0380
0381 stop_dma:
0382 dmaengine_terminate_all(spfi->rx_ch);
0383 dmaengine_terminate_all(spfi->tx_ch);
0384 return -EIO;
0385 }
0386
/*
 * Error hook called by the SPI core when a message fails (e.g. times
 * out) so the next transfer starts from a clean state.
 */
static void img_spfi_handle_err(struct spi_master *master,
				struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);
	unsigned long flags;

	/*
	 * Stop all DMA if the previous transaction never completed and
	 * its descriptors are still in flight.
	 */
	spin_lock_irqsave(&spfi->lock, flags);
	if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
		spfi->tx_dma_busy = false;
		spfi->rx_dma_busy = false;

		dmaengine_terminate_all(spfi->tx_ch);
		dmaengine_terminate_all(spfi->rx_ch);
	}
	spin_unlock_irqrestore(&spfi->lock, flags);
}
0407
0408 static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
0409 {
0410 struct img_spfi *spfi = spi_master_get_devdata(master);
0411 u32 val;
0412
0413 val = spfi_readl(spfi, SPFI_PORT_STATE);
0414 val &= ~(SPFI_PORT_STATE_DEV_SEL_MASK <<
0415 SPFI_PORT_STATE_DEV_SEL_SHIFT);
0416 val |= msg->spi->chip_select << SPFI_PORT_STATE_DEV_SEL_SHIFT;
0417 if (msg->spi->mode & SPI_CPHA)
0418 val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
0419 else
0420 val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
0421 if (msg->spi->mode & SPI_CPOL)
0422 val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
0423 else
0424 val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
0425 spfi_writel(spfi, val, SPFI_PORT_STATE);
0426
0427 return 0;
0428 }
0429
/*
 * unprepare_message callback: reset the controller after each message
 * so stale state cannot leak into the next one.  Always returns 0.
 */
static int img_spfi_unprepare(struct spi_master *master,
			      struct spi_message *msg)
{
	struct img_spfi *spfi = spi_master_get_devdata(master);

	spfi_reset(spfi);

	return 0;
}
0439
/*
 * Program the per-device bit-clock divider, the transaction size, and
 * the control register (DMA request enables, single/dual/quad mode)
 * for the upcoming transfer.
 */
static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct img_spfi *spfi = spi_master_get_devdata(spi->master);
	u32 val, div;

	/*
	 * Bit clock = spfi_clk * (BITCLK / 512); the computation below
	 * picks the largest power-of-two BITCLK (capped at 128, i.e.
	 * spfi_clk/4) not exceeding the requested speed.
	 */
	div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
	div = clamp(512 / (1 << get_count_order(div)), 1, 128);

	val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
	val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
		 SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
	val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
	spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));

	/* Total transaction length in bytes. */
	spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
		    SPFI_TRANSACTION);

	val = spfi_readl(spfi, SPFI_CONTROL);
	val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
	/* Raise DMA requests for whichever directions are in use. */
	if (xfer->tx_buf)
		val |= SPFI_CONTROL_SEND_DMA;
	if (xfer->rx_buf)
		val |= SPFI_CONTROL_GET_DMA;
	val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
	/* Dual/quad mode is selected only when both directions agree. */
	if (xfer->tx_nbits == SPI_NBITS_DUAL &&
	    xfer->rx_nbits == SPI_NBITS_DUAL)
		val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
		 xfer->rx_nbits == SPI_NBITS_QUAD)
		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
	val |= SPFI_CONTROL_SE;
	spfi_writel(spfi, val, SPFI_CONTROL);
}
0478
0479 static int img_spfi_transfer_one(struct spi_master *master,
0480 struct spi_device *spi,
0481 struct spi_transfer *xfer)
0482 {
0483 struct img_spfi *spfi = spi_master_get_devdata(spi->master);
0484 int ret;
0485
0486 if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
0487 dev_err(spfi->dev,
0488 "Transfer length (%d) is greater than the max supported (%d)",
0489 xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
0490 return -EINVAL;
0491 }
0492
0493 img_spfi_config(master, spi, xfer);
0494 if (master->can_dma && master->can_dma(master, spi, xfer))
0495 ret = img_spfi_start_dma(master, spi, xfer);
0496 else
0497 ret = img_spfi_start_pio(master, spi, xfer);
0498
0499 return ret;
0500 }
0501
0502 static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
0503 struct spi_transfer *xfer)
0504 {
0505 if (xfer->len > SPFI_32BIT_FIFO_SIZE)
0506 return true;
0507 return false;
0508 }
0509
0510 static irqreturn_t img_spfi_irq(int irq, void *dev_id)
0511 {
0512 struct img_spfi *spfi = (struct img_spfi *)dev_id;
0513 u32 status;
0514
0515 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
0516 if (status & SPFI_INTERRUPT_IACCESS) {
0517 spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
0518 dev_err(spfi->dev, "Illegal access interrupt");
0519 return IRQ_HANDLED;
0520 }
0521
0522 return IRQ_NONE;
0523 }
0524
/*
 * Probe: map registers, acquire IRQ, clocks and (optionally) DMA
 * channels, describe the controller's capabilities to the SPI core,
 * and register the master.  Uses goto-based cleanup; devm-managed
 * resources (regs, irq, clk handles) are released automatically.
 */
static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;
	u32 max_speed_hz;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	/* Physical base is needed to form the DMA slave FIFO addresses. */
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);

	/*
	 * Only the illegal-access interrupt is enabled; transfer
	 * completion is detected by polling ALLDONE or via DMA callbacks.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	/* Divider range bounds the usable bus speed (see img_spfi_config). */
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
	master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

	/*
	 * An optional DT property may cap the bus speed below the
	 * clock-derived maximum.
	 */
	if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
				  &max_speed_hz)) {
		if (master->max_speed_hz > max_speed_hz)
			master->max_speed_hz = max_speed_hz;
	}

	master->transfer_one = img_spfi_transfer_one;
	master->prepare_message = img_spfi_prepare;
	master->unprepare_message = img_spfi_unprepare;
	master->handle_err = img_spfi_handle_err;
	master->use_gpio_descriptors = true;

	/* DMA channels are optional; only -EPROBE_DEFER is fatal here. */
	spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
	if (IS_ERR(spfi->tx_ch)) {
		ret = PTR_ERR(spfi->tx_ch);
		spfi->tx_ch = NULL;
		if (ret == -EPROBE_DEFER)
			goto disable_pm;
	}

	spfi->rx_ch = dma_request_chan(spfi->dev, "rx");
	if (IS_ERR(spfi->rx_ch)) {
		ret = PTR_ERR(spfi->rx_ch);
		spfi->rx_ch = NULL;
		if (ret == -EPROBE_DEFER)
			goto disable_pm;
	}

	/* DMA is all-or-nothing: both channels or fall back to PIO. */
	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		spfi->tx_ch = NULL;
		spfi->rx_ch = NULL;
		dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}
0668
/*
 * Remove: release the DMA channels and balance the clock enables done
 * at probe time.  Always returns 0.
 */
static int img_spfi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);

	pm_runtime_disable(spfi->dev);
	/* If runtime PM hadn't already gated the clocks, do it now. */
	if (!pm_runtime_status_suspended(spfi->dev)) {
		clk_disable_unprepare(spfi->spfi_clk);
		clk_disable_unprepare(spfi->sys_clk);
	}

	return 0;
}
0687
0688 #ifdef CONFIG_PM
/* Runtime PM suspend: gate both controller clocks. */
static int img_spfi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);

	clk_disable_unprepare(spfi->spfi_clk);
	clk_disable_unprepare(spfi->sys_clk);

	return 0;
}
0699
0700 static int img_spfi_runtime_resume(struct device *dev)
0701 {
0702 struct spi_master *master = dev_get_drvdata(dev);
0703 struct img_spfi *spfi = spi_master_get_devdata(master);
0704 int ret;
0705
0706 ret = clk_prepare_enable(spfi->sys_clk);
0707 if (ret)
0708 return ret;
0709 ret = clk_prepare_enable(spfi->spfi_clk);
0710 if (ret) {
0711 clk_disable_unprepare(spfi->sys_clk);
0712 return ret;
0713 }
0714
0715 return 0;
0716 }
0717 #endif
0718
0719 #ifdef CONFIG_PM_SLEEP
/* System sleep: let the SPI core quiesce the message queue. */
static int img_spfi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);
}
0726
/*
 * System resume: power the controller up long enough to reset it to a
 * clean state, then restart the SPI core's queue.
 */
static int img_spfi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct img_spfi *spfi = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* Balance the usage count even when resume failed. */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	spfi_reset(spfi);
	pm_runtime_put(dev);

	return spi_master_resume(master);
}
0743 #endif
0744
/* Runtime PM gates the clocks; system sleep goes through the SPI core. */
static const struct dev_pm_ops img_spfi_pm_ops = {
	SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
};
0750
/* Devicetree compatible strings this driver binds to. */
static const struct of_device_id img_spfi_of_match[] = {
	{ .compatible = "img,spfi", },
	{ },
};
MODULE_DEVICE_TABLE(of, img_spfi_of_match);
0756
/* Platform driver glue; module_platform_driver() generates init/exit. */
static struct platform_driver img_spfi_driver = {
	.driver = {
		.name = "img-spfi",
		.pm = &img_spfi_pm_ops,
		.of_match_table = of_match_ptr(img_spfi_of_match),
	},
	.probe = img_spfi_probe,
	.remove = img_spfi_remove,
};
module_platform_driver(img_spfi_driver);
0767
0768 MODULE_DESCRIPTION("IMG SPFI controller driver");
0769 MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
0770 MODULE_LICENSE("GPL v2");