// SPDX-License-Identifier: GPL-2.0
// spi-uniphier.c - Socionext UniPhier SPI controller driver
// Copyright 2012      Panasonic Corporation
// Copyright 2016-2018 Socionext Inc.
#include <linux/kernel.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#include <asm/unaligned.h>

#define SSI_TIMEOUT_MS		2000
#define SSI_POLL_TIMEOUT_US	200
#define SSI_MAX_CLK_DIVIDER	254
#define SSI_MIN_CLK_DIVIDER	4

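/* per-controller driver state, including bookkeeping for the transfer in flight */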
struct uniphier_spi_priv {
	void __iomem *base;
	dma_addr_t base_dma_addr;
	struct clk *clk;
	struct spi_master *master;
	struct completion xfer_done;

	int error;
	unsigned int tx_bytes;
	unsigned int rx_bytes;
	const u8 *tx_buf;
	u8 *rx_buf;
	atomic_t dma_busy;

	bool is_save_param;
	u8 bits_per_word;
	u16 mode;
	u32 speed_hz;
};

#define SSI_CTL			0x00
#define SSI_CTL_EN		BIT(0)

#define SSI_CKS			0x04
#define SSI_CKS_CKRAT_MASK	GENMASK(7, 0)
#define SSI_CKS_CKPHS		BIT(14)
#define SSI_CKS_CKINIT		BIT(13)
#define SSI_CKS_CKDLY		BIT(12)

#define SSI_TXWDS		0x08
#define SSI_TXWDS_WDLEN_MASK	GENMASK(13, 8)
#define SSI_TXWDS_TDTF_MASK	GENMASK(7, 6)
#define SSI_TXWDS_DTLEN_MASK	GENMASK(5, 0)

#define SSI_RXWDS		0x0c
#define SSI_RXWDS_DTLEN_MASK	GENMASK(5, 0)

#define SSI_FPS			0x10
#define SSI_FPS_FSPOL		BIT(15)
#define SSI_FPS_FSTRT		BIT(14)

#define SSI_SR			0x14
#define SSI_SR_BUSY		BIT(7)
#define SSI_SR_RNE		BIT(0)

#define SSI_IE			0x18
#define SSI_IE_TCIE		BIT(4)
#define SSI_IE_RCIE		BIT(3)
#define SSI_IE_TXRE		BIT(2)
#define SSI_IE_RXRE		BIT(1)
#define SSI_IE_RORIE		BIT(0)
#define SSI_IE_ALL_MASK		GENMASK(4, 0)

#define SSI_IS			0x1c
#define SSI_IS_RXRS		BIT(9)
#define SSI_IS_RCID		BIT(3)
#define SSI_IS_RORID		BIT(0)

#define SSI_IC			0x1c
#define SSI_IC_TCIC		BIT(4)
#define SSI_IC_RCIC		BIT(3)
#define SSI_IC_RORIC		BIT(0)

#define SSI_FC			0x20
#define SSI_FC_TXFFL		BIT(12)
#define SSI_FC_TXFTH_MASK	GENMASK(11, 8)
#define SSI_FC_RXFFL		BIT(4)
#define SSI_FC_RXFTH_MASK	GENMASK(3, 0)

#define SSI_TXDR		0x24
#define SSI_RXDR		0x24

#define SSI_FIFO_DEPTH		8U
#define SSI_FIFO_BURST_NUM	1

#define SSI_DMA_RX_BUSY		BIT(1)
#define SSI_DMA_TX_BUSY		BIT(0)

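/* number of bytes occupied by one FIFO word for the given bits-per-word */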
static inline unsigned int bytes_per_word(unsigned int bits)
{
	return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
}

static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
					   u32 mask)
{
	u32 val;

	val = readl(priv->base + SSI_IE);
	val |= mask;
	writel(val, priv->base + SSI_IE);
}

static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
					    u32 mask)
{
	u32 val;

	val = readl(priv->base + SSI_IE);
	val &= ~mask;
	writel(val, priv->base + SSI_IE);
}

static void uniphier_spi_set_mode(struct spi_device *spi)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val1, val2;

	/*
	 * clock setting
	 * CKPHS    capture timing. 0:rising edge, 1:falling edge
	 * CKINIT   clock initial level. 0:low, 1:high
	 * CKDLY    clock delay. 0:no delay, 1:delay depending on FSTRT
	 *          (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
	 *
	 * frame setting
	 * FSPOL    frame signal polarity. 0:low, 1:high
	 * FSTRT    start frame timing
	 *          0:rising edge of clock, 1:falling edge of clock
	 */
	switch (spi->mode & SPI_MODE_X_MASK) {
	case SPI_MODE_0:
		/* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
		val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
		val2 = 0;
		break;
	case SPI_MODE_1:
		/* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
		val1 = 0;
		val2 = SSI_FPS_FSTRT;
		break;
	case SPI_MODE_2:
		/* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
		val1 = SSI_CKS_CKINIT | SSI_CKS_CKDLY;
		val2 = SSI_FPS_FSTRT;
		break;
	case SPI_MODE_3:
		/* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
		val1 = SSI_CKS_CKPHS | SSI_CKS_CKINIT;
		val2 = 0;
		break;
	}

	if (!(spi->mode & SPI_CS_HIGH))
		val2 |= SSI_FPS_FSPOL;

	writel(val1, priv->base + SSI_CKS);
	writel(val2, priv->base + SSI_FPS);

	val1 = 0;
	if (spi->mode & SPI_LSB_FIRST)
		val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
	writel(val1, priv->base + SSI_TXWDS);
	writel(val1, priv->base + SSI_RXWDS);
}

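/* program the word length and data length fields for both directions */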
static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val;

	val = readl(priv->base + SSI_TXWDS);
	val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
	val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
	val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
	writel(val, priv->base + SSI_TXWDS);

	val = readl(priv->base + SSI_RXWDS);
	val &= ~SSI_RXWDS_DTLEN_MASK;
	val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
	writel(val, priv->base + SSI_RXWDS);
}

static void uniphier_spi_set_baudrate(struct spi_device *spi,
				      unsigned int speed)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val, ckdiv;

	/*
	 * the supported rates are even numbers from 4 to 254 (4, 6, 8, ... 254);
	 * round the divider up so the resulting speed is equal or less than requested
	 */
	ckdiv = DIV_ROUND_UP(clk_get_rate(priv->clk), speed);
	ckdiv = round_up(ckdiv, 2);

	val = readl(priv->base + SSI_CKS);
	val &= ~SSI_CKS_CKRAT_MASK;
	val |= ckdiv & SSI_CKS_CKRAT_MASK;
	writel(val, priv->base + SSI_CKS);
}

static void uniphier_spi_setup_transfer(struct spi_device *spi,
					struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val;

	priv->error = 0;
	priv->tx_buf = t->tx_buf;
	priv->rx_buf = t->rx_buf;
	priv->tx_bytes = priv->rx_bytes = t->len;

	if (!priv->is_save_param || priv->mode != spi->mode) {
		uniphier_spi_set_mode(spi);
		priv->mode = spi->mode;
		priv->is_save_param = false;
	}

	if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
		uniphier_spi_set_transfer_size(spi, t->bits_per_word);
		priv->bits_per_word = t->bits_per_word;
	}

	if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
		uniphier_spi_set_baudrate(spi, t->speed_hz);
		priv->speed_hz = t->speed_hz;
	}

	priv->is_save_param = true;

	/* reset FIFOs */
	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
	writel(val, priv->base + SSI_FC);
}

static void uniphier_spi_send(struct uniphier_spi_priv *priv)
{
	int wsize;
	u32 val = 0;

	wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
	priv->tx_bytes -= wsize;

	if (priv->tx_buf) {
		switch (wsize) {
		case 1:
			val = *priv->tx_buf;
			break;
		case 2:
			val = get_unaligned_le16(priv->tx_buf);
			break;
		case 4:
			val = get_unaligned_le32(priv->tx_buf);
			break;
		}

		priv->tx_buf += wsize;
	}

	writel(val, priv->base + SSI_TXDR);
}

static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
{
	int rsize;
	u32 val;

	rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
	priv->rx_bytes -= rsize;

	val = readl(priv->base + SSI_RXDR);

	if (priv->rx_buf) {
		switch (rsize) {
		case 1:
			*priv->rx_buf = val;
			break;
		case 2:
			put_unaligned_le16(val, priv->rx_buf);
			break;
		case 4:
			put_unaligned_le32(val, priv->rx_buf);
			break;
		}

		priv->rx_buf += rsize;
	}
}

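/*
 * Program the FIFO thresholds: the RX threshold is set to 'threshold'
 * words and the TX threshold to the complementary value, so both sides
 * trip around the same fill level.
 */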
static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
					    unsigned int threshold)
{
	u32 val;

	val = readl(priv->base + SSI_FC);
	val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
	val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
	val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
	writel(val, priv->base + SSI_FC);
}

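/*
 * Top up the TX FIFO so that exactly the number of words still expected
 * on the RX side (capped at the FIFO depth) are in flight; rx_bytes minus
 * tx_bytes is the amount already queued but not yet received.
 */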
static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
{
	unsigned int fifo_threshold, fill_words;
	unsigned int bpw = bytes_per_word(priv->bits_per_word);

	fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
	fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);

	uniphier_spi_set_fifo_threshold(priv, fifo_threshold);

	fill_words = fifo_threshold -
		DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);

	while (fill_words--)
		uniphier_spi_send(priv);
}

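/* chip select is driven via the frame signal polarity bit (FSPOL) */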
static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val;

	val = readl(priv->base + SSI_FPS);

	if (enable)
		val |= SSI_FPS_FSPOL;
	else
		val &= ~SSI_FPS_FSPOL;

	writel(val, priv->base + SSI_FPS);
}

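/*
 * DMA is only worthwhile when the transfer does not fit in the FIFO;
 * a direction that has data but no channel forces PIO.
 */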
static bool uniphier_spi_can_dma(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	unsigned int bpw = bytes_per_word(priv->bits_per_word);

	if ((!master->dma_tx && !master->dma_rx)
	    || (!master->dma_tx && t->tx_buf)
	    || (!master->dma_rx && t->rx_buf))
		return false;

	return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
}

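/*
 * DMA completion callbacks: each clears its own busy flag, and the
 * transfer is finalized once the other direction has finished too.
 */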
static void uniphier_spi_dma_rxcb(void *data)
{
	struct spi_master *master = data;
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);

	uniphier_spi_irq_disable(priv, SSI_IE_RXRE);

	if (!(state & SSI_DMA_TX_BUSY))
		spi_finalize_current_transfer(master);
}

static void uniphier_spi_dma_txcb(void *data)
{
	struct spi_master *master = data;
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);

	uniphier_spi_irq_disable(priv, SSI_IE_TXRE);

	if (!(state & SSI_DMA_RX_BUSY))
		spi_finalize_current_transfer(master);
}

static int uniphier_spi_transfer_one_dma(struct spi_master *master,
					 struct spi_device *spi,
					 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	int buswidth;

	atomic_set(&priv->dma_busy, 0);

	uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);

	if (priv->bits_per_word <= 8)
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (priv->bits_per_word <= 16)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;

	if (priv->rx_buf) {
		struct dma_slave_config rxconf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = priv->base_dma_addr + SSI_RXDR,
			.src_addr_width = buswidth,
			.src_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_rx, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
			master->dma_rx,
			t->rx_sg.sgl, t->rx_sg.nents,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto out_err_prep;

		rxdesc->callback = uniphier_spi_dma_rxcb;
		rxdesc->callback_param = master;

		uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
		atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);

		dmaengine_submit(rxdesc);
		dma_async_issue_pending(master->dma_rx);
	}

	if (priv->tx_buf) {
		struct dma_slave_config txconf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = priv->base_dma_addr + SSI_TXDR,
			.dst_addr_width = buswidth,
			.dst_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_tx, &txconf);

		txdesc = dmaengine_prep_slave_sg(
			master->dma_tx,
			t->tx_sg.sgl, t->tx_sg.nents,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto out_err_prep;

		txdesc->callback = uniphier_spi_dma_txcb;
		txdesc->callback_param = master;

		uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
		atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);

		dmaengine_submit(txdesc);
		dma_async_issue_pending(master->dma_tx);
	}

	/* signal that we need to wait for completion */
	return (priv->tx_buf || priv->rx_buf);

out_err_prep:
	if (rxdesc)
		dmaengine_terminate_sync(master->dma_rx);

	return -EINVAL;
}

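/*
 * IRQ-driven PIO: prime the TX FIFO, then let the interrupt handler
 * drain RX and refill TX until the transfer completes or times out.
 */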
static int uniphier_spi_transfer_one_irq(struct spi_master *master,
					 struct spi_device *spi,
					 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	struct device *dev = master->dev.parent;
	unsigned long time_left;

	reinit_completion(&priv->xfer_done);

	uniphier_spi_fill_tx_fifo(priv);

	uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

	time_left = wait_for_completion_timeout(&priv->xfer_done,
					msecs_to_jiffies(SSI_TIMEOUT_MS));

	uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

	if (!time_left) {
		dev_err(dev, "transfer timeout.\n");
		return -ETIMEDOUT;
	}

	return priv->error;
}

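/*
 * Busy-wait PIO for short transfers; if the RX data does not show up
 * within the polling budget, fall back to the IRQ-driven path.
 */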
static int uniphier_spi_transfer_one_poll(struct spi_master *master,
					  struct spi_device *spi,
					  struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int loop = SSI_POLL_TIMEOUT_US * 10;

	while (priv->tx_bytes) {
		uniphier_spi_fill_tx_fifo(priv);

		while ((priv->rx_bytes - priv->tx_bytes) > 0) {
			while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
								&& loop--)
				ndelay(100);

			if (loop == -1)
				goto irq_transfer;

			uniphier_spi_recv(priv);
		}
	}

	return 0;

irq_transfer:
	return uniphier_spi_transfer_one_irq(master, spi, t);
}

static int uniphier_spi_transfer_one(struct spi_master *master,
				     struct spi_device *spi,
				     struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	unsigned long threshold;
	bool use_dma;

	/* terminate and return success for a zero-byte-length transfer */
	if (!t->len)
		return 0;

	uniphier_spi_setup_transfer(spi, t);

	use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
	if (use_dma)
		return uniphier_spi_transfer_one_dma(master, spi, t);

	/*
	 * If the transfer operation will take longer than
	 * SSI_POLL_TIMEOUT_US, it should use irq.
	 */
	threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
					USEC_PER_SEC * BITS_PER_BYTE);
	if (t->len > threshold)
		return uniphier_spi_transfer_one_irq(master, spi, t);
	else
		return uniphier_spi_transfer_one_poll(master, spi, t);
}

static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);

	writel(SSI_CTL_EN, priv->base + SSI_CTL);

	return 0;
}

static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);

	writel(0, priv->base + SSI_CTL);

	return 0;
}

static void uniphier_spi_handle_err(struct spi_master *master,
				    struct spi_message *msg)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	u32 val;

	/* stop running spi transfer */
	writel(0, priv->base + SSI_CTL);

	/* reset FIFOs */
	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
	writel(val, priv->base + SSI_FC);

	uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);

	if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
		dmaengine_terminate_async(master->dma_tx);
		atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
	}

	if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
		dmaengine_terminate_async(master->dma_rx);
		atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
	}
}

static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
	struct uniphier_spi_priv *priv = dev_id;
	u32 val, stat;

	stat = readl(priv->base + SSI_IS);
	val = SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC;
	writel(val, priv->base + SSI_IC);

	/* rx fifo overrun */
	if (stat & SSI_IS_RORID) {
		priv->error = -EIO;
		goto done;
	}

	/* rx complete */
	if ((stat & SSI_IS_RCID) && (stat & SSI_IS_RXRS)) {
		while ((readl(priv->base + SSI_SR) & SSI_SR_RNE) &&
				(priv->rx_bytes - priv->tx_bytes) > 0)
			uniphier_spi_recv(priv);

		if ((readl(priv->base + SSI_SR) & SSI_SR_RNE) ||
				(priv->rx_bytes != priv->tx_bytes)) {
			priv->error = -EIO;
			goto done;
		} else if (priv->rx_bytes == 0)
			goto done;

		/* next tx transfer */
		uniphier_spi_fill_tx_fifo(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;

done:
	complete(&priv->xfer_done);
	return IRQ_HANDLED;
}

static int uniphier_spi_probe(struct platform_device *pdev)
{
	struct uniphier_spi_priv *priv;
	struct spi_master *master;
	struct resource *res;
	struct dma_slave_caps caps;
	u32 dma_tx_burst = 0, dma_rx_burst = 0;
	unsigned long clk_rate;
	int irq;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	priv = spi_master_get_devdata(master);
	priv->master = master;
	priv->is_save_param = false;

	priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out_master_put;
	}
	priv->base_dma_addr = res->start;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto out_master_put;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto out_master_put;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_disable_clk;
	}

	ret = devm_request_irq(&pdev->dev, irq, uniphier_spi_handler,
			       0, "uniphier-spi", priv);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto out_disable_clk;
	}

	init_completion(&priv->xfer_done);

	clk_rate = clk_get_rate(priv->clk);

	master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
	master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);

	master->set_cs = uniphier_spi_set_cs;
	master->transfer_one = uniphier_spi_transfer_one;
	master->prepare_transfer_hardware
				= uniphier_spi_prepare_transfer_hardware;
	master->unprepare_transfer_hardware
				= uniphier_spi_unprepare_transfer_hardware;
	master->handle_err = uniphier_spi_handle_err;
	master->can_dma = uniphier_spi_can_dma;

	master->num_chipselect = 1;
	master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;

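	/*
	 * DMA channels are optional: defer probing if the provider is not
	 * ready yet, otherwise fall back to PIO when a channel is missing.
	 */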
	master->dma_tx = dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR_OR_NULL(master->dma_tx)) {
		if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out_disable_clk;
		}
		master->dma_tx = NULL;
		dma_tx_burst = INT_MAX;
	} else {
		ret = dma_get_slave_caps(master->dma_tx, &caps);
		if (ret) {
			dev_err(&pdev->dev, "failed to get TX DMA capabilities: %d\n",
				ret);
			goto out_release_dma;
		}
		dma_tx_burst = caps.max_burst;
	}

	master->dma_rx = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR_OR_NULL(master->dma_rx)) {
		if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out_release_dma;
		}
		master->dma_rx = NULL;
		dma_rx_burst = INT_MAX;
	} else {
		ret = dma_get_slave_caps(master->dma_rx, &caps);
		if (ret) {
			dev_err(&pdev->dev, "failed to get RX DMA capabilities: %d\n",
				ret);
			goto out_release_dma;
		}
		dma_rx_burst = caps.max_burst;
	}

	master->max_dma_len = min(dma_tx_burst, dma_rx_burst);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto out_release_dma;

	return 0;

out_release_dma:
	if (!IS_ERR_OR_NULL(master->dma_rx)) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}
	if (!IS_ERR_OR_NULL(master->dma_tx)) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}

out_disable_clk:
	clk_disable_unprepare(priv->clk);

out_master_put:
	spi_master_put(master);
	return ret;
}

static int uniphier_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);

	if (master->dma_tx)
		dma_release_channel(master->dma_tx);
	if (master->dma_rx)
		dma_release_channel(master->dma_rx);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id uniphier_spi_match[] = {
	{ .compatible = "socionext,uniphier-scssi" },
	{ }
};
MODULE_DEVICE_TABLE(of, uniphier_spi_match);

static struct platform_driver uniphier_spi_driver = {
	.probe = uniphier_spi_probe,
	.remove = uniphier_spi_remove,
	.driver = {
		.name = "uniphier-spi",
		.of_match_table = uniphier_spi_match,
	},
};
module_platform_driver(uniphier_spi_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
MODULE_DESCRIPTION("Socionext UniPhier SPI controller driver");
MODULE_LICENSE("GPL v2");