0001
0002
0003
0004
0005
0006 #include <linux/init.h>
0007 #include <linux/module.h>
0008 #include <linux/interrupt.h>
0009 #include <linux/delay.h>
0010 #include <linux/clk.h>
0011 #include <linux/dma-mapping.h>
0012 #include <linux/dmaengine.h>
0013 #include <linux/platform_device.h>
0014 #include <linux/pm_runtime.h>
0015 #include <linux/spi/spi.h>
0016 #include <linux/of.h>
0017 #include <linux/of_device.h>
0018
0019 #include <linux/platform_data/spi-s3c64xx.h>
0020
0021 #define MAX_SPI_PORTS 12
0022 #define S3C64XX_SPI_QUIRK_POLL (1 << 0)
0023 #define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
0024 #define AUTOSUSPEND_TIMEOUT 2000
0025
0026
0027
0028 #define S3C64XX_SPI_CH_CFG 0x00
0029 #define S3C64XX_SPI_CLK_CFG 0x04
0030 #define S3C64XX_SPI_MODE_CFG 0x08
0031 #define S3C64XX_SPI_CS_REG 0x0C
0032 #define S3C64XX_SPI_INT_EN 0x10
0033 #define S3C64XX_SPI_STATUS 0x14
0034 #define S3C64XX_SPI_TX_DATA 0x18
0035 #define S3C64XX_SPI_RX_DATA 0x1C
0036 #define S3C64XX_SPI_PACKET_CNT 0x20
0037 #define S3C64XX_SPI_PENDING_CLR 0x24
0038 #define S3C64XX_SPI_SWAP_CFG 0x28
0039 #define S3C64XX_SPI_FB_CLK 0x2C
0040
0041 #define S3C64XX_SPI_CH_HS_EN (1<<6)
0042 #define S3C64XX_SPI_CH_SW_RST (1<<5)
0043 #define S3C64XX_SPI_CH_SLAVE (1<<4)
0044 #define S3C64XX_SPI_CPOL_L (1<<3)
0045 #define S3C64XX_SPI_CPHA_B (1<<2)
0046 #define S3C64XX_SPI_CH_RXCH_ON (1<<1)
0047 #define S3C64XX_SPI_CH_TXCH_ON (1<<0)
0048
0049 #define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
0050 #define S3C64XX_SPI_CLKSEL_SRCSHFT 9
0051 #define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
0052 #define S3C64XX_SPI_PSR_MASK 0xff
0053
0054 #define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
0055 #define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
0056 #define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
0057 #define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
0058 #define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
0059 #define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
0060 #define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
0061 #define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
0062 #define S3C64XX_SPI_MODE_SELF_LOOPBACK (1<<3)
0063 #define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
0064 #define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
0065 #define S3C64XX_SPI_MODE_4BURST (1<<0)
0066
0067 #define S3C64XX_SPI_CS_NSC_CNT_2 (2<<4)
0068 #define S3C64XX_SPI_CS_AUTO (1<<1)
0069 #define S3C64XX_SPI_CS_SIG_INACT (1<<0)
0070
0071 #define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
0072 #define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
0073 #define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
0074 #define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
0075 #define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
0076 #define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
0077 #define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
0078
0079 #define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
0080 #define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
0081 #define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
0082 #define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
0083 #define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
0084 #define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
0085
0086 #define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
0087
0088 #define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
0089 #define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
0090 #define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
0091 #define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
0092 #define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
0093
0094 #define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
0095 #define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
0096 #define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
0097 #define S3C64XX_SPI_SWAP_RX_EN (1<<4)
0098 #define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
0099 #define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
0100 #define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
0101 #define S3C64XX_SPI_SWAP_TX_EN (1<<0)
0102
0103 #define S3C64XX_SPI_FBCLK_MSK (3<<0)
0104
0105 #define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
0106 #define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
0107 (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
0108 #define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
0109 #define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
0110 FIFO_LVL_MASK(i))
0111
0112 #define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
0113 #define S3C64XX_SPI_TRAILCNT_OFF 19
0114
0115 #define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
0116
0117 #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
0118 #define is_polling(x) (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
0119
0120 #define RXBUSY (1<<2)
0121 #define TXBUSY (1<<3)
0122
0123 struct s3c64xx_spi_dma_data {
0124 struct dma_chan *ch;
0125 dma_cookie_t cookie;
0126 enum dma_transfer_direction direction;
0127 };
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147 struct s3c64xx_spi_port_config {
0148 int fifo_lvl_mask[MAX_SPI_PORTS];
0149 int rx_lvl_offset;
0150 int tx_st_done;
0151 int quirks;
0152 int clk_div;
0153 bool high_speed;
0154 bool clk_from_cmu;
0155 bool clk_ioclk;
0156 bool has_loopback;
0157 };
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180 struct s3c64xx_spi_driver_data {
0181 void __iomem *regs;
0182 struct clk *clk;
0183 struct clk *src_clk;
0184 struct clk *ioclk;
0185 struct platform_device *pdev;
0186 struct spi_master *master;
0187 struct s3c64xx_spi_info *cntrlr_info;
0188 spinlock_t lock;
0189 unsigned long sfr_start;
0190 struct completion xfer_completion;
0191 unsigned state;
0192 unsigned cur_mode, cur_bpw;
0193 unsigned cur_speed;
0194 struct s3c64xx_spi_dma_data rx_dma;
0195 struct s3c64xx_spi_dma_data tx_dma;
0196 const struct s3c64xx_spi_port_config *port_conf;
0197 unsigned int port_id;
0198 };
0199
0200 static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
0201 {
0202 void __iomem *regs = sdd->regs;
0203 unsigned long loops;
0204 u32 val;
0205
0206 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
0207
0208 val = readl(regs + S3C64XX_SPI_CH_CFG);
0209 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
0210 writel(val, regs + S3C64XX_SPI_CH_CFG);
0211
0212 val = readl(regs + S3C64XX_SPI_CH_CFG);
0213 val |= S3C64XX_SPI_CH_SW_RST;
0214 val &= ~S3C64XX_SPI_CH_HS_EN;
0215 writel(val, regs + S3C64XX_SPI_CH_CFG);
0216
0217
0218 loops = msecs_to_loops(1);
0219 do {
0220 val = readl(regs + S3C64XX_SPI_STATUS);
0221 } while (TX_FIFO_LVL(val, sdd) && loops--);
0222
0223 if (loops == 0)
0224 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
0225
0226
0227 loops = msecs_to_loops(1);
0228 do {
0229 val = readl(regs + S3C64XX_SPI_STATUS);
0230 if (RX_FIFO_LVL(val, sdd))
0231 readl(regs + S3C64XX_SPI_RX_DATA);
0232 else
0233 break;
0234 } while (loops--);
0235
0236 if (loops == 0)
0237 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
0238
0239 val = readl(regs + S3C64XX_SPI_CH_CFG);
0240 val &= ~S3C64XX_SPI_CH_SW_RST;
0241 writel(val, regs + S3C64XX_SPI_CH_CFG);
0242
0243 val = readl(regs + S3C64XX_SPI_MODE_CFG);
0244 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
0245 writel(val, regs + S3C64XX_SPI_MODE_CFG);
0246 }
0247
0248 static void s3c64xx_spi_dmacb(void *data)
0249 {
0250 struct s3c64xx_spi_driver_data *sdd;
0251 struct s3c64xx_spi_dma_data *dma = data;
0252 unsigned long flags;
0253
0254 if (dma->direction == DMA_DEV_TO_MEM)
0255 sdd = container_of(data,
0256 struct s3c64xx_spi_driver_data, rx_dma);
0257 else
0258 sdd = container_of(data,
0259 struct s3c64xx_spi_driver_data, tx_dma);
0260
0261 spin_lock_irqsave(&sdd->lock, flags);
0262
0263 if (dma->direction == DMA_DEV_TO_MEM) {
0264 sdd->state &= ~RXBUSY;
0265 if (!(sdd->state & TXBUSY))
0266 complete(&sdd->xfer_completion);
0267 } else {
0268 sdd->state &= ~TXBUSY;
0269 if (!(sdd->state & RXBUSY))
0270 complete(&sdd->xfer_completion);
0271 }
0272
0273 spin_unlock_irqrestore(&sdd->lock, flags);
0274 }
0275
0276 static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
0277 struct sg_table *sgt)
0278 {
0279 struct s3c64xx_spi_driver_data *sdd;
0280 struct dma_slave_config config;
0281 struct dma_async_tx_descriptor *desc;
0282 int ret;
0283
0284 memset(&config, 0, sizeof(config));
0285
0286 if (dma->direction == DMA_DEV_TO_MEM) {
0287 sdd = container_of((void *)dma,
0288 struct s3c64xx_spi_driver_data, rx_dma);
0289 config.direction = dma->direction;
0290 config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
0291 config.src_addr_width = sdd->cur_bpw / 8;
0292 config.src_maxburst = 1;
0293 dmaengine_slave_config(dma->ch, &config);
0294 } else {
0295 sdd = container_of((void *)dma,
0296 struct s3c64xx_spi_driver_data, tx_dma);
0297 config.direction = dma->direction;
0298 config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
0299 config.dst_addr_width = sdd->cur_bpw / 8;
0300 config.dst_maxburst = 1;
0301 dmaengine_slave_config(dma->ch, &config);
0302 }
0303
0304 desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
0305 dma->direction, DMA_PREP_INTERRUPT);
0306 if (!desc) {
0307 dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
0308 dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
0309 return -ENOMEM;
0310 }
0311
0312 desc->callback = s3c64xx_spi_dmacb;
0313 desc->callback_param = dma;
0314
0315 dma->cookie = dmaengine_submit(desc);
0316 ret = dma_submit_error(dma->cookie);
0317 if (ret) {
0318 dev_err(&sdd->pdev->dev, "DMA submission failed");
0319 return -EIO;
0320 }
0321
0322 dma_async_issue_pending(dma->ch);
0323 return 0;
0324 }
0325
0326 static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
0327 {
0328 struct s3c64xx_spi_driver_data *sdd =
0329 spi_master_get_devdata(spi->master);
0330
0331 if (sdd->cntrlr_info->no_cs)
0332 return;
0333
0334 if (enable) {
0335 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
0336 writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
0337 } else {
0338 u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);
0339
0340 ssel |= (S3C64XX_SPI_CS_AUTO |
0341 S3C64XX_SPI_CS_NSC_CNT_2);
0342 writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
0343 }
0344 } else {
0345 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
0346 writel(S3C64XX_SPI_CS_SIG_INACT,
0347 sdd->regs + S3C64XX_SPI_CS_REG);
0348 }
0349 }
0350
0351 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
0352 {
0353 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
0354
0355 if (is_polling(sdd))
0356 return 0;
0357
0358
0359 sdd->rx_dma.ch = dma_request_chan(&sdd->pdev->dev, "rx");
0360 if (IS_ERR(sdd->rx_dma.ch)) {
0361 dev_err(&sdd->pdev->dev, "Failed to get RX DMA channel\n");
0362 sdd->rx_dma.ch = NULL;
0363 return 0;
0364 }
0365
0366 sdd->tx_dma.ch = dma_request_chan(&sdd->pdev->dev, "tx");
0367 if (IS_ERR(sdd->tx_dma.ch)) {
0368 dev_err(&sdd->pdev->dev, "Failed to get TX DMA channel\n");
0369 dma_release_channel(sdd->rx_dma.ch);
0370 sdd->tx_dma.ch = NULL;
0371 sdd->rx_dma.ch = NULL;
0372 return 0;
0373 }
0374
0375 spi->dma_rx = sdd->rx_dma.ch;
0376 spi->dma_tx = sdd->tx_dma.ch;
0377
0378 return 0;
0379 }
0380
0381 static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
0382 {
0383 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
0384
0385 if (is_polling(sdd))
0386 return 0;
0387
0388
0389 if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
0390 dma_release_channel(sdd->rx_dma.ch);
0391 dma_release_channel(sdd->tx_dma.ch);
0392 sdd->rx_dma.ch = 0;
0393 sdd->tx_dma.ch = 0;
0394 }
0395
0396 return 0;
0397 }
0398
0399 static bool s3c64xx_spi_can_dma(struct spi_master *master,
0400 struct spi_device *spi,
0401 struct spi_transfer *xfer)
0402 {
0403 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
0404
0405 if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
0406 return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
0407 } else {
0408 return false;
0409 }
0410
0411 }
0412
/*
 * s3c64xx_enable_datapath - program MODE_CFG/CH_CFG and start the data
 * path for one transfer, in either DMA (@dma_mode != 0) or PIO mode.
 *
 * In PIO mode the TX data is pushed into the FIFO here; in DMA mode the
 * descriptors are prepared/submitted via prepare_dma().  Returns 0 on
 * success or the error from prepare_dma().
 * Caller holds sdd->lock (see s3c64xx_spi_transfer_one()).
 */
static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;
	int ret = 0;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/*
		 * Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clock
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
		} else {
			/* PIO: push the whole buffer into the TX FIFO now */
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		/* High-speed RX feedback only for CPHA=0 at >= 30 MHz */
		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
		}
	}

	if (ret)
		return ret;

	/* Commit mode first, then kick the channel on */
	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);

	return 0;
}
0488
0489 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
0490 int timeout_ms)
0491 {
0492 void __iomem *regs = sdd->regs;
0493 unsigned long val = 1;
0494 u32 status;
0495
0496
0497 u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
0498
0499 if (timeout_ms)
0500 val = msecs_to_loops(timeout_ms);
0501
0502 do {
0503 status = readl(regs + S3C64XX_SPI_STATUS);
0504 } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
0505
0506
0507 return RX_FIFO_LVL(status, sdd);
0508 }
0509
/*
 * s3c64xx_wait_for_dma - wait for a DMA transfer to finish.
 *
 * Sleeps on sdd->xfer_completion (signalled by s3c64xx_spi_dmacb() once
 * both directions are idle) with a timeout scaled to the transfer length
 * and current bus speed.  Returns 0 on success, -EIO on timeout.
 */
static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed', plus tolerance */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 30;
	ms = max(ms, 100);

	val = msecs_to_jiffies(ms) + 10;
	val = wait_for_completion_timeout(&sdd->xfer_completion, val);

	/*
	 * If the previous xfer completed within the timeout, proceed;
	 * otherwise return -EIO.
	 * DMA TX completes after merely writing data into the FIFO,
	 * without waiting for the actual transmission on the bus, so
	 * for TX-only transfers also busy-wait (up to 10 ms) until the
	 * TX FIFO drains and the controller reports TX done.
	 * DMA RX only completes after data has been read back out of
	 * the FIFO, which requires the bus transmission to finish, so
	 * no extra wait is needed when the transfer involves RX.
	 */
	if (val && !xfer->rx_buf) {
		val = msecs_to_loops(10);
		status = readl(regs + S3C64XX_SPI_STATUS);
		while ((TX_FIFO_LVL(status, sdd)
			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
		       && --val) {
			cpu_relax();
			status = readl(regs + S3C64XX_SPI_STATUS);
		}

	}

	/* val == 0 means either completion or drain wait timed out */
	if (!val)
		return -EIO;

	return 0;
}
0553
/*
 * s3c64xx_wait_for_pio - busy-wait for a PIO transfer and drain RX.
 *
 * Polls the status register until the RX FIFO level reaches the transfer
 * length (the controller always shifts RX data in, even for TX-only
 * transfers - see s3c64xx_enable_datapath()), then copies the received
 * data out of the FIFO in FIFO-sized chunks.
 * Returns 0 on success, -EIO if the expected level is never reached.
 */
static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int loops;
	u32 cpy_len;
	u8 *buf;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed', plus tolerance */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10;

	val = msecs_to_loops(ms);
	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);

	if (!val)
		return -EIO;

	/* If it was only Tx, nothing to copy back */
	if (!xfer->rx_buf) {
		sdd->state &= ~TXBUSY;
		return 0;
	}

	/*
	 * If the receive length is bigger than the controller fifo
	 * size, calculate the loops and read the fifo as many times.
	 * loops = length / max fifo size (calculated by using the
	 * fifo mask).
	 * For any size less than the fifo size the below code is
	 * executed atleast once.
	 */
	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
	buf = xfer->rx_buf;
	do {
		/* wait for data to be received in the fifo */
		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						       (loops ? ms : 0));

		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				    buf, cpy_len);
			break;
		}

		buf = buf + cpy_len;
	} while (loops--);
	sdd->state &= ~RXBUSY;

	return 0;
}
0619
/*
 * s3c64xx_spi_config - apply the current mode, word size and speed
 * (sdd->cur_mode / cur_bpw / cur_speed) to the controller registers.
 *
 * Returns 0 on success, or the error from clk_set_rate() when the clock
 * is sourced from the CMU.  Note that sdd->cur_speed is rewritten to the
 * rate actually achieved.
 */
static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	int ret;
	u32 val;
	int div = sdd->port_conf->clk_div;

	/* Disable Clock while reprogramming (not needed for CMU clock) */
	if (!sdd->port_conf->clk_from_cmu) {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase; always master mode */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA transfer size from bits-per-word */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	/* Internal loopback, only on controllers that support it */
	if ((sdd->cur_mode & SPI_LOOP) && sdd->port_conf->has_loopback)
		val |= S3C64XX_SPI_MODE_SELF_LOOPBACK;

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* The CMU provides the divided clock directly */
		ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * div);
		if (ret)
			return ret;
		sdd->cur_speed = clk_get_rate(sdd->src_clk) / div;
	} else {
		/* Configure Clock: program the prescaler (PSR) */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / div - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	return 0;
}
0694
0695 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
0696
0697 static int s3c64xx_spi_prepare_message(struct spi_master *master,
0698 struct spi_message *msg)
0699 {
0700 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
0701 struct spi_device *spi = msg->spi;
0702 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
0703
0704
0705 if (!cs)
0706
0707 writel(0, sdd->regs + S3C64XX_SPI_FB_CLK);
0708 else
0709 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
0710
0711 return 0;
0712 }
0713
/*
 * s3c64xx_spi_transfer_one - execute a single spi_transfer.
 *
 * Reconfigures the controller if speed/bpw changed, then runs the
 * transfer via DMA when available and worthwhile, otherwise via PIO.
 * In polling (PIO) mode, transfers larger than the FIFO are split into
 * FIFO-sized chunks by temporarily rewriting xfer->len and advancing
 * the buffer pointers; the originals are restored before returning.
 * Returns 0 on success or a negative errno.
 */
static int s3c64xx_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
	const void *tx_buf = NULL;
	void *rx_buf = NULL;
	int target_len = 0, origin_len = 0;
	int use_dma = 0;
	int status;
	u32 speed;
	u8 bpw;
	unsigned long flags;

	reinit_completion(&sdd->xfer_completion);

	/* Only BPW and Speed may change across transfers */
	bpw = xfer->bits_per_word;
	speed = xfer->speed_hz;

	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
		sdd->cur_bpw = bpw;
		sdd->cur_speed = speed;
		sdd->cur_mode = spi->mode;
		status = s3c64xx_spi_config(sdd);
		if (status)
			return status;
	}

	if (!is_polling(sdd) && (xfer->len > fifo_len) &&
	    sdd->rx_dma.ch && sdd->tx_dma.ch) {
		use_dma = 1;

	} else if (xfer->len > fifo_len) {
		/*
		 * Polling fallback: save the original buffers/length so
		 * the transfer can be chunked to FIFO size and restored
		 * afterwards.
		 */
		tx_buf = xfer->tx_buf;
		rx_buf = xfer->rx_buf;
		origin_len = xfer->len;

		target_len = xfer->len;
		if (xfer->len > fifo_len)
			xfer->len = fifo_len;
	}

	do {
		spin_lock_irqsave(&sdd->lock, flags);

		/* Pending only which is to be done */
		sdd->state &= ~RXBUSY;
		sdd->state &= ~TXBUSY;

		/* Enable Slave Select */
		s3c64xx_spi_set_cs(spi, true);

		status = s3c64xx_enable_datapath(sdd, xfer, use_dma);

		spin_unlock_irqrestore(&sdd->lock, flags);

		if (status) {
			dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
			break;
		}

		if (use_dma)
			status = s3c64xx_wait_for_dma(sdd, xfer);
		else
			status = s3c64xx_wait_for_pio(sdd, xfer);

		if (status) {
			dev_err(&spi->dev,
				"I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
				(sdd->state & RXBUSY) ? 'f' : 'p',
				(sdd->state & TXBUSY) ? 'f' : 'p',
				xfer->len, use_dma ? 1 : 0, status);

			if (use_dma) {
				struct dma_tx_state s;

				/* Pause, read residue, then kill the channel */
				if (xfer->tx_buf && (sdd->state & TXBUSY)) {
					dmaengine_pause(sdd->tx_dma.ch);
					dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
					dmaengine_terminate_all(sdd->tx_dma.ch);
					dev_err(&spi->dev, "TX residue: %d\n", s.residue);

				}
				if (xfer->rx_buf && (sdd->state & RXBUSY)) {
					dmaengine_pause(sdd->rx_dma.ch);
					dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
					dmaengine_terminate_all(sdd->rx_dma.ch);
					dev_err(&spi->dev, "RX residue: %d\n", s.residue);
				}
			}
		} else {
			s3c64xx_flush_fifo(sdd);
		}
		/* Advance to the next chunk (polling mode only) */
		if (target_len > 0) {
			target_len -= xfer->len;

			if (xfer->tx_buf)
				xfer->tx_buf += xfer->len;

			if (xfer->rx_buf)
				xfer->rx_buf += xfer->len;

			if (target_len > fifo_len)
				xfer->len = fifo_len;
			else
				xfer->len = target_len;
		}
	} while (target_len > 0);

	if (origin_len) {
		/* Restore original xfer buffers and length */
		xfer->tx_buf = tx_buf;
		xfer->rx_buf = rx_buf;
		xfer->len = origin_len;
	}

	return status;
}
0835
0836 static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
0837 struct spi_device *spi)
0838 {
0839 struct s3c64xx_spi_csinfo *cs;
0840 struct device_node *slave_np, *data_np = NULL;
0841 u32 fb_delay = 0;
0842
0843 slave_np = spi->dev.of_node;
0844 if (!slave_np) {
0845 dev_err(&spi->dev, "device node not found\n");
0846 return ERR_PTR(-EINVAL);
0847 }
0848
0849 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
0850 if (!cs)
0851 return ERR_PTR(-ENOMEM);
0852
0853 data_np = of_get_child_by_name(slave_np, "controller-data");
0854 if (!data_np) {
0855 dev_info(&spi->dev, "feedback delay set to default (0)\n");
0856 return cs;
0857 }
0858
0859 of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
0860 cs->fb_delay = fb_delay;
0861 of_node_put(data_np);
0862 return cs;
0863 }
0864
0865
0866
0867
0868
0869
0870
/*
 * s3c64xx_spi_setup - per-device setup callback.
 *
 * Fetches (DT) or uses provided controller data, then - unless the clock
 * comes from the CMU - validates that the requested max_speed_hz can be
 * produced by the prescaler and rounds it to the closest achievable rate.
 * Returns 0 on success or a negative errno; on error the DT-allocated
 * controller data is freed.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	int err;
	int div;

	sdd = spi_master_get_devdata(spi->master);
	if (spi->dev.of_node) {
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	}

	/* NULL is fine, we just avoid using the FB delay (=0) */
	if (IS_ERR(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	if (!spi_get_ctldata(spi))
		spi_set_ctldata(spi, cs);

	pm_runtime_get_sync(&sdd->pdev->dev);

	div = sdd->port_conf->clk_div;

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible (prescaler = 0) */
		speed = clk_get_rate(sdd->src_clk) / div / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		psr = clk_get_rate(sdd->src_clk) / div / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		speed = clk_get_rate(sdd->src_clk) / div / (psr + 1);
		if (spi->max_speed_hz < speed) {
			/* Round the prescaler up if there is headroom */
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / div / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	pm_runtime_put_autosuspend(&sdd->pdev->dev);
	s3c64xx_spi_set_cs(spi, false);

	return 0;

setup_exit:
	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	pm_runtime_put_autosuspend(&sdd->pdev->dev);

	/* setup() returns with device de-selected */
	s3c64xx_spi_set_cs(spi, false);

	spi_set_ctldata(spi, NULL);

	/* This was dynamically allocated on the DT path */
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}
0953
0954 static void s3c64xx_spi_cleanup(struct spi_device *spi)
0955 {
0956 struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
0957
0958
0959 if (spi->dev.of_node)
0960 kfree(cs);
0961
0962 spi_set_ctldata(spi, NULL);
0963 }
0964
0965 static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
0966 {
0967 struct s3c64xx_spi_driver_data *sdd = data;
0968 struct spi_master *spi = sdd->master;
0969 unsigned int val, clr = 0;
0970
0971 val = readl(sdd->regs + S3C64XX_SPI_STATUS);
0972
0973 if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
0974 clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
0975 dev_err(&spi->dev, "RX overrun\n");
0976 }
0977 if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
0978 clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
0979 dev_err(&spi->dev, "RX underrun\n");
0980 }
0981 if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
0982 clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
0983 dev_err(&spi->dev, "TX overrun\n");
0984 }
0985 if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
0986 clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
0987 dev_err(&spi->dev, "TX underrun\n");
0988 }
0989
0990
0991 writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
0992 writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
0993
0994 return IRQ_HANDLED;
0995 }
0996
/*
 * s3c64xx_spi_hwinit - bring the controller to a known idle state.
 *
 * Deasserts CS (as applicable), disables interrupts, selects the source
 * clock, zeroes mode/packet-count/swap registers, clears pending error
 * bits, programs the trailing-byte count and flushes both FIFOs.
 * Called from probe and resume paths.
 */
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	/* Force the next transfer to reprogram speed via spi_config() */
	sdd->cur_speed = 0;

	if (sci->no_cs)
		writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
	else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	s3c64xx_flush_fifo(sdd);
}
1037
1038 #ifdef CONFIG_OF
1039 static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
1040 {
1041 struct s3c64xx_spi_info *sci;
1042 u32 temp;
1043
1044 sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
1045 if (!sci)
1046 return ERR_PTR(-ENOMEM);
1047
1048 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
1049 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
1050 sci->src_clk_nr = 0;
1051 } else {
1052 sci->src_clk_nr = temp;
1053 }
1054
1055 if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
1056 dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
1057 sci->num_cs = 1;
1058 } else {
1059 sci->num_cs = temp;
1060 }
1061
1062 sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
1063
1064 return sci;
1065 }
1066 #else
/* Non-DT builds: controller info comes straight from platform data. */
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
1071 #endif
1072
/*
 * Look up the per-SoC port configuration: from the OF match table on
 * DT systems, otherwise from the platform device ID's driver_data.
 */
static inline const struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
						struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node)
		return of_device_get_match_data(&pdev->dev);
#endif
	return (const struct s3c64xx_spi_port_config *)platform_get_device_id(pdev)->driver_data;
}
1082
1083 static int s3c64xx_spi_probe(struct platform_device *pdev)
1084 {
1085 struct resource *mem_res;
1086 struct s3c64xx_spi_driver_data *sdd;
1087 struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
1088 struct spi_master *master;
1089 int ret, irq;
1090 char clk_name[16];
1091
1092 if (!sci && pdev->dev.of_node) {
1093 sci = s3c64xx_spi_parse_dt(&pdev->dev);
1094 if (IS_ERR(sci))
1095 return PTR_ERR(sci);
1096 }
1097
1098 if (!sci) {
1099 dev_err(&pdev->dev, "platform_data missing!\n");
1100 return -ENODEV;
1101 }
1102
1103 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1104 if (mem_res == NULL) {
1105 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
1106 return -ENXIO;
1107 }
1108
1109 irq = platform_get_irq(pdev, 0);
1110 if (irq < 0) {
1111 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1112 return irq;
1113 }
1114
1115 master = spi_alloc_master(&pdev->dev,
1116 sizeof(struct s3c64xx_spi_driver_data));
1117 if (master == NULL) {
1118 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
1119 return -ENOMEM;
1120 }
1121
1122 platform_set_drvdata(pdev, master);
1123
1124 sdd = spi_master_get_devdata(master);
1125 sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
1126 sdd->master = master;
1127 sdd->cntrlr_info = sci;
1128 sdd->pdev = pdev;
1129 sdd->sfr_start = mem_res->start;
1130 if (pdev->dev.of_node) {
1131 ret = of_alias_get_id(pdev->dev.of_node, "spi");
1132 if (ret < 0) {
1133 dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
1134 ret);
1135 goto err_deref_master;
1136 }
1137 sdd->port_id = ret;
1138 } else {
1139 sdd->port_id = pdev->id;
1140 }
1141
1142 sdd->cur_bpw = 8;
1143
1144 sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1145 sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1146
1147 master->dev.of_node = pdev->dev.of_node;
1148 master->bus_num = sdd->port_id;
1149 master->setup = s3c64xx_spi_setup;
1150 master->cleanup = s3c64xx_spi_cleanup;
1151 master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1152 master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
1153 master->prepare_message = s3c64xx_spi_prepare_message;
1154 master->transfer_one = s3c64xx_spi_transfer_one;
1155 master->num_chipselect = sci->num_cs;
1156 master->use_gpio_descriptors = true;
1157 master->dma_alignment = 8;
1158 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
1159 SPI_BPW_MASK(8);
1160
1161 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1162 if (sdd->port_conf->has_loopback)
1163 master->mode_bits |= SPI_LOOP;
1164 master->auto_runtime_pm = true;
1165 if (!is_polling(sdd))
1166 master->can_dma = s3c64xx_spi_can_dma;
1167
1168 sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
1169 if (IS_ERR(sdd->regs)) {
1170 ret = PTR_ERR(sdd->regs);
1171 goto err_deref_master;
1172 }
1173
1174 if (sci->cfg_gpio && sci->cfg_gpio()) {
1175 dev_err(&pdev->dev, "Unable to config gpio\n");
1176 ret = -EBUSY;
1177 goto err_deref_master;
1178 }
1179
1180
1181 sdd->clk = devm_clk_get(&pdev->dev, "spi");
1182 if (IS_ERR(sdd->clk)) {
1183 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
1184 ret = PTR_ERR(sdd->clk);
1185 goto err_deref_master;
1186 }
1187
1188 ret = clk_prepare_enable(sdd->clk);
1189 if (ret) {
1190 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1191 goto err_deref_master;
1192 }
1193
1194 sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
1195 sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1196 if (IS_ERR(sdd->src_clk)) {
1197 dev_err(&pdev->dev,
1198 "Unable to acquire clock '%s'\n", clk_name);
1199 ret = PTR_ERR(sdd->src_clk);
1200 goto err_disable_clk;
1201 }
1202
1203 ret = clk_prepare_enable(sdd->src_clk);
1204 if (ret) {
1205 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
1206 goto err_disable_clk;
1207 }
1208
1209 if (sdd->port_conf->clk_ioclk) {
1210 sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
1211 if (IS_ERR(sdd->ioclk)) {
1212 dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
1213 ret = PTR_ERR(sdd->ioclk);
1214 goto err_disable_src_clk;
1215 }
1216
1217 ret = clk_prepare_enable(sdd->ioclk);
1218 if (ret) {
1219 dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
1220 goto err_disable_src_clk;
1221 }
1222 }
1223
1224 pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
1225 pm_runtime_use_autosuspend(&pdev->dev);
1226 pm_runtime_set_active(&pdev->dev);
1227 pm_runtime_enable(&pdev->dev);
1228 pm_runtime_get_sync(&pdev->dev);
1229
1230
1231 s3c64xx_spi_hwinit(sdd);
1232
1233 spin_lock_init(&sdd->lock);
1234 init_completion(&sdd->xfer_completion);
1235
1236 ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
1237 "spi-s3c64xx", sdd);
1238 if (ret != 0) {
1239 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1240 irq, ret);
1241 goto err_pm_put;
1242 }
1243
1244 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1245 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1246 sdd->regs + S3C64XX_SPI_INT_EN);
1247
1248 ret = devm_spi_register_master(&pdev->dev, master);
1249 if (ret != 0) {
1250 dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
1251 goto err_pm_put;
1252 }
1253
1254 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
1255 sdd->port_id, master->num_chipselect);
1256 dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
1257 mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
1258
1259 pm_runtime_mark_last_busy(&pdev->dev);
1260 pm_runtime_put_autosuspend(&pdev->dev);
1261
1262 return 0;
1263
1264 err_pm_put:
1265 pm_runtime_put_noidle(&pdev->dev);
1266 pm_runtime_disable(&pdev->dev);
1267 pm_runtime_set_suspended(&pdev->dev);
1268
1269 clk_disable_unprepare(sdd->ioclk);
1270 err_disable_src_clk:
1271 clk_disable_unprepare(sdd->src_clk);
1272 err_disable_clk:
1273 clk_disable_unprepare(sdd->clk);
1274 err_deref_master:
1275 spi_master_put(master);
1276
1277 return ret;
1278 }
1279
/*
 * s3c64xx_spi_remove - platform driver unbind callback.
 *
 * Tears down what probe set up: masks controller interrupts, releases the
 * DMA channels (when DMA mode was in use), gates the clocks and retires the
 * runtime-PM state.  Returns 0 unconditionally.
 */
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	/* Resume the device so the register write and clock teardown below
	 * happen with the controller powered. */
	pm_runtime_get_sync(&pdev->dev);

	/* Mask all controller interrupt sources before anything else goes
	 * away underneath the (devm-managed) IRQ handler. */
	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	/* DMA channels exist only when the port is not polling-only. */
	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}

	clk_disable_unprepare(sdd->ioclk);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	/* Balance the get_sync above without triggering idle callbacks,
	 * then take the device out of runtime PM entirely. */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	return 0;
}
1306
1307 #ifdef CONFIG_PM_SLEEP
1308 static int s3c64xx_spi_suspend(struct device *dev)
1309 {
1310 struct spi_master *master = dev_get_drvdata(dev);
1311 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1312
1313 int ret = spi_master_suspend(master);
1314 if (ret)
1315 return ret;
1316
1317 ret = pm_runtime_force_suspend(dev);
1318 if (ret < 0)
1319 return ret;
1320
1321 sdd->cur_speed = 0;
1322
1323 return 0;
1324 }
1325
/*
 * s3c64xx_spi_resume - system wakeup entry.
 *
 * Re-applies board GPIO configuration, powers the controller back up via
 * runtime PM (which also re-runs hwinit through the runtime-resume path),
 * and finally restarts the SPI core's message queue.
 */
static int s3c64xx_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	int ret;

	/* NOTE(review): cfg_gpio()'s return value is ignored here although
	 * probe treats a nonzero return as fatal — confirm this is intended. */
	if (sci->cfg_gpio)
		sci->cfg_gpio();

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_master_resume(master);
}
1342 #endif
1343
1344 #ifdef CONFIG_PM
/*
 * s3c64xx_spi_runtime_suspend - gate all controller clocks.
 *
 * Clocks are disabled in the reverse of the order runtime_resume enables
 * them.  sdd->ioclk may never have been acquired (ports without
 * port_conf->clk_ioclk); the clk API treats a NULL clk as a no-op.
 */
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	clk_disable_unprepare(sdd->clk);
	clk_disable_unprepare(sdd->src_clk);
	clk_disable_unprepare(sdd->ioclk);

	return 0;
}
1356
/*
 * s3c64xx_spi_runtime_resume - ungate clocks and reinitialize the block.
 *
 * Enables ioclk (only on ports that have one), then the source clock, then
 * the bus clock; on failure each label unwinds exactly the clocks enabled
 * so far (clk_disable_unprepare() on a never-acquired NULL ioclk is a
 * no-op).  After power-up the controller registers are reprogrammed from
 * scratch and the error interrupt sources are re-armed.
 */
static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int ret;

	if (sdd->port_conf->clk_ioclk) {
		ret = clk_prepare_enable(sdd->ioclk);
		if (ret != 0)
			return ret;
	}

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret != 0)
		goto err_disable_ioclk;

	ret = clk_prepare_enable(sdd->clk);
	if (ret != 0)
		goto err_disable_src_clk;

	/* Register state was lost while powered down; redo full init. */
	s3c64xx_spi_hwinit(sdd);

	/* Re-enable FIFO overrun/underrun error interrupts. */
	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

	return 0;

err_disable_src_clk:
	clk_disable_unprepare(sdd->src_clk);
err_disable_ioclk:
	clk_disable_unprepare(sdd->ioclk);

	return ret;
}
1392 #endif
1393
/* Power-management hooks: system sleep and runtime PM (no idle callback). */
static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};
1399
/*
 * Per-SoC port parameters.  fifo_lvl_mask is indexed by port; the FIFO
 * depth in bytes is derived as (mask >> 1) + 1.  rx_lvl_offset/tx_st_done
 * are presumably STATUS-register bit positions — confirm against the SoC
 * manual.  S3C2443: single port, high-speed capable.
 */
static const struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.clk_div	= 2,
	.high_speed	= true,
};
1407
/* S3C6410: two ports, 64-byte FIFOs, no high-speed mode. */
static const struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.clk_div	= 2,
};
1414
/* S5PV210: port 0 has a deeper FIFO (0x1ff mask) than port 1. */
static const struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.clk_div	= 2,
	.high_speed	= true,
};
1422
/*
 * Exynos4: three ports; clock is fed from the CMU and chip-select is
 * handled automatically by the controller (S3C64XX_SPI_QUIRK_CS_AUTO).
 */
static const struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.clk_div	= 2,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
1432
/* Exynos7: six ports with mixed FIFO depths; CMU-sourced clock, auto CS. */
static const struct s3c64xx_spi_port_config exynos7_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.clk_div	= 2,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
1442
/*
 * Exynos5433: like Exynos7 but additionally needs the dedicated
 * "spi_ioclk" (clk_ioclk = true; acquired and gated by probe/runtime PM).
 */
static const struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.clk_div	= 2,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.clk_ioclk	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
1453
/*
 * ExynosAuto v9: all MAX_SPI_PORTS (12) ports populated; divider of 4,
 * ioclk required, and internal loopback mode supported (has_loopback
 * enables SPI_LOOP in probe).
 */
static const struct s3c64xx_spi_port_config exynosautov9_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff, 0x7f,
			    0x7f, 0x7f, 0x7f, 0x7f},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.clk_div	= 4,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.clk_ioclk	= true,
	.has_loopback	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
1466
/* Tesla FSD: five ports, CMU-sourced clock, no separate ioclk, auto CS. */
static const struct s3c64xx_spi_port_config fsd_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.clk_div	= 2,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.clk_ioclk	= false,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
1477
/*
 * Legacy (non-DT) platform-device id table; driver_data points at the
 * matching port configuration.
 */
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	},
	{ },
};
1488
1489 static const struct of_device_id s3c64xx_spi_dt_match[] = {
1490 { .compatible = "samsung,s3c2443-spi",
1491 .data = (void *)&s3c2443_spi_port_config,
1492 },
1493 { .compatible = "samsung,s3c6410-spi",
1494 .data = (void *)&s3c6410_spi_port_config,
1495 },
1496 { .compatible = "samsung,s5pv210-spi",
1497 .data = (void *)&s5pv210_spi_port_config,
1498 },
1499 { .compatible = "samsung,exynos4210-spi",
1500 .data = (void *)&exynos4_spi_port_config,
1501 },
1502 { .compatible = "samsung,exynos7-spi",
1503 .data = (void *)&exynos7_spi_port_config,
1504 },
1505 { .compatible = "samsung,exynos5433-spi",
1506 .data = (void *)&exynos5433_spi_port_config,
1507 },
1508 { .compatible = "samsung,exynosautov9-spi",
1509 .data = (void *)&exynosautov9_spi_port_config,
1510 },
1511 { .compatible = "tesla,fsd-spi",
1512 .data = (void *)&fsd_spi_port_config,
1513 },
1514 { },
1515 };
1516 MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
1517
/* Platform driver glue; supports both DT and legacy id-table binding. */
static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);
1531
/* Module metadata. */
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");