0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/clk.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/io.h>
0013 #include <linux/iopoll.h>
0014 #include <linux/module.h>
0015 #include <linux/of.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/slab.h>
0018 #include <linux/spi/spi.h>
0019
0020
/* SQI controller register offsets (from the peripheral base) */
#define PESQI_XIP_CONF1_REG 0x00
#define PESQI_XIP_CONF2_REG 0x04
#define PESQI_CONF_REG 0x08
#define PESQI_CTRL_REG 0x0C
#define PESQI_CLK_CTRL_REG 0x10
#define PESQI_CMD_THRES_REG 0x14
#define PESQI_INT_THRES_REG 0x18
#define PESQI_INT_ENABLE_REG 0x1C
#define PESQI_INT_STAT_REG 0x20
#define PESQI_TX_DATA_REG 0x24
#define PESQI_RX_DATA_REG 0x28
#define PESQI_STAT1_REG 0x2C
#define PESQI_STAT2_REG 0x30
#define PESQI_BD_CTRL_REG 0x34
#define PESQI_BD_CUR_ADDR_REG 0x38
/* NOTE(review): no register is defined at offset 0x3C in this map */
#define PESQI_BD_BASE_ADDR_REG 0x40
#define PESQI_BD_STAT_REG 0x44
#define PESQI_BD_POLL_CTRL_REG 0x48
#define PESQI_BD_TX_DMA_STAT_REG 0x4C
#define PESQI_BD_RX_DMA_STAT_REG 0x50
#define PESQI_THRES_REG 0x54
#define PESQI_INT_SIGEN_REG 0x58
0043
0044
/* PESQI_CONF_REG fields */
#define PESQI_MODE 0x7 /* operating-mode field mask */
#define PESQI_MODE_BOOT 0
#define PESQI_MODE_PIO 1
#define PESQI_MODE_DMA 2 /* buffer-descriptor DMA mode, used by this driver */
#define PESQI_MODE_XIP 3
#define PESQI_MODE_SHIFT 0
#define PESQI_CPHA BIT(3)
#define PESQI_CPOL BIT(4)
#define PESQI_LSBF BIT(5) /* LSB-first bit order */
#define PESQI_RXLATCH BIT(7)
#define PESQI_SERMODE BIT(8)
#define PESQI_WP_EN BIT(9)
#define PESQI_HOLD_EN BIT(10)
#define PESQI_BURST_EN BIT(12)
#define PESQI_CS_CTRL_HW BIT(15)
#define PESQI_SOFT_RESET BIT(16) /* self-clearing; polled in hw_init */
#define PESQI_LANES_SHIFT 20
#define PESQI_SINGLE_LANE 0
#define PESQI_DUAL_LANE 1
#define PESQI_QUAD_LANE 2
#define PESQI_CSEN_SHIFT 24 /* chip-select enable bits start here */
#define PESQI_EN BIT(23) /* controller enable */
0067
0068
/* PESQI_CLK_CTRL_REG fields */
#define PESQI_CLK_EN BIT(0)
#define PESQI_CLK_STABLE BIT(1) /* set by hardware once the divider settles */
#define PESQI_CLKDIV_SHIFT 8
#define PESQI_CLKDIV 0xff /* divider field mask */

/* PESQI_CMD_THRES_REG / PESQI_INT_THRES_REG fields (FIFO thresholds) */
#define PESQI_TXTHR_MASK 0x1f
#define PESQI_TXTHR_SHIFT 8
#define PESQI_RXTHR_MASK 0x1f
#define PESQI_RXTHR_SHIFT 0

/* interrupt status/enable/signal bits (INT_STAT/INT_ENABLE/INT_SIGEN) */
#define PESQI_TXEMPTY BIT(0)
#define PESQI_TXFULL BIT(1)
#define PESQI_TXTHR BIT(2)
#define PESQI_RXEMPTY BIT(3)
#define PESQI_RXFULL BIT(4)
#define PESQI_RXTHR BIT(5)
#define PESQI_BDDONE BIT(9) /* current buffer descriptor finished */
#define PESQI_PKTCOMP BIT(10) /* whole packet (descriptor chain) finished */
#define PESQI_DMAERR BIT(11)

/* PESQI_BD_CTRL_REG fields (buffer-descriptor processor control) */
#define PESQI_DMA_EN BIT(0)
#define PESQI_POLL_EN BIT(1)
#define PESQI_BDP_START BIT(2)
0096
/*
 * Hardware buffer descriptor, read by the SQI BD processor over DMA.
 * Lives in the coherent area allocated in ring_desc_ring_alloc().
 */
struct buf_desc {
	u32 bd_ctrl;	/* control word: buffer length + BD_* flags below */
	u32 bd_status;	/* written back by hardware; cleared before submit */
	u32 bd_addr;	/* DMA address of the data buffer */
	u32 bd_nextp;	/* DMA address of the next descriptor (0 terminates) */
};

/* bd_ctrl fields */
#define BD_BUFLEN 0x1ff /* buffer length mask, in bytes */
#define BD_CBD_INT_EN BIT(16) /* interrupt when this descriptor completes */
#define BD_PKT_INT_EN BIT(17) /* interrupt when the whole packet completes */
#define BD_LIFM BIT(18) /* last in frame/message */
#define BD_LAST BIT(19) /* last descriptor of the chain */
#define BD_DATA_RECV BIT(20) /* direction: receive (else transmit) */
#define BD_DDR BIT(21)
#define BD_DUAL BIT(22) /* dual-lane transfer */
#define BD_QUAD BIT(23) /* quad-lane transfer */
#define BD_LSBF BIT(25) /* LSB-first bit order */
#define BD_STAT_CHECK BIT(27)
#define BD_DEVSEL_SHIFT 28 /* chip-select number */
#define BD_CS_DEASSERT BIT(30) /* deassert CS after this descriptor */
#define BD_EN BIT(31) /* descriptor valid */
0119
0120
0121
0122
0123
0124
0125
0126
/*
 * Software bookkeeping for one hardware buffer descriptor.
 * Moves between sqi->bd_list_free and sqi->bd_list_used.
 */
struct ring_desc {
	struct list_head list;	/* membership in free/used list */
	struct buf_desc *bd;	/* CPU pointer to the hardware descriptor */
	dma_addr_t bd_dma;	/* DMA address of that descriptor */
	u32 xfer_len;		/* bytes described by this descriptor */
};

/* per-descriptor buffer limit and ring size */
#define PESQI_BD_BUF_LEN_MAX 256
#define PESQI_BD_COUNT 256 /* max descriptors per transfer */

/* driver-private controller state */
struct pic32_sqi {
	void __iomem *regs;		/* mapped register base */
	struct clk *sys_clk;		/* register-interface clock ("reg_ck") */
	struct clk *base_clk;		/* SPI bit clock parent ("spi_ck") */
	struct spi_master *master;
	int irq;
	struct completion xfer_done;	/* signalled by ISR on PKTCOMP */
	struct ring_desc *ring;		/* array of PESQI_BD_COUNT ring_descs */
	void *bd;			/* coherent area of hardware descriptors */
	dma_addr_t bd_dma;		/* DMA address of that area */
	struct list_head bd_list_free;	/* descriptors available for use */
	struct list_head bd_list_used;	/* descriptors queued for current msg */
	struct spi_device *cur_spi;	/* cached to skip reprogramming */
	u32 cur_speed;			/* cached SCK rate */
	u8 cur_mode;			/* cached SPI mode bits (-1 = invalid) */
};
0154
0155 static inline void pic32_setbits(void __iomem *reg, u32 set)
0156 {
0157 writel(readl(reg) | set, reg);
0158 }
0159
0160 static inline void pic32_clrbits(void __iomem *reg, u32 clr)
0161 {
0162 writel(readl(reg) & ~clr, reg);
0163 }
0164
0165 static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
0166 {
0167 u32 val, div;
0168
0169
0170 div = clk_get_rate(sqi->base_clk) / (2 * sck);
0171 div &= PESQI_CLKDIV;
0172
0173 val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
0174
0175 val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
0176 val |= div << PESQI_CLKDIV_SHIFT;
0177 writel(val, sqi->regs + PESQI_CLK_CTRL_REG);
0178
0179
0180 return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
0181 val & PESQI_CLK_STABLE, 1, 5000);
0182 }
0183
0184 static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
0185 {
0186 u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;
0187
0188 writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
0189
0190 writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
0191 }
0192
0193 static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
0194 {
0195 writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
0196 writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
0197 }
0198
/*
 * Interrupt handler. Rather than acking status bits, it rewrites
 * INT_ENABLE to mask the sources that have fired; the next message
 * re-enables them via pic32_sqi_enable_int().
 */
static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
{
	struct pic32_sqi *sqi = dev_id;
	u32 enable, status;

	enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
	status = readl(sqi->regs + PESQI_INT_STAT_REG);

	/* nothing pending: not ours (shared-line safety) */
	if (!status)
		return IRQ_NONE;

	if (status & PESQI_DMAERR) {
		/*
		 * DMA error: mask everything. NOTE(review): xfer_done is
		 * not completed here, so the waiter in one_message() will
		 * only recover via its 5 s timeout — confirm intended.
		 */
		enable = 0;
		goto irq_done;
	}

	/* FIFO threshold events: mask the related TX/RX FIFO sources */
	if (status & PESQI_TXTHR)
		enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);

	if (status & PESQI_RXTHR)
		enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);

	/* one descriptor finished; wait for the full packet */
	if (status & PESQI_BDDONE)
		enable &= ~PESQI_BDDONE;

	/* whole descriptor chain done: mask all and wake the waiter */
	if (status & PESQI_PKTCOMP) {
		enable = 0;
		complete(&sqi->xfer_done);
	}

irq_done:
	/* write back the reduced mask */
	writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);

	return IRQ_HANDLED;
}
0239
0240 static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
0241 {
0242 struct ring_desc *rdesc;
0243
0244 if (list_empty(&sqi->bd_list_free))
0245 return NULL;
0246
0247 rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
0248 list_move_tail(&rdesc->list, &sqi->bd_list_used);
0249 return rdesc;
0250 }
0251
/* Return a descriptor to the free pool. */
static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
	list_move(&rdesc->list, &sqi->bd_list_free);
}
0256
0257 static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
0258 struct spi_message *mesg,
0259 struct spi_transfer *xfer)
0260 {
0261 struct spi_device *spi = mesg->spi;
0262 struct scatterlist *sg, *sgl;
0263 struct ring_desc *rdesc;
0264 struct buf_desc *bd;
0265 int nents, i;
0266 u32 bd_ctrl;
0267 u32 nbits;
0268
0269
0270 bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;
0271
0272
0273 if (xfer->rx_buf) {
0274 bd_ctrl |= BD_DATA_RECV;
0275 nbits = xfer->rx_nbits;
0276 sgl = xfer->rx_sg.sgl;
0277 nents = xfer->rx_sg.nents;
0278 } else {
0279 nbits = xfer->tx_nbits;
0280 sgl = xfer->tx_sg.sgl;
0281 nents = xfer->tx_sg.nents;
0282 }
0283
0284 if (nbits & SPI_NBITS_QUAD)
0285 bd_ctrl |= BD_QUAD;
0286 else if (nbits & SPI_NBITS_DUAL)
0287 bd_ctrl |= BD_DUAL;
0288
0289
0290 if (spi->mode & SPI_LSB_FIRST)
0291 bd_ctrl |= BD_LSBF;
0292
0293
0294 bd_ctrl |= BD_EN;
0295
0296 for_each_sg(sgl, sg, nents, i) {
0297
0298 rdesc = ring_desc_get(sqi);
0299 if (!rdesc)
0300 break;
0301
0302 bd = rdesc->bd;
0303
0304
0305 rdesc->xfer_len = sg_dma_len(sg);
0306 bd->bd_ctrl = bd_ctrl;
0307 bd->bd_ctrl |= rdesc->xfer_len;
0308
0309
0310 bd->bd_status = 0;
0311
0312
0313 bd->bd_addr = sg->dma_address;
0314 }
0315
0316 return 0;
0317 }
0318
0319 static int pic32_sqi_prepare_hardware(struct spi_master *master)
0320 {
0321 struct pic32_sqi *sqi = spi_master_get_devdata(master);
0322
0323
0324 pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
0325
0326 pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
0327
0328 return 0;
0329 }
0330
/*
 * spi_master callback: this controller only operates in BD-DMA mode,
 * so every transfer must be DMA-mapped by the SPI core.
 */
static bool pic32_sqi_can_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *x)
{
	return true;
}
0338
/*
 * spi_master callback: run one spi_message via the BD processor.
 * Builds the descriptor chain, starts DMA, waits for PKTCOMP (or a
 * 5 s timeout), then tears down and finalizes the message.
 */
static int pic32_sqi_one_message(struct spi_master *master,
				 struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct ring_desc *rdesc, *next;
	struct spi_transfer *xfer;
	struct pic32_sqi *sqi;
	int ret = 0, mode;
	unsigned long timeout;
	u32 val;

	sqi = spi_master_get_devdata(master);

	reinit_completion(&sqi->xfer_done);
	msg->actual_length = 0;

	/*
	 * Per-device setup (clock rate, SPI mode) is cached and only
	 * reprogrammed when the target device changes.
	 */
	if (sqi->cur_spi != spi) {
		/* set SCK rate, if it differs from the cached one */
		if (sqi->cur_speed != spi->max_speed_hz) {
			sqi->cur_speed = spi->max_speed_hz;
			ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
			if (ret)
				dev_warn(&spi->dev, "set_clk, %d\n", ret);
		}

		/* set SPI mode (CPOL/CPHA/bit order) */
		mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
		if (sqi->cur_mode != mode) {
			val = readl(sqi->regs + PESQI_CONF_REG);
			val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
			if (mode & SPI_CPOL)
				val |= PESQI_CPOL;
			if (mode & SPI_LSB_FIRST)
				val |= PESQI_LSBF;
			/*
			 * NOTE(review): CPHA is forced on regardless of
			 * SPI_CPHA in the device mode — presumably a
			 * controller requirement; confirm against the
			 * PIC32 SQI datasheet.
			 */
			val |= PESQI_CPHA;
			writel(val, sqi->regs + PESQI_CONF_REG);

			sqi->cur_mode = mode;
		}
		sqi->cur_spi = spi;
	}

	/* map each transfer onto buffer descriptors */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = pic32_sqi_one_transfer(sqi, msg, xfer);
		if (ret) {
			dev_err(&spi->dev, "xfer %p err\n", xfer);
			goto xfer_out;
		}
	}

	/*
	 * Terminate the chain: last descriptor deasserts CS and raises
	 * the packet-complete interrupt.
	 * NOTE(review): assumes at least one descriptor was queued; an
	 * empty transfer list would make list_last_entry() bogus.
	 */
	rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
	rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
			      BD_LIFM | BD_PKT_INT_EN;

	/* point the BD processor at the head of the chain */
	rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
	writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);

	/* unmask completion/error interrupts */
	pic32_sqi_enable_int(sqi);

	/* kick off descriptor processing */
	val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
	writel(val, sqi->regs + PESQI_BD_CTRL_REG);

	/* wait for the ISR to signal packet completion */
	timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
	if (timeout == 0) {
		dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
		ret = -ETIMEDOUT;
		msg->status = ret;
	} else {
		/* success */
		msg->status = 0;
		ret = 0;
	}

	/* stop the BD processor and mask interrupts */
	writel(0, sqi->regs + PESQI_BD_CTRL_REG);

	pic32_sqi_disable_int(sqi);

xfer_out:
	/* reclaim used descriptors and account transferred bytes */
	list_for_each_entry_safe_reverse(rdesc, next,
					 &sqi->bd_list_used, list) {
		msg->actual_length += rdesc->xfer_len;

		ring_desc_put(sqi, rdesc);
	}
	spi_finalize_current_message(spi->master);

	return ret;
}
0441
0442 static int pic32_sqi_unprepare_hardware(struct spi_master *master)
0443 {
0444 struct pic32_sqi *sqi = spi_master_get_devdata(master);
0445
0446
0447 pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
0448
0449 pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
0450
0451 return 0;
0452 }
0453
/*
 * Allocate the coherent DMA area of PESQI_BD_COUNT hardware buffer
 * descriptors plus the parallel array of software ring descriptors,
 * then pre-chain the hardware descriptors via bd_nextp.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;
	struct buf_desc *bd;
	int i;

	/* coherent area for the hardware descriptors */
	sqi->bd = dma_alloc_coherent(&sqi->master->dev,
				     sizeof(*bd) * PESQI_BD_COUNT,
				     &sqi->bd_dma, GFP_KERNEL);
	if (!sqi->bd) {
		dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
		return -ENOMEM;
	}

	/* software bookkeeping array, one entry per hardware descriptor */
	sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
	if (!sqi->ring) {
		dma_free_coherent(&sqi->master->dev,
				  sizeof(*bd) * PESQI_BD_COUNT,
				  sqi->bd, sqi->bd_dma);
		return -ENOMEM;
	}

	bd = (struct buf_desc *)sqi->bd;

	INIT_LIST_HEAD(&sqi->bd_list_free);
	INIT_LIST_HEAD(&sqi->bd_list_used);

	/* pair each ring_desc with its hardware descriptor and DMA address */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
		INIT_LIST_HEAD(&rdesc->list);
		rdesc->bd = &bd[i];
		/* bd_dma = base + byte offset of bd[i] within the area */
		rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
		list_add_tail(&rdesc->list, &sqi->bd_list_free);
	}

	/* chain descriptors via their DMA addresses; 0 terminates */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
		bd[i].bd_nextp = rdesc[i + 1].bd_dma;
	bd[PESQI_BD_COUNT - 1].bd_nextp = 0;

	return 0;
}
0498
0499 static void ring_desc_ring_free(struct pic32_sqi *sqi)
0500 {
0501 dma_free_coherent(&sqi->master->dev,
0502 sizeof(struct buf_desc) * PESQI_BD_COUNT,
0503 sqi->bd, sqi->bd_dma);
0504 kfree(sqi->ring);
0505 }
0506
/*
 * One-time controller initialization: soft-reset, program FIFO
 * thresholds, select BD-DMA mode and invalidate the cached per-device
 * settings.
 */
static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
{
	unsigned long flags;
	u32 val;

	/*
	 * NOTE(review): interrupts are disabled around the reset+poll —
	 * presumably to keep the self-clearing reset sequence atomic;
	 * confirm against the PIC32 SQI errata/datasheet.
	 */
	local_irq_save(flags);

	/* soft reset; the bit self-clears when the reset completes */
	writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);

	/* wait (up to 5 ms) for reset completion; result is ignored */
	readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
				  !(val & PESQI_SOFT_RESET), 1, 5000);

	/* start with all interrupt sources masked */
	pic32_sqi_disable_int(sqi);

	local_irq_restore(flags);

	/* TX/RX FIFO thresholds of 1 word for command handling */
	val = readl(sqi->regs + PESQI_CMD_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_CMD_THRES_REG);

	/* matching interrupt thresholds */
	val = readl(sqi->regs + PESQI_INT_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_INT_THRES_REG);

	val = readl(sqi->regs + PESQI_CONF_REG);

	/* operate in buffer-descriptor DMA mode */
	val &= ~PESQI_MODE;
	val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* advertise quad-lane capability */
	val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;

	/* enable AHB burst accesses */
	val |= PESQI_BURST_EN;

	/* enable both chip selects */
	val |= 3U << PESQI_CSEN_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* disable BD polling until a message starts it */
	writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);

	/* invalidate cached per-device configuration */
	sqi->cur_speed = 0;
	sqi->cur_mode = -1;
}
0568
/*
 * Probe: map registers, acquire IRQ and clocks, initialize hardware,
 * allocate the descriptor ring and register the SPI master.
 * Resources are released in reverse order on failure (goto ladder).
 */
static int pic32_sqi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct pic32_sqi *sqi;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
	if (!master)
		return -ENOMEM;

	sqi = spi_master_get_devdata(master);
	sqi->master = master;

	/* memory-mapped registers (devm: auto-unmapped) */
	sqi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sqi->regs)) {
		ret = PTR_ERR(sqi->regs);
		goto err_free_master;
	}

	/* interrupt line; requested later, after the ring exists */
	sqi->irq = platform_get_irq(pdev, 0);
	if (sqi->irq < 0) {
		ret = sqi->irq;
		goto err_free_master;
	}

	/* register-interface clock */
	sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
	if (IS_ERR(sqi->sys_clk)) {
		ret = PTR_ERR(sqi->sys_clk);
		dev_err(&pdev->dev, "no sys_clk ?\n");
		goto err_free_master;
	}

	/* SPI bit-clock parent */
	sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
	if (IS_ERR(sqi->base_clk)) {
		ret = PTR_ERR(sqi->base_clk);
		dev_err(&pdev->dev, "no base clk ?\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->sys_clk);
	if (ret) {
		dev_err(&pdev->dev, "sys clk enable failed\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->base_clk);
	if (ret) {
		dev_err(&pdev->dev, "base clk enable failed\n");
		clk_disable_unprepare(sqi->sys_clk);
		goto err_free_master;
	}

	init_completion(&sqi->xfer_done);

	/* soft-reset and configure the controller (clocks must be on) */
	pic32_sqi_hw_init(sqi);

	/* allocate buffer-descriptor ring before enabling the IRQ */
	ret = ring_desc_ring_alloc(sqi);
	if (ret) {
		dev_err(&pdev->dev, "ring alloc failed\n");
		goto err_disable_clk;
	}

	/*
	 * Non-devm request_irq: must be freed explicitly in remove()
	 * and on the register-failure path below.
	 */
	ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
			  dev_name(&pdev->dev), sqi);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
		goto err_free_ring;
	}

	/* controller capabilities and callbacks */
	master->num_chipselect = 2;
	master->max_speed_hz = clk_get_rate(sqi->base_clk);
	master->dma_alignment = 32;
	master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
	master->dev.of_node = pdev->dev.of_node;
	master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
			    SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->can_dma = pic32_sqi_can_dma;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	master->transfer_one_message = pic32_sqi_one_message;
	master->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
	master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;

	/*
	 * NOTE(review): devm unregister runs after remove() has already
	 * freed the IRQ and ring — verify no transfer can be in flight
	 * at that point.
	 */
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&master->dev, "failed registering spi master\n");
		free_irq(sqi->irq, sqi);
		goto err_free_ring;
	}

	platform_set_drvdata(pdev, sqi);

	return 0;

err_free_ring:
	ring_desc_ring_free(sqi);

err_disable_clk:
	clk_disable_unprepare(sqi->base_clk);
	clk_disable_unprepare(sqi->sys_clk);

err_free_master:
	spi_master_put(master);
	return ret;
}
0680
/*
 * Remove: release IRQ and descriptor ring, then gate the clocks.
 * The SPI master itself is unregistered by devres after this returns.
 */
static int pic32_sqi_remove(struct platform_device *pdev)
{
	struct pic32_sqi *sqi = platform_get_drvdata(pdev);

	/* release resources acquired with non-devm APIs in probe() */
	free_irq(sqi->irq, sqi);
	ring_desc_ring_free(sqi);

	/* disable clocks (reverse of probe enable order) */
	clk_disable_unprepare(sqi->base_clk);
	clk_disable_unprepare(sqi->sys_clk);

	return 0;
}
0695
/* devicetree match table */
static const struct of_device_id pic32_sqi_of_ids[] = {
	{.compatible = "microchip,pic32mzda-sqi",},
	{},
};
MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);

static struct platform_driver pic32_sqi_driver = {
	.driver = {
		.name = "sqi-pic32",
		.of_match_table = of_match_ptr(pic32_sqi_of_ids),
	},
	.probe = pic32_sqi_probe,
	.remove = pic32_sqi_remove,
};

module_platform_driver(pic32_sqi_driver);

MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
MODULE_LICENSE("GPL v2");