Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * PIC32 Quad SPI controller driver.
0004  *
0005  * Purna Chandra Mandal <purna.mandal@microchip.com>
0006  * Copyright (c) 2016, Microchip Technology Inc.
0007  */
0008 
0009 #include <linux/clk.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/io.h>
0013 #include <linux/iopoll.h>
0014 #include <linux/module.h>
0015 #include <linux/of.h>
0016 #include <linux/platform_device.h>
0017 #include <linux/slab.h>
0018 #include <linux/spi/spi.h>
0019 
0020 /* SQI registers */
0021 #define PESQI_XIP_CONF1_REG 0x00
0022 #define PESQI_XIP_CONF2_REG 0x04
0023 #define PESQI_CONF_REG      0x08
0024 #define PESQI_CTRL_REG      0x0C
0025 #define PESQI_CLK_CTRL_REG  0x10
0026 #define PESQI_CMD_THRES_REG 0x14
0027 #define PESQI_INT_THRES_REG 0x18
0028 #define PESQI_INT_ENABLE_REG    0x1C
0029 #define PESQI_INT_STAT_REG  0x20
0030 #define PESQI_TX_DATA_REG   0x24
0031 #define PESQI_RX_DATA_REG   0x28
0032 #define PESQI_STAT1_REG     0x2C
0033 #define PESQI_STAT2_REG     0x30
0034 #define PESQI_BD_CTRL_REG   0x34
0035 #define PESQI_BD_CUR_ADDR_REG   0x38
0036 #define PESQI_BD_BASE_ADDR_REG  0x40
0037 #define PESQI_BD_STAT_REG   0x44
0038 #define PESQI_BD_POLL_CTRL_REG  0x48
0039 #define PESQI_BD_TX_DMA_STAT_REG    0x4C
0040 #define PESQI_BD_RX_DMA_STAT_REG    0x50
0041 #define PESQI_THRES_REG     0x54
0042 #define PESQI_INT_SIGEN_REG 0x58
0043 
0044 /* PESQI_CONF_REG fields */
0045 #define PESQI_MODE      0x7
0046 #define  PESQI_MODE_BOOT    0
0047 #define  PESQI_MODE_PIO     1
0048 #define  PESQI_MODE_DMA     2
0049 #define  PESQI_MODE_XIP     3
0050 #define PESQI_MODE_SHIFT    0
0051 #define PESQI_CPHA      BIT(3)
0052 #define PESQI_CPOL      BIT(4)
0053 #define PESQI_LSBF      BIT(5)
0054 #define PESQI_RXLATCH       BIT(7)
0055 #define PESQI_SERMODE       BIT(8)
0056 #define PESQI_WP_EN     BIT(9)
0057 #define PESQI_HOLD_EN       BIT(10)
0058 #define PESQI_BURST_EN      BIT(12)
0059 #define PESQI_CS_CTRL_HW    BIT(15)
0060 #define PESQI_SOFT_RESET    BIT(16)
0061 #define PESQI_LANES_SHIFT   20
0062 #define  PESQI_SINGLE_LANE  0
0063 #define  PESQI_DUAL_LANE    1
0064 #define  PESQI_QUAD_LANE    2
0065 #define PESQI_CSEN_SHIFT    24
0066 #define PESQI_EN        BIT(23)
0067 
0068 /* PESQI_CLK_CTRL_REG fields */
0069 #define PESQI_CLK_EN        BIT(0)
0070 #define PESQI_CLK_STABLE    BIT(1)
0071 #define PESQI_CLKDIV_SHIFT  8
0072 #define PESQI_CLKDIV        0xff
0073 
0074 /* PESQI_INT_THR/CMD_THR_REG */
0075 #define PESQI_TXTHR_MASK    0x1f
0076 #define PESQI_TXTHR_SHIFT   8
0077 #define PESQI_RXTHR_MASK    0x1f
0078 #define PESQI_RXTHR_SHIFT   0
0079 
0080 /* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
0081 #define PESQI_TXEMPTY       BIT(0)
0082 #define PESQI_TXFULL        BIT(1)
0083 #define PESQI_TXTHR     BIT(2)
0084 #define PESQI_RXEMPTY       BIT(3)
0085 #define PESQI_RXFULL        BIT(4)
0086 #define PESQI_RXTHR     BIT(5)
0087 #define PESQI_BDDONE        BIT(9)  /* BD processing complete */
0088 #define PESQI_PKTCOMP       BIT(10) /* packet processing complete */
0089 #define PESQI_DMAERR        BIT(11) /* error */
0090 
0091 /* PESQI_BD_CTRL_REG */
0092 #define PESQI_DMA_EN        BIT(0) /* enable DMA engine */
0093 #define PESQI_POLL_EN       BIT(1) /* enable polling */
0094 #define PESQI_BDP_START     BIT(2) /* start BD processor */
0095 
/* PESQI controller buffer descriptor.
 *
 * Layout is fixed by the DMA engine; one descriptor per scatterlist
 * segment.  NOTE(review): field byte order as seen by the controller
 * (presumably little-endian, matching the CPU) should be confirmed
 * against the PIC32MZ reference manual.
 */
struct buf_desc {
	u32 bd_ctrl;	/* control: BD_* flags, buffer length, CS index */
	u32 bd_status;	/* reserved; zeroed by software before submit */
	u32 bd_addr;	/* DMA buffer addr */
	u32 bd_nextp;	/* next item in chain (DMA address; 0 = end) */
};
0103 
0104 /* bd_ctrl */
0105 #define BD_BUFLEN       0x1ff
0106 #define BD_CBD_INT_EN       BIT(16) /* Current BD is processed */
0107 #define BD_PKT_INT_EN       BIT(17) /* All BDs of PKT processed */
0108 #define BD_LIFM         BIT(18) /* last data of pkt */
0109 #define BD_LAST         BIT(19) /* end of list */
0110 #define BD_DATA_RECV        BIT(20) /* receive data */
0111 #define BD_DDR          BIT(21) /* DDR mode */
0112 #define BD_DUAL         BIT(22) /* Dual SPI */
0113 #define BD_QUAD         BIT(23) /* Quad SPI */
0114 #define BD_LSBF         BIT(25) /* LSB First */
0115 #define BD_STAT_CHECK       BIT(27) /* Status poll */
0116 #define BD_DEVSEL_SHIFT     28  /* CS */
0117 #define BD_CS_DEASSERT      BIT(30) /* de-assert CS after current BD */
0118 #define BD_EN           BIT(31) /* BD owned by H/W */
0119 
/**
 * struct ring_desc - Representation of SQI ring descriptor
 * @list:	list element to add to free or used list.
 * @bd:		PESQI controller buffer descriptor
 * @bd_dma:	DMA address of PESQI controller buffer descriptor
 * @xfer_len:	transfer length
 *
 * Software-side handle for one hardware buf_desc; migrates between the
 * controller's free and used lists as transfers are queued and retired.
 */
struct ring_desc {
	struct list_head list;
	struct buf_desc *bd;
	dma_addr_t bd_dma;
	u32 xfer_len;
};
0133 
0134 /* Global constants */
0135 #define PESQI_BD_BUF_LEN_MAX    256
0136 #define PESQI_BD_COUNT      256 /* max 64KB data per spi message */
0137 
/**
 * struct pic32_sqi - driver private data for one PESQI controller
 * @regs:	mapped controller register base
 * @sys_clk:	"reg_ck" clock (presumably gates register access — confirm)
 * @base_clk:	"spi_ck" clock; drives spi clock
 * @master:	associated SPI master
 * @irq:	controller interrupt line
 * @xfer_done:	signalled by the ISR on packet completion
 * @ring:	array of PESQI_BD_COUNT software ring descriptors
 * @bd:		coherent DMA region holding the hardware buf_desc ring
 * @bd_dma:	DMA address of @bd
 * @bd_list_free: ring descriptors available for use
 * @bd_list_used: ring descriptors queued for the in-flight message
 * @cur_spi:	device the speed/mode registers are currently set for
 * @cur_speed:	cached clock rate (0 forces reprogram)
 * @cur_mode:	cached SPI mode bits (-1 forces reprogram)
 */
struct pic32_sqi {
	void __iomem		*regs;
	struct clk		*sys_clk;
	struct clk		*base_clk; /* drives spi clock */
	struct spi_master	*master;
	int			irq;
	struct completion	xfer_done;
	struct ring_desc	*ring;
	void			*bd;
	dma_addr_t		bd_dma;
	struct list_head	bd_list_free; /* free */
	struct list_head	bd_list_used; /* allocated */
	struct spi_device	*cur_spi;
	u32			cur_speed;
	u8			cur_mode;
};
0154 
0155 static inline void pic32_setbits(void __iomem *reg, u32 set)
0156 {
0157     writel(readl(reg) | set, reg);
0158 }
0159 
0160 static inline void pic32_clrbits(void __iomem *reg, u32 clr)
0161 {
0162     writel(readl(reg) & ~clr, reg);
0163 }
0164 
0165 static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
0166 {
0167     u32 val, div;
0168 
0169     /* div = base_clk / (2 * spi_clk) */
0170     div = clk_get_rate(sqi->base_clk) / (2 * sck);
0171     div &= PESQI_CLKDIV;
0172 
0173     val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
0174     /* apply new divider */
0175     val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
0176     val |= div << PESQI_CLKDIV_SHIFT;
0177     writel(val, sqi->regs + PESQI_CLK_CTRL_REG);
0178 
0179     /* wait for stability */
0180     return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
0181                   val & PESQI_CLK_STABLE, 1, 5000);
0182 }
0183 
0184 static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
0185 {
0186     u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;
0187 
0188     writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
0189     /* INT_SIGEN works as interrupt-gate to INTR line */
0190     writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
0191 }
0192 
0193 static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
0194 {
0195     writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
0196     writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
0197 }
0198 
/*
 * Interrupt handler: decodes the PESQI status register and masks each
 * source as it is serviced (the interrupts are sticky, so a handled
 * source must be disabled to silence it).  Completes the in-flight
 * message on packet completion; on DMA error everything is masked and
 * the waiter in pic32_sqi_one_message() will time out.
 */
static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
{
	struct pic32_sqi *sqi = dev_id;
	u32 enable, status;

	enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
	status = readl(sqi->regs + PESQI_INT_STAT_REG);

	/* check spurious interrupt */
	if (!status)
		return IRQ_NONE;

	if (status & PESQI_DMAERR) {
		/* fatal: mask all sources, leave completion unsignalled */
		enable = 0;
		goto irq_done;
	}

	if (status & PESQI_TXTHR)
		enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);

	if (status & PESQI_RXTHR)
		enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);

	if (status & PESQI_BDDONE)
		enable &= ~PESQI_BDDONE;

	/* packet processing completed */
	if (status & PESQI_PKTCOMP) {
		/* mask all interrupts */
		enable = 0;
		/* complete transaction */
		complete(&sqi->xfer_done);
	}

irq_done:
	/* interrupts are sticky, so mask when handled */
	writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);

	return IRQ_HANDLED;
}
0239 
0240 static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
0241 {
0242     struct ring_desc *rdesc;
0243 
0244     if (list_empty(&sqi->bd_list_free))
0245         return NULL;
0246 
0247     rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
0248     list_move_tail(&rdesc->list, &sqi->bd_list_used);
0249     return rdesc;
0250 }
0251 
0252 static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
0253 {
0254     list_move(&rdesc->list, &sqi->bd_list_free);
0255 }
0256 
0257 static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
0258                   struct spi_message *mesg,
0259                   struct spi_transfer *xfer)
0260 {
0261     struct spi_device *spi = mesg->spi;
0262     struct scatterlist *sg, *sgl;
0263     struct ring_desc *rdesc;
0264     struct buf_desc *bd;
0265     int nents, i;
0266     u32 bd_ctrl;
0267     u32 nbits;
0268 
0269     /* Device selection */
0270     bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;
0271 
0272     /* half-duplex: select transfer buffer, direction and lane */
0273     if (xfer->rx_buf) {
0274         bd_ctrl |= BD_DATA_RECV;
0275         nbits = xfer->rx_nbits;
0276         sgl = xfer->rx_sg.sgl;
0277         nents = xfer->rx_sg.nents;
0278     } else {
0279         nbits = xfer->tx_nbits;
0280         sgl = xfer->tx_sg.sgl;
0281         nents = xfer->tx_sg.nents;
0282     }
0283 
0284     if (nbits & SPI_NBITS_QUAD)
0285         bd_ctrl |= BD_QUAD;
0286     else if (nbits & SPI_NBITS_DUAL)
0287         bd_ctrl |= BD_DUAL;
0288 
0289     /* LSB first */
0290     if (spi->mode & SPI_LSB_FIRST)
0291         bd_ctrl |= BD_LSBF;
0292 
0293     /* ownership to hardware */
0294     bd_ctrl |= BD_EN;
0295 
0296     for_each_sg(sgl, sg, nents, i) {
0297         /* get ring descriptor */
0298         rdesc = ring_desc_get(sqi);
0299         if (!rdesc)
0300             break;
0301 
0302         bd = rdesc->bd;
0303 
0304         /* BD CTRL: length */
0305         rdesc->xfer_len = sg_dma_len(sg);
0306         bd->bd_ctrl = bd_ctrl;
0307         bd->bd_ctrl |= rdesc->xfer_len;
0308 
0309         /* BD STAT */
0310         bd->bd_status = 0;
0311 
0312         /* BD BUFFER ADDRESS */
0313         bd->bd_addr = sg->dma_address;
0314     }
0315 
0316     return 0;
0317 }
0318 
0319 static int pic32_sqi_prepare_hardware(struct spi_master *master)
0320 {
0321     struct pic32_sqi *sqi = spi_master_get_devdata(master);
0322 
0323     /* enable spi interface */
0324     pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
0325     /* enable spi clk */
0326     pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
0327 
0328     return 0;
0329 }
0330 
0331 static bool pic32_sqi_can_dma(struct spi_master *master,
0332                   struct spi_device *spi,
0333                   struct spi_transfer *x)
0334 {
0335     /* Do DMA irrespective of transfer size */
0336     return true;
0337 }
0338 
/*
 * pic32_sqi_one_message() - transfer_one_message() callback.
 *
 * Reprograms clock rate and SPI mode when the target device changes,
 * builds the BD chain for every transfer in @msg, kicks the DMA engine
 * and sleeps (up to 5 s) until the ISR signals packet completion.
 * Used ring descriptors are always returned to the free list before
 * the message is finalized, on both success and error paths.
 *
 * Return: 0 on success, negative errno on BD setup failure or timeout.
 */
static int pic32_sqi_one_message(struct spi_master *master,
				 struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct ring_desc *rdesc, *next;
	struct spi_transfer *xfer;
	struct pic32_sqi *sqi;
	int ret = 0, mode;
	unsigned long timeout;
	u32 val;

	sqi = spi_master_get_devdata(master);

	reinit_completion(&sqi->xfer_done);
	msg->actual_length = 0;

	/* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
	 * and "delay_usecs". But spi_device specific speed and mode change
	 * can be handled at best during spi chip-select switch.
	 */
	if (sqi->cur_spi != spi) {
		/* set spi speed */
		if (sqi->cur_speed != spi->max_speed_hz) {
			sqi->cur_speed = spi->max_speed_hz;
			ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
			if (ret)
				dev_warn(&spi->dev, "set_clk, %d\n", ret);
		}

		/* set spi mode */
		mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
		if (sqi->cur_mode != mode) {
			val = readl(sqi->regs + PESQI_CONF_REG);
			val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
			if (mode & SPI_CPOL)
				val |= PESQI_CPOL;
			if (mode & SPI_LSB_FIRST)
				val |= PESQI_LSBF;
			/* CPHA is set unconditionally, whatever the
			 * requested mode — presumably a controller
			 * requirement; confirm against the PIC32MZ
			 * reference manual.
			 */
			val |= PESQI_CPHA;
			writel(val, sqi->regs + PESQI_CONF_REG);

			sqi->cur_mode = mode;
		}
		sqi->cur_spi = spi;
	}

	/* prepare hardware desc-list(BD) for transfer(s) */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = pic32_sqi_one_transfer(sqi, msg, xfer);
		if (ret) {
			dev_err(&spi->dev, "xfer %p err\n", xfer);
			goto xfer_out;
		}
	}

	/* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last
	 * element of the list.
	 * NOTE(review): assumes at least one BD was queued; a message with
	 * no mapped transfers would make list_last_entry() invalid here.
	 */
	rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
	rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
			      BD_LIFM | BD_PKT_INT_EN;

	/* set base address BD list for DMA engine */
	rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
	writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);

	/* enable interrupt */
	pic32_sqi_enable_int(sqi);

	/* enable DMA engine */
	val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
	writel(val, sqi->regs + PESQI_BD_CTRL_REG);

	/* wait for xfer completion */
	timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
	if (timeout == 0) {
		dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
		ret = -ETIMEDOUT;
		msg->status = ret;
	} else {
		/* success */
		msg->status = 0;
		ret = 0;
	}

	/* disable DMA */
	writel(0, sqi->regs + PESQI_BD_CTRL_REG);

	pic32_sqi_disable_int(sqi);

xfer_out:
	/* Walk used BDs in reverse: account transferred bytes and hand
	 * every descriptor back to the free list.
	 */
	list_for_each_entry_safe_reverse(rdesc, next,
					 &sqi->bd_list_used, list) {
		/* Update total byte transferred */
		msg->actual_length += rdesc->xfer_len;
		/* release ring descr */
		ring_desc_put(sqi, rdesc);
	}
	spi_finalize_current_message(spi->master);

	return ret;
}
0441 
0442 static int pic32_sqi_unprepare_hardware(struct spi_master *master)
0443 {
0444     struct pic32_sqi *sqi = spi_master_get_devdata(master);
0445 
0446     /* disable clk */
0447     pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
0448     /* disable spi */
0449     pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
0450 
0451     return 0;
0452 }
0453 
/*
 * ring_desc_ring_alloc() - allocate and chain the BD ring.
 *
 * Allocates one coherent DMA region holding PESQI_BD_COUNT hardware
 * buffer descriptors plus a parallel array of software ring_desc
 * entries, links each ring_desc to its buf_desc (and that buf_desc's
 * DMA address), puts all of them on the free list, then chains every
 * buf_desc to the next through bd_nextp (last entry terminates with 0).
 *
 * Return: 0 on success, -ENOMEM on allocation failure (nothing leaked).
 */
static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;
	struct buf_desc *bd;
	int i;

	/* allocate coherent DMAable memory for hardware buffer descriptors. */
	sqi->bd = dma_alloc_coherent(&sqi->master->dev,
				     sizeof(*bd) * PESQI_BD_COUNT,
				     &sqi->bd_dma, GFP_KERNEL);
	if (!sqi->bd) {
		dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
		return -ENOMEM;
	}

	/* allocate software ring descriptors */
	sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
	if (!sqi->ring) {
		dma_free_coherent(&sqi->master->dev,
				  sizeof(*bd) * PESQI_BD_COUNT,
				  sqi->bd, sqi->bd_dma);
		return -ENOMEM;
	}

	bd = (struct buf_desc *)sqi->bd;

	INIT_LIST_HEAD(&sqi->bd_list_free);
	INIT_LIST_HEAD(&sqi->bd_list_used);

	/* initialize ring-desc */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
		INIT_LIST_HEAD(&rdesc->list);
		rdesc->bd = &bd[i];
		/* DMA address of bd[i] = DMA base + byte offset of bd[i] */
		rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
		list_add_tail(&rdesc->list, &sqi->bd_list_free);
	}

	/* Prepare BD: chain to next BD(s) */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
		bd[i].bd_nextp = rdesc[i + 1].bd_dma;
	bd[PESQI_BD_COUNT - 1].bd_nextp = 0;

	return 0;
}
0498 
0499 static void ring_desc_ring_free(struct pic32_sqi *sqi)
0500 {
0501     dma_free_coherent(&sqi->master->dev,
0502               sizeof(struct buf_desc) * PESQI_BD_COUNT,
0503               sqi->bd, sqi->bd_dma);
0504     kfree(sqi->ring);
0505 }
0506 
/*
 * pic32_sqi_hw_init() - one-time controller initialization.
 *
 * Soft-resets the controller (with CPU interrupts masked, since the
 * reset raises interrupts we are not yet ready to service), programs
 * FIFO thresholds, selects DMA mode, enables quad lanes / burst mode /
 * both chip-selects, and invalidates the cached speed/mode so the
 * first message reprograms them.
 */
static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
{
	unsigned long flags;
	u32 val;

	/* Soft-reset of PESQI controller triggers interrupt.
	 * We are not yet ready to handle them so disable CPU
	 * interrupt for the time being.
	 */
	local_irq_save(flags);

	/* assert soft-reset */
	writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);

	/* wait until clear (atomic variant: IRQs are off here) */
	readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
				  !(val & PESQI_SOFT_RESET), 1, 5000);

	/* disable all interrupts */
	pic32_sqi_disable_int(sqi);

	/* Now it is safe to enable back CPU interrupt */
	local_irq_restore(flags);

	/* tx and rx fifo interrupt threshold */
	val = readl(sqi->regs + PESQI_CMD_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_CMD_THRES_REG);

	val = readl(sqi->regs + PESQI_INT_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_INT_THRES_REG);

	/* default configuration */
	val = readl(sqi->regs + PESQI_CONF_REG);

	/* set mode: DMA */
	val &= ~PESQI_MODE;
	val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* DATAEN - SQIID0-ID3
	 * ('val' still holds the mode written above, so the second
	 * CONF write below re-applies it plus the bits added here)
	 */
	val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;

	/* burst/INCR4 enable */
	val |= PESQI_BURST_EN;

	/* CSEN - all CS */
	val |= 3U << PESQI_CSEN_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* write poll count */
	writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);

	/* invalidate cached settings: first message must program them */
	sqi->cur_speed = 0;
	sqi->cur_mode = -1;
}
0568 
/*
 * pic32_sqi_probe() - bind the controller.
 *
 * Maps registers, grabs the "reg_ck"/"spi_ck" clocks, resets and
 * initializes the hardware, allocates the BD ring, installs the IRQ
 * handler and registers the SPI master.  Register mapping, clock
 * handles and master registration are devm-managed; the IRQ and BD
 * ring are manually released on the error paths here and in remove().
 */
static int pic32_sqi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct pic32_sqi *sqi;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
	if (!master)
		return -ENOMEM;

	sqi = spi_master_get_devdata(master);
	sqi->master = master;

	sqi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sqi->regs)) {
		ret = PTR_ERR(sqi->regs);
		goto err_free_master;
	}

	/* irq */
	sqi->irq = platform_get_irq(pdev, 0);
	if (sqi->irq < 0) {
		ret = sqi->irq;
		goto err_free_master;
	}

	/* clocks */
	sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
	if (IS_ERR(sqi->sys_clk)) {
		ret = PTR_ERR(sqi->sys_clk);
		dev_err(&pdev->dev, "no sys_clk ?\n");
		goto err_free_master;
	}

	sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
	if (IS_ERR(sqi->base_clk)) {
		ret = PTR_ERR(sqi->base_clk);
		dev_err(&pdev->dev, "no base clk ?\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->sys_clk);
	if (ret) {
		dev_err(&pdev->dev, "sys clk enable failed\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->base_clk);
	if (ret) {
		dev_err(&pdev->dev, "base clk enable failed\n");
		clk_disable_unprepare(sqi->sys_clk);
		goto err_free_master;
	}

	init_completion(&sqi->xfer_done);

	/* initialize hardware (soft reset; must precede IRQ install) */
	pic32_sqi_hw_init(sqi);

	/* allocate buffers & descriptors */
	ret = ring_desc_ring_alloc(sqi);
	if (ret) {
		dev_err(&pdev->dev, "ring alloc failed\n");
		goto err_disable_clk;
	}

	/* install irq handlers */
	ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
			  dev_name(&pdev->dev), sqi);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
		goto err_free_ring;
	}

	/* register master */
	master->num_chipselect	= 2;
	master->max_speed_hz	= clk_get_rate(sqi->base_clk);
	master->dma_alignment	= 32;
	master->max_dma_len	= PESQI_BD_BUF_LEN_MAX;
	master->dev.of_node	= pdev->dev.of_node;
	master->mode_bits	= SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
				  SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	master->flags		= SPI_MASTER_HALF_DUPLEX;
	master->can_dma		= pic32_sqi_can_dma;
	master->bits_per_word_mask	= SPI_BPW_RANGE_MASK(8, 32);
	master->transfer_one_message	= pic32_sqi_one_message;
	master->prepare_transfer_hardware	= pic32_sqi_prepare_hardware;
	master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&master->dev, "failed registering spi master\n");
		free_irq(sqi->irq, sqi);
		goto err_free_ring;
	}

	platform_set_drvdata(pdev, sqi);

	return 0;

err_free_ring:
	ring_desc_ring_free(sqi);

err_disable_clk:
	clk_disable_unprepare(sqi->base_clk);
	clk_disable_unprepare(sqi->sys_clk);

err_free_master:
	spi_master_put(master);
	return ret;
}
0680 
0681 static int pic32_sqi_remove(struct platform_device *pdev)
0682 {
0683     struct pic32_sqi *sqi = platform_get_drvdata(pdev);
0684 
0685     /* release resources */
0686     free_irq(sqi->irq, sqi);
0687     ring_desc_ring_free(sqi);
0688 
0689     /* disable clk */
0690     clk_disable_unprepare(sqi->base_clk);
0691     clk_disable_unprepare(sqi->sys_clk);
0692 
0693     return 0;
0694 }
0695 
/* Devicetree match table */
static const struct of_device_id pic32_sqi_of_ids[] = {
	{.compatible = "microchip,pic32mzda-sqi",},
	{},
};
MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
0701 
/* Platform driver registration */
static struct platform_driver pic32_sqi_driver = {
	.driver = {
		.name = "sqi-pic32",
		.of_match_table = of_match_ptr(pic32_sqi_of_ids),
	},
	.probe = pic32_sqi_probe,
	.remove = pic32_sqi_remove,
};

module_platform_driver(pic32_sqi_driver);

MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
MODULE_LICENSE("GPL v2");