Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 //
0003 // Driver for AT91 USART Controllers as SPI
0004 //
0005 // Copyright (C) 2018 Microchip Technology Inc.
0006 //
0007 // Author: Radu Pirea <radu.pirea@microchip.com>
0008 
0009 #include <linux/clk.h>
0010 #include <linux/delay.h>
0011 #include <linux/dmaengine.h>
0012 #include <linux/dma-direction.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/kernel.h>
0015 #include <linux/module.h>
0016 #include <linux/of_platform.h>
0017 #include <linux/gpio/consumer.h>
0018 #include <linux/pinctrl/consumer.h>
0019 #include <linux/platform_device.h>
0020 #include <linux/pm_runtime.h>
0021 
0022 #include <linux/spi/spi.h>
0023 
/* USART register offsets (relative to the memory resource base). */
#define US_CR           0x00	/* Control Register (write-only commands) */
#define US_MR           0x04	/* Mode Register */
#define US_IER          0x08	/* Interrupt Enable Register */
#define US_IDR          0x0C	/* Interrupt Disable Register */
#define US_CSR          0x14	/* Channel Status Register */
#define US_RHR          0x18	/* Receive Holding Register */
#define US_THR          0x1C	/* Transmit Holding Register */
#define US_BRGR         0x20	/* Baud Rate Generator Register */
#define US_VERSION      0xFC	/* IP version, printed at probe time */

/* US_CR command bits. */
#define US_CR_RSTRX     BIT(2)
#define US_CR_RSTTX     BIT(3)
#define US_CR_RXEN      BIT(4)
#define US_CR_RXDIS     BIT(5)
#define US_CR_TXEN      BIT(6)
#define US_CR_TXDIS     BIT(7)

/* US_MR fields used for SPI-master operation. */
#define US_MR_SPI_MASTER    0x0E	/* USART_MODE = SPI master */
#define US_MR_CHRL      GENMASK(7, 6)	/* character length: both bits set = 8 bit */
#define US_MR_CPHA      BIT(8)
#define US_MR_CPOL      BIT(16)
#define US_MR_CLKO      BIT(18)	/* drive SCK from the baud rate generator */
#define US_MR_WRDBT     BIT(20)	/* wait read before transmit */
#define US_MR_LOOP      BIT(15)	/* local loopback, for SPI_LOOP */

/* Interrupt status/mask bits (shared by IER/IDR/CSR). */
#define US_IR_RXRDY     BIT(0)
#define US_IR_TXRDY     BIT(1)
#define US_IR_OVRE      BIT(5)	/* receiver overrun */

#define US_BRGR_SIZE        BIT(16)

/* Clock-divider bounds used to derive min/max SPI bus speed. */
#define US_MIN_CLK_DIV      0x06
#define US_MAX_CLK_DIV      BIT(16)

/* Composite command/mask values. */
#define US_RESET        (US_CR_RSTRX | US_CR_RSTTX)
#define US_DISABLE      (US_CR_RXDIS | US_CR_TXDIS)
#define US_ENABLE       (US_CR_RXEN | US_CR_TXEN)
#define US_OVRE_RXRDY_IRQS  (US_IR_OVRE | US_IR_RXRDY)

#define US_INIT \
	(US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
/* Transfers shorter than this stay on the PIO path. */
#define US_DMA_MIN_BYTES       16
#define US_DMA_TIMEOUT         (msecs_to_jiffies(1000))

/* Register access macros; "reg" is the suffix after US_ (e.g. CR, MR). */
#define at91_usart_spi_readl(port, reg) \
	readl_relaxed((port)->regs + US_##reg)
#define at91_usart_spi_writel(port, reg, value) \
	writel_relaxed((value), (port)->regs + US_##reg)

#define at91_usart_spi_readb(port, reg) \
	readb_relaxed((port)->regs + US_##reg)
#define at91_usart_spi_writeb(port, reg, value) \
	writeb_relaxed((value), (port)->regs + US_##reg)
0078 
/* Per-controller driver state, stored as spi_controller devdata. */
struct at91_usart_spi {
	struct platform_device	*mpdev;	/* parent USART platform device (owns DMA resources) */
	struct spi_transfer	*current_transfer;	/* transfer being clocked out, PIO path */
	void __iomem		*regs;	/* mapped USART register window */
	struct device		*dev;
	struct clk		*clk;	/* "usart" peripheral clock from the parent */

	struct completion	xfer_completion;	/* signalled by the RX DMA callback */

	/*used in interrupt to protect data reading*/
	spinlock_t		lock;

	phys_addr_t		phybase;	/* physical base, for DMA slave addresses */

	int			irq;
	unsigned int		current_tx_remaining_bytes;
	unsigned int		current_rx_remaining_bytes;

	u32			spi_clk;	/* clk rate cached at probe, for BRGR */
	u32			status;	/* last CSR snapshot, see at91_usart_spi_read_status() */

	bool			xfer_failed;	/* set by the IRQ handler on overrun */
	bool			use_dma;	/* both DMA channels configured at probe */
};
0103 
0104 static void dma_callback(void *data)
0105 {
0106     struct spi_controller   *ctlr = data;
0107     struct at91_usart_spi   *aus = spi_master_get_devdata(ctlr);
0108 
0109     at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
0110     aus->current_rx_remaining_bytes = 0;
0111     complete(&aus->xfer_completion);
0112 }
0113 
0114 static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
0115                    struct spi_device *spi,
0116                    struct spi_transfer *xfer)
0117 {
0118     struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
0119 
0120     return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
0121 }
0122 
/*
 * Request and configure the TX/RX DMA channels from the parent USART device.
 * On success sets aus->use_dma; on failure returns a negative errno and
 * leaves ctlr->dma_tx/dma_rx NULL so the driver falls back to PIO.
 * NOTE(review): a missing channel is reported as -EBUSY here, and probe
 * treats any error as fatal — confirm that is the intended policy versus
 * silently continuing in PIO-only mode.
 */
static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
					struct at91_usart_spi *aus)
{
	struct dma_slave_config slave_config;
	/* DMA resources belong to the parent (MFD) device, not to pdev. */
	struct device *dev = &aus->mpdev->dev;
	phys_addr_t phybase = aus->phybase;
	dma_cap_mask_t mask;
	int err = 0;

	/* NOTE(review): mask is initialized but never passed to any API —
	 * looks like a leftover from a dma_request_channel() variant. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	ctlr->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
		if (IS_ERR(ctlr->dma_tx)) {
			err = PTR_ERR(ctlr->dma_tx);
			goto at91_usart_spi_error_clear;
		}

		/* NULL return: no channel bound; treat as busy. */
		dev_dbg(dev,
			"DMA TX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto at91_usart_spi_error_clear;
	}

	ctlr->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
		if (IS_ERR(ctlr->dma_rx)) {
			err = PTR_ERR(ctlr->dma_rx);
			/* TX channel already held — release it below. */
			goto at91_usart_spi_error;
		}

		dev_dbg(dev,
			"DMA RX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto at91_usart_spi_error;
	}

	/* Byte-wide, single-beat transfers against THR (write) / RHR (read). */
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_addr = (dma_addr_t)phybase + US_THR;
	slave_config.src_addr = (dma_addr_t)phybase + US_RHR;
	slave_config.src_maxburst = 1;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) {
		dev_err(&ctlr->dev,
			"failed to configure rx dma channel\n");
		err = -EINVAL;
		goto at91_usart_spi_error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(ctlr->dma_tx, &slave_config)) {
		dev_err(&ctlr->dev,
			"failed to configure tx dma channel\n");
		err = -EINVAL;
		goto at91_usart_spi_error;
	}

	aus->use_dma = true;
	return 0;

at91_usart_spi_error:
	/* Release whichever channels were successfully requested. */
	if (!IS_ERR_OR_NULL(ctlr->dma_tx))
		dma_release_channel(ctlr->dma_tx);
	if (!IS_ERR_OR_NULL(ctlr->dma_rx))
		dma_release_channel(ctlr->dma_rx);
	ctlr->dma_tx = NULL;
	ctlr->dma_rx = NULL;

at91_usart_spi_error_clear:
	return err;
}
0199 
0200 static void at91_usart_spi_release_dma(struct spi_controller *ctlr)
0201 {
0202     if (ctlr->dma_rx)
0203         dma_release_channel(ctlr->dma_rx);
0204     if (ctlr->dma_tx)
0205         dma_release_channel(ctlr->dma_tx);
0206 }
0207 
0208 static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
0209 {
0210     if (ctlr->dma_rx)
0211         dmaengine_terminate_all(ctlr->dma_rx);
0212     if (ctlr->dma_tx)
0213         dmaengine_terminate_all(ctlr->dma_tx);
0214 }
0215 
0216 static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
0217                        struct spi_transfer *xfer)
0218 {
0219     struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
0220     struct dma_chan  *rxchan = ctlr->dma_rx;
0221     struct dma_chan *txchan = ctlr->dma_tx;
0222     struct dma_async_tx_descriptor *rxdesc;
0223     struct dma_async_tx_descriptor *txdesc;
0224     dma_cookie_t cookie;
0225 
0226     /* Disable RX interrupt */
0227     at91_usart_spi_writel(aus, IDR, US_IR_RXRDY);
0228 
0229     rxdesc = dmaengine_prep_slave_sg(rxchan,
0230                      xfer->rx_sg.sgl,
0231                      xfer->rx_sg.nents,
0232                      DMA_DEV_TO_MEM,
0233                      DMA_PREP_INTERRUPT |
0234                      DMA_CTRL_ACK);
0235     if (!rxdesc)
0236         goto at91_usart_spi_err_dma;
0237 
0238     txdesc = dmaengine_prep_slave_sg(txchan,
0239                      xfer->tx_sg.sgl,
0240                      xfer->tx_sg.nents,
0241                      DMA_MEM_TO_DEV,
0242                      DMA_PREP_INTERRUPT |
0243                      DMA_CTRL_ACK);
0244     if (!txdesc)
0245         goto at91_usart_spi_err_dma;
0246 
0247     rxdesc->callback = dma_callback;
0248     rxdesc->callback_param = ctlr;
0249 
0250     cookie = rxdesc->tx_submit(rxdesc);
0251     if (dma_submit_error(cookie))
0252         goto at91_usart_spi_err_dma;
0253 
0254     cookie = txdesc->tx_submit(txdesc);
0255     if (dma_submit_error(cookie))
0256         goto at91_usart_spi_err_dma;
0257 
0258     rxchan->device->device_issue_pending(rxchan);
0259     txchan->device->device_issue_pending(txchan);
0260 
0261     return 0;
0262 
0263 at91_usart_spi_err_dma:
0264     /* Enable RX interrupt if something fails and fallback to PIO */
0265     at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
0266     at91_usart_spi_stop_dma(ctlr);
0267 
0268     return -ENOMEM;
0269 }
0270 
0271 static unsigned long at91_usart_spi_dma_timeout(struct at91_usart_spi *aus)
0272 {
0273     return wait_for_completion_timeout(&aus->xfer_completion,
0274                        US_DMA_TIMEOUT);
0275 }
0276 
0277 static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
0278 {
0279     return aus->status & US_IR_TXRDY;
0280 }
0281 
0282 static inline u32 at91_usart_spi_rx_ready(struct at91_usart_spi *aus)
0283 {
0284     return aus->status & US_IR_RXRDY;
0285 }
0286 
0287 static inline u32 at91_usart_spi_check_overrun(struct at91_usart_spi *aus)
0288 {
0289     return aus->status & US_IR_OVRE;
0290 }
0291 
0292 static inline u32 at91_usart_spi_read_status(struct at91_usart_spi *aus)
0293 {
0294     aus->status = at91_usart_spi_readl(aus, CSR);
0295     return aus->status;
0296 }
0297 
0298 static inline void at91_usart_spi_tx(struct at91_usart_spi *aus)
0299 {
0300     unsigned int len = aus->current_transfer->len;
0301     unsigned int remaining = aus->current_tx_remaining_bytes;
0302     const u8  *tx_buf = aus->current_transfer->tx_buf;
0303 
0304     if (!remaining)
0305         return;
0306 
0307     if (at91_usart_spi_tx_ready(aus)) {
0308         at91_usart_spi_writeb(aus, THR, tx_buf[len - remaining]);
0309         aus->current_tx_remaining_bytes--;
0310     }
0311 }
0312 
0313 static inline void at91_usart_spi_rx(struct at91_usart_spi *aus)
0314 {
0315     int len = aus->current_transfer->len;
0316     int remaining = aus->current_rx_remaining_bytes;
0317     u8  *rx_buf = aus->current_transfer->rx_buf;
0318 
0319     if (!remaining)
0320         return;
0321 
0322     rx_buf[len - remaining] = at91_usart_spi_readb(aus, RHR);
0323     aus->current_rx_remaining_bytes--;
0324 }
0325 
0326 static inline void
0327 at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
0328                   struct spi_transfer *xfer)
0329 {
0330     at91_usart_spi_writel(aus, BRGR,
0331                   DIV_ROUND_UP(aus->spi_clk, xfer->speed_hz));
0332 }
0333 
0334 static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
0335 {
0336     struct spi_controller *controller = dev_id;
0337     struct at91_usart_spi *aus = spi_master_get_devdata(controller);
0338 
0339     spin_lock(&aus->lock);
0340     at91_usart_spi_read_status(aus);
0341 
0342     if (at91_usart_spi_check_overrun(aus)) {
0343         aus->xfer_failed = true;
0344         at91_usart_spi_writel(aus, IDR, US_IR_OVRE | US_IR_RXRDY);
0345         spin_unlock(&aus->lock);
0346         return IRQ_HANDLED;
0347     }
0348 
0349     if (at91_usart_spi_rx_ready(aus)) {
0350         at91_usart_spi_rx(aus);
0351         spin_unlock(&aus->lock);
0352         return IRQ_HANDLED;
0353     }
0354 
0355     spin_unlock(&aus->lock);
0356 
0357     return IRQ_NONE;
0358 }
0359 
0360 static int at91_usart_spi_setup(struct spi_device *spi)
0361 {
0362     struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
0363     u32 *ausd = spi->controller_state;
0364     unsigned int mr = at91_usart_spi_readl(aus, MR);
0365 
0366     if (spi->mode & SPI_CPOL)
0367         mr |= US_MR_CPOL;
0368     else
0369         mr &= ~US_MR_CPOL;
0370 
0371     if (spi->mode & SPI_CPHA)
0372         mr |= US_MR_CPHA;
0373     else
0374         mr &= ~US_MR_CPHA;
0375 
0376     if (spi->mode & SPI_LOOP)
0377         mr |= US_MR_LOOP;
0378     else
0379         mr &= ~US_MR_LOOP;
0380 
0381     if (!ausd) {
0382         ausd = kzalloc(sizeof(*ausd), GFP_KERNEL);
0383         if (!ausd)
0384             return -ENOMEM;
0385 
0386         spi->controller_state = ausd;
0387     }
0388 
0389     *ausd = mr;
0390 
0391     dev_dbg(&spi->dev,
0392         "setup: bpw %u mode 0x%x -> mr %d %08x\n",
0393         spi->bits_per_word, spi->mode, spi->chip_select, mr);
0394 
0395     return 0;
0396 }
0397 
/*
 * Execute one spi_transfer, via DMA when eligible, otherwise byte-by-byte
 * PIO (TX pushed here, RX drained by the interrupt handler).
 *
 * Note the double duty of "ret": after the first failed DMA setup it stays
 * non-zero, so the "&& !ret" guard permanently routes the rest of this
 * transfer through PIO — it is deliberately never reset to 0.
 */
static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
				       struct spi_device *spi,
				       struct spi_transfer *xfer)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
	unsigned long dma_timeout = 0;
	int ret = 0;

	at91_usart_spi_set_xfer_speed(aus, xfer);
	aus->xfer_failed = false;
	aus->current_transfer = xfer;
	aus->current_tx_remaining_bytes = xfer->len;
	aus->current_rx_remaining_bytes = xfer->len;

	/* Loop until both directions are complete or an overrun aborts us. */
	while ((aus->current_tx_remaining_bytes ||
		aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
		reinit_completion(&aus->xfer_completion);
		if (at91_usart_spi_can_dma(ctlr, spi, xfer) &&
		    !ret) {
			ret = at91_usart_spi_dma_transfer(ctlr, xfer);
			if (ret)
				/* DMA setup failed: retry this iteration in PIO. */
				continue;

			dma_timeout = at91_usart_spi_dma_timeout(aus);

			/* 0 means the 1 s completion timeout expired.
			 * NOTE(review): the DMA channels are not terminated
			 * on this path — confirm against the dmaengine docs
			 * whether a terminate is required before returning. */
			if (WARN_ON(dma_timeout == 0)) {
				dev_err(&spi->dev, "DMA transfer timeout\n");
				return -EIO;
			}
			/* dma_callback() already zeroed the RX counter. */
			aus->current_tx_remaining_bytes = 0;
		} else {
			/* PIO: refresh CSR, push one TX byte if TXRDY;
			 * RX bytes arrive through the interrupt handler. */
			at91_usart_spi_read_status(aus);
			at91_usart_spi_tx(aus);
		}

		cpu_relax();
	}

	if (aus->xfer_failed) {
		dev_err(aus->dev, "Overrun!\n");
		return -EIO;
	}

	return 0;
}
0443 
0444 static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
0445                       struct spi_message *message)
0446 {
0447     struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
0448     struct spi_device *spi = message->spi;
0449     u32 *ausd = spi->controller_state;
0450 
0451     at91_usart_spi_writel(aus, CR, US_ENABLE);
0452     at91_usart_spi_writel(aus, IER, US_OVRE_RXRDY_IRQS);
0453     at91_usart_spi_writel(aus, MR, *ausd);
0454 
0455     return 0;
0456 }
0457 
0458 static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
0459                         struct spi_message *message)
0460 {
0461     struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
0462 
0463     at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
0464     at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);
0465 
0466     return 0;
0467 }
0468 
0469 static void at91_usart_spi_cleanup(struct spi_device *spi)
0470 {
0471     struct at91_usart_spi_device *ausd = spi->controller_state;
0472 
0473     spi->controller_state = NULL;
0474     kfree(ausd);
0475 }
0476 
/*
 * Put the USART into SPI-master mode (8-bit, CLKO, WRDBT), then reset and
 * disable both directions; prepare_message() re-enables them per message.
 */
static void at91_usart_spi_init(struct at91_usart_spi *aus)
{
	at91_usart_spi_writel(aus, MR, US_INIT);
	at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
}
0482 
0483 static int at91_usart_gpio_setup(struct platform_device *pdev)
0484 {
0485     struct gpio_descs *cs_gpios;
0486 
0487     cs_gpios = devm_gpiod_get_array_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
0488 
0489     if (IS_ERR(cs_gpios))
0490         return PTR_ERR(cs_gpios);
0491 
0492     return 0;
0493 }
0494 
/*
 * Probe: gather resources from the parent USART device (this driver is a
 * child of an MFD cell, so MEM/IRQ/clk live on pdev->dev.parent), set up
 * the spi_controller, map registers, request the IRQ, enable the clock,
 * initialize the hardware, configure DMA and register the controller.
 */
static int at91_usart_spi_probe(struct platform_device *pdev)
{
	struct resource *regs;
	struct spi_controller *controller;
	struct at91_usart_spi *aus;
	struct clk *clk;
	int irq;
	int ret;

	regs = platform_get_resource(to_platform_device(pdev->dev.parent),
				     IORESOURCE_MEM, 0);
	if (!regs)
		return -EINVAL;

	irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
	if (irq < 0)
		return irq;

	clk = devm_clk_get(pdev->dev.parent, "usart");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = -ENOMEM;
	controller = spi_alloc_master(&pdev->dev, sizeof(*aus));
	if (!controller)
		goto at91_usart_spi_probe_fail;

	ret = at91_usart_gpio_setup(pdev);
	if (ret)
		goto at91_usart_spi_probe_fail;

	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
	/* Bind to the parent's DT node so child SPI devices are found. */
	controller->dev.of_node = pdev->dev.parent->of_node;
	controller->bits_per_word_mask = SPI_BPW_MASK(8);
	controller->setup = at91_usart_spi_setup;
	/* Full duplex only: the core must supply both buffers. */
	controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
	controller->transfer_one = at91_usart_spi_transfer_one;
	controller->prepare_message = at91_usart_spi_prepare_message;
	controller->unprepare_message = at91_usart_spi_unprepare_message;
	controller->can_dma = at91_usart_spi_can_dma;
	controller->cleanup = at91_usart_spi_cleanup;
	/* Speed range derives from the BRGR divider limits. */
	controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
						US_MIN_CLK_DIV);
	controller->min_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
						US_MAX_CLK_DIV);
	platform_set_drvdata(pdev, controller);

	aus = spi_master_get_devdata(controller);

	aus->dev = &pdev->dev;
	aus->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(aus->regs)) {
		ret = PTR_ERR(aus->regs);
		goto at91_usart_spi_probe_fail;
	}

	aus->irq = irq;
	aus->clk = clk;

	ret = devm_request_irq(&pdev->dev, irq, at91_usart_spi_interrupt, 0,
			       dev_name(&pdev->dev), controller);
	if (ret)
		goto at91_usart_spi_probe_fail;

	ret = clk_prepare_enable(clk);
	if (ret)
		goto at91_usart_spi_probe_fail;

	aus->spi_clk = clk_get_rate(clk);
	at91_usart_spi_init(aus);

	/* Physical base needed for DMA slave addresses (THR/RHR). */
	aus->phybase = regs->start;

	aus->mpdev = to_platform_device(pdev->dev.parent);

	ret = at91_usart_spi_configure_dma(controller, aus);
	if (ret)
		goto at91_usart_fail_dma;

	spin_lock_init(&aus->lock);
	init_completion(&aus->xfer_completion);

	ret = devm_spi_register_master(&pdev->dev, controller);
	if (ret)
		goto at91_usart_fail_register_master;

	dev_info(&pdev->dev,
		 "AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
		 at91_usart_spi_readl(aus, VERSION),
		 &regs->start, irq);

	return 0;

/* Unwind in reverse acquisition order; devm handles regs/IRQ/clk handles. */
at91_usart_fail_register_master:
	at91_usart_spi_release_dma(controller);
at91_usart_fail_dma:
	clk_disable_unprepare(clk);
at91_usart_spi_probe_fail:
	spi_master_put(controller);
	return ret;
}
0596 
0597 __maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
0598 {
0599     struct spi_controller *ctlr = dev_get_drvdata(dev);
0600     struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
0601 
0602     clk_disable_unprepare(aus->clk);
0603     pinctrl_pm_select_sleep_state(dev);
0604 
0605     return 0;
0606 }
0607 
0608 __maybe_unused static int at91_usart_spi_runtime_resume(struct device *dev)
0609 {
0610     struct spi_controller *ctrl = dev_get_drvdata(dev);
0611     struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
0612 
0613     pinctrl_pm_select_default_state(dev);
0614 
0615     return clk_prepare_enable(aus->clk);
0616 }
0617 
0618 __maybe_unused static int at91_usart_spi_suspend(struct device *dev)
0619 {
0620     struct spi_controller *ctrl = dev_get_drvdata(dev);
0621     int ret;
0622 
0623     ret = spi_controller_suspend(ctrl);
0624     if (ret)
0625         return ret;
0626 
0627     if (!pm_runtime_suspended(dev))
0628         at91_usart_spi_runtime_suspend(dev);
0629 
0630     return 0;
0631 }
0632 
0633 __maybe_unused static int at91_usart_spi_resume(struct device *dev)
0634 {
0635     struct spi_controller *ctrl = dev_get_drvdata(dev);
0636     struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
0637     int ret;
0638 
0639     if (!pm_runtime_suspended(dev)) {
0640         ret = at91_usart_spi_runtime_resume(dev);
0641         if (ret)
0642             return ret;
0643     }
0644 
0645     at91_usart_spi_init(aus);
0646 
0647     return spi_controller_resume(ctrl);
0648 }
0649 
0650 static int at91_usart_spi_remove(struct platform_device *pdev)
0651 {
0652     struct spi_controller *ctlr = platform_get_drvdata(pdev);
0653     struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
0654 
0655     at91_usart_spi_release_dma(ctlr);
0656     clk_disable_unprepare(aus->clk);
0657 
0658     return 0;
0659 }
0660 
/* System sleep and runtime PM hooks (all marked __maybe_unused above). */
static const struct dev_pm_ops at91_usart_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(at91_usart_spi_suspend, at91_usart_spi_resume)
	SET_RUNTIME_PM_OPS(at91_usart_spi_runtime_suspend,
			   at91_usart_spi_runtime_resume, NULL)
};
0666 
/* Platform driver glue; the device is instantiated by the USART MFD core. */
static struct platform_driver at91_usart_spi_driver = {
	.driver = {
		.name = "at91_usart_spi",
		.pm = &at91_usart_spi_pm_ops,
	},
	.probe = at91_usart_spi_probe,
	.remove = at91_usart_spi_remove,
};

module_platform_driver(at91_usart_spi_driver);

MODULE_DESCRIPTION("Microchip AT91 USART SPI Controller driver");
MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:at91_usart_spi");