// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek UART APDMA driver.
 *
 * Author: Long Cheng <long.cheng@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../virt-dma.h"

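/* Default number of virtual channels (used when "dma-requests" is absent) */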
#define MTK_UART_APDMA_NR_VCHANS	8

#define VFF_EN_B		BIT(0)
#define VFF_STOP_B		BIT(0)
#define VFF_FLUSH_B		BIT(0)
#define VFF_4G_EN_B		BIT(0)

#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))

#define VFF_TX_INT_EN_B		BIT(0)
#define VFF_WARM_RST_B		BIT(0)
#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
#define VFF_TX_INT_CLR_B	0
#define VFF_STOP_CLR_B		0
#define VFF_EN_CLR_B		0
#define VFF_INT_EN_CLR_B	0
#define VFF_4G_SUPPORT_CLR_B	0
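/* TX interrupt trigger level: the whole ring size */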
#define VFF_TX_THRE(n)		(n)
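/* RX interrupt trigger level: 3/4 of the ring size */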
#define VFF_RX_THRE(n)		((n) * 3 / 4)

#define VFF_RING_SIZE		0xffff
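/* This bit toggles each time the ring pointers wrap around */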
#define VFF_RING_WRAP		0x10000

#define VFF_INT_FLAG		0x00
#define VFF_INT_EN		0x04
#define VFF_EN			0x08
#define VFF_RST			0x0c
#define VFF_STOP		0x10
#define VFF_FLUSH		0x14
#define VFF_ADDR		0x1c
#define VFF_LEN			0x24
#define VFF_THRE		0x28
#define VFF_WPT			0x2c
#define VFF_RPT			0x30
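/* TX: bytes queued for the HW to send; RX: bytes ready for the SW to read */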
#define VFF_VALID_SIZE		0x3c
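/* TX: free space the SW may fill; RX: free space the HW may fill */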
#define VFF_LEFT_SIZE		0x40
#define VFF_DEBUG_STATUS	0x50
#define VFF_4G_SUPPORT		0x54

struct mtk_uart_apdmadev {
	struct dma_device ddev;
	struct clk *clk;
	bool support_33bits;
	unsigned int dma_requests;
};

struct mtk_uart_apdma_desc {
	struct virt_dma_desc vd;

	dma_addr_t addr;
	unsigned int avail_len;
};

struct mtk_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
	struct mtk_uart_apdma_desc *desc;
	enum dma_transfer_direction dir;

	void __iomem *base;
	unsigned int irq;

	unsigned int rx_status;
};

static inline struct mtk_uart_apdmadev *
to_mtk_uart_apdma_dev(struct dma_device *d)
{
	return container_of(d, struct mtk_uart_apdmadev, ddev);
}

static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
{
	return container_of(c, struct mtk_chan, vc.chan);
}

static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
	(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
}

static void mtk_uart_apdma_write(struct mtk_chan *c,
				 unsigned int reg, unsigned int val)
{
	writel(val, c->base + reg);
}

static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
{
	return readl(c->base + reg);
}

static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}

static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int wpt, vff_sz;

	vff_sz = c->cfg.dst_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_WPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");

	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
		return;
	}

	wpt = mtk_uart_apdma_read(c, VFF_WPT);

	wpt += c->desc->avail_len;
	if ((wpt & VFF_RING_SIZE) == vff_sz)
		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

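	/* Advance the write pointer so the HW starts draining the new data */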
	mtk_uart_apdma_write(c, VFF_WPT, wpt);

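	/* Re-enable the TX interrupt and flush if no flush is already pending */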
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
}

static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int vff_sz;

	vff_sz = c->cfg.src_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_RPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
}

static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
}

static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int len, wg, rg;
	int cnt;

	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
		return;

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	len = c->cfg.src_port_window_size;
	rg = mtk_uart_apdma_read(c, VFF_RPT);
	wg = mtk_uart_apdma_read(c, VFF_WPT);
	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

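	/*
	 * The VFF is a ring buffer: if the wrap bits of the read and write
	 * pointers differ, the write pointer has wrapped past the end, so
	 * one full buffer length must be added to the count.
	 */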
	if ((rg ^ wg) & VFF_RING_WRAP)
		cnt += len;

	c->rx_status = d->avail_len - cnt;
	mtk_uart_apdma_write(c, VFF_RPT, wg);
}

static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;

	if (d) {
		list_del(&d->vd.node);
		vchan_cookie_complete(&d->vd);
		c->desc = NULL;
	}
}

static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
	struct dma_chan *chan = (struct dma_chan *)dev_id;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_rx_handler(c);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_tx_handler(c);
	mtk_uart_apdma_chan_complete_handler(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned int status;
	int ret;

	ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
	if (ret < 0)
		return ret;

	mtk_uart_apdma_write(c, VFF_ADDR, 0);
	mtk_uart_apdma_write(c, VFF_THRE, 0);
	mtk_uart_apdma_write(c, VFF_LEN, 0);
	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);

	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		goto err_pm;

	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
	if (ret < 0) {
		dev_err(chan->device->dev, "Can't request dma IRQ\n");
		ret = -EINVAL;
		goto err_pm;
	}

	if (mtkd->support_33bits)
		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);

err_pm:
	pm_runtime_put_noidle(mtkd->ddev.dev);
	return ret;
}

static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	free_irq(c->irq, chan);

	tasklet_kill(&c->vc.task);

	vchan_free_chan_resources(&c->vc);

	pm_runtime_put_sync(mtkd->ddev.dev);
}

static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (!txstate)
		return ret;

	dma_set_residue(txstate, c->rx_status);

	return ret;
}

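/* Only a single scatterlist segment (sglen == 1) is supported per transfer */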
static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
	(struct dma_chan *chan, struct scatterlist *sgl,
	 unsigned int sglen, enum dma_transfer_direction dir,
	 unsigned long tx_flags, void *context)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct mtk_uart_apdma_desc *d;

	if (!is_slave_direction(dir) || sglen != 1)
		return NULL;

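	/* Allocate and set up a single descriptor for this transfer */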
	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->avail_len = sg_dma_len(sgl);
	d->addr = sg_dma_address(sgl);
	c->dir = dir;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		vd = vchan_next_desc(&c->vc);
		c->desc = to_mtk_uart_apdma_desc(&vd->tx);

		if (c->dir == DMA_DEV_TO_MEM)
			mtk_uart_apdma_start_rx(c);
		else if (c->dir == DMA_MEM_TO_DEV)
			mtk_uart_apdma_start_tx(c);
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	memcpy(&c->cfg, config, sizeof(*config));

	return 0;
}

static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;
	unsigned int status;
	LIST_HEAD(head);
	int ret;

	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);

	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
				 status, status != VFF_FLUSH_B, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

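	/*
	 * Stopping the VFF takes three steps:
	 * 1. set VFF_STOP
	 * 2. wait for VFF_EN to clear
	 * 3. clear VFF_STOP again
	 */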
	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

	synchronize_irq(c->irq);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	spin_unlock_irqrestore(&c->vc.lock, flags);

	/* Wait for a possibly running interrupt handler with the lock released */
	synchronize_irq(c->irq);

	return 0;
}

static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
{
	while (!list_empty(&mtkd->ddev.channels)) {
		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
			struct mtk_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static const struct of_device_id mtk_uart_apdma_match[] = {
	{ .compatible = "mediatek,mt6577-uart-dma", },
	{ },
};
MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);

static int mtk_uart_apdma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mtk_uart_apdmadev *mtkd;
	int bit_mask = 32, rc;
	struct mtk_chan *c;
	unsigned int i;

	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
	if (!mtkd)
		return -ENOMEM;

	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mtkd->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		rc = PTR_ERR(mtkd->clk);
		return rc;
	}

	if (of_property_read_bool(np, "mediatek,dma-33bits"))
		mtkd->support_33bits = true;

	if (mtkd->support_33bits)
		bit_mask = 33;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
	if (rc)
		return rc;

	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
	mtkd->ddev.device_alloc_chan_resources =
				mtk_uart_apdma_alloc_chan_resources;
	mtkd->ddev.device_free_chan_resources =
				mtk_uart_apdma_free_chan_resources;
	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	mtkd->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&mtkd->ddev.channels);

	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_UART_APDMA_NR_VCHANS);
	}

	for (i = 0; i < mtkd->dma_requests; i++) {
		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
		if (!c) {
			rc = -ENOMEM;
			goto err_no_dma;
		}

		c->base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(c->base)) {
			rc = PTR_ERR(c->base);
			goto err_no_dma;
		}
		c->vc.desc_free = mtk_uart_apdma_desc_free;
		vchan_init(&c->vc, &mtkd->ddev);

		rc = platform_get_irq(pdev, i);
		if (rc < 0)
			goto err_no_dma;
		c->irq = rc;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);

	rc = dma_async_device_register(&mtkd->ddev);
	if (rc)
		goto rpm_disable;

	platform_set_drvdata(pdev, mtkd);

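	/* Register with the OF DMA framework; clients select channels by index */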
	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
	if (rc)
		goto dma_remove;

	return rc;

dma_remove:
	dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
err_no_dma:
	mtk_uart_apdma_free(mtkd);
	return rc;
}

static int mtk_uart_apdma_remove(struct platform_device *pdev)
{
	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);

	mtk_uart_apdma_free(mtkd);

	dma_async_device_unregister(&mtkd->ddev);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_uart_apdma_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_resume(struct device *dev)
{
	int ret;
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mtkd->clk);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM
static int mtk_uart_apdma_runtime_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	return clk_prepare_enable(mtkd->clk);
}
#endif

static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
			   mtk_uart_apdma_runtime_resume, NULL)
};

static struct platform_driver mtk_uart_apdma_driver = {
	.probe	= mtk_uart_apdma_probe,
	.remove	= mtk_uart_apdma_remove,
	.driver = {
		.name		= KBUILD_MODNAME,
		.pm		= &mtk_uart_apdma_pm_ops,
		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
	},
};

module_platform_driver(mtk_uart_apdma_driver);

MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
MODULE_LICENSE("GPL v2");