/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from the MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden at run time by writing to
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT       2000
#define HIDMA_ERR_INFO_SW           0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
#define HIDMA_NR_DEFAULT_DESC           10
#define HIDMA_MSI_INTS              11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
    return container_of(dmadev, struct hidma_dev, ddev);
}

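/*
 * The MSI interrupt handler is registered with &hidma_dev->lldev as its
 * argument (see hidma_request_msi() below), so container_of() on the
 * double pointer recovers the owning device.
 */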
static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
    return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
    return container_of(dmach, struct hidma_chan, chan);
}

static void hidma_free(struct hidma_dev *dmadev)
{
    INIT_LIST_HEAD(&dmadev->ddev.channels);
}

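/*
 * Module parameter overriding the per-channel descriptor count. A sketch
 * of an override at module load time (the value 32 is illustrative, and
 * the "hdma" module name assumes the qcom Makefile naming):
 *
 *     modprobe hdma nr_desc_prm=32
 */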
static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

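/*
 * Hardware capability flags, passed as OF/ACPI match data; see the
 * hidma_match and hidma_acpi_ids tables at the end of this file.
 */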
enum hidma_cap {
    HIDMA_MSI_CAP = 1,
    HIDMA_IDENTITY_CAP,
};

/* process completed descriptors */
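/*
 * Completed descriptors are spliced onto a private list under the
 * channel lock; the client callbacks then run with the lock dropped,
 * so a callback may safely prepare and submit new work on the same
 * channel.
 */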
static void hidma_process_completed(struct hidma_chan *mchan)
{
    struct dma_device *ddev = mchan->chan.device;
    struct hidma_dev *mdma = to_hidma_dev(ddev);
    struct dma_async_tx_descriptor *desc;
    dma_cookie_t last_cookie;
    struct hidma_desc *mdesc;
    struct hidma_desc *next;
    unsigned long irqflags;
    struct list_head list;

    INIT_LIST_HEAD(&list);

    /* Get all completed descriptors */
    spin_lock_irqsave(&mchan->lock, irqflags);
    list_splice_tail_init(&mchan->completed, &list);
    spin_unlock_irqrestore(&mchan->lock, irqflags);

    /* Execute callbacks and run dependencies */
    list_for_each_entry_safe(mdesc, next, &list, node) {
        enum dma_status llstat;
        struct dmaengine_desc_callback cb;
        struct dmaengine_result result;

        desc = &mdesc->desc;
        last_cookie = desc->cookie;

        llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

        spin_lock_irqsave(&mchan->lock, irqflags);
        if (llstat == DMA_COMPLETE) {
            mchan->last_success = last_cookie;
            result.result = DMA_TRANS_NOERROR;
        } else {
            result.result = DMA_TRANS_ABORTED;
        }

        dma_cookie_complete(desc);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        dmaengine_desc_get_callback(desc, &cb);

        dma_run_dependencies(desc);

        spin_lock_irqsave(&mchan->lock, irqflags);
        list_move(&mdesc->node, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        dmaengine_desc_callback_invoke(&cb, &result);
    }
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
    struct hidma_desc *mdesc = data;
    struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
    struct dma_device *ddev = mchan->chan.device;
    struct hidma_dev *dmadev = to_hidma_dev(ddev);
    unsigned long irqflags;
    bool queued = false;

    spin_lock_irqsave(&mchan->lock, irqflags);
    if (mdesc->node.next) {
        /* Delete from the active list, add to completed list */
        list_move_tail(&mdesc->node, &mchan->completed);
        queued = true;

        /* calculate the next running descriptor */
        mchan->running = list_first_entry(&mchan->active,
                          struct hidma_desc, node);
    }
    spin_unlock_irqrestore(&mchan->lock, irqflags);

    hidma_process_completed(mchan);

    if (queued) {
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
    }
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
    struct hidma_chan *mchan;
    struct dma_device *ddev;

    mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
    if (!mchan)
        return -ENOMEM;

    ddev = &dmadev->ddev;
    mchan->dma_sig = dma_sig;
    mchan->dmadev = dmadev;
    mchan->chan.device = ddev;
    dma_cookie_init(&mchan->chan);

    INIT_LIST_HEAD(&mchan->free);
    INIT_LIST_HEAD(&mchan->prepared);
    INIT_LIST_HEAD(&mchan->active);
    INIT_LIST_HEAD(&mchan->completed);
    INIT_LIST_HEAD(&mchan->queued);

    spin_lock_init(&mchan->lock);
    list_add_tail(&mchan->chan.device_node, &ddev->channels);
    dmadev->ddev.chancnt++;
    return 0;
}

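/*
 * Fallback path for hidma_issue_pending(): if the asynchronous
 * pm_runtime_get() there fails, this tasklet takes the PM reference
 * synchronously and starts the hardware.
 */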
static void hidma_issue_task(struct tasklet_struct *t)
{
    struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

    pm_runtime_get_sync(dmadev->ddev.dev);
    hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
    struct hidma_chan *mchan = to_hidma_chan(dmach);
    struct hidma_dev *dmadev = mchan->dmadev;
    unsigned long flags;
    struct hidma_desc *qdesc, *next;
    int status;

    spin_lock_irqsave(&mchan->lock, flags);
    list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
        hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
        list_move_tail(&qdesc->node, &mchan->active);
    }

    if (!mchan->running) {
        struct hidma_desc *desc = list_first_entry(&mchan->active,
                               struct hidma_desc,
                               node);
        mchan->running = desc;
    }
    spin_unlock_irqrestore(&mchan->lock, flags);

    /* PM will be released in hidma_callback function. */
    status = pm_runtime_get(dmadev->ddev.dev);
    if (status < 0)
        tasklet_schedule(&dmadev->task);
    else
        hidma_ll_start(dmadev->lldev);
}

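/*
 * Cookies increase monotonically and eventually wrap. Transactions in
 * the window (last_success, last_used] are pending or have failed, so
 * a cookie that dma_cookie_status() reports complete counts as a
 * success only if it lies outside that window; the two branches handle
 * the non-wrapped and wrapped cases.
 */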
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
        dma_cookie_t last_success, dma_cookie_t last_used)
{
    if (last_success <= last_used) {
        if ((cookie <= last_success) || (cookie > last_used))
            return true;
    } else {
        if ((cookie <= last_success) && (cookie > last_used))
            return true;
    }
    return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
                       dma_cookie_t cookie,
                       struct dma_tx_state *txstate)
{
    struct hidma_chan *mchan = to_hidma_chan(dmach);
    enum dma_status ret;

    ret = dma_cookie_status(dmach, cookie, txstate);
    if (ret == DMA_COMPLETE) {
        bool is_success;

        is_success = hidma_txn_is_success(cookie, mchan->last_success,
                          dmach->cookie);
        return is_success ? ret : DMA_ERROR;
    }

    if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
        unsigned long flags;
        dma_cookie_t runcookie;

        spin_lock_irqsave(&mchan->lock, flags);
        if (mchan->running)
            runcookie = mchan->running->desc.cookie;
        else
            runcookie = -EINVAL;

        if (runcookie == cookie)
            ret = DMA_PAUSED;

        spin_unlock_irqrestore(&mchan->lock, flags);
    }

    return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
    struct hidma_chan *mchan = to_hidma_chan(txd->chan);
    struct hidma_dev *dmadev = mchan->dmadev;
    struct hidma_desc *mdesc;
    unsigned long irqflags;
    dma_cookie_t cookie;

    pm_runtime_get_sync(dmadev->ddev.dev);
    if (!hidma_ll_isenabled(dmadev->lldev)) {
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return -ENODEV;
    }
    pm_runtime_mark_last_busy(dmadev->ddev.dev);
    pm_runtime_put_autosuspend(dmadev->ddev.dev);

    mdesc = container_of(txd, struct hidma_desc, desc);
    spin_lock_irqsave(&mchan->lock, irqflags);

    /* Move descriptor to queued */
    list_move_tail(&mdesc->node, &mchan->queued);

    /* Update cookie */
    cookie = dma_cookie_assign(txd);

    spin_unlock_irqrestore(&mchan->lock, irqflags);

    return cookie;
}

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
    struct hidma_chan *mchan = to_hidma_chan(dmach);
    struct hidma_dev *dmadev = mchan->dmadev;
    struct hidma_desc *mdesc, *tmp;
    unsigned long irqflags;
    LIST_HEAD(descs);
    unsigned int i;
    int rc = 0;

    if (mchan->allocated)
        return 0;

    /* Alloc descriptors for this channel */
    for (i = 0; i < dmadev->nr_descriptors; i++) {
        mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
        if (!mdesc) {
            rc = -ENOMEM;
            break;
        }
        dma_async_tx_descriptor_init(&mdesc->desc, dmach);
        mdesc->desc.tx_submit = hidma_tx_submit;

        rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
                      "DMA engine", hidma_callback, mdesc,
                      &mdesc->tre_ch);
        if (rc) {
            dev_err(dmach->device->dev,
                "channel alloc failed at %u\n", i);
            kfree(mdesc);
            break;
        }
        list_add_tail(&mdesc->node, &descs);
    }

    if (rc) {
        /* return the allocated descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node) {
            hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
            kfree(mdesc);
        }
        return rc;
    }

    spin_lock_irqsave(&mchan->lock, irqflags);
    list_splice_tail_init(&descs, &mchan->free);
    mchan->allocated = true;
    spin_unlock_irqrestore(&mchan->lock, irqflags);
    return 1;
}

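/*
 * For reference, a minimal dmaengine client sketch against this channel
 * type ("dev", the "memcpy" channel name, "dst", "src" and "len" are
 * placeholders):
 *
 *     struct dma_chan *chan = dma_request_chan(dev, "memcpy");
 *     struct dma_async_tx_descriptor *d;
 *     dma_cookie_t cookie;
 *
 *     d = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *                                   DMA_PREP_INTERRUPT);
 *     cookie = dmaengine_submit(d);
 *     dma_async_issue_pending(chan);
 *     ...
 *     dma_release_channel(chan);
 */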
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
        size_t len, unsigned long flags)
{
    struct hidma_chan *mchan = to_hidma_chan(dmach);
    struct hidma_desc *mdesc = NULL;
    struct hidma_dev *mdma = mchan->dmadev;
    unsigned long irqflags;

    /* Get free descriptor */
    spin_lock_irqsave(&mchan->lock, irqflags);
    if (!list_empty(&mchan->free)) {
        mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
        list_del(&mdesc->node);
    }
    spin_unlock_irqrestore(&mchan->lock, irqflags);

    if (!mdesc)
        return NULL;

    mdesc->desc.flags = flags;
    hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
                     src, dest, len, flags,
                     HIDMA_TRE_MEMCPY);

    /* Place descriptor in prepared list */
    spin_lock_irqsave(&mchan->lock, irqflags);
    list_add_tail(&mdesc->node, &mchan->prepared);
    spin_unlock_irqrestore(&mchan->lock, irqflags);

    return &mdesc->desc;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
        size_t len, unsigned long flags)
{
    struct hidma_chan *mchan = to_hidma_chan(dmach);
    struct hidma_desc *mdesc = NULL;
    struct hidma_dev *mdma = mchan->dmadev;
    unsigned long irqflags;
    u64 byte_pattern, fill_pattern;

    /* Get free descriptor */
    spin_lock_irqsave(&mchan->lock, irqflags);
    if (!list_empty(&mchan->free)) {
        mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
        list_del(&mdesc->node);
    }
    spin_unlock_irqrestore(&mchan->lock, irqflags);

    if (!mdesc)
        return NULL;

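    /*
     * Replicate the low byte of "value" across all eight bytes of the
     * 64-bit fill pattern; the unsigned cast below avoids sign
     * extension on platforms where plain char is signed.
     */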
    byte_pattern = (u8)value;
    fill_pattern =  (byte_pattern << 56) |
            (byte_pattern << 48) |
            (byte_pattern << 40) |
            (byte_pattern << 32) |
            (byte_pattern << 24) |
            (byte_pattern << 16) |
            (byte_pattern << 8) |
            byte_pattern;

    mdesc->desc.flags = flags;
    hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
                     fill_pattern, dest, len, flags,
                     HIDMA_TRE_MEMSET);

    /* Place descriptor in prepared list */
    spin_lock_irqsave(&mchan->lock, irqflags);
    list_add_tail(&mdesc->node, &mchan->prepared);
    spin_unlock_irqrestore(&mchan->lock, irqflags);

    return &mdesc->desc;
}

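/*
 * Terminate sequence: drain already-completed work, reclaim every
 * descriptor from the active/prepared/completed/queued lists, pause
 * the hardware, hand the reclaimed requests back to their owners, and
 * finally re-enable the channel.
 */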
static int hidma_terminate_channel(struct dma_chan *chan)
{
    struct hidma_chan *mchan = to_hidma_chan(chan);
    struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
    struct hidma_desc *tmp, *mdesc;
    unsigned long irqflags;
    LIST_HEAD(list);
    int rc;

    pm_runtime_get_sync(dmadev->ddev.dev);
    /* give completed requests a chance to finish */
    hidma_process_completed(mchan);

    spin_lock_irqsave(&mchan->lock, irqflags);
    mchan->last_success = 0;
    list_splice_init(&mchan->active, &list);
    list_splice_init(&mchan->prepared, &list);
    list_splice_init(&mchan->completed, &list);
    list_splice_init(&mchan->queued, &list);
    spin_unlock_irqrestore(&mchan->lock, irqflags);

    /* this suspends the existing transfer */
    rc = hidma_ll_disable(dmadev->lldev);
    if (rc) {
        dev_err(dmadev->ddev.dev, "channel did not pause\n");
        goto out;
    }

    /* return all user requests */
    list_for_each_entry_safe(mdesc, tmp, &list, node) {
        struct dma_async_tx_descriptor *txd = &mdesc->desc;

        dma_descriptor_unmap(txd);
        dmaengine_desc_get_callback_invoke(txd, NULL);
        dma_run_dependencies(txd);

        /* move myself to free_list */
        list_move(&mdesc->node, &mchan->free);
    }

    rc = hidma_ll_enable(dmadev->lldev);
out:
    pm_runtime_mark_last_busy(dmadev->ddev.dev);
    pm_runtime_put_autosuspend(dmadev->ddev.dev);
    return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
    struct hidma_chan *mchan = to_hidma_chan(chan);
    struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
    int rc;

    rc = hidma_terminate_channel(chan);
    if (rc)
        return rc;

    /* reinitialize the hardware */
    pm_runtime_get_sync(dmadev->ddev.dev);
    rc = hidma_ll_setup(dmadev->lldev);
    pm_runtime_mark_last_busy(dmadev->ddev.dev);
    pm_runtime_put_autosuspend(dmadev->ddev.dev);
    return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
    struct hidma_chan *mchan = to_hidma_chan(dmach);
    struct hidma_dev *mdma = mchan->dmadev;
    struct hidma_desc *mdesc, *tmp;
    unsigned long irqflags;
    LIST_HEAD(descs);

    /* terminate running transactions and free descriptors */
    hidma_terminate_channel(dmach);

    spin_lock_irqsave(&mchan->lock, irqflags);

    /* Move data */
    list_splice_tail_init(&mchan->free, &descs);

    /* Free descriptors */
    list_for_each_entry_safe(mdesc, tmp, &descs, node) {
        hidma_ll_free(mdma->lldev, mdesc->tre_ch);
        list_del(&mdesc->node);
        kfree(mdesc);
    }

    mchan->allocated = false;
    spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
    struct hidma_chan *mchan;
    struct hidma_dev *dmadev;

    mchan = to_hidma_chan(chan);
    dmadev = to_hidma_dev(mchan->chan.device);
    if (!mchan->paused) {
        pm_runtime_get_sync(dmadev->ddev.dev);
        if (hidma_ll_disable(dmadev->lldev))
            dev_warn(dmadev->ddev.dev, "channel did not stop\n");
        mchan->paused = true;
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
    }
    return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
    struct hidma_chan *mchan;
    struct hidma_dev *dmadev;
    int rc = 0;

    mchan = to_hidma_chan(chan);
    dmadev = to_hidma_dev(mchan->chan.device);
    if (mchan->paused) {
        pm_runtime_get_sync(dmadev->ddev.dev);
        rc = hidma_ll_enable(dmadev->lldev);
        if (!rc)
            mchan->paused = false;
        else
            dev_err(dmadev->ddev.dev,
                "failed to resume the channel\n");
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
    }
    return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
    struct hidma_lldev *lldev = arg;

    /*
     * All interrupts are request driven.
     * HW doesn't send an interrupt by itself.
     */
    return hidma_ll_inthandler(chirq, lldev);
}

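/*
 * MSI variant of the channel interrupt handler. Each device is given
 * HIDMA_MSI_INTS consecutive vectors; the offset of the firing virq
 * from the first one is turned into the cause bit passed to the
 * low-level driver.
 */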
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
    struct hidma_lldev **lldevp = arg;
    struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

    return hidma_ll_inthandler_msi(chirq, *lldevp,
                       1 << (chirq - dmadev->msi_virqbase));
}
#endif

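/*
 * sysfs support: a read-only "chid" attribute exposes the channel index
 * this instance drives, e.g. (hypothetical device instance):
 *
 *     cat /sys/bus/platform/devices/QCOM8061:00/chid
 */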
static ssize_t hidma_show_values(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct hidma_dev *mdev = dev_get_drvdata(dev);

    buf[0] = 0;

    if (strcmp(attr->attr.name, "chid") == 0)
        sprintf(buf, "%d\n", mdev->chidx);

    return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
    device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
    struct device_attribute *attrs;
    char *name_copy;

    attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
                 GFP_KERNEL);
    if (!attrs)
        return NULL;

    name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
    if (!name_copy)
        return NULL;

    attrs->attr.name = name_copy;
    attrs->attr.mode = mode;
    attrs->show = hidma_show_values;
    sysfs_attr_init(&attrs->attr);

    return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
    dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
    if (!dev->chid_attrs)
        return -ENOMEM;

    return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

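/*
 * Program the MSI address/data registers of the event channel. Only the
 * message of the first vector is written; the assumption (based on the
 * single register set at EVCA offsets 0x118-0x120) is that the hardware
 * derives the remaining HIDMA_MSI_INTS - 1 vectors from this base.
 */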
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
    struct device *dev = msi_desc_to_dev(desc);
    struct hidma_dev *dmadev = dev_get_drvdata(dev);

    if (!desc->msi_index) {
        writel(msg->address_lo, dmadev->dev_evca + 0x118);
        writel(msg->address_hi, dmadev->dev_evca + 0x11C);
        writel(msg->data, dmadev->dev_evca + 0x120);
    }
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
    struct device *dev = dmadev->ddev.dev;
    int i, virq;

    for (i = 0; i < HIDMA_MSI_INTS; i++) {
        virq = msi_get_virq(dev, i);
        if (virq)
            devm_free_irq(dev, virq, &dmadev->lldev);
    }

    platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
                 struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
    int rc, i, virq;

    rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
                        hidma_write_msi_msg);
    if (rc)
        return rc;

    for (i = 0; i < HIDMA_MSI_INTS; i++) {
        virq = msi_get_virq(&pdev->dev, i);
        rc = devm_request_irq(&pdev->dev, virq,
                       hidma_chirq_handler_msi,
                       0, "qcom-hidma-msi",
                       &dmadev->lldev);
        if (rc)
            break;
        if (!i)
            dmadev->msi_virqbase = virq;
    }

    if (rc) {
        /* free allocated MSI interrupts above */
        for (--i; i >= 0; i--) {
            virq = msi_get_virq(&pdev->dev, i);
            devm_free_irq(&pdev->dev, virq, &dmadev->lldev);
        }
        dev_warn(&pdev->dev,
             "failed to request MSI irq, falling back to wired IRQ\n");
    } else {
        /* switch the low-level driver to MSI interrupt handling */
        hidma_ll_setup_irq(dmadev->lldev, true);
    }
    return rc;
#else
    return -EINVAL;
#endif
}

static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
    enum hidma_cap cap;

    cap = (enum hidma_cap) device_get_match_data(dev);
    return cap ? ((cap & test_cap) > 0) : 0;
}

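/*
 * Probe order: set up runtime PM, map the transfer (TRCA) and event
 * (EVCA) channel regions, obtain the channel IRQ, read the
 * capability-dependent channel index, bring up the low-level driver,
 * wire up MSI or fall back to the wired IRQ, then register with the
 * dmaengine core.
 */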
static int hidma_probe(struct platform_device *pdev)
{
    struct hidma_dev *dmadev;
    struct resource *trca_resource;
    struct resource *evca_resource;
    int chirq;
    void __iomem *evca;
    void __iomem *trca;
    int rc;
    bool msi;

    pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
    pm_runtime_use_autosuspend(&pdev->dev);
    pm_runtime_set_active(&pdev->dev);
    pm_runtime_enable(&pdev->dev);

    trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    trca = devm_ioremap_resource(&pdev->dev, trca_resource);
    if (IS_ERR(trca)) {
        rc = -ENOMEM;
        goto bailout;
    }

    evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    evca = devm_ioremap_resource(&pdev->dev, evca_resource);
    if (IS_ERR(evca)) {
        rc = -ENOMEM;
        goto bailout;
    }

    /*
     * This driver only handles the channel IRQs.
     * Common IRQ is handled by the management driver.
     */
    chirq = platform_get_irq(pdev, 0);
    if (chirq < 0) {
        rc = -ENODEV;
        goto bailout;
    }

    dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
    if (!dmadev) {
        rc = -ENOMEM;
        goto bailout;
    }

    INIT_LIST_HEAD(&dmadev->ddev.channels);
    spin_lock_init(&dmadev->lock);
    dmadev->ddev.dev = &pdev->dev;
    pm_runtime_get_sync(dmadev->ddev.dev);

    dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
    dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
    if (WARN_ON(!pdev->dev.dma_mask)) {
        rc = -ENXIO;
        goto dmafree;
    }

    dmadev->dev_evca = evca;
    dmadev->evca_resource = evca_resource;
    dmadev->dev_trca = trca;
    dmadev->trca_resource = trca_resource;
    dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
    dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
    dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
    dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
    dmadev->ddev.device_tx_status = hidma_tx_status;
    dmadev->ddev.device_issue_pending = hidma_issue_pending;
    dmadev->ddev.device_pause = hidma_pause;
    dmadev->ddev.device_resume = hidma_resume;
    dmadev->ddev.device_terminate_all = hidma_terminate_all;
    dmadev->ddev.copy_align = 8;

    /*
     * Determine the MSI capability of the platform. Old HW doesn't
     * support MSI.
     */
    msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
    device_property_read_u32(&pdev->dev, "desc-count",
                 &dmadev->nr_descriptors);

    if (nr_desc_prm) {
        dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
             nr_desc_prm);
        dmadev->nr_descriptors = nr_desc_prm;
    }

    if (!dmadev->nr_descriptors)
        dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

    if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
        dmadev->chidx = readl(dmadev->dev_trca + 0x40);
    else
        dmadev->chidx = readl(dmadev->dev_trca + 0x28);

    /* Set DMA mask to 64 bits. */
    rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (rc) {
        dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
        goto dmafree;
    }

    dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
                      dmadev->nr_descriptors, dmadev->dev_trca,
                      dmadev->dev_evca, dmadev->chidx);
    if (!dmadev->lldev) {
        rc = -EPROBE_DEFER;
        goto dmafree;
    }

    platform_set_drvdata(pdev, dmadev);
    if (msi)
        rc = hidma_request_msi(dmadev, pdev);

    if (!msi || rc) {
        hidma_ll_setup_irq(dmadev->lldev, false);
        rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
                      0, "qcom-hidma", dmadev->lldev);
        if (rc)
            goto uninit;
    }

    INIT_LIST_HEAD(&dmadev->ddev.channels);
    rc = hidma_chan_init(dmadev, 0);
    if (rc)
        goto uninit;

    rc = dma_async_device_register(&dmadev->ddev);
    if (rc)
        goto uninit;

    dmadev->irq = chirq;
    tasklet_setup(&dmadev->task, hidma_issue_task);
    hidma_debug_init(dmadev);
    hidma_sysfs_init(dmadev);
    dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
    pm_runtime_mark_last_busy(dmadev->ddev.dev);
    pm_runtime_put_autosuspend(dmadev->ddev.dev);
    return 0;

uninit:
    if (msi)
        hidma_free_msis(dmadev);

    hidma_ll_uninit(dmadev->lldev);
dmafree:
    if (dmadev)
        hidma_free(dmadev);
bailout:
    pm_runtime_put_sync(&pdev->dev);
    pm_runtime_disable(&pdev->dev);
    return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
    struct hidma_dev *dmadev = platform_get_drvdata(pdev);

    dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

    pm_runtime_get_sync(dmadev->ddev.dev);
    if (hidma_ll_disable(dmadev->lldev))
        dev_warn(dmadev->ddev.dev, "channel did not stop\n");
    pm_runtime_mark_last_busy(dmadev->ddev.dev);
    pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static int hidma_remove(struct platform_device *pdev)
{
    struct hidma_dev *dmadev = platform_get_drvdata(pdev);

    pm_runtime_get_sync(dmadev->ddev.dev);
    dma_async_device_unregister(&dmadev->ddev);
    if (!dmadev->lldev->msi_support)
        devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
    else
        hidma_free_msis(dmadev);

    tasklet_kill(&dmadev->task);
    hidma_sysfs_uninit(dmadev);
    hidma_debug_uninit(dmadev);
    hidma_ll_uninit(dmadev->lldev);
    hidma_free(dmadev);

    dev_info(&pdev->dev, "HI-DMA engine removed\n");
    pm_runtime_put_sync_suspend(&pdev->dev);
    pm_runtime_disable(&pdev->dev);

    return 0;
}

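/*
 * QCOM8062 / "qcom,hidma-1.1" parts add MSI support; QCOM8063 /
 * "qcom,hidma-1.2" parts additionally report the channel id through the
 * identity register (see the HIDMA_IDENTITY_CAP handling in probe).
 */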
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
    {"QCOM8061"},
    {"QCOM8062", HIDMA_MSI_CAP},
    {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
    {},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
    {.compatible = "qcom,hidma-1.0",},
    {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
    {.compatible = "qcom,hidma-1.2",
     .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
    {},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
    .probe = hidma_probe,
    .remove = hidma_remove,
    .shutdown = hidma_shutdown,
    .driver = {
           .name = "hidma",
           .of_match_table = hidma_match,
           .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
    },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");