#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <crypto/scatterwalk.h>

#include "dma.h"

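/*
 * Request the "tx" and "rx" DMA channels and allocate a single buffer
 * that backs both the result region and the scratch region whose
 * contents the driver ignores.
 */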
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
        int ret;

        dma->txchan = dma_request_chan(dev, "tx");
        if (IS_ERR(dma->txchan))
                return PTR_ERR(dma->txchan);

        dma->rxchan = dma_request_chan(dev, "rx");
        if (IS_ERR(dma->rxchan)) {
                ret = PTR_ERR(dma->rxchan);
                goto error_rx;
        }

        dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
                                  GFP_KERNEL);
        if (!dma->result_buf) {
                ret = -ENOMEM;
                goto error_nomem;
        }

        dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

        return 0;
error_nomem:
        dma_release_channel(dma->rxchan);
error_rx:
        dma_release_channel(dma->txchan);
        return ret;
}

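/* Release both DMA channels and free the shared result/ignore buffer. */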
void qce_dma_release(struct qce_dma_data *dma)
{
        dma_release_channel(dma->txchan);
        dma_release_channel(dma->rxchan);
        kfree(dma->result_buf);
}

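/*
 * Fill the unused entries at the tail of @sgt with the entries of
 * @new_sgl, copying at most @max_len bytes in total.  Returns the last
 * entry written, or an ERR_PTR if @sgt has no unused entries left.
 */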
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
                unsigned int max_len)
{
        struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
        unsigned int new_len;

        /* find the first unused entry in the table */
        while (sg) {
                if (!sg_page(sg))
                        break;
                sg = sg_next(sg);
        }

        if (!sg)
                return ERR_PTR(-EINVAL);

        while (new_sgl && sg && max_len) {
                new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
                sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);
                sg_last = sg;
                sg = sg_next(sg);
                new_sgl = sg_next(new_sgl);
                max_len -= new_len;
        }

        return sg_last;
}

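/*
 * Prepare and submit a single slave-DMA transaction on @chan.  The
 * descriptor is only submitted here; the transfer does not start until
 * qce_dma_issue_pending() kicks the channel.
 */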
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
                           int nents, unsigned long flags,
                           enum dma_transfer_direction dir,
                           dma_async_tx_callback cb, void *cb_param)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        if (!sg || !nents)
                return -EINVAL;

        desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
        if (!desc)
                return -EINVAL;

        desc->callback = cb;
        desc->callback_param = cb_param;
        cookie = dmaengine_submit(desc);

        return dma_submit_error(cookie);
}

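/*
 * Queue both halves of a crypto operation: the rx channel feeds the
 * request data into the engine (DMA_MEM_TO_DEV) and the tx channel
 * collects the results (DMA_DEV_TO_MEM).  The completion callback is
 * attached to the results descriptor only.
 */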
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
                     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
                     dma_async_tx_callback cb, void *cb_param)
{
        struct dma_chan *rxchan = dma->rxchan;
        struct dma_chan *txchan = dma->txchan;
        unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
        int ret;

        ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
                              NULL, NULL);
        if (ret)
                return ret;

        return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
                               cb, cb_param);
}

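/* Start execution of the descriptors queued on both channels. */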
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
        dma_async_issue_pending(dma->rxchan);
        dma_async_issue_pending(dma->txchan);
}

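/*
 * Abort the in-flight transfers on both channels, returning the first
 * error encountered.
 */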
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
        int ret;

        ret = dmaengine_terminate_all(dma->rxchan);
        return ret ?: dmaengine_terminate_all(dma->txchan);
}