// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
        struct idxd_dma_chan *idxd_chan;

        idxd_chan = container_of(c, struct idxd_dma_chan, chan);
        return idxd_chan->wq;
}

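/*
 * Translate the DSA completion status of a finished descriptor into a
 * dmaengine result, complete the cookie and invoke the client callback,
 * then optionally return the descriptor to the WQ's free list.
 */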
void idxd_dma_complete_txd(struct idxd_desc *desc,
                           enum idxd_complete_type comp_type,
                           bool free_desc)
{
        struct idxd_device *idxd = desc->wq->idxd;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_result res;
        int complete = 1;

        if (desc->completion->status == DSA_COMP_SUCCESS) {
                res.result = DMA_TRANS_NOERROR;
        } else if (desc->completion->status) {
                if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
                    desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
                    idxd_queue_int_handle_resubmit(desc))
                        return;
                res.result = DMA_TRANS_WRITE_FAILED;
        } else if (comp_type == IDXD_COMPLETE_ABORT) {
                res.result = DMA_TRANS_ABORTED;
        } else {
                complete = 0;
        }

        tx = &desc->txd;
        if (complete && tx->cookie) {
                dma_cookie_complete(tx);
                dma_descriptor_unmap(tx);
                dmaengine_desc_get_callback_invoke(tx, &res);
                tx->callback = NULL;
                tx->callback_result = NULL;
        }

        if (free_desc)
                idxd_free_desc(desc->wq, desc);
}

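/*
 * Always request a completion record; additionally request a completion
 * interrupt when the client passed DMA_PREP_INTERRUPT.
 */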
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
        *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
        if (flags & DMA_PREP_INTERRUPT)
                *desc_flags |= IDXD_OP_FLAG_RCI;
}

static inline void set_completion_address(struct idxd_desc *desc,
                                          u64 *compl_addr)
{
        *compl_addr = desc->compl_dma;
}

static inline void idxd_prep_desc_common(struct idxd_wq *wq,
                                         struct dsa_hw_desc *hw, char opcode,
                                         u64 addr_f1, u64 addr_f2, u64 len,
                                         u64 compl, u32 flags)
{
        hw->flags = flags;
        hw->opcode = opcode;
        hw->src_addr = addr_f1;
        hw->dst_addr = addr_f2;
        hw->xfer_size = len;
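        /*
         * Descriptors submitted from the kernel are marked privileged; for a
         * dedicated WQ the hardware takes the privilege setting from WQCFG
         * rather than from this field.
         */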
        hw->priv = 1;
        hw->completion_addr = compl;
}

static struct dma_async_tx_descriptor *
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
{
        struct idxd_wq *wq = to_idxd_wq(c);
        u32 desc_flags;
        struct idxd_desc *desc;

        if (wq->state != IDXD_WQ_ENABLED)
                return NULL;

        op_flag_setup(flags, &desc_flags);
        desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
        if (IS_ERR(desc))
                return NULL;

        idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
                              0, 0, 0, desc->compl_dma, desc_flags);
        desc->txd.flags = flags;
        return &desc->txd;
}

static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                       dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct idxd_wq *wq = to_idxd_wq(c);
        u32 desc_flags;
        struct idxd_device *idxd = wq->idxd;
        struct idxd_desc *desc;

        if (wq->state != IDXD_WQ_ENABLED)
                return NULL;

        if (len > idxd->max_xfer_bytes)
                return NULL;

        op_flag_setup(flags, &desc_flags);
        desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
        if (IS_ERR(desc))
                return NULL;

        idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
                              dma_src, dma_dest, len, desc->compl_dma,
                              desc_flags);

        desc->txd.flags = flags;

        return &desc->txd;
}

static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct idxd_wq *wq = to_idxd_wq(chan);
        struct device *dev = &wq->idxd->pdev->dev;

        idxd_wq_get(wq);
        dev_dbg(dev, "%s: client_count: %d\n", __func__,
                idxd_wq_refcount(wq));
        return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
        struct idxd_wq *wq = to_idxd_wq(chan);
        struct device *dev = &wq->idxd->pdev->dev;

        idxd_wq_put(wq);
        dev_dbg(dev, "%s: client_count: %d\n", __func__,
                idxd_wq_refcount(wq));
}

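/*
 * Completions are delivered in whatever order the hardware finishes
 * descriptors, so the device advertises DMA_COMPLETION_NO_ORDER and no
 * per-cookie progress is tracked here.
 */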
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        return DMA_OUT_OF_ORDER;
}

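/*
 * issue_pending() does not need to do anything since tx_submit() already
 * pushes the descriptor to the hardware.
 */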
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

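/*
 * Assign a cookie and hand the descriptor to the work queue. On submission
 * failure the descriptor is returned to the free list and the error code is
 * propagated to the caller.
 */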
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *c = tx->chan;
        struct idxd_wq *wq = to_idxd_wq(c);
        dma_cookie_t cookie;
        int rc;
        struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

        cookie = dma_cookie_assign(tx);

        rc = idxd_submit_desc(wq, desc);
        if (rc < 0) {
                idxd_free_desc(wq, desc);
                return rc;
        }

        return cookie;
}

static void idxd_dma_release(struct dma_device *device)
{
        struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

        kfree(idxd_dma);
}

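/*
 * Register one dmaengine device per idxd device. Memcpy support is only
 * advertised when the device reports the MEMMOVE operation in its operation
 * capability register.
 */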
int idxd_register_dma_device(struct idxd_device *idxd)
{
        struct idxd_dma_dev *idxd_dma;
        struct dma_device *dma;
        struct device *dev = &idxd->pdev->dev;
        int rc;

        idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
        if (!idxd_dma)
                return -ENOMEM;

        dma = &idxd_dma->dma;
        INIT_LIST_HEAD(&dma->channels);
        dma->dev = dev;

        dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
        dma_cap_set(DMA_PRIVATE, dma->cap_mask);
        dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
        dma->device_release = idxd_dma_release;

        dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
        if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
                dma_cap_set(DMA_MEMCPY, dma->cap_mask);
                dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
        }

        dma->device_tx_status = idxd_dma_tx_status;
        dma->device_issue_pending = idxd_dma_issue_pending;
        dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
        dma->device_free_chan_resources = idxd_dma_free_chan_resources;

        rc = dma_async_device_register(dma);
        if (rc < 0) {
                kfree(idxd_dma);
                return rc;
        }

        idxd_dma->idxd = idxd;
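        /*
         * This pointer is protected by the references taken on the dma_chans
         * and remains valid until all of the channels have been released.
         */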
        idxd->idxd_dma = idxd_dma;
        return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
        dma_async_device_unregister(&idxd->idxd_dma->dma);
}

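/*
 * Expose a work queue as a DMA channel: link the channel into the dmaengine
 * device, point every preallocated descriptor's txd at this channel, and take
 * a reference on the WQ's config device for the lifetime of the channel.
 */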
static int idxd_register_dma_channel(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct dma_device *dma = &idxd->idxd_dma->dma;
        struct device *dev = &idxd->pdev->dev;
        struct idxd_dma_chan *idxd_chan;
        struct dma_chan *chan;
        int rc, i;

        idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
        if (!idxd_chan)
                return -ENOMEM;

        chan = &idxd_chan->chan;
        chan->device = dma;
        list_add_tail(&chan->device_node, &dma->channels);

        for (i = 0; i < wq->num_descs; i++) {
                struct idxd_desc *desc = wq->descs[i];

                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = idxd_dma_tx_submit;
        }

        rc = dma_async_device_channel_register(dma, chan);
        if (rc < 0) {
                kfree(idxd_chan);
                return rc;
        }

        wq->idxd_chan = idxd_chan;
        idxd_chan->wq = wq;
        get_device(wq_confdev(wq));

        return 0;
}

static void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
        struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
        struct dma_chan *chan = &idxd_chan->chan;
        struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

        dma_async_device_channel_unregister(&idxd_dma->dma, chan);
        list_del(&chan->device_node);
        kfree(wq->idxd_chan);
        wq->idxd_chan = NULL;
        put_device(wq_confdev(wq));
}

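/*
 * Probe path for the "dmaengine" idxd sub-driver: mark the WQ as a kernel
 * work queue, enable it, and register it as a DMA channel. Errors unwind in
 * reverse order under wq_lock.
 */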
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
        struct device *dev = &idxd_dev->conf_dev;
        struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
        struct idxd_device *idxd = wq->idxd;
        int rc;

        if (idxd->state != IDXD_DEV_ENABLED)
                return -ENXIO;

        mutex_lock(&wq->wq_lock);
        wq->type = IDXD_WQT_KERNEL;

        rc = drv_enable_wq(wq);
        if (rc < 0) {
                dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
                rc = -ENXIO;
                goto err;
        }

        rc = idxd_register_dma_channel(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
                dev_dbg(dev, "Failed to register dma channel\n");
                goto err_dma;
        }

        idxd->cmd_status = 0;
        mutex_unlock(&wq->wq_lock);
        return 0;

err_dma:
        drv_disable_wq(wq);
err:
        wq->type = IDXD_WQT_NONE;
        mutex_unlock(&wq->wq_lock);
        return rc;
}

static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
        struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

        mutex_lock(&wq->wq_lock);
        __idxd_wq_quiesce(wq);
        idxd_unregister_dma_channel(wq);
        drv_disable_wq(wq);
        mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
        IDXD_DEV_WQ,
        IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_dmaengine_drv = {
        .probe = idxd_dmaengine_drv_probe,
        .remove = idxd_dmaengine_drv_remove,
        .name = "dmaengine",
        .type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);