// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"

static bool smmu_disable = true;

static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}

static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}

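/*
 * Allocate the per-channel DMA pools used for frame descriptors (FD),
 * frame lists (FL) and source/destination descriptors (SDD).
 */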
static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;

	dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
					      sizeof(struct dpaa2_fd),
					      sizeof(struct dpaa2_fd), 0);
	if (!dpaa2_chan->fd_pool)
		goto err;

	dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
					      sizeof(struct dpaa2_fl_entry),
					      sizeof(struct dpaa2_fl_entry), 0);
	if (!dpaa2_chan->fl_pool)
		goto err_fd;

	dpaa2_chan->sdd_pool =
		dma_pool_create("sdd_pool", dev,
				sizeof(struct dpaa2_qdma_sd_d),
				sizeof(struct dpaa2_qdma_sd_d), 0);
	if (!dpaa2_chan->sdd_pool)
		goto err_fl;

	return dpaa2_qdma->desc_allocated++;
err_fl:
	dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
	dma_pool_destroy(dpaa2_chan->fd_pool);
err:
	return -ENOMEM;
}

static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	unsigned long flags;

	LIST_HEAD(head);

	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

	dma_pool_destroy(dpaa2_chan->fd_pool);
	dma_pool_destroy(dpaa2_chan->fl_pool);
	dma_pool_destroy(dpaa2_chan->sdd_pool);
	dpaa2_qdma->desc_allocated--;
}

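/*
 * Request a command descriptor for enqueue: reuse an entry from the
 * channel's free list when possible, otherwise allocate a fresh one
 * from the per-channel DMA pools.
 */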
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
	struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
	struct device *dev = &qdma_priv->dpdmai_dev->dev;
	struct dpaa2_qdma_comp *comp_temp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	if (list_empty(&dpaa2_chan->comp_free)) {
		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
		if (!comp_temp)
			goto err;
		comp_temp->fd_virt_addr =
			dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
				       &comp_temp->fd_bus_addr);
		if (!comp_temp->fd_virt_addr)
			goto err_comp;

		comp_temp->fl_virt_addr =
			dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
				       &comp_temp->fl_bus_addr);
		if (!comp_temp->fl_virt_addr)
			goto err_fd_virt;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_fl_virt;

		comp_temp->qchan = dpaa2_chan;
		return comp_temp;
	}

	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
				     struct dpaa2_qdma_comp, list);
	list_del(&comp_temp->list);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

	comp_temp->qchan = dpaa2_chan;

	return comp_temp;

err_fl_virt:
	dma_pool_free(dpaa2_chan->fl_pool,
		      comp_temp->fl_virt_addr,
		      comp_temp->fl_bus_addr);
err_fd_virt:
	dma_pool_free(dpaa2_chan->fd_pool,
		      comp_temp->fd_virt_addr,
		      comp_temp->fd_bus_addr);
err_comp:
	kfree(comp_temp);
err:
	dev_err(dev, "Failed to request descriptor\n");
	return NULL;
}

static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
	struct dpaa2_fd *fd;

	fd = dpaa2_comp->fd_virt_addr;
	memset(fd, 0, sizeof(struct dpaa2_fd));

	/* fd populated */
	dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);

	/*
	 * Bypass memory translation, frame list format, short length
	 * disable; BMT is left disabled when the fsl-mc bus uses IOVA
	 * addresses (an IOMMU domain is present).
	 */
	if (smmu_disable)
		dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
	dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);

	dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}

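/* first frame list for the source/destination descriptor buffer */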
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
				 struct dpaa2_qdma_comp *dpaa2_comp,
				 bool wrt_changed)
{
	struct dpaa2_qdma_sd_d *sdd;

	sdd = dpaa2_comp->desc_virt_addr;
	memset(sdd, 0, 2 * (sizeof(*sdd)));

	/* source descriptor CMD */
	sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
	sdd++;

	/* dest descriptor CMD */
	if (wrt_changed)
		sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
	else
		sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);

	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	/* first frame list to source descriptor */
	dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
	dpaa2_fl_set_len(f_list, 0x20);
	dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

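/* source and destination frame list */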
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
			   dma_addr_t dst, dma_addr_t src,
			   size_t len, uint8_t fmt)
{
	/* source frame list to source buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, src);
	dpaa2_fl_set_len(f_list, len);

	/* single buffer frame or scatter gather frame */
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);

	f_list++;

	/* destination frame list to destination buffer */
	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

	dpaa2_fl_set_addr(f_list, dst);
	dpaa2_fl_set_len(f_list, len);
	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
	/* mark the final entry of the frame list */
	dpaa2_fl_set_final(f_list, QDMA_FL_F);
	/* bypass memory translation */
	if (smmu_disable)
		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
			dma_addr_t src, size_t len, ulong flags)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_fl_entry *f_list;
	bool wrt_changed;

	dpaa2_qdma = dpaa2_chan->qdma;
	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
	if (!dpaa2_comp)
		return NULL;

	wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;

	/* populate Frame descriptor */
	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);

	f_list = dpaa2_comp->fl_virt_addr;

	/* first frame list for descriptor buffer */
	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);

	f_list++;

	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);

	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}

static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct virt_dma_desc *vdesc;
	struct dpaa2_fd *fd;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	spin_lock(&dpaa2_chan->vchan.lock);
	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
		if (!vdesc)
			goto err_enqueue;
		dpaa2_comp = to_fsl_qdma_comp(vdesc);

		fd = dpaa2_comp->fd_virt_addr;

		list_del(&vdesc->node);
		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);

		err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
		if (err) {
			list_move_tail(&dpaa2_comp->list,
				       &dpaa2_chan->comp_free);
		}
	}
err_enqueue:
	spin_unlock(&dpaa2_chan->vchan.lock);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}

static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = &ls_dev->dev;
	struct dpaa2_qdma_priv *priv;
	u8 prio_def = DPDMAI_PRIO_NUM;
	int err = -EINVAL;
	int i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpqdma_id = ls_dev->obj_desc.id;

	/* Get the handle for the DPDMAI this interface is associated with */
	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_open() failed\n");
		return err;
	}

	dev_dbg(dev, "Opened dpdmai object successfully\n");

	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpdmai_attr);
	if (err) {
		dev_err(dev, "dpdmai_get_attributes() failed\n");
		goto exit;
	}

	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI major version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI minor version mismatch\n"
			"Found %u.%u, supported version is %u.%u\n",
			priv->dpdmai_attr.version.major,
			priv->dpdmai_attr.version.minor,
			DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		err = -ENOMEM;
		goto exit;
	}
	priv->ppriv = ppriv;

	for (i = 0; i < priv->num_pairs; i++) {
		err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_rx_queue() failed\n");
			goto exit;
		}
		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

		err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->tx_fqid[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_tx_queue() failed\n");
			goto exit;
		}
		ppriv->req_fqid = priv->tx_fqid[i];
		ppriv->prio = i;
		ppriv->priv = priv;
		ppriv++;
	}

	return 0;
exit:
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	return err;
}

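/*
 * Notification callback for the response frame queue: pull completed
 * frames from the store and complete the matching in-flight descriptors.
 */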
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
			struct dpaa2_qdma_priv_per_prio, nctx);
	struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
	struct dpaa2_qdma_priv *priv = ppriv->priv;
	u32 n_chans = priv->dpaa2_qdma->n_chans;
	struct dpaa2_qdma_chan *qchan;
	const struct dpaa2_fd *fd_eq;
	const struct dpaa2_fd *fd;
	struct dpaa2_dq *dq;
	int is_last = 0;
	int found;
	u8 status;
	int err;
	int i;

	do {
		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err);

	while (!is_last) {
		do {
			dq = dpaa2_io_store_next(ppriv->store, &is_last);
		} while (!is_last && !dq);
		if (!dq) {
			dev_err(priv->dev, "FQID returned no valid frames!\n");
			continue;
		}

		/* obtain FD and process the error */
		fd = dpaa2_dq_fd(dq);

		status = dpaa2_fd_get_ctrl(fd) & 0xff;
		if (status)
			dev_err(priv->dev, "FD error occurred\n");
		found = 0;
		for (i = 0; i < n_chans; i++) {
			qchan = &priv->dpaa2_qdma->chans[i];
			spin_lock(&qchan->queue_lock);
			if (list_empty(&qchan->comp_used)) {
				spin_unlock(&qchan->queue_lock);
				continue;
			}
			list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
						 &qchan->comp_used, list) {
				fd_eq = dpaa2_comp->fd_virt_addr;

				if (le64_to_cpu(fd_eq->simple.addr) ==
				    le64_to_cpu(fd->simple.addr)) {
					spin_lock(&qchan->vchan.lock);
					vchan_cookie_complete(&dpaa2_comp->vdesc);
					spin_unlock(&qchan->vchan.lock);
					found = 1;
					break;
				}
			}
			spin_unlock(&qchan->queue_lock);
			if (found)
				break;
		}
	}

	dpaa2_io_service_rearm(NULL, ctx);
}

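/*
 * Register a notification context and create a dequeue store for each
 * response frame queue.
 */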
static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	int err = -EINVAL;
	int i, num;

	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		ppriv->nctx.is_cdan = 0;
		ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
		ppriv->nctx.id = ppriv->rsp_fqid;
		ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
		err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
		if (err) {
			dev_err(dev, "Notification register failed\n");
			goto err_service;
		}

		ppriv->store =
			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
		if (!ppriv->store) {
			err = -ENOMEM;
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			goto err_store;
		}

		ppriv++;
	}
	return 0;

err_store:
	dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
	ppriv--;
	while (ppriv >= priv->ppriv) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		dpaa2_io_store_destroy(ppriv->store);
		ppriv--;
	}
	return err;
}

static void dpaa2_dpdmai_store_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_store_destroy(ppriv->store);
		ppriv++;
	}
}

static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
		ppriv++;
	}
}

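/* route each DPDMAI Rx queue to its registered DPIO notification context */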
static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int i, num;
	int err;

	ls_dev = to_fsl_mc_device(dev);
	num = priv->num_pairs;
	ppriv = priv->ppriv;
	for (i = 0; i < num; i++) {
		rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
				       DPDMAI_QUEUE_OPT_DEST;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
		rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		rx_queue_cfg.dest_cfg.priority = ppriv->prio;
		err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  rx_queue_cfg.dest_cfg.priority,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpdmai_set_rx_queue() failed\n");
			return err;
		}

		ppriv++;
	}

	return 0;
}

static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev;
	int err = 0;
	int i;

	ls_dev = to_fsl_mc_device(dev);

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv->nctx.qman64 = 0;
		ppriv->nctx.dpio_id = 0;
		ppriv++;
	}

	err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
	if (err)
		dev_err(dev, "dpdmai_reset() failed\n");

	return err;
}

static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
				   struct list_head *head)
{
	struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
	unsigned long flags;

	list_for_each_entry_safe(comp_tmp, _comp_tmp,
				 head, list) {
		spin_lock_irqsave(&qchan->queue_lock, flags);
		list_del(&comp_tmp->list);
		spin_unlock_irqrestore(&qchan->queue_lock, flags);
		dma_pool_free(qchan->fd_pool,
			      comp_tmp->fd_virt_addr,
			      comp_tmp->fd_bus_addr);
		dma_pool_free(qchan->fl_pool,
			      comp_tmp->fl_virt_addr,
			      comp_tmp->fl_bus_addr);
		dma_pool_free(qchan->sdd_pool,
			      comp_tmp->desc_virt_addr,
			      comp_tmp->desc_bus_addr);
		kfree(comp_tmp);
	}
}

static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_chan *qchan;
	int num, i;

	num = dpaa2_qdma->n_chans;
	for (i = 0; i < num; i++) {
		qchan = &dpaa2_qdma->chans[i];
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
		dma_pool_destroy(qchan->fd_pool);
		dma_pool_destroy(qchan->fl_pool);
		dma_pool_destroy(qchan->sdd_pool);
	}
}

static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct dpaa2_qdma_comp *dpaa2_comp;
	struct dpaa2_qdma_chan *qchan;
	unsigned long flags;

	dpaa2_comp = to_fsl_qdma_comp(vdesc);
	qchan = dpaa2_comp->qchan;
	spin_lock_irqsave(&qchan->queue_lock, flags);
	list_move_tail(&dpaa2_comp->list, &qchan->comp_free);
	spin_unlock_irqrestore(&qchan->queue_lock, flags);
}

static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
	struct dpaa2_qdma_chan *dpaa2_chan;
	int num = priv->num_pairs;
	int i;

	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
		dpaa2_chan = &dpaa2_qdma->chans[i];
		dpaa2_chan->qdma = dpaa2_qdma;
		dpaa2_chan->fqid = priv->tx_fqid[i % num];
		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
		spin_lock_init(&dpaa2_chan->queue_lock);
		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
	}
	return 0;
}

static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
{
	struct device *dev = &dpdmai_dev->dev;
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	int err;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(dev, priv);
	priv->dpdmai_dev = dpdmai_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);
	if (priv->iommu_domain)
		smmu_disable = false;

	/* obtain a MC portal */
	err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_mcportal;
	}

	/* DPDMAI initialization */
	err = dpaa2_qdma_setup(dpdmai_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
		goto err_dpdmai_setup;
	}

	/* DPIO */
	err = dpaa2_qdma_dpio_setup(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPDMAI binding to DPIO */
	err = dpaa2_dpdmai_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
		goto err_bind;
	}

	/* DPDMAI enable */
	err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_enable() failed\n");
		goto err_enable;
	}

	dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
	if (!dpaa2_qdma) {
		err = -ENOMEM;
		goto err_eng;
	}

	priv->dpaa2_qdma = dpaa2_qdma;
	dpaa2_qdma->priv = priv;

	dpaa2_qdma->desc_allocated = 0;
	dpaa2_qdma->n_chans = NUM_CH;

	dpaa2_dpdmai_init_channels(dpaa2_qdma);

	if (soc_device_match(soc_fixup_tuning))
		dpaa2_qdma->qdma_wrtype_fixup = true;
	else
		dpaa2_qdma->qdma_wrtype_fixup = false;

	dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);

	dpaa2_qdma->dma_dev.dev = dev;
	dpaa2_qdma->dma_dev.device_alloc_chan_resources =
		dpaa2_qdma_alloc_chan_resources;
	dpaa2_qdma->dma_dev.device_free_chan_resources =
		dpaa2_qdma_free_chan_resources;
	dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
	dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
	dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;

	err = dma_async_device_register(&dpaa2_qdma->dma_dev);
	if (err) {
		dev_err(dev, "Can't register NXP QDMA engine.\n");
		goto err_dpaa2_qdma;
	}

	return 0;

err_dpaa2_qdma:
	kfree(dpaa2_qdma);
err_eng:
	dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_enable:
	dpaa2_dpdmai_dpio_unbind(priv);
err_bind:
	dpaa2_dpdmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
err_dpio_setup:
	kfree(priv->ppriv);
	dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_dpdmai_setup:
	fsl_mc_portal_free(priv->mc_io);
err_mcportal:
	kfree(priv);
	dev_set_drvdata(dev, NULL);
	return err;
}

static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_engine *dpaa2_qdma;
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);
	dpaa2_qdma = priv->dpaa2_qdma;

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpaa2_dpdmai_store_free(priv);
	dpaa2_dpdmai_dpio_free(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	fsl_mc_portal_free(priv->mc_io);
	dev_set_drvdata(dev, NULL);
	dpaa2_dpdmai_free_channels(dpaa2_qdma);

	dma_async_device_unregister(&dpaa2_qdma->dma_dev);
	kfree(priv);
	kfree(dpaa2_qdma);

	return 0;
}

static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv *priv;
	struct device *dev;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
	dpaa2_dpdmai_dpio_unbind(priv);
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
}

static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpdmai",
	},
	{ .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_qdma_driver = {
	.driver = {
		.name = "dpaa2-qdma",
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_qdma_probe,
	.remove = dpaa2_qdma_remove,
	.shutdown = dpaa2_qdma_shutdown,
	.match_id_table = dpaa2_qdma_id_table,
};

static int __init dpaa2_qdma_driver_init(void)
{
	return fsl_mc_driver_register(&dpaa2_qdma_driver);
}
late_initcall(dpaa2_qdma_driver_init);

static void __exit fsl_qdma_exit(void)
{
	fsl_mc_driver_unregister(&dpaa2_qdma_driver);
}
module_exit(fsl_qdma_exit);

MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");