// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM/SEC 4.x QI transport/backend driver
 * Queue Interface backend functionality
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017, 2019-2020 NXP
 */

#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <soc/fsl/qman.h>

#include "debugfs.h"
#include "regs.h"
#include "qi.h"
#include "desc.h"
#include "intern.h"
#include "desc_constr.h"

#define PREHDR_RSLS_SHIFT	31
#define PREHDR_ABS		BIT(25)

/*
 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 * so that resources used by the in-flight buffers do not become a memory hog.
 */
#define MAX_RSP_FQ_BACKLOG_PER_CPU	256

#define CAAM_QI_ENQUEUE_RETRIES	10000

#define CAAM_NAPI_WEIGHT	63

/*
 * caam_napi - struct holding CAAM NAPI-related params
 * @irqtask: IRQ task for QI backend
 * @p: QMan portal
 */
struct caam_napi {
	struct napi_struct irqtask;
	struct qman_portal *p;
};

/*
 * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
 *                     pending responses expected on each cpu.
 * @caam_napi: CAAM NAPI params
 * @net_dev: netdev used by NAPI
 * @rsp_fq: response FQ from CAAM
 */
struct caam_qi_pcpu_priv {
	struct caam_napi caam_napi;
	struct net_device net_dev;
	struct qman_fq *rsp_fq;
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
static DEFINE_PER_CPU(int, last_cpu);

/*
 * caam_qi_priv - CAAM QI backend private params
 * @cgr: QMan congestion group record
 */
struct caam_qi_priv {
	struct qman_cgr cgr;
};

static struct caam_qi_priv qipriv ____cacheline_aligned;

/*
 * This is written by only one core - the one that initialized the CGR - and
 * read by multiple cores (all the others).
 */
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 * doing malloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa-ethernet driver.
 *       This would pose a problem for userspace application processing which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here.
 */
static struct kmem_cache *qi_cache;

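/*
 * Translate the I/O virtual address carried in a frame descriptor back to a
 * kernel virtual address. With an active IOMMU domain the IOVA is first
 * resolved to a physical address; otherwise the FD already holds a physical
 * address.
 */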
static void *caam_iova_to_virt(struct iommu_domain *domain,
			       dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

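/*
 * Enqueue a crypto request on the driver context's request FQ. The compound
 * frame (req->fd_sgt) is DMA-mapped and its bus address is programmed into
 * the FD; qman_enqueue() is retried while it returns -EBUSY, up to
 * CAAM_QI_ENQUEUE_RETRIES times. On success the context's in-flight
 * reference count is bumped; it is dropped again when the response (or an
 * enqueue rejection) comes back.
 */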
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
	struct qm_fd fd;
	dma_addr_t addr;
	int ret;
	int num_retries = 0;

	qm_fd_clear_fd(&fd);
	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));

	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
			      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}
	qm_fd_addr_set64(&fd, addr);

	do {
		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
		if (likely(!ret)) {
			refcount_inc(&req->drv_ctx->refcnt);
			return 0;
		}

		if (ret != -EBUSY)
			break;
		num_retries++;
	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);

	dev_err(qidev, "qman_enqueue failed: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(caam_qi_enqueue);

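/*
 * Enqueue rejection notification (ERN) callback: QMan could not deliver the
 * frame to CAAM. Drop the in-flight reference, unmap the compound frame and
 * complete the request, reporting either the FD status or a QI-sourced error.
 */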
static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	const struct qm_fd *fd;
	struct caam_drv_req *drv_req;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	struct caam_drv_private *priv = dev_get_drvdata(qidev);

	fd = &msg->ern.fd;

	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
	if (!drv_req) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return;
	}

	refcount_dec(&drv_req->drv_ctx->refcnt);

	if (qm_fd_get_format(fd) != qm_fd_compound) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	if (fd->status)
		drv_req->cbk(drv_req, be32_to_cpu(fd->status));
	else
		drv_req->cbk(drv_req, JRSTA_SSRC_QI);
}

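/*
 * Allocate and initialise a request FQ towards the CAAM DCP channel. The FQ
 * gets a dynamic FQID, points its responses at @rsp_fq via context_b and
 * carries the DMA address of the preheader + shared descriptor in context_a.
 * @fq_sched_flag selects whether the FQ starts out scheduled or parked.
 */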
static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	opts.fqd.cgid = qipriv.cgr.cgrid;

	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
		smp_processor_id());
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}

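/*
 * Drain a retired FQ with a volatile dequeue command and poll the affine
 * portal's DQRR until the FQ is no longer marked not-empty.
 */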
static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
{
	int ret;

	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
				    QMAN_VOLATILE_FLAG_FINISH,
				    QM_VDQCR_PRECEDENCE_VDQCR |
				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
	if (ret) {
		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
		return ret;
	}

	do {
		struct qman_portal *p;

		p = qman_get_affine_portal(smp_processor_id());
		qman_p_poll_dqrr(p, 16);
	} while (fq->flags & QMAN_FQ_STATE_NE);

	return 0;
}

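/*
 * Retire, drain and put an FQ out of service, then free it. Retirement may
 * complete asynchronously, in which case the FQ state is polled until it
 * reaches the retired state before the queue is emptied and OOS'ed.
 */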
static int kill_fq(struct device *qidev, struct qman_fq *fq)
{
	u32 flags;
	int ret;

	ret = qman_retire_fq(fq, &flags);
	if (ret < 0) {
		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
		return ret;
	}

	if (!ret)
		goto empty_fq;

	/* Async FQ retirement condition */
	if (ret == 1) {
		/* Retry till FQ gets in retired state */
		do {
			msleep(20);
		} while (fq->state != qman_fq_state_retired);

		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
	}

empty_fq:
	if (fq->flags & QMAN_FQ_STATE_NE) {
		ret = empty_retired_fq(qidev, fq);
		if (ret) {
			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
				fq->fqid);
			return ret;
		}
	}

	ret = qman_oos_fq(fq);
	if (ret)
		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);

	qman_destroy_fq(fq);
	kfree(fq);

	return ret;
}

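/*
 * Wait for a request FQ to drain: first until QMan reports a zero frame
 * count, then until the context's reference count shows that CAAM has
 * returned all in-flight responses (bounded by ~10 * 20 ms of polling).
 */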
static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
{
	int ret;
	int retries = 10;
	struct qm_mcr_queryfq_np np;

	/* Wait till the older CAAM FQ get empty */
	do {
		ret = qman_query_fq_np(fq, &np);
		if (ret)
			return ret;

		if (!qm_mcr_np_get(&np, frm_cnt))
			break;

		msleep(20);
	} while (1);

	/* Wait until pending jobs from this FQ are processed by CAAM */
	do {
		if (refcount_read(&drv_ctx->refcnt) == 1)
			break;

		msleep(20);
	} while (--retries);

	if (!retries)
		dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n",
			      refcount_read(&drv_ctx->refcnt), fq->fqid);

	return 0;
}

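/*
 * Swap in an updated shared descriptor for a live driver context: hook up a
 * new (parked) request FQ, drain and kill the old one, rewrite the preheader
 * and descriptor, sync them for device, then schedule the new FQ. If the old
 * FQ cannot be drained, the context reverts to it and the update fails.
 */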
int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
		return -EINVAL;
	}

	/* Note down older req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (IS_ERR(new_fq)) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return PTR_ERR(new_fq);
	}

	/* Hook up new FQ to context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the older FQ */
	ret = empty_caam_fq(old_fq, drv_ctx);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* We can revert to older req FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");

		return ret;
	}

	/*
	 * Re-initialise pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * We need to kill the new FQ and revert to the old one. Since
		 * the descriptor is already updated, this is still treated as
		 * a success case.
		 */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ kill failed\n");
	}

	return 0;
}
EXPORT_SYMBOL(caam_drv_ctx_update);

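/*
 * Create a driver context for a crypto session: build and DMA-map the
 * preheader + shared descriptor, pick a CPU that owns an affine QMan portal
 * (round-robin via last_cpu when the caller's choice does not), bind that
 * CPU's response FQ and create a scheduled request FQ towards CAAM.
 */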
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
				       int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
	 * and dma-map them.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (IS_ERR(drv_ctx->req_fq)) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	/* init reference counter used to track references to request FQ */
	refcount_set(&drv_ctx->refcnt, 1);

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
EXPORT_SYMBOL(caam_drv_ctx_init);

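/*
 * qi_cache_alloc()/qi_cache_free() hand out short-lived request buffers
 * (CAAM_QI_MEMCACHE_SIZE bytes) from the dedicated slab cache instead of
 * hitting the general-purpose allocator on the hot path.
 */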
void *qi_cache_alloc(gfp_t flags)
{
	return kmem_cache_alloc(qi_cache, flags);
}
EXPORT_SYMBOL(qi_cache_alloc);

void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
EXPORT_SYMBOL(qi_cache_free);

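/*
 * NAPI poll handler: service up to @budget DQRR entries on the portal. Once
 * fewer entries than the budget are consumed, complete NAPI and re-enable
 * the dequeue-ready (DQRI) interrupt source.
 */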
static int caam_qi_poll(struct napi_struct *napi, int budget)
{
	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);

	int cleaned = qman_p_poll_dqrr(np->p, budget);

	if (cleaned < budget) {
		napi_complete(napi);
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	}

	return cleaned;
}

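/*
 * Release a driver context: kill its request FQ, unmap the preheader +
 * shared descriptor and free the context itself.
 */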
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
{
	if (IS_ERR_OR_NULL(drv_ctx))
		return;

	/* Remove request FQ */
	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");

	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
			 DMA_BIDIRECTIONAL);
	kfree(drv_ctx);
}
EXPORT_SYMBOL(caam_drv_ctx_rel);

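/*
 * Devres teardown action registered from caam_qi_init(): disable and delete
 * the per-CPU NAPI contexts, kill the per-CPU response FQs, tear down the
 * congestion group and destroy the request buffer cache. @data is the QI
 * backend device.
 */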
static void caam_qi_shutdown(void *data)
{
	int i;
	struct device *qidev = data;
	struct caam_qi_priv *priv = &qipriv;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	qman_delete_cgr_safe(&priv->cgr);
	qman_release_cgrid(priv->cgr.cgrid);

	kmem_cache_destroy(qi_cache);
}

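/*
 * Congestion state change callback for the CAAM CGR: mirror the state into
 * caam_congested so that callers can back off while CAAM is congested.
 */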
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
	caam_congested = congested;

	if (congested) {
		caam_debugfs_qi_congested();

		pr_debug_ratelimited("CAAM entered congestion\n");
	} else {
		pr_debug_ratelimited("CAAM exited congestion\n");
	}
}

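/*
 * Called from the DQRR callback when it runs in hard-IRQ context: mask the
 * portal's DQRI interrupt source and hand the rest of the work to NAPI.
 * Returns 1 when NAPI was scheduled, 0 otherwise.
 */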
static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np,
				 bool sched_napi)
{
	if (sched_napi) {
		/* Disable QMan IRQ source and invoke NAPI */
		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
		np->p = p;
		napi_schedule(&np->irqtask);
		return 1;
	}
	return 0;
}

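/*
 * DQRR consume callback for the response FQs: translate the FD address back
 * to the originating request, drop the in-flight reference, report any CAAM
 * error status (ICV-check failures are left to the upper layer), unmap the
 * compound frame and complete the request.
 */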
static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
						    struct qman_fq *rsp_fq,
						    const struct qm_dqrr_entry *dqrr,
						    bool sched_napi)
{
	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	struct caam_drv_private *priv = dev_get_drvdata(qidev);
	u32 status;

	if (caam_qi_napi_schedule(p, caam_napi, sched_napi))
		return qman_cb_dqrr_stop;

	fd = &dqrr->fd;

	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
	if (unlikely(!drv_req)) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return qman_cb_dqrr_consume;
	}

	refcount_dec(&drv_req->drv_ctx->refcnt);

	status = be32_to_cpu(fd->status);
	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
			dev_err_ratelimited(qidev,
					    "Error: %#x in CAAM response FD\n",
					    status);
	}

	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, status);
	return qman_cb_dqrr_consume;
}

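/*
 * Allocate and schedule the response FQ for one CPU. The FQ dequeues to that
 * CPU's affine portal with context and data stashing enabled, so responses
 * land warm in the consuming CPU's cache.
 */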
static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
	struct qm_mcc_initfq opts;
	struct qman_fq *fq;
	int ret;

	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
	if (!fq)
		return -ENOMEM;

	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (ret) {
		dev_err(qidev, "Rsp FQ create failed\n");
		kfree(fq);
		return -ENODEV;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
						QM_STASHING_EXCL_DATA;
	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);

	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret) {
		dev_err(qidev, "Rsp FQ init failed\n");
		kfree(fq);
		return -ENODEV;
	}

	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;

	dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
	return 0;
}

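/*
 * Set up the congestion group shared by all CAAM QI FQs. The threshold is
 * MAX_RSP_FQ_BACKLOG_PER_CPU frames per affine-portal CPU; crossing it
 * invokes cgr_cb() and toggles caam_congested.
 */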
static int init_cgr(struct device *qidev)
{
	int ret;
	struct qm_mcc_initcgr opts;
	const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
			MAX_RSP_FQ_BACKLOG_PER_CPU;

	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
	if (ret) {
		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
		return ret;
	}

	qipriv.cgr.cb = cgr_cb;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
				   QM_CGR_WE_MODE);
	opts.cgr.cscn_en = QM_CGR_EN;
	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);

	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret) {
		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
			qipriv.cgr.cgrid);
		return ret;
	}

	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
	return 0;
}

static int alloc_rsp_fqs(struct device *qidev)
{
	int ret, i;
	const cpumask_t *cpus = qman_affine_cpus();

	/* Now create response FQs */
	for_each_cpu(i, cpus) {
		ret = alloc_rsp_fq_cpu(qidev, i);
		if (ret) {
			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n",
				i);
			return ret;
		}
	}

	return 0;
}

static void free_rsp_fqs(void)
{
	int i;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus)
		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}

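/*
 * One-time QI backend initialisation, called from the CAAM controller probe:
 * congestion group, per-CPU response FQs, per-CPU NAPI contexts (backed by
 * dummy netdevs), the request buffer cache, debugfs entries and a
 * devres-managed shutdown action.
 */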
int caam_qi_init(struct platform_device *caam_pdev)
{
	int err, i;
	struct device *ctrldev = &caam_pdev->dev, *qidev;
	struct caam_drv_private *ctrlpriv;
	const cpumask_t *cpus = qman_affine_cpus();

	ctrlpriv = dev_get_drvdata(ctrldev);
	qidev = ctrldev;

	/* Initialize the congestion detection */
	err = init_cgr(qidev);
	if (err) {
		dev_err(qidev, "CGR initialization failed: %d\n", err);
		return err;
	}

	/* Allocate response FQs */
	err = alloc_rsp_fqs(qidev);
	if (err) {
		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
		free_rsp_fqs();
		return err;
	}

	/*
	 * Enable the NAPI contexts on each core that has an affine portal.
	 */
	for_each_cpu(i, cpus) {
		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
		struct caam_napi *caam_napi = &priv->caam_napi;
		struct napi_struct *irqtask = &caam_napi->irqtask;
		struct net_device *net_dev = &priv->net_dev;

		net_dev->dev = *qidev;
		INIT_LIST_HEAD(&net_dev->napi_list);

		netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll,
					 CAAM_NAPI_WEIGHT);

		napi_enable(irqtask);
	}

	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
				     SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(qidev, "Can't allocate CAAM cache\n");
		free_rsp_fqs();
		return -ENOMEM;
	}

	caam_debugfs_qi_init(ctrlpriv);

	/* caam_qi_shutdown() expects the QI backend device as its argument */
	err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
	if (err)
		return err;

	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
	return 0;
}