// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"

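/* Enqueue a single instruction into the NPA admin queue (AQ) and busy-poll
 * the shared result memory until hardware reports completion (or ~1000us
 * have elapsed). Callers serialize on aq->lock, so only one instruction is
 * outstanding at a time.
 */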
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
                   struct npa_aq_inst_s *inst)
{
    struct admin_queue *aq = block->aq;
    struct npa_aq_res_s *result;
    int timeout = 1000;
    u64 reg, head;

    result = (struct npa_aq_res_s *)aq->res->base;

    /* Get current head pointer where to append this instruction */
    reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
    head = (reg >> 4) & AQ_PTR_MASK;

    memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
           (void *)inst, aq->inst->entry_sz);
    memset(result, 0, sizeof(*result));
    /* sync into memory */
    wmb();

    /* Ring the doorbell and wait for result */
    rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
    while (result->compcode == NPA_AQ_COMP_NOTDONE) {
        cpu_relax();
        udelay(1);
        timeout--;
        if (!timeout)
            return -EBUSY;
    }

    if (result->compcode != NPA_AQ_COMP_GOOD)
        /* TODO: Replace this with some error code */
        return -EBUSY;

    return 0;
}

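/* Translate an NPA_AQ_ENQ mailbox request into an AQ instruction: stage the
 * aura/pool context (and write mask for WRITE ops) in the shared result
 * memory, submit the instruction, then track context enable state in the
 * per-LF aura/pool bitmaps and copy the context back to the mailbox
 * response for READ ops.
 */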
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
            struct npa_aq_enq_rsp *rsp)
{
    struct rvu_hwinfo *hw = rvu->hw;
    u16 pcifunc = req->hdr.pcifunc;
    int blkaddr, npalf, rc = 0;
    struct npa_aq_inst_s inst;
    struct rvu_block *block;
    struct admin_queue *aq;
    struct rvu_pfvf *pfvf;
    void *ctx, *mask;
    bool ena;

    pfvf = rvu_get_pfvf(rvu, pcifunc);
    if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
        return NPA_AF_ERR_AQ_ENQUEUE;

    blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
    if (!pfvf->npalf || blkaddr < 0)
        return NPA_AF_ERR_AF_LF_INVALID;

    block = &hw->block[blkaddr];
    aq = block->aq;
    if (!aq) {
        dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
        return NPA_AF_ERR_AQ_ENQUEUE;
    }

    npalf = rvu_get_lf(rvu, block, pcifunc, 0);
    if (npalf < 0)
        return NPA_AF_ERR_AF_LF_INVALID;

    memset(&inst, 0, sizeof(struct npa_aq_inst_s));
    inst.cindex = req->aura_id;
    inst.lf = npalf;
    inst.ctype = req->ctype;
    inst.op = req->op;
    /* We do not currently support enqueuing multiple instructions,
     * so always use the first entry in result memory.
     */
    inst.res_addr = (u64)aq->res->iova;

    /* Hardware uses the same aq->res->base for updating the result of
     * the previous instruction, hence wait here till it is done.
     */
    spin_lock(&aq->lock);

    /* Clean result + context memory */
    memset(aq->res->base, 0, aq->res->entry_sz);
    /* Context needs to be written at RES_ADDR + 128 */
    ctx = aq->res->base + 128;
    /* Mask needs to be written at RES_ADDR + 256 */
    mask = aq->res->base + 256;

    switch (req->op) {
    case NPA_AQ_INSTOP_WRITE:
        /* Copy context and write mask */
        if (req->ctype == NPA_AQ_CTYPE_AURA) {
            memcpy(mask, &req->aura_mask,
                   sizeof(struct npa_aura_s));
            memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
        } else {
            memcpy(mask, &req->pool_mask,
                   sizeof(struct npa_pool_s));
            memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
        }
        break;
    case NPA_AQ_INSTOP_INIT:
        if (req->ctype == NPA_AQ_CTYPE_AURA) {
            if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
                rc = NPA_AF_ERR_AQ_FULL;
                break;
            }
            /* Set pool's context address */
            req->aura.pool_addr = pfvf->pool_ctx->iova +
            (req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
            memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
        } else { /* POOL's context */
            memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
        }
        break;
    case NPA_AQ_INSTOP_NOP:
    case NPA_AQ_INSTOP_READ:
    case NPA_AQ_INSTOP_LOCK:
    case NPA_AQ_INSTOP_UNLOCK:
        break;
    default:
        rc = NPA_AF_ERR_AQ_FULL;
        break;
    }

    if (rc) {
        spin_unlock(&aq->lock);
        return rc;
    }

    /* Submit the instruction to AQ */
    rc = npa_aq_enqueue_wait(rvu, block, &inst);
    if (rc) {
        spin_unlock(&aq->lock);
        return rc;
    }

    /* Set aura bitmap if aura hw context is enabled */
    if (req->ctype == NPA_AQ_CTYPE_AURA) {
        if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
            __set_bit(req->aura_id, pfvf->aura_bmap);
        if (req->op == NPA_AQ_INSTOP_WRITE) {
            ena = (req->aura.ena & req->aura_mask.ena) |
                (test_bit(req->aura_id, pfvf->aura_bmap) &
                ~req->aura_mask.ena);
            if (ena)
                __set_bit(req->aura_id, pfvf->aura_bmap);
            else
                __clear_bit(req->aura_id, pfvf->aura_bmap);
        }
    }

    /* Set pool bitmap if pool hw context is enabled */
    if (req->ctype == NPA_AQ_CTYPE_POOL) {
        if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
            __set_bit(req->aura_id, pfvf->pool_bmap);
        if (req->op == NPA_AQ_INSTOP_WRITE) {
            ena = (req->pool.ena & req->pool_mask.ena) |
                (test_bit(req->aura_id, pfvf->pool_bmap) &
                ~req->pool_mask.ena);
            if (ena)
                __set_bit(req->aura_id, pfvf->pool_bmap);
            else
                __clear_bit(req->aura_id, pfvf->pool_bmap);
        }
    }
    spin_unlock(&aq->lock);

    if (rsp) {
        /* Copy read context into mailbox */
        if (req->op == NPA_AQ_INSTOP_READ) {
            if (req->ctype == NPA_AQ_CTYPE_AURA)
                memcpy(&rsp->aura, ctx,
                       sizeof(struct npa_aura_s));
            else
                memcpy(&rsp->pool, ctx,
                       sizeof(struct npa_pool_s));
        }
    }

    return 0;
}

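/* Disable every enabled aura or pool context of an LF by issuing WRITE
 * instructions that clear the context's 'ena' bit (and 'bp_ena' for auras)
 * for each bit set in the corresponding bitmap.
 */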
static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
    struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
    struct npa_aq_enq_req aq_req;
    unsigned long *bmap;
    int id, cnt = 0;
    int err = 0, rc;

    if (!pfvf->pool_ctx || !pfvf->aura_ctx)
        return NPA_AF_ERR_AQ_ENQUEUE;

    memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
    aq_req.hdr.pcifunc = req->hdr.pcifunc;

    if (req->ctype == NPA_AQ_CTYPE_POOL) {
        aq_req.pool.ena = 0;
        aq_req.pool_mask.ena = 1;
        cnt = pfvf->pool_ctx->qsize;
        bmap = pfvf->pool_bmap;
    } else if (req->ctype == NPA_AQ_CTYPE_AURA) {
        aq_req.aura.ena = 0;
        aq_req.aura_mask.ena = 1;
        aq_req.aura.bp_ena = 0;
        aq_req.aura_mask.bp_ena = 1;
        cnt = pfvf->aura_ctx->qsize;
        bmap = pfvf->aura_bmap;
    }

    aq_req.ctype = req->ctype;
    aq_req.op = NPA_AQ_INSTOP_WRITE;

    for (id = 0; id < cnt; id++) {
        if (!test_bit(id, bmap))
            continue;
        aq_req.aura_id = id;
        rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
        if (rc) {
            err = rc;
            dev_err(rvu->dev, "Failed to disable %s:%d context\n",
                (req->ctype == NPA_AQ_CTYPE_AURA) ?
                "Aura" : "Pool", id);
        }
    }

    return err;
}

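/* When CONFIG_NDC_DIS_DYNAMIC_CACHING is set, the NPA_AQ_ENQ mailbox handler
 * follows every successful INIT with a LOCK instruction for the same
 * aura/pool context (presumably to keep it resident in NDC while dynamic
 * caching is disabled); otherwise the handler simply forwards the request
 * to rvu_npa_aq_enq_inst().
 */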
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
{
    struct npa_aq_enq_req lock_ctx_req;
    int err;

    if (req->op != NPA_AQ_INSTOP_INIT)
        return 0;

    memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
    lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
    lock_ctx_req.ctype = req->ctype;
    lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
    lock_ctx_req.aura_id = req->aura_id;
    err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
    if (err)
        dev_err(rvu->dev,
            "PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
            req->hdr.pcifunc,
            (req->ctype == NPA_AQ_CTYPE_AURA) ?
            "Aura" : "Pool", req->aura_id);
    return err;
}

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
                struct npa_aq_enq_req *req,
                struct npa_aq_enq_rsp *rsp)
{
    int err;

    err = rvu_npa_aq_enq_inst(rvu, req, rsp);
    if (!err)
        err = npa_lf_hwctx_lockdown(rvu, req);
    return err;
}
#else

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
                struct npa_aq_enq_req *req,
                struct npa_aq_enq_rsp *rsp)
{
    return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif

int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
                       struct hwctx_disable_req *req,
                       struct msg_rsp *rsp)
{
    return npa_lf_hwctx_disable(rvu, req);
}

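/* Free the HW context memory and bitmaps allocated for an NPA LF. */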
static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
    kfree(pfvf->aura_bmap);
    pfvf->aura_bmap = NULL;

    qmem_free(rvu->dev, pfvf->aura_ctx);
    pfvf->aura_ctx = NULL;

    kfree(pfvf->pool_bmap);
    pfvf->pool_bmap = NULL;

    qmem_free(rvu->dev, pfvf->pool_ctx);
    pfvf->pool_ctx = NULL;

    qmem_free(rvu->dev, pfvf->npa_qints_ctx);
    pfvf->npa_qints_ctx = NULL;
}

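/* NPA_LF_ALLOC mailbox handler: validate the request, reset the LF,
 * allocate aura/pool/qint HW context memory, program the context base
 * addresses plus aura size, caching and way-mask configuration, and
 * report stack page parameters back to the requester.
 */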
int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
                  struct npa_lf_alloc_req *req,
                  struct npa_lf_alloc_rsp *rsp)
{
    int npalf, qints, hwctx_size, err, rc = 0;
    struct rvu_hwinfo *hw = rvu->hw;
    u16 pcifunc = req->hdr.pcifunc;
    struct rvu_block *block;
    struct rvu_pfvf *pfvf;
    u64 cfg, ctx_cfg;
    int blkaddr;

    if (req->aura_sz > NPA_AURA_SZ_MAX ||
        req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
        return NPA_AF_ERR_PARAM;

    if (req->way_mask)
        req->way_mask &= 0xFFFF;

    pfvf = rvu_get_pfvf(rvu, pcifunc);
    blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
    if (!pfvf->npalf || blkaddr < 0)
        return NPA_AF_ERR_AF_LF_INVALID;

    block = &hw->block[blkaddr];
    npalf = rvu_get_lf(rvu, block, pcifunc, 0);
    if (npalf < 0)
        return NPA_AF_ERR_AF_LF_INVALID;

    /* Reset this NPA LF */
    err = rvu_lf_reset(rvu, block, npalf);
    if (err) {
        dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
        return NPA_AF_ERR_LF_RESET;
    }

    ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);

    /* Alloc memory for aura HW contexts */
    hwctx_size = 1UL << (ctx_cfg & 0xF);
    err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
             NPA_AURA_COUNT(req->aura_sz), hwctx_size);
    if (err)
        goto free_mem;

    pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
                  GFP_KERNEL);
    if (!pfvf->aura_bmap)
        goto free_mem;

    /* Alloc memory for pool HW contexts */
    hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
    err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
    if (err)
        goto free_mem;

    pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
                  GFP_KERNEL);
    if (!pfvf->pool_bmap)
        goto free_mem;

    /* Get the number of queue interrupts supported */
    cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
    qints = (cfg >> 28) & 0xFFF;

    /* Alloc memory for Qints HW contexts */
    hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
    err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
    if (err)
        goto free_mem;

    cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
    /* Clear way partition mask and set aura offset to '0' */
    cfg &= ~(BIT_ULL(34) - 1);
    /* Set aura size & enable caching of contexts */
    cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;

    rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

    /* Configure aura HW context's base */
    rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
            (u64)pfvf->aura_ctx->iova);

    /* Enable caching of qints hw context */
    rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
            BIT_ULL(36) | req->way_mask << 20);
    rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
            (u64)pfvf->npa_qints_ctx->iova);

    goto exit;

free_mem:
    npa_ctx_free(rvu, pfvf);
    rc = -ENOMEM;

exit:
    /* set stack page info */
    cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
    rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
    rsp->stack_pg_bytes = cfg & 0xFF;
    rsp->qints = (cfg >> 28) & 0xFFF;
    if (!is_rvu_otx2(rvu)) {
        cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
        rsp->cache_lines = (cfg >> 1) & 0x3F;
    }
    return rc;
}

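/* NPA_LF_FREE mailbox handler: reset the LF and release all HW context
 * memory allocated for it.
 */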
int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
                 struct msg_rsp *rsp)
{
    struct rvu_hwinfo *hw = rvu->hw;
    u16 pcifunc = req->hdr.pcifunc;
    struct rvu_block *block;
    struct rvu_pfvf *pfvf;
    int npalf, err;
    int blkaddr;

    pfvf = rvu_get_pfvf(rvu, pcifunc);
    blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
    if (!pfvf->npalf || blkaddr < 0)
        return NPA_AF_ERR_AF_LF_INVALID;

    block = &hw->block[blkaddr];
    npalf = rvu_get_lf(rvu, block, pcifunc, 0);
    if (npalf < 0)
        return NPA_AF_ERR_AF_LF_INVALID;

    /* Reset this NPA LF */
    err = rvu_lf_reset(rvu, block, npalf);
    if (err) {
        dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
        return NPA_AF_ERR_LF_RESET;
    }

    npa_ctx_free(rvu, pfvf);

    return 0;
}

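/* One-time NPA admin queue setup: program AQ result endianness, configure
 * NDC caching behaviour, allocate the AQ instruction/result memory and
 * program its size and base address into hardware.
 */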
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
    u64 cfg;
    int err;

    /* Set admin queue endianness */
    cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
    cfg |= BIT_ULL(1);
    rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
    cfg &= ~BIT_ULL(1);
    rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

    /* Do not bypass NDC cache */
    cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
    cfg &= ~0x03DULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
    /* Disable caching of stack pages */
    cfg |= 0x10ULL;
#endif
    rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

    /* For CN10K NPA BATCH DMA set 35 cache lines */
    if (!is_rvu_otx2(rvu)) {
        cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
        cfg &= ~0x7EULL;
        cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
        rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
    }
    /* Result structure can be followed by Aura/Pool context at
     * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
     * operation type. Alloc sufficient result memory for all operations.
     */
    err = rvu_aq_alloc(rvu, &block->aq,
               Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
               ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
    if (err)
        return err;

    rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
    rvu_write64(rvu, block->addr,
            NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
    return 0;
}

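/* Block-level NPA init: nothing to do if no NPA block is present, otherwise
 * bring up its admin queue.
 */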
int rvu_npa_init(struct rvu *rvu)
{
    struct rvu_hwinfo *hw = rvu->hw;
    int blkaddr;

    blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
    if (blkaddr < 0)
        return 0;

    /* Initialize admin queue */
    return npa_aq_init(rvu, &hw->block[blkaddr]);
}

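/* Free the admin queue memory allocated by npa_aq_init(). */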
void rvu_npa_freemem(struct rvu *rvu)
{
    struct rvu_hwinfo *hw = rvu->hw;
    struct rvu_block *block;
    int blkaddr;

    blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
    if (blkaddr < 0)
        return;

    block = &hw->block[blkaddr];
    rvu_aq_free(rvu, block->aq);
}

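/* LF teardown: disable any pool and aura contexts still enabled and free
 * the LF's context memory.
 */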
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
{
    struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
    struct hwctx_disable_req ctx_req;

    /* Disable all pools */
    ctx_req.hdr.pcifunc = pcifunc;
    ctx_req.ctype = NPA_AQ_CTYPE_POOL;
    npa_lf_hwctx_disable(rvu, &ctx_req);

    /* Disable all auras */
    ctx_req.ctype = NPA_AQ_CTYPE_AURA;
    npa_lf_hwctx_disable(rvu, &ctx_req);

    npa_ctx_free(rvu, pfvf);
}