// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "otx2_cptlf.h"
#include "otx2_cpt_reqmgr.h"
#include "rvu_reg.h"

#define CSR_DELAY 30

#define LOADFVC_RLEN 8
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08

#define CTX_FLUSH_TIMER_CNT 0xFFFFFF

struct fw_info_t {
    struct list_head ucodes;
};

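/*
 * Collapse the per-engine-type bitmaps of a group into a single bitmap
 * covering all engines reserved by that group. Callers treat a zero
 * bmap.size as the "no engines reserved" error indication, e.g.:
 *
 *     bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
 *     if (!bmap.size)
 *         return -EINVAL;
 */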
static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
                    struct otx2_cpt_eng_grp_info *eng_grp)
{
    struct otx2_cpt_bitmap bmap = { {0} };
    bool found = false;
    int i;

    if (eng_grp->g->engs_num < 0 ||
        eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
        dev_err(dev, "unsupported number of engines %d on octeontx2\n",
            eng_grp->g->engs_num);
        return bmap;
    }

    for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
        if (eng_grp->engs[i].type) {
            bitmap_or(bmap.bits, bmap.bits,
                  eng_grp->engs[i].bmap,
                  eng_grp->g->engs_num);
            bmap.size = eng_grp->g->engs_num;
            found = true;
        }
    }

    if (!found)
        dev_err(dev, "No engines reserved for engine group %d\n",
            eng_grp->idx);
    return bmap;
}

static int is_eng_type(int val, int eng_type)
{
    return val & (1 << eng_type);
}

static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
{
    if (eng_grp->ucode[1].type)
        return true;
    else
        return false;
}

static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
                   const char *filename)
{
    strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
    char *str = "unknown";

    switch (eng_type) {
    case OTX2_CPT_SE_TYPES:
        str = "SE";
        break;

    case OTX2_CPT_IE_TYPES:
        str = "IE";
        break;

    case OTX2_CPT_AE_TYPES:
        str = "AE";
        break;
    }
    return str;
}

static char *get_ucode_type_str(int ucode_type)
{
    char *str = "unknown";

    switch (ucode_type) {
    case (1 << OTX2_CPT_SE_TYPES):
        str = "SE";
        break;

    case (1 << OTX2_CPT_IE_TYPES):
        str = "IE";
        break;

    case (1 << OTX2_CPT_AE_TYPES):
        str = "AE";
        break;

    case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
        str = "SE+IPSEC";
        break;
    }
    return str;
}

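/*
 * Classify a microcode image by parsing its version string. The string
 * must contain the "ocpt-<rev>" prefix matching the PCI revision of this
 * device and an "se-", "ie-" or "ae" marker, which is cross-checked
 * against the nn field of the version number. For example (illustrative
 * only, the exact layout is defined by the firmware build), a version
 * string containing "ocpt-02" and "se-" on a revision 2 device would be
 * classified as SE microcode.
 */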
static int get_ucode_type(struct device *dev,
              struct otx2_cpt_ucode_hdr *ucode_hdr,
              int *ucode_type)
{
    struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
    char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
    char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
    struct pci_dev *pdev = cptpf->pdev;
    int i, val = 0;
    u8 nn;

    strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
    for (i = 0; i < strlen(tmp_ver_str); i++)
        tmp_ver_str[i] = tolower(tmp_ver_str[i]);

    sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
    if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
        return -EINVAL;

    nn = ucode_hdr->ver_num.nn;
    if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
        (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
         nn == OTX2_CPT_SE_UC_TYPE3))
        val |= 1 << OTX2_CPT_SE_TYPES;
    if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
        (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
         nn == OTX2_CPT_IE_UC_TYPE3))
        val |= 1 << OTX2_CPT_IE_TYPES;
    if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
        nn == OTX2_CPT_AE_UC_TYPE)
        val |= 1 << OTX2_CPT_AE_TYPES;

    *ucode_type = val;

    if (!val)
        return -EINVAL;

    return 0;
}

static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
                  dma_addr_t dma_addr, int blkaddr)
{
    return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                     CPT_AF_EXEX_UCODE_BASE(eng),
                     (u64)dma_addr, blkaddr);
}

static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
                   struct otx2_cptpf_dev *cptpf, int blkaddr)
{
    struct otx2_cpt_engs_rsvd *engs;
    dma_addr_t dma_addr;
    int i, bit, ret;

    /* Set PF number for microcode fetches */
    ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                    CPT_AF_PF_FUNC,
                    cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
    if (ret)
        return ret;

    for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
        engs = &eng_grp->engs[i];
        if (!engs->type)
            continue;

        dma_addr = engs->ucode->dma;

        /*
         * Set UCODE_BASE only for the cores which are not in use;
         * other cores should already have a valid UCODE_BASE set.
         */
        for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
            if (!eng_grp->g->eng_ref_cnt[bit]) {
                ret = __write_ucode_base(cptpf, bit, dma_addr,
                             blkaddr);
                if (ret)
                    return ret;
            }
    }
    return 0;
}

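/*
 * The cpt_*() wrappers (as opposed to the cptx_*() per-block helpers)
 * apply an operation to every CPT block present: on parts with a second
 * CPT block (cptpf->has_cpt1) the operation is applied to CPT1 first and
 * then, unconditionally, to CPT0.
 */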
static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
    struct otx2_cptpf_dev *cptpf = obj;
    int ret;

    if (cptpf->has_cpt1) {
        ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
        if (ret)
            return ret;
    }
    return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}

static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
                     struct otx2_cptpf_dev *cptpf,
                     struct otx2_cpt_bitmap bmap,
                     int blkaddr)
{
    int i, timeout = 10;
    int busy, ret;
    u64 reg = 0;

    /* Detach the cores from group */
    for_each_set_bit(i, bmap.bits, bmap.size) {
        ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                       CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
        if (ret)
            return ret;

        if (reg & (1ull << eng_grp->idx)) {
            eng_grp->g->eng_ref_cnt[i]--;
            reg &= ~(1ull << eng_grp->idx);

            ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
                            cptpf->pdev,
                            CPT_AF_EXEX_CTL2(i), reg,
                            blkaddr);
            if (ret)
                return ret;
        }
    }

    /* Wait for cores to become idle */
    do {
        busy = 0;
        usleep_range(10000, 20000);
        if (timeout-- < 0)
            return -EBUSY;

        for_each_set_bit(i, bmap.bits, bmap.size) {
            ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
                           cptpf->pdev,
                           CPT_AF_EXEX_STS(i), &reg,
                           blkaddr);
            if (ret)
                return ret;

            if (reg & 0x1) {
                busy = 1;
                break;
            }
        }
    } while (busy);

    /* Disable the cores only if they are not used anymore */
    for_each_set_bit(i, bmap.bits, bmap.size) {
        if (!eng_grp->g->eng_ref_cnt[i]) {
            ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
                            cptpf->pdev,
                            CPT_AF_EXEX_CTL(i), 0x0,
                            blkaddr);
            if (ret)
                return ret;
        }
    }

    return 0;
}

static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
                    void *obj)
{
    struct otx2_cptpf_dev *cptpf = obj;
    struct otx2_cpt_bitmap bmap;
    int ret;

    bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
    if (!bmap.size)
        return -EINVAL;

    if (cptpf->has_cpt1) {
        ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
                            BLKADDR_CPT1);
        if (ret)
            return ret;
    }
    return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
                         BLKADDR_CPT0);
}

static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
                    struct otx2_cptpf_dev *cptpf,
                    struct otx2_cpt_bitmap bmap,
                    int blkaddr)
{
    u64 reg = 0;
    int i, ret;

    /* Attach the cores to the group */
    for_each_set_bit(i, bmap.bits, bmap.size) {
        ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                       CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
        if (ret)
            return ret;

        if (!(reg & (1ull << eng_grp->idx))) {
            eng_grp->g->eng_ref_cnt[i]++;
            reg |= 1ull << eng_grp->idx;

            ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
                            cptpf->pdev,
                            CPT_AF_EXEX_CTL2(i), reg,
                            blkaddr);
            if (ret)
                return ret;
        }
    }

    /* Enable the cores */
    for_each_set_bit(i, bmap.bits, bmap.size) {
        ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                        CPT_AF_EXEX_CTL(i), 0x1,
                        blkaddr);
        if (ret)
            return ret;
    }
    return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
                       void *obj)
{
    struct otx2_cptpf_dev *cptpf = obj;
    struct otx2_cpt_bitmap bmap;
    int ret;

    bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
    if (!bmap.size)
        return -EINVAL;

    if (cptpf->has_cpt1) {
        ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
                           BLKADDR_CPT1);
        if (ret)
            return ret;
    }
    return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}

static int load_fw(struct device *dev, struct fw_info_t *fw_info,
           char *filename)
{
    struct otx2_cpt_ucode_hdr *ucode_hdr;
    struct otx2_cpt_uc_info_t *uc_info;
    int ucode_type, ucode_size;
    int ret;

    uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
    if (!uc_info)
        return -ENOMEM;

    ret = request_firmware(&uc_info->fw, filename, dev);
    if (ret)
        goto free_uc_info;

    ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
    ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
    if (ret)
        goto release_fw;

    ucode_size = ntohl(ucode_hdr->code_length) * 2;
    if (!ucode_size) {
        dev_err(dev, "Ucode %s invalid size\n", filename);
        ret = -EINVAL;
        goto release_fw;
    }

    set_ucode_filename(&uc_info->ucode, filename);
    memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
           OTX2_CPT_UCODE_VER_STR_SZ);
    uc_info->ucode.ver_num = ucode_hdr->ver_num;
    uc_info->ucode.type = ucode_type;
    uc_info->ucode.size = ucode_size;
    list_add_tail(&uc_info->list, &fw_info->ucodes);

    return 0;

release_fw:
    release_firmware(uc_info->fw);
free_uc_info:
    kfree(uc_info);
    return ret;
}

static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
{
    struct otx2_cpt_uc_info_t *curr, *temp;

    if (!fw_info)
        return;

    list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
        list_del(&curr->list);
        release_firmware(curr->fw);
        kfree(curr);
    }
}

static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
                        int ucode_type)
{
    struct otx2_cpt_uc_info_t *curr;

    list_for_each_entry(curr, &fw_info->ucodes, list) {
        if (!is_eng_type(curr->ucode.type, ucode_type))
            continue;

        return curr;
    }
    return NULL;
}

static void print_uc_info(struct fw_info_t *fw_info)
{
    struct otx2_cpt_uc_info_t *curr;

    list_for_each_entry(curr, &fw_info->ucodes, list) {
        pr_debug("Ucode filename %s\n", curr->ucode.filename);
        pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
        pr_debug("Ucode version %d.%d.%d.%d\n",
             curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
             curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
        pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
             get_ucode_type_str(curr->ucode.type));
        pr_debug("Ucode size %d\n", curr->ucode.size);
        pr_debug("Ucode ptr %p\n", curr->fw->data);
    }
}

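/*
 * Request one microcode image per engine type. The file name is built
 * from the PCI revision and the lower-cased engine type string, so a
 * revision 2 device requests "mrvl/cpt02/se.out", "mrvl/cpt02/ie.out"
 * and "mrvl/cpt02/ae.out".
 */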
static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
{
    char filename[OTX2_CPT_NAME_LENGTH];
    char eng_type[8] = {0};
    int ret, e, i;

    INIT_LIST_HEAD(&fw_info->ucodes);

    for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
        strcpy(eng_type, get_eng_type_str(e));
        for (i = 0; i < strlen(eng_type); i++)
            eng_type[i] = tolower(eng_type[i]);

        snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
             pdev->revision, eng_type);
        /* Request firmware for each engine type */
        ret = load_fw(&pdev->dev, fw_info, filename);
        if (ret)
            goto release_fw;
    }
    print_uc_info(fw_info);
    return 0;

release_fw:
    cpt_ucode_release_fw(fw_info);
    return ret;
}

struct otx2_cpt_engs_rsvd *find_engines_by_type(
                    struct otx2_cpt_eng_grp_info *eng_grp,
                    int eng_type)
{
    int i;

    for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
        if (!eng_grp->engs[i].type)
            continue;

        if (eng_grp->engs[i].type == eng_type)
            return &eng_grp->engs[i];
    }
    return NULL;
}

static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
                int eng_type)
{
    struct otx2_cpt_engs_rsvd *engs;

    engs = find_engines_by_type(eng_grp, eng_type);

    return (engs != NULL ? 1 : 0);
}

static int update_engines_avail_count(struct device *dev,
                      struct otx2_cpt_engs_available *avail,
                      struct otx2_cpt_engs_rsvd *engs, int val)
{
    switch (engs->type) {
    case OTX2_CPT_SE_TYPES:
        avail->se_cnt += val;
        break;

    case OTX2_CPT_IE_TYPES:
        avail->ie_cnt += val;
        break;

    case OTX2_CPT_AE_TYPES:
        avail->ae_cnt += val;
        break;

    default:
        dev_err(dev, "Invalid engine type %d\n", engs->type);
        return -EINVAL;
    }
    return 0;
}

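/*
 * Engines are numbered globally across types: SE engines occupy slots
 * [0, max_se_cnt), IE engines [max_se_cnt, max_se_cnt + max_ie_cnt) and
 * AE engines come after those, hence the fixed per-type offsets into
 * the engine bitmap below.
 */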
static int update_engines_offset(struct device *dev,
                 struct otx2_cpt_engs_available *avail,
                 struct otx2_cpt_engs_rsvd *engs)
{
    switch (engs->type) {
    case OTX2_CPT_SE_TYPES:
        engs->offset = 0;
        break;

    case OTX2_CPT_IE_TYPES:
        engs->offset = avail->max_se_cnt;
        break;

    case OTX2_CPT_AE_TYPES:
        engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
        break;

    default:
        dev_err(dev, "Invalid engine type %d\n", engs->type);
        return -EINVAL;
    }
    return 0;
}

static int release_engines(struct device *dev,
               struct otx2_cpt_eng_grp_info *grp)
{
    int i, ret = 0;

    for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
        if (!grp->engs[i].type)
            continue;

        if (grp->engs[i].count > 0) {
            ret = update_engines_avail_count(dev, &grp->g->avail,
                             &grp->engs[i],
                             grp->engs[i].count);
            if (ret)
                return ret;
        }

        grp->engs[i].type = 0;
        grp->engs[i].count = 0;
        grp->engs[i].offset = 0;
        grp->engs[i].ucode = NULL;
        bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
    }
    return 0;
}

static int do_reserve_engines(struct device *dev,
                  struct otx2_cpt_eng_grp_info *grp,
                  struct otx2_cpt_engines *req_engs)
{
    struct otx2_cpt_engs_rsvd *engs = NULL;
    int i, ret;

    for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
        if (!grp->engs[i].type) {
            engs = &grp->engs[i];
            break;
        }
    }

    if (!engs)
        return -ENOMEM;

    engs->type = req_engs->type;
    engs->count = req_engs->count;

    ret = update_engines_offset(dev, &grp->g->avail, engs);
    if (ret)
        return ret;

    if (engs->count > 0) {
        ret = update_engines_avail_count(dev, &grp->g->avail, engs,
                         -engs->count);
        if (ret)
            return ret;
    }

    return 0;
}

static int check_engines_availability(struct device *dev,
                      struct otx2_cpt_eng_grp_info *grp,
                      struct otx2_cpt_engines *req_eng)
{
    int avail_cnt = 0;

    switch (req_eng->type) {
    case OTX2_CPT_SE_TYPES:
        avail_cnt = grp->g->avail.se_cnt;
        break;

    case OTX2_CPT_IE_TYPES:
        avail_cnt = grp->g->avail.ie_cnt;
        break;

    case OTX2_CPT_AE_TYPES:
        avail_cnt = grp->g->avail.ae_cnt;
        break;

    default:
        dev_err(dev, "Invalid engine type %d\n", req_eng->type);
        return -EINVAL;
    }

    if (avail_cnt < req_eng->count) {
        dev_err(dev,
            "Error: available %s engines %d < requested %d\n",
            get_eng_type_str(req_eng->type),
            avail_cnt, req_eng->count);
        return -EBUSY;
    }
    return 0;
}

static int reserve_engines(struct device *dev,
               struct otx2_cpt_eng_grp_info *grp,
               struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
    int i, ret = 0;

    /* Validate that the requested number of engines is available */
    for (i = 0; i < ucodes_cnt; i++) {
        ret = check_engines_availability(dev, grp, &req_engs[i]);
        if (ret)
            return ret;
    }

    /* Reserve requested engines for this engine group */
    for (i = 0; i < ucodes_cnt; i++) {
        ret = do_reserve_engines(dev, grp, &req_engs[i]);
        if (ret)
            return ret;
    }
    return 0;
}

static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
    if (ucode->va) {
        dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
                  ucode->dma);
        ucode->va = NULL;
        ucode->dma = 0;
        ucode->size = 0;
    }

    memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
    memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
    set_ucode_filename(ucode, "");
    ucode->type = 0;
}

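/*
 * Copy the microcode body (skipping the header) into DMA-able memory and
 * convert it to the byte order the engines expect. On a little-endian
 * host the 64-bit swap followed by the 16-bit swap amounts to reversing
 * the order of the four 16-bit units within each 64-bit word while
 * leaving the bytes inside each 16-bit unit untouched.
 */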
static int copy_ucode_to_dma_mem(struct device *dev,
                 struct otx2_cpt_ucode *ucode,
                 const u8 *ucode_data)
{
    u32 i;

    /* Allocate DMAable space */
    ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
                       GFP_KERNEL);
    if (!ucode->va)
        return -ENOMEM;

    memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
           ucode->size);

    /* Byte swap 64-bit */
    for (i = 0; i < (ucode->size / 8); i++)
        cpu_to_be64s(&((u64 *)ucode->va)[i]);
    /* Ucode needs 16-bit swap */
    for (i = 0; i < (ucode->size / 2); i++)
        cpu_to_be16s(&((u16 *)ucode->va)[i]);
    return 0;
}

static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
              void *obj)
{
    int ret;

    /* Point microcode to each core of the group */
    ret = cpt_set_ucode_base(eng_grp, obj);
    if (ret)
        return ret;

    /* Attach the cores to the group and enable them */
    ret = cpt_attach_and_enable_cores(eng_grp, obj);

    return ret;
}

static int disable_eng_grp(struct device *dev,
               struct otx2_cpt_eng_grp_info *eng_grp,
               void *obj)
{
    int i, ret;

    /* Disable all engines used by this group */
    ret = cpt_detach_and_disable_cores(eng_grp, obj);
    if (ret)
        return ret;

    /* Unload ucode used by this engine group */
    ucode_unload(dev, &eng_grp->ucode[0]);
    ucode_unload(dev, &eng_grp->ucode[1]);

    for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
        if (!eng_grp->engs[i].type)
            continue;

        eng_grp->engs[i].ucode = &eng_grp->ucode[0];
    }

    /* Clear UCODE_BASE register for each engine used by this group */
    ret = cpt_set_ucode_base(eng_grp, obj);

    return ret;
}

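/*
 * Engine group mirroring: when a new group is loaded with the same
 * microcode (matching version string) as an already enabled group, the
 * new group reuses the existing group's ucode copy and shares its
 * engines instead of reserving everything again. mirror.ref_count on
 * the source group tracks how many groups mirror it.
 */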
static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
                    struct otx2_cpt_eng_grp_info *src_grp)
{
    /* Setup fields for engine group which is mirrored */
    src_grp->mirror.is_ena = false;
    src_grp->mirror.idx = 0;
    src_grp->mirror.ref_count++;

    /* Setup fields for mirroring engine group */
    dst_grp->mirror.is_ena = true;
    dst_grp->mirror.idx = src_grp->idx;
    dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
{
    struct otx2_cpt_eng_grp_info *src_grp;

    if (!dst_grp->mirror.is_ena)
        return;

    src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

    src_grp->mirror.ref_count--;
    dst_grp->mirror.is_ena = false;
    dst_grp->mirror.idx = 0;
    dst_grp->mirror.ref_count = 0;
}

static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
                  struct otx2_cpt_engines *engs, int engs_cnt)
{
    struct otx2_cpt_engs_rsvd *mirrored_engs;
    int i;

    for (i = 0; i < engs_cnt; i++) {
        mirrored_engs = find_engines_by_type(mirror_eng_grp,
                             engs[i].type);
        if (!mirrored_engs)
            continue;

        /*
         * If the mirrored group has this type of engines attached then
         * there are 3 possible scenarios:
         * 1) mirrored_engs.count == engs[i].count: all engines from
         * the mirrored engine group will be shared with this engine
         * group
         * 2) mirrored_engs.count > engs[i].count: only a subset of the
         * engines from the mirrored engine group will be shared with
         * this engine group
         * 3) mirrored_engs.count < engs[i].count: all engines from the
         * mirrored engine group will be shared with this group and
         * additional engines will be reserved for exclusive use by
         * this engine group
         */
        engs[i].count -= mirrored_engs->count;
    }
}

static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
                    struct otx2_cpt_eng_grp_info *grp)
{
    struct otx2_cpt_eng_grps *eng_grps = grp->g;
    int i;

    for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
        if (!eng_grps->grp[i].is_enabled)
            continue;
        if (eng_grps->grp[i].ucode[0].type &&
            eng_grps->grp[i].ucode[1].type)
            continue;
        if (grp->idx == i)
            continue;
        if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
                 grp->ucode[0].ver_str,
                 OTX2_CPT_UCODE_VER_STR_SZ))
            return &eng_grps->grp[i];
    }

    return NULL;
}

static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
                    struct otx2_cpt_eng_grps *eng_grps)
{
    int i;

    for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
        if (!eng_grps->grp[i].is_enabled)
            return &eng_grps->grp[i];
    }
    return NULL;
}

static int eng_grp_update_masks(struct device *dev,
                struct otx2_cpt_eng_grp_info *eng_grp)
{
    struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
    struct otx2_cpt_bitmap tmp_bmap = { {0} };
    int i, j, cnt, max_cnt;
    int bit;

    for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
        engs = &eng_grp->engs[i];
        if (!engs->type)
            continue;
        if (engs->count <= 0)
            continue;

        switch (engs->type) {
        case OTX2_CPT_SE_TYPES:
            max_cnt = eng_grp->g->avail.max_se_cnt;
            break;

        case OTX2_CPT_IE_TYPES:
            max_cnt = eng_grp->g->avail.max_ie_cnt;
            break;

        case OTX2_CPT_AE_TYPES:
            max_cnt = eng_grp->g->avail.max_ae_cnt;
            break;

        default:
            dev_err(dev, "Invalid engine type %d\n", engs->type);
            return -EINVAL;
        }

        cnt = engs->count;
        WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
        bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
        for (j = engs->offset; j < engs->offset + max_cnt; j++) {
            if (!eng_grp->g->eng_ref_cnt[j]) {
                bitmap_set(tmp_bmap.bits, j, 1);
                cnt--;
                if (!cnt)
                    break;
            }
        }

        if (cnt)
            return -ENOSPC;

        bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
    }

    if (!eng_grp->mirror.is_ena)
        return 0;

    for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
        engs = &eng_grp->engs[i];
        if (!engs->type)
            continue;

        mirrored_engs = find_engines_by_type(
                    &eng_grp->g->grp[eng_grp->mirror.idx],
                    engs->type);
        WARN_ON(!mirrored_engs && engs->count <= 0);
        if (!mirrored_engs)
            continue;

        bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
                eng_grp->g->engs_num);
        if (engs->count < 0) {
            bit = find_first_bit(mirrored_engs->bmap,
                         eng_grp->g->engs_num);
            bitmap_clear(tmp_bmap.bits, bit, -engs->count);
        }
        bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
              eng_grp->g->engs_num);
    }
    return 0;
}

static int delete_engine_group(struct device *dev,
                   struct otx2_cpt_eng_grp_info *eng_grp)
{
    int ret;

    if (!eng_grp->is_enabled)
        return 0;

    if (eng_grp->mirror.ref_count)
        return -EINVAL;

    /* Remove engine group mirroring if enabled */
    remove_eng_grp_mirroring(eng_grp);

    /* Disable engine group */
    ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
    if (ret)
        return ret;

    /* Release all engines held by this engine group */
    ret = release_engines(dev, eng_grp);
    if (ret)
        return ret;

    eng_grp->is_enabled = false;

    return 0;
}

static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
{
    struct otx2_cpt_ucode *ucode;

    if (eng_grp->mirror.is_ena)
        ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
    else
        ucode = &eng_grp->ucode[0];
    WARN_ON(!eng_grp->engs[0].type);
    eng_grp->engs[0].ucode = ucode;

    if (eng_grp->engs[1].type) {
        if (is_2nd_ucode_used(eng_grp))
            eng_grp->engs[1].ucode = &eng_grp->ucode[1];
        else
            eng_grp->engs[1].ucode = ucode;
    }
}

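/*
 * Create and enable an engine group: pick an unused group slot, copy the
 * ucode image(s) to DMA memory, set up mirroring if an enabled group
 * already runs the same ucode, reserve the requested engines, compute
 * the engine masks and finally enable the group.
 */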
static int create_engine_group(struct device *dev,
                   struct otx2_cpt_eng_grps *eng_grps,
                   struct otx2_cpt_engines *engs, int ucodes_cnt,
                   void *ucode_data[], int is_print)
{
    struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
    struct otx2_cpt_eng_grp_info *eng_grp;
    struct otx2_cpt_uc_info_t *uc_info;
    int i, ret = 0;

    /* Find engine group which is not used */
    eng_grp = find_unused_eng_grp(eng_grps);
    if (!eng_grp) {
        dev_err(dev, "Error: all engine groups are in use\n");
        return -ENOSPC;
    }
    /* Load ucode */
    for (i = 0; i < ucodes_cnt; i++) {
        uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
        eng_grp->ucode[i] = uc_info->ucode;
        ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
                        uc_info->fw->data);
        if (ret)
            goto unload_ucode;
    }

    /* Check if this group mirrors another existing engine group */
    mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
    if (mirrored_eng_grp) {
        /* Setup mirroring */
        setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

        /*
         * Update count of requested engines because some
         * of them might be shared with mirrored group
         */
        update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
    }
    ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
    if (ret)
        goto unload_ucode;

    /* Update ucode pointers used by engines */
    update_ucode_ptrs(eng_grp);

    /* Update engine masks used by this group */
    ret = eng_grp_update_masks(dev, eng_grp);
    if (ret)
        goto release_engs;

    /* Enable engine group */
    ret = enable_eng_grp(eng_grp, eng_grps->obj);
    if (ret)
        goto release_engs;

    /*
     * If this engine group mirrors another engine group
     * then we need to unload ucode as we will use ucode
     * from mirrored engine group
     */
    if (eng_grp->mirror.is_ena)
        ucode_unload(dev, &eng_grp->ucode[0]);

    eng_grp->is_enabled = true;

    if (!is_print)
        return 0;

    if (mirrored_eng_grp)
        dev_info(dev,
             "Engine_group%d: reuse microcode %s from group %d\n",
             eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
             mirrored_eng_grp->idx);
    else
        dev_info(dev, "Engine_group%d: microcode loaded %s\n",
             eng_grp->idx, eng_grp->ucode[0].ver_str);
    if (is_2nd_ucode_used(eng_grp))
        dev_info(dev, "Engine_group%d: microcode loaded %s\n",
             eng_grp->idx, eng_grp->ucode[1].ver_str);

    return 0;

release_engs:
    release_engines(dev, eng_grp);
unload_ucode:
    ucode_unload(dev, &eng_grp->ucode[0]);
    ucode_unload(dev, &eng_grp->ucode[1]);
    return ret;
}

static void delete_engine_grps(struct pci_dev *pdev,
                   struct otx2_cpt_eng_grps *eng_grps)
{
    int i;

    /* First delete all mirroring engine groups */
    for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
        if (eng_grps->grp[i].mirror.is_ena)
            delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

    /* Delete remaining engine groups */
    for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
        delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}

#define PCI_DEVID_CN10K_RNM 0xA098
#define RNM_ENTROPY_STATUS  0x8

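/*
 * Hardware errata workaround: poll RNM_ENTROPY_STATUS until its low bits
 * read 0x40 (NORMAL_CNT filled, see the comment at the call site) before
 * CPT is allowed to request random numbers from RNM.
 */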
static void rnm_to_cpt_errata_fixup(struct device *dev)
{
    struct pci_dev *pdev;
    void __iomem *base;
    int timeout = 5000;

    pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
    if (!pdev)
        return;

    base = pci_ioremap_bar(pdev, 0);
    if (!base)
        goto put_pdev;

    while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
        cpu_relax();
        udelay(1);
        timeout--;
        if (!timeout) {
            dev_warn(dev, "RNM is not producing entropy\n");
            break;
        }
    }

    iounmap(base);

put_pdev:
    pci_dev_put(pdev);
}

int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
    int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
    struct otx2_cpt_eng_grp_info *grp;
    int i;

    for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
        grp = &eng_grps->grp[i];
        if (!grp->is_enabled)
            continue;

        if (eng_type == OTX2_CPT_SE_TYPES) {
            if (eng_grp_has_eng_type(grp, eng_type) &&
                !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
                eng_grp_num = i;
                break;
            }
        } else {
            if (eng_grp_has_eng_type(grp, eng_type)) {
                eng_grp_num = i;
                break;
            }
        }
    }
    return eng_grp_num;
}

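/*
 * Create the three default engine groups: group 0 with all SE engines
 * (symmetric kernel crypto), group 1 with SE+IE engines for IPsec (the
 * SE engines are shared with group 0 via mirroring) and group 2 with
 * all AE engines (asymmetric crypto).
 */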
int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
                 struct otx2_cpt_eng_grps *eng_grps)
{
    struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
    struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
    struct pci_dev *pdev = cptpf->pdev;
    struct fw_info_t fw_info;
    u64 reg_val;
    int ret = 0;

    mutex_lock(&eng_grps->lock);
    /*
     * Don't create the engine groups if they have already been
     * created (when the user enabled VFs for the first time).
     */
    if (eng_grps->is_grps_created)
        goto unlock;

    ret = cpt_ucode_load_fw(pdev, &fw_info);
    if (ret)
        goto unlock;

    /*
     * Create engine group with SE engines for kernel
     * crypto functionality (symmetric crypto)
     */
    uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
    if (uc_info[0] == NULL) {
        dev_err(&pdev->dev, "Unable to find firmware for SE\n");
        ret = -EINVAL;
        goto release_fw;
    }
    engs[0].type = OTX2_CPT_SE_TYPES;
    engs[0].count = eng_grps->avail.max_se_cnt;

    ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
                  (void **) uc_info, 1);
    if (ret)
        goto release_fw;

    /*
     * Create engine group with SE+IE engines for IPSec.
     * All SE engines will be shared with engine group 0.
     */
    uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
    uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);

    if (uc_info[1] == NULL) {
        dev_err(&pdev->dev, "Unable to find firmware for IE\n");
        ret = -EINVAL;
        goto delete_eng_grp;
    }
    engs[0].type = OTX2_CPT_SE_TYPES;
    engs[0].count = eng_grps->avail.max_se_cnt;
    engs[1].type = OTX2_CPT_IE_TYPES;
    engs[1].count = eng_grps->avail.max_ie_cnt;

    ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
                  (void **) uc_info, 1);
    if (ret)
        goto delete_eng_grp;

    /*
     * Create engine group with AE engines for asymmetric
     * crypto functionality.
     */
    uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
    if (uc_info[0] == NULL) {
        dev_err(&pdev->dev, "Unable to find firmware for AE\n");
        ret = -EINVAL;
        goto delete_eng_grp;
    }
    engs[0].type = OTX2_CPT_AE_TYPES;
    engs[0].count = eng_grps->avail.max_ae_cnt;

    ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
                  (void **) uc_info, 1);
    if (ret)
        goto delete_eng_grp;

    eng_grps->is_grps_created = true;

    cpt_ucode_release_fw(&fw_info);

    if (is_dev_otx2(pdev))
        goto unlock;

    /*
     * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
     * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
     */
    rnm_to_cpt_errata_fixup(&pdev->dev);

    /*
     * Configure engine group mask to allow context prefetching
     * for the groups and enable random number request, to enable
     * CPT to request random numbers from RNM.
     */
    otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
                  OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
                  BLKADDR_CPT0);
    /*
     * Set interval to periodically flush dirty data for the next
     * CTX cache entry. Set the interval count to maximum supported
     * value.
     */
    otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
                  CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);

    /*
     * Set CPT_AF_DIAG[FLT_DIS], as a workaround for HW errata, when
     * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
     * encounters a fault/poison, a rare case may result in
     * unpredictable data being delivered to a CPT engine.
     */
    otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, &reg_val,
                 BLKADDR_CPT0);
    otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
                  reg_val | BIT_ULL(24), BLKADDR_CPT0);

    mutex_unlock(&eng_grps->lock);
    return 0;

delete_eng_grp:
    delete_engine_grps(pdev, eng_grps);
release_fw:
    cpt_ucode_release_fw(&fw_info);
unlock:
    mutex_unlock(&eng_grps->lock);
    return ret;
}

static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
                  int blkaddr)
{
    int timeout = 10, ret;
    int i, busy;
    u64 reg;

    /* Disengage the cores from groups */
    for (i = 0; i < total_cores; i++) {
        ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                        CPT_AF_EXEX_CTL2(i), 0x0,
                        blkaddr);
        if (ret)
            return ret;

        cptpf->eng_grps.eng_ref_cnt[i] = 0;
    }
    ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
    if (ret)
        return ret;

    /* Wait for cores to become idle */
    do {
        busy = 0;
        usleep_range(10000, 20000);
        if (timeout-- < 0)
            return -EBUSY;

        for (i = 0; i < total_cores; i++) {
            ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
                           cptpf->pdev,
                           CPT_AF_EXEX_STS(i), &reg,
                           blkaddr);
            if (ret)
                return ret;

            if (reg & 0x1) {
                busy = 1;
                break;
            }
        }
    } while (busy);

    /* Disable the cores */
    for (i = 0; i < total_cores; i++) {
        ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
                        CPT_AF_EXEX_CTL(i), 0x0,
                        blkaddr);
        if (ret)
            return ret;
    }
    return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
    int total_cores, ret;

    total_cores = cptpf->eng_grps.avail.max_se_cnt +
              cptpf->eng_grps.avail.max_ie_cnt +
              cptpf->eng_grps.avail.max_ae_cnt;

    if (cptpf->has_cpt1) {
        ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
        if (ret)
            return ret;
    }
    return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
}

void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
                   struct otx2_cpt_eng_grps *eng_grps)
{
    struct otx2_cpt_eng_grp_info *grp;
    int i, j;

    mutex_lock(&eng_grps->lock);
    delete_engine_grps(pdev, eng_grps);
    /* Release memory */
    for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
        grp = &eng_grps->grp[i];
        for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
            kfree(grp->engs[j].bmap);
            grp->engs[j].bmap = NULL;
        }
    }
    mutex_unlock(&eng_grps->lock);
}

int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
               struct otx2_cpt_eng_grps *eng_grps)
{
    struct otx2_cpt_eng_grp_info *grp;
    int i, j, ret;

    mutex_init(&eng_grps->lock);
    eng_grps->obj = pci_get_drvdata(pdev);
    eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
    eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
    eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

    eng_grps->engs_num = eng_grps->avail.max_se_cnt +
                 eng_grps->avail.max_ie_cnt +
                 eng_grps->avail.max_ae_cnt;
    if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
        dev_err(&pdev->dev,
            "Number of engines %d > than max supported %d\n",
            eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
        ret = -EINVAL;
        goto cleanup_eng_grps;
    }

    for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
        grp = &eng_grps->grp[i];
        grp->g = eng_grps;
        grp->idx = i;

        for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
            grp->engs[j].bmap =
                kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
                    sizeof(long), GFP_KERNEL);
            if (!grp->engs[j].bmap) {
                ret = -ENOMEM;
                goto cleanup_eng_grps;
            }
        }
    }
    return 0;

cleanup_eng_grps:
    otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
    return ret;
}

static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
                      struct otx2_cpt_eng_grps *eng_grps)
{
    struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
    struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
    struct fw_info_t fw_info;
    int ret;

    mutex_lock(&eng_grps->lock);
    ret = cpt_ucode_load_fw(pdev, &fw_info);
    if (ret) {
        mutex_unlock(&eng_grps->lock);
        return ret;
    }

    uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
    if (uc_info[0] == NULL) {
        dev_err(&pdev->dev, "Unable to find firmware for AE\n");
        ret = -EINVAL;
        goto release_fw;
    }
    engs[0].type = OTX2_CPT_AE_TYPES;
    engs[0].count = 2;

    ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
                  (void **) uc_info, 0);
    if (ret)
        goto release_fw;

    uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
    if (uc_info[0] == NULL) {
        dev_err(&pdev->dev, "Unable to find firmware for SE\n");
        ret = -EINVAL;
        goto delete_eng_grp;
    }
    engs[0].type = OTX2_CPT_SE_TYPES;
    engs[0].count = 2;

    ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
                  (void **) uc_info, 0);
    if (ret)
        goto delete_eng_grp;

    uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
    if (uc_info[0] == NULL) {
        dev_err(&pdev->dev, "Unable to find firmware for IE\n");
        ret = -EINVAL;
        goto delete_eng_grp;
    }
    engs[0].type = OTX2_CPT_IE_TYPES;
    engs[0].count = 2;

    ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
                  (void **) uc_info, 0);
    if (ret)
        goto delete_eng_grp;

    cpt_ucode_release_fw(&fw_info);
    mutex_unlock(&eng_grps->lock);
    return 0;

delete_eng_grp:
    delete_engine_grps(pdev, eng_grps);
release_fw:
    cpt_ucode_release_fw(&fw_info);
    mutex_unlock(&eng_grps->lock);
    return ret;
}

/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
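/*
 * For each engine type one LOAD_FVC instruction is queued to the LF and
 * its completion is busy-polled; the 64-bit result word, read back with
 * be64_to_cpup(), encodes the engine capabilities and is cached in
 * cptpf->eng_caps[].
 */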
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
    struct otx2_cptlfs_info *lfs = &cptpf->lfs;
    struct otx2_cpt_iq_command iq_cmd;
    union otx2_cpt_opcode opcode;
    union otx2_cpt_res_s *result;
    union otx2_cpt_inst_s inst;
    dma_addr_t rptr_baddr;
    struct pci_dev *pdev;
    u32 len, compl_rlen;
    int ret, etype;
    void *rptr;

    /*
     * Don't rediscover the capabilities if it was already done
     * (when the user enabled VFs for the first time).
     */
    if (cptpf->is_eng_caps_discovered)
        return 0;

    pdev = cptpf->pdev;
    /*
     * Create engine groups for each type to submit LOAD_FVC op and
     * get engine's capabilities.
     */
    ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
    if (ret)
        goto delete_grps;

    lfs->pdev = pdev;
    lfs->reg_base = cptpf->reg_base;
    lfs->mbox = &cptpf->afpf_mbox;
    lfs->blkaddr = BLKADDR_CPT0;
    ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
                  OTX2_CPT_QUEUE_HI_PRIO, 1);
    if (ret)
        goto delete_grps;

    compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
    len = compl_rlen + LOADFVC_RLEN;

    result = kzalloc(len, GFP_KERNEL);
    if (!result) {
        ret = -ENOMEM;
        goto lf_cleanup;
    }
    rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
                    DMA_BIDIRECTIONAL);
    if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
        dev_err(&pdev->dev, "DMA mapping failed\n");
        ret = -EFAULT;
        goto free_result;
    }
    rptr = (u8 *)result + compl_rlen;

    /* Fill in the command */
    opcode.s.major = LOADFVC_MAJOR_OP;
    opcode.s.minor = LOADFVC_MINOR_OP;

    iq_cmd.cmd.u = 0;
    iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

    /* 64-bit swap for microcode data reads, not needed for addresses */
    cpu_to_be64s(&iq_cmd.cmd.u);
    iq_cmd.dptr = 0;
    iq_cmd.rptr = rptr_baddr + compl_rlen;
    iq_cmd.cptr.u = 0;

    for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
        result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
        iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
                             etype);
        otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
        lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);

        while (lfs->ops->cpt_get_compcode(result) ==
                        OTX2_CPT_COMPLETION_CODE_INIT)
            cpu_relax();

        cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
    }
    dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
    cptpf->is_eng_caps_discovered = true;

free_result:
    kfree(result);
lf_cleanup:
    otx2_cptlf_shutdown(&cptpf->lfs);
delete_grps:
    delete_engine_grps(pdev, &cptpf->eng_grps);

    return ret;
}

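/*
 * Create a custom engine group from a devlink parameter string. The
 * string is a ';'-separated list of engine specifications ("se:<count>",
 * "ie:<count>", "ae:<count>") followed by one or two microcode file
 * names, for example (file names illustrative):
 *
 *     "se:10;se.out"
 *     "se:10;ie:8;se.out;ie.out"
 *
 * At most two engine types may be combined and only the SE+IE
 * combination is accepted.
 */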
int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
                   struct devlink_param_gset_ctx *ctx)
{
    struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
    struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
    struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
    char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
    char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
    struct device *dev = &cptpf->pdev->dev;
    char *start, *val, *err_msg, *tmp;
    int grp_idx = 0, ret = -EINVAL;
    bool has_se, has_ie, has_ae;
    struct fw_info_t fw_info;
    int ucode_idx = 0;

    if (!eng_grps->is_grps_created) {
        dev_err(dev, "Not allowed before creating the default groups\n");
        return -EINVAL;
    }
    err_msg = "Invalid engine group format";
    strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
    start = tmp_buf;

    has_se = has_ie = has_ae = false;

    for (;;) {
        val = strsep(&start, ";");
        if (!val)
            break;
        val = strim(val);
        if (!*val)
            continue;

        if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
            if (has_se || ucode_idx)
                goto err_print;
            tmp = strsep(&val, ":");
            if (!tmp)
                goto err_print;
            tmp = strim(tmp);
            if (!val)
                goto err_print;
            if (strlen(tmp) != 2)
                goto err_print;
            if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
                goto err_print;
            engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
            has_se = true;
        } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
            if (has_ae || ucode_idx)
                goto err_print;
            tmp = strsep(&val, ":");
            if (!tmp)
                goto err_print;
            tmp = strim(tmp);
            if (!val)
                goto err_print;
            if (strlen(tmp) != 2)
                goto err_print;
            if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
                goto err_print;
            engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
            has_ae = true;
        } else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
            if (has_ie || ucode_idx)
                goto err_print;
            tmp = strsep(&val, ":");
            if (!tmp)
                goto err_print;
            tmp = strim(tmp);
            if (!val)
                goto err_print;
            if (strlen(tmp) != 2)
                goto err_print;
            if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
                goto err_print;
            engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
            has_ie = true;
        } else {
            if (ucode_idx > 1)
                goto err_print;
            if (!strlen(val))
                goto err_print;
            if (strnstr(val, " ", strlen(val)))
                goto err_print;
            ucode_filename[ucode_idx++] = val;
        }
    }

    /* Validate input parameters */
    if (!(grp_idx && ucode_idx))
        goto err_print;

    if (ucode_idx > 1 && grp_idx < 2)
        goto err_print;

    if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
        err_msg = "Error: max 2 engine types can be attached";
        goto err_print;
    }

    if (grp_idx > 1) {
        if ((engs[0].type + engs[1].type) !=
            (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
            err_msg = "Only combination of SE+IE engines is allowed";
            goto err_print;
        }
        /* Keep SE engines at zero index */
        if (engs[1].type == OTX2_CPT_SE_TYPES)
            swap(engs[0], engs[1]);
    }
    mutex_lock(&eng_grps->lock);

    if (cptpf->enabled_vfs) {
        dev_err(dev, "Disable VFs before modifying engine groups\n");
        ret = -EACCES;
        goto err_unlock;
    }
    INIT_LIST_HEAD(&fw_info.ucodes);
    ret = load_fw(dev, &fw_info, ucode_filename[0]);
    if (ret) {
        dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
        goto err_unlock;
    }
    if (ucode_idx > 1) {
        ret = load_fw(dev, &fw_info, ucode_filename[1]);
        if (ret) {
            dev_err(dev, "Unable to load firmware %s\n",
                ucode_filename[1]);
            goto release_fw;
        }
    }
    uc_info[0] = get_ucode(&fw_info, engs[0].type);
    if (uc_info[0] == NULL) {
        dev_err(dev, "Unable to find firmware for %s\n",
            get_eng_type_str(engs[0].type));
        ret = -EINVAL;
        goto release_fw;
    }
    if (ucode_idx > 1) {
        uc_info[1] = get_ucode(&fw_info, engs[1].type);
        if (uc_info[1] == NULL) {
            dev_err(dev, "Unable to find firmware for %s\n",
                get_eng_type_str(engs[1].type));
            ret = -EINVAL;
            goto release_fw;
        }
    }
    ret = create_engine_group(dev, eng_grps, engs, grp_idx,
                  (void **)uc_info, 1);

release_fw:
    cpt_ucode_release_fw(&fw_info);
err_unlock:
    mutex_unlock(&eng_grps->lock);
    return ret;
err_print:
    dev_err(dev, "%s\n", err_msg);
    return ret;
}

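/*
 * Delete a custom engine group named by a devlink parameter string of
 * the form "egrp:<group number>", e.g. "egrp:3".
 */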
int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
                   struct devlink_param_gset_ctx *ctx)
{
    struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
    struct device *dev = &cptpf->pdev->dev;
    char *tmp, *err_msg;
    int egrp;
    int ret;

    err_msg = "Invalid input string format (ex: egrp:0)";
    if (strncasecmp(ctx->val.vstr, "egrp", 4))
        goto err_print;
    tmp = ctx->val.vstr;
    strsep(&tmp, ":");
    if (!tmp)
        goto err_print;
    if (kstrtoint(tmp, 10, &egrp))
        goto err_print;

    if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
        dev_err(dev, "Invalid engine group %d\n", egrp);
        return -EINVAL;
    }
    if (!eng_grps->grp[egrp].is_enabled) {
        dev_err(dev, "Error: engine_group%d is not configured\n", egrp);
        return -EINVAL;
    }
    mutex_lock(&eng_grps->lock);
    ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
    mutex_unlock(&eng_grps->lock);

    return ret;

err_print:
    dev_err(dev, "%s\n", err_msg);
    return -EINVAL;
}

static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
              int size, int idx)
{
    struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
    struct otx2_cpt_engs_rsvd *engs;
    int len, i;

    buf[0] = '\0';
    for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
        engs = &eng_grp->engs[i];
        if (!engs->type)
            continue;
        if (idx != -1 && idx != i)
            continue;

        if (eng_grp->mirror.is_ena)
            mirrored_engs = find_engines_by_type(
                &eng_grp->g->grp[eng_grp->mirror.idx],
                engs->type);
        if (i > 0 && idx == -1) {
            len = strlen(buf);
            scnprintf(buf + len, size - len, ", ");
        }

        len = strlen(buf);
        scnprintf(buf + len, size - len, "%d %s ",
              mirrored_engs ? engs->count + mirrored_engs->count :
                      engs->count,
              get_eng_type_str(engs->type));
        if (mirrored_engs) {
            len = strlen(buf);
            scnprintf(buf + len, size - len,
                  "(%d shared with engine_group%d) ",
                  engs->count <= 0 ?
                      engs->count + mirrored_engs->count :
                      mirrored_engs->count,
                  eng_grp->mirror.idx);
        }
    }
}

void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
{
    struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
    struct otx2_cpt_eng_grp_info *mirrored_grp;
    char engs_info[2 * OTX2_CPT_NAME_LENGTH];
    struct otx2_cpt_eng_grp_info *grp;
    struct otx2_cpt_engs_rsvd *engs;
    int i, j;

    pr_debug("Engine groups global info");
    pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt,
         eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt);
    pr_debug("free SE %d", eng_grps->avail.se_cnt);
    pr_debug("free IE %d", eng_grps->avail.ie_cnt);
    pr_debug("free AE %d", eng_grps->avail.ae_cnt);

    for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
        grp = &eng_grps->grp[i];
        pr_debug("engine_group%d, state %s", i,
             grp->is_enabled ? "enabled" : "disabled");
        if (grp->is_enabled) {
            mirrored_grp = &eng_grps->grp[grp->mirror.idx];
            pr_debug("Ucode0 filename %s, version %s",
                 grp->mirror.is_ena ?
                     mirrored_grp->ucode[0].filename :
                     grp->ucode[0].filename,
                 grp->mirror.is_ena ?
                     mirrored_grp->ucode[0].ver_str :
                     grp->ucode[0].ver_str);
            if (is_2nd_ucode_used(grp))
                pr_debug("Ucode1 filename %s, version %s",
                     grp->ucode[1].filename,
                     grp->ucode[1].ver_str);
        }

        for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
            engs = &grp->engs[j];
            if (engs->type) {
                u32 mask[5] = { };

                get_engs_info(grp, engs_info,
                          2 * OTX2_CPT_NAME_LENGTH, j);
                pr_debug("Slot%d: %s", j, engs_info);
                bitmap_to_arr32(mask, engs->bmap,
                        eng_grps->engs_num);
                if (is_dev_otx2(cptpf->pdev))
                    pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x",
                         mask[3], mask[2], mask[1],
                         mask[0]);
                else
                    pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x",
                         mask[4], mask[3], mask[2], mask[1],
                         mask[0]);
            }
        }
    }
}