0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/ctype.h>
0012 #include <linux/firmware.h>
0013 #include "otx_cpt_common.h"
0014 #include "otx_cptpf_ucode.h"
0015 #include "otx_cptpf.h"
0016
0017 #define CSR_DELAY 30
0018
0019 #define TAR_MAGIC "ustar"
0020 #define TAR_MAGIC_LEN 6
0021 #define TAR_BLOCK_LEN 512
0022 #define REGTYPE '0'
0023 #define AREGTYPE '\0'
0024
0025
/*
 * POSIX ustar tar file header, occupying the first part of a 512-byte
 * tar block.  Numeric fields (mode, uid, gid, size, mtime, chksum) are
 * stored as ASCII octal strings.
 */
struct tar_hdr_t {
	char name[100];
	char mode[8];
	char uid[8];
	char gid[8];
	char size[12];		/* file size in bytes, ASCII octal */
	char mtime[12];
	char chksum[8];
	char typeflag;		/* REGTYPE/AREGTYPE for regular files */
	char linkname[100];
	char magic[6];		/* "ustar" */
	char version[2];
	char uname[32];
	char gname[32];
	char devmajor[8];
	char devminor[8];
	char prefix[155];
};
0044
/* One 512-byte tar block: either a file header or raw file data. */
struct tar_blk_t {
	union {
		struct tar_hdr_t hdr;
		char block[TAR_BLOCK_LEN];
	};
};
0051
/*
 * In-memory view of a loaded microcode tar archive: the firmware blob
 * plus the list of microcode images (tar_ucode_info_t) found inside it.
 */
struct tar_arch_info_t {
	struct list_head ucodes;
	const struct firmware *fw;
};
0056
/*
 * Build the bitmap of all engine cores reserved for an engine group.
 *
 * The per-slot engine bitmaps of every used slot in @eng_grp are OR-ed
 * together.  On error (engine count exceeds what this silicon supports,
 * or no engines reserved at all) the returned bitmap has size == 0,
 * which callers treat as failure.
 */
static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
					struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}
0085
/*
 * Test whether the bit for @eng_type is set in the type mask @val.
 * Returns the (non-zero) masked bit itself when set, 0 otherwise.
 */
static int is_eng_type(int val, int eng_type)
{
	const int type_bit = 1 << eng_type;

	return val & type_bit;
}
0090
/*
 * Return non-zero if this device advertises support for engines of the
 * given type in its eng_types_supported bitmask.
 */
static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
				 int eng_type)
{
	return is_eng_type(eng_grps->eng_types_supported, eng_type);
}
0096
0097 static void set_ucode_filename(struct otx_cpt_ucode *ucode,
0098 const char *filename)
0099 {
0100 strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
0101 }
0102
0103 static char *get_eng_type_str(int eng_type)
0104 {
0105 char *str = "unknown";
0106
0107 switch (eng_type) {
0108 case OTX_CPT_SE_TYPES:
0109 str = "SE";
0110 break;
0111
0112 case OTX_CPT_AE_TYPES:
0113 str = "AE";
0114 break;
0115 }
0116 return str;
0117 }
0118
0119 static char *get_ucode_type_str(int ucode_type)
0120 {
0121 char *str = "unknown";
0122
0123 switch (ucode_type) {
0124 case (1 << OTX_CPT_SE_TYPES):
0125 str = "SE";
0126 break;
0127
0128 case (1 << OTX_CPT_AE_TYPES):
0129 str = "AE";
0130 break;
0131 }
0132 return str;
0133 }
0134
0135 static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
0136 {
0137 char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
0138 u32 i, val = 0;
0139 u8 nn;
0140
0141 strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
0142 for (i = 0; i < strlen(tmp_ver_str); i++)
0143 tmp_ver_str[i] = tolower(tmp_ver_str[i]);
0144
0145 nn = ucode_hdr->ver_num.nn;
0146 if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
0147 (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
0148 nn == OTX_CPT_SE_UC_TYPE3))
0149 val |= 1 << OTX_CPT_SE_TYPES;
0150 if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
0151 nn == OTX_CPT_AE_UC_TYPE)
0152 val |= 1 << OTX_CPT_AE_TYPES;
0153
0154 *ucode_type = val;
0155
0156 if (!val)
0157 return -EINVAL;
0158 if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
0159 is_eng_type(val, OTX_CPT_SE_TYPES))
0160 return -EINVAL;
0161 return 0;
0162 }
0163
/* Return 1 if the first @size bytes at @ptr are all zero, else 0. */
static int is_mem_zero(const char *ptr, int size)
{
	const char *end = ptr + size;

	while (ptr < end) {
		if (*ptr++)
			return 0;
	}
	return 1;
}
0174
/*
 * Program the microcode DMA base address into every engine of a group.
 *
 * When the group mirrors another group, the mirrored group's (already
 * loaded) microcode address is used instead of this group's own copy.
 */
static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	dma_addr_t dma_addr;
	struct otx_cpt_bitmap bmap;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (eng_grp->mirror.is_ena)
		dma_addr =
		       eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
	else
		dma_addr = eng_grp->ucode[0].align_dma;

	/*
	 * Set UCODE_BASE only for cores not referenced by another group;
	 * shared (mirrored) engines keep their already-programmed base.
	 */
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			writeq((u64) dma_addr, cpt->reg_base +
				OTX_CPT_PF_ENGX_UCODE_BASE(i));
	return 0;
}
0202
/*
 * Detach a group's engine cores and disable the ones left unused.
 *
 * Sequence:
 *  1. Clear this group's bits in its PF_GX_EN register, dropping the
 *     per-engine reference counts.
 *  2. Poll PF_EXEC_BUSY (10-20 ms per try, up to ~10 tries) until none
 *     of the group's engines is still executing; -EBUSY on timeout.
 *  3. Clear PF_EXE_CTL bits only for engines whose reference count
 *     reached zero, i.e. engines no other group still uses.
 */
static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap = { {0} };
	int timeout = 10;
	int i, busy;
	u64 reg;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Detach the cores from this engine group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (reg & (1ull << i)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << i);
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Wait for the cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
		for_each_set_bit(i, bmap.bits, bmap.size)
			if (reg & (1ull << i)) {
				busy = 1;
				break;
			}
	} while (busy);

	/* Disable only the cores no other group references anymore */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			reg &= ~(1ull << i);
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}
0250
/*
 * Attach a group's engine cores (set its PF_GX_EN bits, bumping the
 * per-engine reference counts for newly attached cores) and then
 * enable all of the group's cores in PF_EXE_CTL.
 */
static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap;
	u64 reg;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Attach the cores to this engine group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!(reg & (1ull << i))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << i;
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Enable all cores used by this engine group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		reg |= 1ull << i;
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}
0281
/*
 * Examine one file extracted from the microcode tar archive.
 *
 * Files too small to hold a microcode header, or whose header does not
 * identify a known microcode type, are silently skipped (return 0) -
 * the archive may legitimately contain other files.  A recognized
 * microcode with an inconsistent length is an error (-EINVAL).
 *
 * On success a tar_ucode_info_t describing the image (still pointing
 * into the firmware blob) is appended to tar_arch->ucodes.
 */
static int process_tar_file(struct device *dev,
			    struct tar_arch_info_t *tar_arch, char *filename,
			    const u8 *data, u32 size)
{
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_ucode_hdr *ucode_hdr;
	int ucode_type, ucode_size;

	/*
	 * If size is less than microcode header size then don't report
	 * an error because it might not be a microcode file - just skip.
	 */
	if (size < sizeof(struct otx_cpt_ucode_hdr))
		return 0;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
	/*
	 * If the microcode type can't be detected don't report an error
	 * either - the file simply isn't a microcode image.
	 */
	if (get_ucode_type(ucode_hdr, &ucode_type))
		return 0;

	/* code_length is counted in 2-byte units */
	ucode_size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode_size || (size < round_up(ucode_size, 16) +
		sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		return -EINVAL;
	}

	tar_info = kzalloc(sizeof(struct tar_ucode_info_t), GFP_KERNEL);
	if (!tar_info)
		return -ENOMEM;

	tar_info->ucode_ptr = data;
	set_ucode_filename(&tar_info->ucode, filename);
	memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX_CPT_UCODE_VER_STR_SZ);
	tar_info->ucode.ver_num = ucode_hdr->ver_num;
	tar_info->ucode.type = ucode_type;
	tar_info->ucode.size = ucode_size;
	list_add_tail(&tar_info->list, &tar_arch->ucodes);

	return 0;
}
0328
/*
 * Free a tar archive descriptor: all microcode list entries, the
 * firmware blob (if loaded) and the descriptor itself.
 * Safe to call with NULL.
 */
static void release_tar_archive(struct tar_arch_info_t *tar_arch)
{
	struct tar_ucode_info_t *curr, *temp;

	if (!tar_arch)
		return;

	list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
		list_del(&curr->list);
		kfree(curr);
	}

	if (tar_arch->fw)
		release_firmware(tar_arch->fw);
	kfree(tar_arch);
}
0345
/*
 * Pick the best-suited microcode of @ucode_type from a parsed archive.
 *
 * For AE the first matching image wins.  For SE a currently selected
 * TYPE2 image is replaced by any later match, and a selected TYPE3
 * image is replaced by a TYPE1 one - i.e. the effective preference
 * order is TYPE1 > TYPE3 > TYPE2.
 */
static struct tar_ucode_info_t *get_uc_from_tar_archive(
					struct tar_arch_info_t *tar_arch,
					int ucode_type)
{
	struct tar_ucode_info_t *curr, *uc_found = NULL;

	list_for_each_entry(curr, &tar_arch->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		if (!uc_found) {
			uc_found = curr;
			continue;
		}

		switch (ucode_type) {
		case OTX_CPT_AE_TYPES:
			break;

		case OTX_CPT_SE_TYPES:
			if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
			    (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
			     && curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
				uc_found = curr;
			break;
		}
	}

	return uc_found;
}
0376
0377 static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
0378 char *tar_filename)
0379 {
0380 struct tar_ucode_info_t *curr;
0381
0382 pr_debug("Tar archive filename %s\n", tar_filename);
0383 pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
0384 tar_arch->fw->size);
0385 list_for_each_entry(curr, &tar_arch->ucodes, list) {
0386 pr_debug("Ucode filename %s\n", curr->ucode.filename);
0387 pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
0388 pr_debug("Ucode version %d.%d.%d.%d\n",
0389 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
0390 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
0391 pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
0392 get_ucode_type_str(curr->ucode.type));
0393 pr_debug("Ucode size %d\n", curr->ucode.size);
0394 pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
0395 }
0396 }
0397
/*
 * Load a tar archive of microcode images via request_firmware() and
 * build the list of microcode files it contains.
 *
 * The archive is parsed in place: the first header is validated against
 * the "ustar" magic, each header's octal size field is converted, and
 * each regular file is handed to process_tar_file().  Parsing stops at
 * the two all-zero blocks that terminate a tar archive.
 *
 * Returns the archive descriptor, or NULL on any error (everything
 * allocated so far is released via release_tar_archive()).
 */
static struct tar_arch_info_t *load_tar_archive(struct device *dev,
						char *tar_filename)
{
	struct tar_arch_info_t *tar_arch = NULL;
	struct tar_blk_t *tar_blk;
	unsigned int cur_size;
	size_t tar_offs = 0;
	size_t tar_size;
	int ret;

	tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
	if (!tar_arch)
		return NULL;

	INIT_LIST_HEAD(&tar_arch->ucodes);

	/* Load the tar archive as a firmware blob */
	ret = request_firmware(&tar_arch->fw, tar_filename, dev);
	if (ret)
		goto release_tar_arch;

	if (tar_arch->fw->size < TAR_BLOCK_LEN) {
		dev_err(dev, "Invalid tar archive %s\n", tar_filename);
		goto release_tar_arch;
	}

	tar_size = tar_arch->fw->size;
	tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
	/* Compare only the 5 magic characters ("ustar"), not the NUL */
	if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
		dev_err(dev, "Unsupported format of tar archive %s\n",
			tar_filename);
		goto release_tar_arch;
	}

	while (1) {
		/*
		 * Convert the ASCII-octal size field.
		 * NOTE(review): kstrtouint() needs a NUL/newline terminated
		 * string; this assumes the 12-byte size field is terminated,
		 * as produced by common tar tools - confirm for all inputs.
		 */
		ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
		if (ret)
			goto release_tar_arch;

		/* File data plus the next header must fit in the blob */
		if (tar_offs + cur_size > tar_size ||
		    tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		tar_offs += TAR_BLOCK_LEN;
		if (tar_blk->hdr.typeflag == REGTYPE ||
		    tar_blk->hdr.typeflag == AREGTYPE) {
			ret = process_tar_file(dev, tar_arch,
					       tar_blk->hdr.name,
					       &tar_arch->fw->data[tar_offs],
					       cur_size);
			if (ret)
				goto release_tar_arch;
		}

		/* Skip the file data, rounded up to whole 512-byte blocks */
		tar_offs += (cur_size/TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
		if (cur_size % TAR_BLOCK_LEN)
			tar_offs += TAR_BLOCK_LEN;

		/* There must be room for the two terminating blocks */
		if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		/* Two all-zero blocks mark the end of the archive */
		if (is_mem_zero(&tar_arch->fw->data[tar_offs],
		    2*TAR_BLOCK_LEN))
			break;

		/* Next block is the following file's header */
		tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
	}

	print_tar_dbg_info(tar_arch, tar_filename);
	return tar_arch;
release_tar_arch:
	release_tar_archive(tar_arch);
	return NULL;
}
0479
0480 static struct otx_cpt_engs_rsvd *find_engines_by_type(
0481 struct otx_cpt_eng_grp_info *eng_grp,
0482 int eng_type)
0483 {
0484 int i;
0485
0486 for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
0487 if (!eng_grp->engs[i].type)
0488 continue;
0489
0490 if (eng_grp->engs[i].type == eng_type)
0491 return &eng_grp->engs[i];
0492 }
0493 return NULL;
0494 }
0495
/*
 * Return non-zero if the loaded microcode image supports engines of
 * the given type (its type bitmask has the corresponding bit set).
 */
int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
{
	return is_eng_type(ucode->type, eng_type);
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
0501
0502 int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
0503 int eng_type)
0504 {
0505 struct otx_cpt_engs_rsvd *engs;
0506
0507 engs = find_engines_by_type(eng_grp, eng_type);
0508
0509 return (engs != NULL ? 1 : 0);
0510 }
0511 EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);
0512
/*
 * Format the group's microcode version string into @buf, annotating
 * it when the microcode is shared with (mirrored from) another group.
 */
static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
			     char *buf, int size)
{
	if (eng_grp->mirror.is_ena) {
		scnprintf(buf, size, "%s (shared with engine_group%d)",
			  eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
			  eng_grp->mirror.idx);
	} else {
		scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
	}
}
0524
/*
 * Format the group's engine usage into @buf.
 *
 * @idx selects a single engine slot, or -1 for all slots (entries are
 * then comma separated).  For mirrored groups the count shown is the
 * total of own plus shared engines, followed by a note of how many are
 * shared with the mirrored group.
 */
static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
			    char *buf, int size, int idx)
{
	struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
	struct otx_cpt_engs_rsvd *engs;
	int len, i;

	buf[0] = '\0';
	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (idx != -1 && idx != i)
			continue;

		if (eng_grp->mirror.is_ena)
			mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		if (i > 0 && idx == -1) {
			len = strlen(buf);
			scnprintf(buf+len, size-len, ", ");
		}

		len = strlen(buf);
		scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
			  engs->count + mirrored_engs->count : engs->count,
			  get_eng_type_str(engs->type));
		if (mirrored_engs) {
			len = strlen(buf);
			/*
			 * count <= 0 means all (or more than all) engines of
			 * this type come from the mirrored group.
			 */
			scnprintf(buf+len, size-len,
				  "(%d shared with engine_group%d) ",
				  engs->count <= 0 ? engs->count +
				  mirrored_engs->count : mirrored_engs->count,
				  eng_grp->mirror.idx);
		}
	}
}
0563
/* Dump a loaded microcode's metadata and DMA addresses via pr_debug(). */
static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
	pr_debug("Ucode info\n");
	pr_debug("Ucode version string %s\n", ucode->ver_str);
	pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
		 ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
	pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
	pr_debug("Ucode size %d\n", ucode->size);
	pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
	pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}
0575
0576 static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
0577 struct device *dev, char *buf, int size)
0578 {
0579 struct otx_cpt_bitmap bmap;
0580 u32 mask[2];
0581
0582 bmap = get_cores_bmap(dev, eng_grp);
0583 if (!bmap.size) {
0584 scnprintf(buf, size, "unknown");
0585 return;
0586 }
0587 bitmap_to_arr32(mask, bmap.bits, bmap.size);
0588 scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
0589 }
0590
0591
0592 static void print_dbg_info(struct device *dev,
0593 struct otx_cpt_eng_grps *eng_grps)
0594 {
0595 char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
0596 struct otx_cpt_eng_grp_info *mirrored_grp;
0597 char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
0598 struct otx_cpt_eng_grp_info *grp;
0599 struct otx_cpt_engs_rsvd *engs;
0600 u32 mask[4];
0601 int i, j;
0602
0603 pr_debug("Engine groups global info\n");
0604 pr_debug("max SE %d, max AE %d\n",
0605 eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
0606 pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
0607 pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);
0608
0609 for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
0610 grp = &eng_grps->grp[i];
0611 pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
0612 "enabled" : "disabled");
0613 if (grp->is_enabled) {
0614 mirrored_grp = &eng_grps->grp[grp->mirror.idx];
0615 pr_debug("Ucode0 filename %s, version %s\n",
0616 grp->mirror.is_ena ?
0617 mirrored_grp->ucode[0].filename :
0618 grp->ucode[0].filename,
0619 grp->mirror.is_ena ?
0620 mirrored_grp->ucode[0].ver_str :
0621 grp->ucode[0].ver_str);
0622 }
0623
0624 for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
0625 engs = &grp->engs[j];
0626 if (engs->type) {
0627 print_engs_info(grp, engs_info,
0628 2*OTX_CPT_UCODE_NAME_LENGTH, j);
0629 pr_debug("Slot%d: %s\n", j, engs_info);
0630 bitmap_to_arr32(mask, engs->bmap,
0631 eng_grps->engs_num);
0632 pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
0633 mask[3], mask[2], mask[1], mask[0]);
0634 } else
0635 pr_debug("Slot%d not used\n", j);
0636 }
0637 if (grp->is_enabled) {
0638 cpt_print_engines_mask(grp, dev, engs_mask,
0639 OTX_CPT_UCODE_NAME_LENGTH);
0640 pr_debug("Cmask: %s\n", engs_mask);
0641 }
0642 }
0643 }
0644
0645 static int update_engines_avail_count(struct device *dev,
0646 struct otx_cpt_engs_available *avail,
0647 struct otx_cpt_engs_rsvd *engs, int val)
0648 {
0649 switch (engs->type) {
0650 case OTX_CPT_SE_TYPES:
0651 avail->se_cnt += val;
0652 break;
0653
0654 case OTX_CPT_AE_TYPES:
0655 avail->ae_cnt += val;
0656 break;
0657
0658 default:
0659 dev_err(dev, "Invalid engine type %d\n", engs->type);
0660 return -EINVAL;
0661 }
0662
0663 return 0;
0664 }
0665
0666 static int update_engines_offset(struct device *dev,
0667 struct otx_cpt_engs_available *avail,
0668 struct otx_cpt_engs_rsvd *engs)
0669 {
0670 switch (engs->type) {
0671 case OTX_CPT_SE_TYPES:
0672 engs->offset = 0;
0673 break;
0674
0675 case OTX_CPT_AE_TYPES:
0676 engs->offset = avail->max_se_cnt;
0677 break;
0678
0679 default:
0680 dev_err(dev, "Invalid engine type %d\n", engs->type);
0681 return -EINVAL;
0682 }
0683
0684 return 0;
0685 }
0686
/*
 * Return all engines held by a group to the free pool and reset every
 * engine slot of the group (type, count, offset, ucode pointer, bitmap).
 */
static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		/* Only positive counts were taken from the free pool */
		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}

	return 0;
}
0712
/*
 * Reserve engines of one type for a group: take the first free engine
 * slot, fill in type/count, compute the core offset and subtract the
 * engines from the free pool (only when count is positive; count may
 * be zero or negative for groups reusing mirrored engines).
 *
 * Returns -ENOMEM when the group has no free engine slot.
 */
static int do_reserve_engines(struct device *dev,
			      struct otx_cpt_eng_grp_info *grp,
			      struct otx_cpt_engines *req_engs)
{
	struct otx_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}
0746
0747 static int check_engines_availability(struct device *dev,
0748 struct otx_cpt_eng_grp_info *grp,
0749 struct otx_cpt_engines *req_eng)
0750 {
0751 int avail_cnt = 0;
0752
0753 switch (req_eng->type) {
0754 case OTX_CPT_SE_TYPES:
0755 avail_cnt = grp->g->avail.se_cnt;
0756 break;
0757
0758 case OTX_CPT_AE_TYPES:
0759 avail_cnt = grp->g->avail.ae_cnt;
0760 break;
0761
0762 default:
0763 dev_err(dev, "Invalid engine type %d\n", req_eng->type);
0764 return -EINVAL;
0765 }
0766
0767 if (avail_cnt < req_eng->count) {
0768 dev_err(dev,
0769 "Error available %s engines %d < than requested %d\n",
0770 get_eng_type_str(req_eng->type),
0771 avail_cnt, req_eng->count);
0772 return -EBUSY;
0773 }
0774
0775 return 0;
0776 }
0777
/*
 * Reserve all requested engines for a group in two passes: first check
 * that every request can be satisfied, then perform the reservations.
 * The check pass avoids starting to reserve for a request set that is
 * known to fail.
 */
static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
			   struct otx_cpt_engines *req_engs, int req_cnt)
{
	int i, ret;

	/* Validate if a number of requested engines are available */
	for (i = 0; i < req_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve requested engines for this engine group */
	for (i = 0; i < req_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}
0798
/*
 * sysfs "show" handler for an engine group's info attribute: prints
 * the microcode version, engine counts and engine mask, under the
 * global engine-groups lock.
 */
static ssize_t eng_grp_info_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	char ucode_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *eng_grp;
	int ret;

	/* The attribute is embedded in the group, so recover the group */
	eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
	mutex_lock(&eng_grp->g->lock);

	print_engs_info(eng_grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, -1);
	print_ucode_info(eng_grp, ucode_info, 2*OTX_CPT_UCODE_NAME_LENGTH);
	cpt_print_engines_mask(eng_grp, dev, engs_mask,
			       OTX_CPT_UCODE_NAME_LENGTH);
	ret = scnprintf(buf, PAGE_SIZE,
			"Microcode : %s\nEngines: %s\nEngines mask: %s\n",
			ucode_info, engs_info, engs_mask);

	mutex_unlock(&eng_grp->g->lock);
	return ret;
}
0823
/*
 * Create the read-only (0440) sysfs attribute that exposes this engine
 * group's info via eng_grp_info_show().
 */
static int create_sysfs_eng_grps_info(struct device *dev,
				      struct otx_cpt_eng_grp_info *eng_grp)
{
	eng_grp->info_attr.show = eng_grp_info_show;
	eng_grp->info_attr.store = NULL;
	eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
	eng_grp->info_attr.attr.mode = 0440;
	sysfs_attr_init(&eng_grp->info_attr.attr);
	return device_create_file(dev, &eng_grp->info_attr);
}
0834
/*
 * Free a microcode's DMA buffer (if one was allocated) and reset all
 * of its bookkeeping fields so the slot can be reused.
 */
static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
				  ucode->va, ucode->dma);
		ucode->va = NULL;
		ucode->align_va = NULL;
		ucode->dma = 0;
		ucode->align_dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}
0852
/*
 * Allocate a coherent DMA buffer for the microcode, copy the code
 * (which follows the header in @ucode_data) into it at the required
 * alignment, and byte-swap it: each 8-byte word to big endian, then
 * each 2-byte word - presumably the load format the CPT engines
 * expect (TODO confirm against the hardware spec).
 */
static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space, padded so it can be aligned afterwards */
	ucode->va = dma_alloc_coherent(dev, ucode->size +
				       OTX_CPT_UCODE_ALIGNMENT,
				       &ucode->dma, GFP_KERNEL);
	if (!ucode->va) {
		dev_err(dev, "Unable to allocate space for microcode\n");
		return -ENOMEM;
	}
	ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
	ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);

	/* Copy only the code; the header is skipped */
	memcpy((void *) ucode->align_va, (void *) ucode_data +
	       sizeof(struct otx_cpt_ucode_hdr), ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		((__be64 *)ucode->align_va)[i] =
				cpu_to_be64(((u64 *)ucode->align_va)[i]);
	/* Ucode needs 16-bit swap as well */
	for (i = 0; i < (ucode->size / 2); i++)
		((__be16 *)ucode->align_va)[i] =
				cpu_to_be16(((u16 *)ucode->align_va)[i]);
	return 0;
}
0883
/*
 * Load a single microcode image from a firmware file: validate its
 * header (declared size must fit in the file, type must be known) and
 * copy the code into a DMA buffer.  The firmware blob is released in
 * all cases.
 *
 * Return: 0 on success or a negative errno.
 */
static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
		      const char *ucode_filename)
{
	struct otx_cpt_ucode_hdr *ucode_hdr;
	const struct firmware *fw;
	int ret;

	set_ucode_filename(ucode, ucode_filename);
	ret = request_firmware(&fw, ucode->filename, dev);
	if (ret)
		return ret;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
	memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
	ucode->ver_num = ucode_hdr->ver_num;
	/* code_length is counted in 2-byte units */
	ucode->size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode->size || (fw->size < round_up(ucode->size, 16)
	    + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
		ret = -EINVAL;
		goto release_fw;
	}

	ret = get_ucode_type(ucode_hdr, &ucode->type);
	if (ret) {
		dev_err(dev, "Microcode %s unknown type 0x%x\n",
			ucode->filename, ucode->type);
		goto release_fw;
	}

	ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
	if (ret)
		goto release_fw;

	print_ucode_dbg_info(ucode);
release_fw:
	release_firmware(fw);
	return ret;
}
0923
/*
 * Enable an engine group: program the microcode base address into its
 * engines, then attach and enable the cores.
 */
static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	return cpt_attach_and_enable_cores(eng_grp, obj);
}
0936
/*
 * Disable an engine group: detach and stop its cores, unload its own
 * microcode copy, and reprogram the engines' ucode base registers from
 * the group's (now reset) state.
 */
static int disable_eng_grp(struct device *dev,
			   struct otx_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload microcode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);

	/* Point every used slot back at the group's own (empty) ucode */
	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	/* Reprogram UCODE_BASE for the engines this group used */
	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}
0961
/*
 * Make @dst_grp mirror (share the microcode/engines of) @src_grp.
 * The source group counts its mirrors in ref_count; the destination
 * records which group it mirrors in mirror.idx.
 */
static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
				    struct otx_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}
0975
/*
 * Undo mirroring for @dst_grp: drop the mirrored group's reference
 * count and clear the destination's mirror state.  No-op when the
 * group is not mirroring anyone.
 */
static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
{
	struct otx_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}
0990
/*
 * Reduce the engine counts requested for a new group by the engines it
 * will share with the mirrored group.
 */
static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
				  struct otx_cpt_engines *engs, int engs_cnt)
{
	struct otx_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirrored_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * The mirrored group already provides mirrored_engs->count
		 * engines of this type, so only the difference needs to be
		 * reserved from the free pool.  The result may be zero or
		 * negative, meaning the new group needs no additional
		 * engines of this type and fully (or more than) reuses the
		 * mirrored ones.
		 */
		engs[i].count -= mirrored_engs->count;
	}
}
1020
/*
 * Look for an already enabled engine group whose microcode has the
 * same version string as @grp's, so that @grp can mirror it and share
 * the DMA copy of the microcode instead of holding its own.
 *
 * Candidates with ucode[0].type set are skipped.
 * NOTE(review): ucode type is cleared by ucode_unload(); confirm the
 * intended meaning of this filter in the mirroring life cycle.
 */
static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx_cpt_eng_grp_info *grp)
{
	struct otx_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}
1042
1043 static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
1044 struct otx_cpt_eng_grps *eng_grps)
1045 {
1046 int i;
1047
1048 for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1049 if (!eng_grps->grp[i].is_enabled)
1050 return &eng_grps->grp[i];
1051 }
1052 return NULL;
1053 }
1054
/*
 * Compute the per-slot engine core bitmaps for a group.
 *
 * Pass 1: for each slot with a positive count, pick that many unused
 * cores (eng_ref_cnt == 0) from the type's core range [offset,
 * offset + max_cnt); -ENOSPC if not enough free cores exist.
 *
 * Pass 2 (mirroring groups only): OR in the mirrored group's cores,
 * trimming -count cores from the front when this group requested
 * fewer engines than the mirrored group provides.
 */
static int eng_grp_update_masks(struct device *dev,
				struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		/* Not enough unused cores of this type */
		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		/*
		 * A negative count means this group wants fewer engines than
		 * the mirrored group has: drop the excess from the front of
		 * the mirrored bitmap before merging.
		 */
		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}
1129
/*
 * Delete an enabled engine group: refuse if other groups still mirror
 * it, then tear down mirroring, disable the group, release its engines
 * and remove its sysfs attribute.
 */
static int delete_engine_group(struct device *dev,
			       struct otx_cpt_eng_grp_info *eng_grp)
{
	int i, ret;

	if (!eng_grp->is_enabled)
		return -EINVAL;

	/* A mirrored group must outlive all groups sharing its ucode */
	if (eng_grp->mirror.ref_count) {
		dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
			eng_grp->idx);
		for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
			if (eng_grp->g->grp[i].mirror.is_ena &&
			    eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
				pr_cont(" %d", i);
		}
		pr_cont("\n");
		return -EINVAL;
	}

	/* Removing engine group mirroring (if enabled) */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	device_remove_file(dev, &eng_grp->info_attr);
	eng_grp->is_enabled = false;

	return 0;
}
1168
/*
 * Single-microcode case: verify that the one loaded image supports
 * every engine type requested for the group.
 */
static int validate_1_ucode_scenario(struct device *dev,
				     struct otx_cpt_eng_grp_info *eng_grp,
				     struct otx_cpt_engines *engs, int engs_cnt)
{
	int i;

	/* Verify that ucode loaded supports requested engine types */
	for (i = 0; i < engs_cnt; i++) {
		if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
						  engs[i].type)) {
			dev_err(dev,
				"Microcode %s does not support %s engines\n",
				eng_grp->ucode[0].filename,
				get_eng_type_str(engs[i].type));
			return -EINVAL;
		}
	}
	return 0;
}
1188
/*
 * Point the group's first engine slot at the microcode to execute:
 * the mirrored group's copy when mirroring is enabled, otherwise the
 * group's own ucode[0].
 */
static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	/* Slot 0 is expected to always be in use */
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;
}
1200
/*
 * Create and enable a new engine group.
 *
 * @dev:        device used for logging and DMA allocations
 * @eng_grps:   engine groups state for this CPT device
 * @engs:       requested engine types and counts for the new group
 * @engs_cnt:   number of entries in @engs
 * @ucode_data: microcode sources; firmware filenames (char *) or
 *              struct tar_ucode_info_t pointers depending on
 *              @use_uc_from_tar_arch
 * @ucodes_cnt: number of entries in @ucode_data
 * @use_uc_from_tar_arch: true when @ucode_data holds tar archive entries
 *
 * Returns 0 on success, negative error code otherwise.
 * Caller must hold eng_grps->lock.
 */
static int create_engine_group(struct device *dev,
			       struct otx_cpt_eng_grps *eng_grps,
			       struct otx_cpt_engines *engs, int engs_cnt,
			       void *ucode_data[], int ucodes_cnt,
			       bool use_uc_from_tar_arch)
{
	struct otx_cpt_eng_grp_info *mirrored_eng_grp;
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_eng_grp_info *eng_grp;
	int i, ret = 0;

	if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
		return -EINVAL;

	/* Validate that the device supports all requested engine types */
	for (i = 0; i < engs_cnt; i++)
		if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
			dev_err(dev, "Device does not support %s engines\n",
				get_eng_type_str(engs[i].type));
			return -EPERM;
		}

	/* Find a free engine group slot */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}

	/* Load microcode for the engine group */
	for (i = 0; i < ucodes_cnt; i++) {
		if (use_uc_from_tar_arch) {
			/* Image already parsed; copy it into DMA memory */
			tar_info = (struct tar_ucode_info_t *) ucode_data[i];
			eng_grp->ucode[i] = tar_info->ucode;
			ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
						    tar_info->ucode_ptr);
		} else
			/* Load image from a firmware file by name */
			ret = ucode_load(dev, &eng_grp->ucode[i],
					 (char *) ucode_data[i]);
		if (ret)
			goto err_ucode_unload;
	}

	/* Verify the loaded microcode serves all requested engine types */
	ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Check if this group can mirror an existing group's microcode */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Reuse the mirrored group's already-loaded microcode */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update engine counts: engines already reserved by the
		 * mirrored group are shared, so only the remainder of the
		 * requested count needs a fresh reservation below.
		 */
		update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
	}

	/* Reserve the (remaining) requested engines for this group */
	ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Update ucode pointers used by the engines (own or mirrored) */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Create sysfs entry for this engine group's info */
	ret = create_sysfs_eng_grps_info(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto err_release_engs;

	/*
	 * When mirroring, the privately loaded copy of the microcode is
	 * redundant (the engines run the mirrored group's image), so free
	 * it now that the group is up.
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;
	if (eng_grp->mirror.is_ena)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);

	return 0;

err_release_engs:
	release_engines(dev, eng_grp);
err_ucode_unload:
	ucode_unload(dev, &eng_grp->ucode[0]);
	return ret;
}
1311
/*
 * Store handler for the "ucode_load" sysfs attribute.
 *
 * Accepts two request formats (fields separated by ';'):
 *  - create a group: "[se:<count>;][ae:<count>;]<ucode filename>[;...]"
 *  - delete a group: "engine_group<idx>:null"
 * Create and delete requests are mutually exclusive within one write.
 *
 * Returns @count on success, negative error code otherwise.
 */
static ssize_t ucode_load_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
	char *start, *val, *err_msg, *tmp;
	struct otx_cpt_eng_grps *eng_grps;
	int grp_idx = 0, ret = -EINVAL;
	bool has_se, has_ie, has_ae;
	int del_grp_idx = -1;	/* -1 means "no delete requested" */
	int ucode_idx = 0;

	if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
		return -EINVAL;

	eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
	err_msg = "Invalid engine group format";
	/* Work on a local copy since strsep()/strim() modify the string */
	strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
	start = tmp_buf;

	has_se = has_ie = has_ae = false;

	/* Parse ';' separated fields */
	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "engine_group", 12)) {
			/* Delete request: "engine_group<idx>:null" */
			if (del_grp_idx != -1)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			/* "engine_group" (12 chars) + one digit index */
			if (strlen(tmp) != 13)
				goto err_print;
			if (kstrtoint((tmp + 12), 10, &del_grp_idx))
				goto err_print;
			val = strim(val);
			/* Only "null" is accepted after the ':' */
			if (strncasecmp(val, "null", 4))
				goto err_print;
			if (strlen(val) != 4)
				goto err_print;
		} else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			/* SE engine count: "se:<count>", once, before ucode */
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			/* AE engine count: "ae:<count>", once, before ucode */
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strim(strsep(&val, ":"));
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX_CPT_AE_TYPES;
			has_ae = true;
		} else {
			/* Anything else is a microcode filename (max 2) */
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate the parsed request as a whole */
	if (del_grp_idx == -1) {
		/* Create request needs at least one engine type and ucode */
		if (!(grp_idx && ucode_idx))
			goto err_print;

		/* Two microcode images require two engine types */
		if (ucode_idx > 1 && grp_idx < 2)
			goto err_print;

		if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
			err_msg = "Error max 2 engine types can be attached";
			goto err_print;
		}

	} else {
		if (del_grp_idx < 0 ||
		    del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
			dev_err(dev, "Invalid engine group index %d\n",
				del_grp_idx);
			ret = -EINVAL;
			return ret;
		}

		if (!eng_grps->grp[del_grp_idx].is_enabled) {
			dev_err(dev, "Error engine_group%d is not configured\n",
				del_grp_idx);
			ret = -EINVAL;
			return ret;
		}

		/* Delete cannot be combined with create fields */
		if (grp_idx || ucode_idx)
			goto err_print;
	}

	mutex_lock(&eng_grps->lock);

	/* Groups are read-only while VFs are attached */
	if (eng_grps->is_rdonly) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}

	if (del_grp_idx == -1)
		/* Create engine group from the parsed request */
		ret = create_engine_group(dev, eng_grps, engs, grp_idx,
					  (void **) ucode_filename,
					  ucode_idx, false);
	else
		/* Delete the requested engine group */
		ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
	if (ret)
		goto err_unlock;

	print_dbg_info(dev, eng_grps);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret ? ret : count;
err_print:
	dev_err(dev, "%s\n", err_msg);

	return ret;
}
1455
/*
 * Try to create the default engine groups from the microcode tar archive
 * shipped with the driver: one SE group using all available SE engines
 * and one AE group using all available AE engines, each created only if
 * the archive provides matching microcode and the device supports the
 * engine type. Runs at most once per device and is skipped entirely if
 * any group is already configured.
 *
 * Returns 0 on success (including the "nothing to do" cases), negative
 * error code otherwise.
 */
int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
					struct otx_cpt_eng_grps *eng_grps,
					int pf_type)
{
	struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct tar_arch_info_t *tar_arch = NULL;
	char *tar_filename;
	int i, ret = 0;

	mutex_lock(&eng_grps->lock);

	/*
	 * We don't create engine groups if this was already attempted --
	 * the first-try flag latches regardless of the outcome.
	 */
	if (eng_grps->is_first_try)
		goto unlock_mutex;
	eng_grps->is_first_try = true;

	/* We create group only if no groups are configured */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].is_enabled)
			goto unlock_mutex;

	switch (pf_type) {
	case OTX_CPT_AE:
	case OTX_CPT_SE:
		tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* NOTE: a load failure is treated as "no defaults", not an error */
	tar_arch = load_tar_archive(&pdev->dev, tar_filename);
	if (!tar_arch)
		goto unlock_mutex;

	/*
	 * If device supports SE engines and there is SE microcode in the
	 * tar archive, create an engine group with all available SE engines.
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {

		engs[0].type = OTX_CPT_SE_TYPES;
		engs[0].count = eng_grps->avail.max_se_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}

	/*
	 * Likewise for AE: if supported and present in the archive, create
	 * an engine group with all available AE engines.
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {

		engs[0].type = OTX_CPT_AE_TYPES;
		engs[0].count = eng_grps->avail.max_ae_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}

	print_dbg_info(&pdev->dev, eng_grps);
release_tar_arch:
	release_tar_archive(tar_arch);
unlock_mutex:
	mutex_unlock(&eng_grps->lock);
	return ret;
}
1539
1540 void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
1541 bool is_rdonly)
1542 {
1543 mutex_lock(&eng_grps->lock);
1544
1545 eng_grps->is_rdonly = is_rdonly;
1546
1547 mutex_unlock(&eng_grps->lock);
1548 }
1549
1550 void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
1551 {
1552 int grp, timeout = 100;
1553 u64 reg;
1554
1555
1556 for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
1557 writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
1558 udelay(CSR_DELAY);
1559 }
1560
1561 reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
1562 while (reg) {
1563 udelay(CSR_DELAY);
1564 reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
1565 if (timeout--) {
1566 dev_warn(&cpt->pdev->dev, "Cores still busy\n");
1567 break;
1568 }
1569 }
1570
1571
1572 writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
1573 }
1574
1575 void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
1576 struct otx_cpt_eng_grps *eng_grps)
1577 {
1578 struct otx_cpt_eng_grp_info *grp;
1579 int i, j;
1580
1581 mutex_lock(&eng_grps->lock);
1582 if (eng_grps->is_ucode_load_created) {
1583 device_remove_file(&pdev->dev,
1584 &eng_grps->ucode_load_attr);
1585 eng_grps->is_ucode_load_created = false;
1586 }
1587
1588
1589 for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
1590 if (eng_grps->grp[i].mirror.is_ena)
1591 delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1592
1593
1594 for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
1595 delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1596
1597
1598 for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1599 grp = &eng_grps->grp[i];
1600 for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
1601 kfree(grp->engs[j].bmap);
1602 grp->engs[j].bmap = NULL;
1603 }
1604 }
1605
1606 mutex_unlock(&eng_grps->lock);
1607 }
1608
/*
 * Initialize engine groups state for a CPT PF device: set up bookkeeping,
 * allocate per-group engine bitmaps, record which engine types the PF
 * supports, and create the "ucode_load" sysfs attribute used to manage
 * groups from user space.
 *
 * Returns 0 on success, negative error code otherwise. On failure all
 * partially-initialized state is released via otx_cpt_cleanup_eng_grps().
 */
int otx_cpt_init_eng_grps(struct pci_dev *pdev,
			  struct otx_cpt_eng_grps *eng_grps, int pf_type)
{
	struct otx_cpt_eng_grp_info *grp;
	int i, j, ret = 0;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	/* Initially all engines are available for reservation */
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d > than max supported %d\n",
			eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto err;
	}

	/* Set up each group slot and allocate its engine bitmaps */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
			 "engine_group%d", i);
		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			/* One bit per engine on the device */
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto err;
			}
		}
	}

	switch (pf_type) {
	case OTX_CPT_SE:
		/* OcteonTX 83xx SE CPT PF has only SE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
		break;

	case OTX_CPT_AE:
		/* OcteonTX 83xx AE CPT PF has only AE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto err;
	}

	/* Write-only sysfs attribute for user-space group management */
	eng_grps->ucode_load_attr.show = NULL;
	eng_grps->ucode_load_attr.store = ucode_load_store;
	eng_grps->ucode_load_attr.attr.name = "ucode_load";
	eng_grps->ucode_load_attr.attr.mode = 0220;
	sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
	ret = device_create_file(&pdev->dev,
				 &eng_grps->ucode_load_attr);
	if (ret)
		goto err;
	eng_grps->is_ucode_load_created = true;

	print_dbg_info(&pdev->dev, eng_grps);
	return ret;
err:
	otx_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}