0001
0002
0003
0004 #include "ice_common.h"
0005 #include "ice_flex_pipe.h"
0006 #include "ice_flow.h"
0007 #include "ice.h"
0008
0009
0010
0011
0012
0013 #define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM"
0014 #define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM"
0015
0016
0017
0018
0019 #define ICE_TNL_PRE "TNL_"
0020 static const struct ice_tunnel_type_scan tnls[] = {
0021 { TNL_VXLAN, "TNL_VXLAN_PF" },
0022 { TNL_GENEVE, "TNL_GENEVE_PF" },
0023 { TNL_LAST, "" }
0024 };
0025
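/* Section IDs indexed as [block][section]; rows follow enum ice_block order:
 * SWITCH, ACL, FD, RSS, PE.
 */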
0026 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
0027
0028 {
0029 ICE_SID_XLT0_SW,
0030 ICE_SID_XLT_KEY_BUILDER_SW,
0031 ICE_SID_XLT1_SW,
0032 ICE_SID_XLT2_SW,
0033 ICE_SID_PROFID_TCAM_SW,
0034 ICE_SID_PROFID_REDIR_SW,
0035 ICE_SID_FLD_VEC_SW,
0036 ICE_SID_CDID_KEY_BUILDER_SW,
0037 ICE_SID_CDID_REDIR_SW
0038 },
0039
0040
0041 {
0042 ICE_SID_XLT0_ACL,
0043 ICE_SID_XLT_KEY_BUILDER_ACL,
0044 ICE_SID_XLT1_ACL,
0045 ICE_SID_XLT2_ACL,
0046 ICE_SID_PROFID_TCAM_ACL,
0047 ICE_SID_PROFID_REDIR_ACL,
0048 ICE_SID_FLD_VEC_ACL,
0049 ICE_SID_CDID_KEY_BUILDER_ACL,
0050 ICE_SID_CDID_REDIR_ACL
0051 },
0052
0053
0054 {
0055 ICE_SID_XLT0_FD,
0056 ICE_SID_XLT_KEY_BUILDER_FD,
0057 ICE_SID_XLT1_FD,
0058 ICE_SID_XLT2_FD,
0059 ICE_SID_PROFID_TCAM_FD,
0060 ICE_SID_PROFID_REDIR_FD,
0061 ICE_SID_FLD_VEC_FD,
0062 ICE_SID_CDID_KEY_BUILDER_FD,
0063 ICE_SID_CDID_REDIR_FD
0064 },
0065
0066
0067 {
0068 ICE_SID_XLT0_RSS,
0069 ICE_SID_XLT_KEY_BUILDER_RSS,
0070 ICE_SID_XLT1_RSS,
0071 ICE_SID_XLT2_RSS,
0072 ICE_SID_PROFID_TCAM_RSS,
0073 ICE_SID_PROFID_REDIR_RSS,
0074 ICE_SID_FLD_VEC_RSS,
0075 ICE_SID_CDID_KEY_BUILDER_RSS,
0076 ICE_SID_CDID_REDIR_RSS
0077 },
0078
0079
0080 {
0081 ICE_SID_XLT0_PE,
0082 ICE_SID_XLT_KEY_BUILDER_PE,
0083 ICE_SID_XLT1_PE,
0084 ICE_SID_XLT2_PE,
0085 ICE_SID_PROFID_TCAM_PE,
0086 ICE_SID_PROFID_REDIR_PE,
0087 ICE_SID_FLD_VEC_PE,
0088 ICE_SID_CDID_KEY_BUILDER_PE,
0089 ICE_SID_CDID_REDIR_PE
0090 }
0091 };
0092
0093
0094
0095
0096
0097
0098
0099
0100
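/**
 * ice_sect_id - return the section ID for a block/section pair
 * @blk: hardware block
 * @sect: section type
 *
 * Looks up the hardware section ID in the ice_sect_lkup table above.
 */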
0101 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
0102 {
0103 return ice_sect_lkup[blk][sect];
0104 }
0105
0106
0107
0108
0109
0110
0111
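/**
 * ice_pkg_val_buf - validate a package buffer header
 * @buf: pointer to the package buffer
 *
 * Returns the buffer header if the section count and data end offset are
 * within their valid ranges, otherwise NULL.
 */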
0112 static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
0113 {
0114 struct ice_buf_hdr *hdr;
0115 u16 section_count;
0116 u16 data_end;
0117
0118 hdr = (struct ice_buf_hdr *)buf->buf;
0119
0120 section_count = le16_to_cpu(hdr->section_count);
0121 if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
0122 return NULL;
0123
0124 data_end = le16_to_cpu(hdr->data_end);
0125 if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
0126 return NULL;
0127
0128 return hdr;
0129 }
0130
0131
0132
0133
0134
0135
0136
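/**
 * ice_find_buf_table - locate the buffer table within an ice segment
 * @ice_seg: pointer to the ice segment
 *
 * The buffer table follows the variable-length device table and NVM table;
 * walk past both to reach it.
 */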
0137 static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
0138 {
0139 struct ice_nvm_table *nvms;
0140
0141 nvms = (struct ice_nvm_table *)
0142 (ice_seg->device_table +
0143 le32_to_cpu(ice_seg->device_table_count));
0144
0145 return (__force struct ice_buf_table *)
0146 (nvms->vers + le32_to_cpu(nvms->table_count));
0147 }
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
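/**
 * ice_pkg_enum_buf - enumerate the buffers of a package segment
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @state: enumeration state kept between calls
 *
 * Pass the ice segment on the first call; pass NULL to advance to the next
 * buffer on later calls. Returns NULL when no more valid buffers remain.
 */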
0161 static struct ice_buf_hdr *
0162 ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
0163 {
0164 if (ice_seg) {
0165 state->buf_table = ice_find_buf_table(ice_seg);
0166 if (!state->buf_table)
0167 return NULL;
0168
0169 state->buf_idx = 0;
0170 return ice_pkg_val_buf(state->buf_table->buf_array);
0171 }
0172
0173 if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
0174 return ice_pkg_val_buf(state->buf_table->buf_array +
0175 state->buf_idx);
0176 else
0177 return NULL;
0178 }
0179
0180
0181
0182
0183
0184
0185
0186
0187
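/**
 * ice_pkg_advance_sect - advance to the next section in the package
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @state: enumeration state
 *
 * Moves to the next section entry in the current buffer, or to the first
 * section of the next buffer once the current buffer is exhausted.
 */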
0188 static bool
0189 ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
0190 {
0191 if (!ice_seg && !state->buf)
0192 return false;
0193
0194 if (!ice_seg && state->buf)
0195 if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
0196 return true;
0197
0198 state->buf = ice_pkg_enum_buf(ice_seg, state);
0199 if (!state->buf)
0200 return false;
0201
0202
0203 state->sect_idx = 0;
0204 return true;
0205 }
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
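/**
 * ice_pkg_enum_section - enumerate sections of a given type
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @state: enumeration state
 * @sect_type: section type to match (only used on the first call)
 *
 * Skips sections of other types and validates the matching section's offset
 * and size before returning a pointer to it.
 */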
0219 static void *
0220 ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
0221 u32 sect_type)
0222 {
0223 u16 offset, size;
0224
0225 if (ice_seg)
0226 state->type = sect_type;
0227
0228 if (!ice_pkg_advance_sect(ice_seg, state))
0229 return NULL;
0230
0231
0232 while (state->buf->section_entry[state->sect_idx].type !=
0233 cpu_to_le32(state->type))
0234 if (!ice_pkg_advance_sect(NULL, state))
0235 return NULL;
0236
0237
0238 offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
0239 if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
0240 return NULL;
0241
0242 size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
0243 if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
0244 return NULL;
0245
0246
0247 if (offset + size > ICE_PKG_BUF_SIZE)
0248 return NULL;
0249
0250 state->sect_type =
0251 le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
0252
0253
0254 state->sect = ((u8 *)state->buf) +
0255 le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
0256
0257 return state->sect;
0258 }
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
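/**
 * ice_pkg_enum_entry - enumerate entries within sections of a given type
 * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
 * @state: enumeration state
 * @sect_type: section type to match
 * @offset: optional entry offset, filled in by the handler
 * @handler: callback that extracts entry @index from a section
 *
 * The handler is called with increasing indices; when it returns NULL the
 * enumeration moves on to the next section of the same type.
 */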
0285 static void *
0286 ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
0287 u32 sect_type, u32 *offset,
0288 void *(*handler)(u32 sect_type, void *section,
0289 u32 index, u32 *offset))
0290 {
0291 void *entry;
0292
0293 if (ice_seg) {
0294 if (!handler)
0295 return NULL;
0296
0297 if (!ice_pkg_enum_section(ice_seg, state, sect_type))
0298 return NULL;
0299
0300 state->entry_idx = 0;
0301 state->handler = handler;
0302 } else {
0303 state->entry_idx++;
0304 }
0305
0306 if (!state->handler)
0307 return NULL;
0308
0309
0310 entry = state->handler(state->sect_type, state->sect, state->entry_idx,
0311 offset);
0312 if (!entry) {
0313
0314 if (!ice_pkg_enum_section(NULL, state, 0))
0315 return NULL;
0316
0317 state->entry_idx = 0;
0318 entry = state->handler(state->sect_type, state->sect,
0319 state->entry_idx, offset);
0320 }
0321
0322 return entry;
0323 }
0324
0325
0326
0327
0328
0329
0330 bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
0331 {
0332 return ptype < ICE_FLOW_PTYPE_MAX &&
0333 test_bit(ptype, hw->hw_ptype);
0334 }
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
0346 static void *
0347 ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
0348 u32 *offset)
0349 {
0350 struct ice_marker_ptype_tcam_section *marker_ptype;
0351
0352 if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
0353 return NULL;
0354
0355 if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
0356 return NULL;
0357
0358 if (offset)
0359 *offset = 0;
0360
0361 marker_ptype = section;
0362 if (index >= le16_to_cpu(marker_ptype->count))
0363 return NULL;
0364
0365 return marker_ptype->tcam + index;
0366 }
0367
0368
0369
0370
0371
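/**
 * ice_fill_hw_ptype - fill the enabled PTYPE bitmap from the package
 * @hw: pointer to the HW structure
 *
 * Walks the RX parser marker PTYPE TCAM sections of the downloaded segment
 * and sets a bit in hw->hw_ptype for every valid packet type found.
 */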
0372 static void ice_fill_hw_ptype(struct ice_hw *hw)
0373 {
0374 struct ice_marker_ptype_tcam_entry *tcam;
0375 struct ice_seg *seg = hw->seg;
0376 struct ice_pkg_enum state;
0377
0378 bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
0379 if (!seg)
0380 return;
0381
0382 memset(&state, 0, sizeof(state));
0383
0384 do {
0385 tcam = ice_pkg_enum_entry(seg, &state,
0386 ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
0387 ice_marker_ptype_tcam_handler);
0388 if (tcam &&
0389 le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
0390 le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
0391 set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
0392
0393 seg = NULL;
0394 } while (tcam);
0395 }
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407 static void *
0408 ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
0409 {
0410 struct ice_boost_tcam_section *boost;
0411
0412 if (!section)
0413 return NULL;
0414
0415 if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
0416 return NULL;
0417
0418
0419 if (index > ICE_MAX_BST_TCAMS_IN_BUF)
0420 return NULL;
0421
0422 if (offset)
0423 *offset = 0;
0424
0425 boost = section;
0426 if (index >= le16_to_cpu(boost->count))
0427 return NULL;
0428
0429 return boost->tcam + index;
0430 }
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
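/**
 * ice_find_boost_entry - find a boost TCAM entry by address
 * @ice_seg: pointer to the ice segment
 * @addr: boost TCAM address to search for
 * @entry: returned pointer to the matching entry, or NULL
 *
 * Returns 0 on success, -EINVAL if no segment was supplied, or -EIO when no
 * entry with the given address exists.
 */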
0442 static int
0443 ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
0444 struct ice_boost_tcam_entry **entry)
0445 {
0446 struct ice_boost_tcam_entry *tcam;
0447 struct ice_pkg_enum state;
0448
0449 memset(&state, 0, sizeof(state));
0450
0451 if (!ice_seg)
0452 return -EINVAL;
0453
0454 do {
0455 tcam = ice_pkg_enum_entry(ice_seg, &state,
0456 ICE_SID_RXPARSER_BOOST_TCAM, NULL,
0457 ice_boost_tcam_handler);
0458 if (tcam && le16_to_cpu(tcam->addr) == addr) {
0459 *entry = tcam;
0460 return 0;
0461 }
0462
0463 ice_seg = NULL;
0464 } while (tcam);
0465
0466 *entry = NULL;
0467 return -EIO;
0468 }
0469
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480 static void *
0481 ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
0482 u32 *offset)
0483 {
0484 struct ice_label_section *labels;
0485
0486 if (!section)
0487 return NULL;
0488
0489
0490 if (index > ICE_MAX_LABELS_IN_BUF)
0491 return NULL;
0492
0493 if (offset)
0494 *offset = 0;
0495
0496 labels = section;
0497 if (index >= le16_to_cpu(labels->count))
0498 return NULL;
0499
0500 return labels->label + index;
0501 }
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515 static char *
0516 ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
0517 u16 *value)
0518 {
0519 struct ice_label *label;
0520
0521
0522 if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
0523 return NULL;
0524
0525 label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
0526 ice_label_enum_handler);
0527 if (!label)
0528 return NULL;
0529
0530 *value = le16_to_cpu(label->value);
0531 return label->name;
0532 }
0533
0534
0535
0536
0537
0538
0539
0540 static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
0541 {
0542 if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
0543 u16 i;
0544
0545 for (i = 0; tnls[i].type != TNL_LAST; i++) {
0546 size_t len = strlen(tnls[i].label_prefix);
0547
0548
0549 if (strncmp(label_name, tnls[i].label_prefix, len))
0550 continue;
0551
0552
0553
0554
0555
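/* The PF digit follows the label prefix, in the position where the
 * prefix string's terminating NUL would fall within the label name.
 */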
0556 if ((label_name[len] - '0') == hw->pf_id) {
0557 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
0558 hw->tnl.tbl[hw->tnl.count].valid = false;
0559 hw->tnl.tbl[hw->tnl.count].boost_addr = val;
0560 hw->tnl.tbl[hw->tnl.count].port = 0;
0561 hw->tnl.count++;
0562 break;
0563 }
0564 }
0565 }
0566 }
0567
0568
0569
0570
0571
0572
0573
0574 static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
0575 {
0576 if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
0577 hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
0578 hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
0579 hw->dvm_upd.count++;
0580 }
0581 }
0582
0583
0584
0585
0586
0587
0588
0589
0590
0591
0592
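/**
 * ice_init_pkg_hints - collect tunnel and VLAN-mode hints from the package
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the ice segment
 *
 * Scans the RX parser TMEM labels for tunnel (TNL_*) and double/single VLAN
 * mode (DVM/SVM) entries, records their boost TCAM addresses, and then
 * resolves each address to its boost TCAM entry.
 */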
0593 static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
0594 {
0595 struct ice_pkg_enum state;
0596 char *label_name;
0597 u16 val;
0598 int i;
0599
0600 memset(&hw->tnl, 0, sizeof(hw->tnl));
0601 memset(&state, 0, sizeof(state));
0602
0603 if (!ice_seg)
0604 return;
0605
0606 label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
0607 &val);
0608
0609 while (label_name) {
0610 if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
0611
0612 ice_add_tunnel_hint(hw, label_name, val);
0613
0614
0615 else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
0616 ice_add_dvm_hint(hw, val, true);
0617
0618
0619 else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
0620 ice_add_dvm_hint(hw, val, false);
0621
0622 label_name = ice_enum_labels(NULL, 0, &state, &val);
0623 }
0624
0625
0626 for (i = 0; i < hw->tnl.count; i++) {
0627 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
0628 &hw->tnl.tbl[i].boost_entry);
0629 if (hw->tnl.tbl[i].boost_entry) {
0630 hw->tnl.tbl[i].valid = true;
0631 if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
0632 hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
0633 }
0634 }
0635
0636
0637 for (i = 0; i < hw->dvm_upd.count; i++)
0638 ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
0639 &hw->dvm_upd.tbl[i].boost_entry);
0640 }
0641
0642
0643
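/* Bit patterns used when building TCAM (key, key inverse) pairs:
 * don't care, never match, match 0, and match 1.
 */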
0644 #define ICE_DC_KEY 0x1
0645 #define ICE_DC_KEYINV 0x1
0646 #define ICE_NM_KEY 0x0
0647 #define ICE_NM_KEYINV 0x0
0648 #define ICE_0_KEY 0x1
0649 #define ICE_0_KEYINV 0x0
0650 #define ICE_1_KEY 0x0
0651 #define ICE_1_KEYINV 0x1
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
0674
0675
0676
0677
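/**
 * ice_gen_key_word - generate one byte of a TCAM key and its inverse
 * @val: value bits
 * @valid: mask of bits in @val to program (invalid bits keep prior contents)
 * @dont_care: mask of "don't care" bits
 * @nvr_mtch: mask of "never match" bits
 * @key: destination for the key byte
 * @key_inv: destination for the key inverse byte
 *
 * A bit may not be marked both "don't care" and "never match"; each case
 * maps to a distinct (key, key inverse) bit pair.
 */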
0678 static int
0679 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
0680 u8 *key_inv)
0681 {
0682 u8 in_key = *key, in_key_inv = *key_inv;
0683 u8 i;
0684
0685
0686 if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
0687 return -EIO;
0688
0689 *key = 0;
0690 *key_inv = 0;
0691
0692
0693 for (i = 0; i < 8; i++) {
0694 *key >>= 1;
0695 *key_inv >>= 1;
0696
0697 if (!(valid & 0x1)) {
0698 *key |= (in_key & 0x1) << 7;
0699 *key_inv |= (in_key_inv & 0x1) << 7;
0700 } else if (dont_care & 0x1) {
0701 *key |= ICE_DC_KEY << 7;
0702 *key_inv |= ICE_DC_KEYINV << 7;
0703 } else if (nvr_mtch & 0x1) {
0704 *key |= ICE_NM_KEY << 7;
0705 *key_inv |= ICE_NM_KEYINV << 7;
0706 } else if (val & 0x01) {
0707 *key |= ICE_1_KEY << 7;
0708 *key_inv |= ICE_1_KEYINV << 7;
0709 } else {
0710 *key |= ICE_0_KEY << 7;
0711 *key_inv |= ICE_0_KEYINV << 7;
0712 }
0713
0714 dont_care >>= 1;
0715 nvr_mtch >>= 1;
0716 valid >>= 1;
0717 val >>= 1;
0718 in_key >>= 1;
0719 in_key_inv >>= 1;
0720 }
0721
0722 return 0;
0723 }
0724
0725
0726
0727
0728
0729
0730
0731
0732
0733
0734
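/**
 * ice_bits_max_set - check that no more than @max bits are set in a mask
 * @mask: byte array to scan
 * @size: number of bytes in @mask
 * @max: maximum number of set bits allowed
 */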
0735 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
0736 {
0737 u16 count = 0;
0738 u16 i;
0739
0740
0741 for (i = 0; i < size; i++) {
0742
0743 if (!mask[i])
0744 continue;
0745
0746
0747
0748
0749
0750 if (count == max)
0751 return false;
0752
0753
0754 count += hweight8(mask[i]);
0755 if (count > max)
0756 return false;
0757 }
0758
0759 return true;
0760 }
0761
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
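/**
 * ice_set_key - program a portion of a TCAM key and its inverse
 * @key: destination buffer; first half holds the key, second half the inverse
 * @size: total size of @key in bytes (must be even)
 * @val: value bytes to program
 * @upd: optional update mask (NULL updates all bits)
 * @dc: optional don't-care mask
 * @nm: optional never-match mask (at most ICE_NVR_MTCH_BITS_MAX bits set)
 * @off: byte offset within the key half to start writing
 * @len: number of bytes to program
 */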
0780 static int
0781 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
0782 u16 len)
0783 {
0784 u16 half_size;
0785 u16 i;
0786
0787
0788 if (size % 2)
0789 return -EIO;
0790
0791 half_size = size / 2;
0792 if (off + len > half_size)
0793 return -EIO;
0794
0795
0796
0797
0798
0799 #define ICE_NVR_MTCH_BITS_MAX 1
0800 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
0801 return -EIO;
0802
0803 for (i = 0; i < len; i++)
0804 if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
0805 dc ? dc[i] : 0, nm ? nm[i] : 0,
0806 key + off + i, key + half_size + off + i))
0807 return -EIO;
0808
0809 return 0;
0810 }
0811
0812
0813
0814
0815
0816
0817
0818
0819
0820
0821
0822
0823
0824
0825
0826
0827
0828 static int
0829 ice_acquire_global_cfg_lock(struct ice_hw *hw,
0830 enum ice_aq_res_access_type access)
0831 {
0832 int status;
0833
0834 status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
0835 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
0836
0837 if (!status)
0838 mutex_lock(&ice_global_cfg_lock_sw);
0839 else if (status == -EALREADY)
0840 ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
0841
0842 return status;
0843 }
0844
0845
0846
0847
0848
0849
0850
0851 static void ice_release_global_cfg_lock(struct ice_hw *hw)
0852 {
0853 mutex_unlock(&ice_global_cfg_lock_sw);
0854 ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
0855 }
0856
0857
0858
0859
0860
0861
0862
0863
0864 int
0865 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
0866 {
0867 return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
0868 ICE_CHANGE_LOCK_TIMEOUT);
0869 }
0870
0871
0872
0873
0874
0875
0876
0877 void ice_release_change_lock(struct ice_hw *hw)
0878 {
0879 ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
0880 }
0881
0882
0883
0884
0885
0886
0887
0888
0889
0890
0891
0892
0893
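/**
 * ice_aq_download_pkg - admin queue command to download a package buffer
 * @hw: pointer to the HW structure
 * @pkg_buf: package buffer to transfer
 * @buf_size: size of the buffer
 * @last_buf: last buffer indicator
 * @error_offset: optional error offset returned when firmware reports an error
 * @error_info: optional error information returned by firmware
 * @cd: control structure for the AQ command
 */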
0894 static int
0895 ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
0896 u16 buf_size, bool last_buf, u32 *error_offset,
0897 u32 *error_info, struct ice_sq_cd *cd)
0898 {
0899 struct ice_aqc_download_pkg *cmd;
0900 struct ice_aq_desc desc;
0901 int status;
0902
0903 if (error_offset)
0904 *error_offset = 0;
0905 if (error_info)
0906 *error_info = 0;
0907
0908 cmd = &desc.params.download_pkg;
0909 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
0910 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
0911
0912 if (last_buf)
0913 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
0914
0915 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
0916 if (status == -EIO) {
0917
0918 struct ice_aqc_download_pkg_resp *resp;
0919
0920 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
0921 if (error_offset)
0922 *error_offset = le32_to_cpu(resp->error_offset);
0923 if (error_info)
0924 *error_info = le32_to_cpu(resp->error_info);
0925 }
0926
0927 return status;
0928 }
0929
0930
0931
0932
0933
0934
0935
0936
0937
0938
0939 int
0940 ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
0941 u16 buf_size, struct ice_sq_cd *cd)
0942 {
0943 struct ice_aq_desc desc;
0944
0945 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
0946 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
0947
0948 return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
0949 }
0950
0951
0952
0953
0954
0955
0956
0957
0958
0959
0960
0961
0962
0963 static int
0964 ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
0965 bool last_buf, u32 *error_offset, u32 *error_info,
0966 struct ice_sq_cd *cd)
0967 {
0968 struct ice_aqc_download_pkg *cmd;
0969 struct ice_aq_desc desc;
0970 int status;
0971
0972 if (error_offset)
0973 *error_offset = 0;
0974 if (error_info)
0975 *error_info = 0;
0976
0977 cmd = &desc.params.download_pkg;
0978 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
0979 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
0980
0981 if (last_buf)
0982 cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
0983
0984 status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
0985 if (status == -EIO) {
0986
0987 struct ice_aqc_download_pkg_resp *resp;
0988
0989 resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
0990 if (error_offset)
0991 *error_offset = le32_to_cpu(resp->error_offset);
0992 if (error_info)
0993 *error_info = le32_to_cpu(resp->error_info);
0994 }
0995
0996 return status;
0997 }
0998
0999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009 static struct ice_generic_seg_hdr *
1010 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
1011 struct ice_pkg_hdr *pkg_hdr)
1012 {
1013 u32 i;
1014
1015 ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
1016 pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
1017 pkg_hdr->pkg_format_ver.update,
1018 pkg_hdr->pkg_format_ver.draft);
1019
1020
1021 for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
1022 struct ice_generic_seg_hdr *seg;
1023
1024 seg = (struct ice_generic_seg_hdr *)
1025 ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
1026
1027 if (le32_to_cpu(seg->seg_type) == seg_type)
1028 return seg;
1029 }
1030
1031 return NULL;
1032 }
1033
1034
1035
1036
1037
1038
1039
1040 static int
1041 ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1042 {
1043 int status = 0;
1044 u32 i;
1045
1046 for (i = 0; i < count; i++) {
1047 struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
1048 bool last = ((i + 1) == count);
1049 u32 offset, info;
1050
1051 status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
1052 last, &offset, &info, NULL);
1053
1054 if (status) {
1055 ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
1056 status, offset, info);
1057 break;
1058 }
1059 }
1060
1061 return status;
1062 }
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072 static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1073 {
1074 int status;
1075
1076 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
1077 if (status)
1078 return status;
1079
1080 status = ice_update_pkg_no_lock(hw, bufs, count);
1081
1082 ice_release_change_lock(hw);
1083
1084 return status;
1085 }
1086
1087 static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
1088 {
1089 switch (aq_err) {
1090 case ICE_AQ_RC_ENOSEC:
1091 case ICE_AQ_RC_EBADSIG:
1092 return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
1093 case ICE_AQ_RC_ESVN:
1094 return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
1095 case ICE_AQ_RC_EBADMAN:
1096 case ICE_AQ_RC_EBADBUF:
1097 return ICE_DDP_PKG_LOAD_ERROR;
1098 default:
1099 return ICE_DDP_PKG_ERR;
1100 }
1101 }
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
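/**
 * ice_dwnld_cfg_bufs - download configuration buffers to the firmware
 * @hw: pointer to the HW structure
 * @bufs: array of package buffers
 * @count: number of buffers in the array
 *
 * Acquires the global configuration lock and downloads each buffer in turn;
 * a metadata buffer marks the end of the configuration buffers. Admin queue
 * errors are mapped to ice_ddp_state values.
 */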
1113 static enum ice_ddp_state
1114 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
1115 {
1116 enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
1117 struct ice_buf_hdr *bh;
1118 enum ice_aq_err err;
1119 u32 offset, info, i;
1120 int status;
1121
1122 if (!bufs || !count)
1123 return ICE_DDP_PKG_ERR;
1124
1125
1126
1127
1128
1129 bh = (struct ice_buf_hdr *)bufs;
1130 if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
1131 return ICE_DDP_PKG_SUCCESS;
1132
1133 status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
1134 if (status) {
1135 if (status == -EALREADY)
1136 return ICE_DDP_PKG_ALREADY_LOADED;
1137 return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
1138 }
1139
1140 for (i = 0; i < count; i++) {
1141 bool last = ((i + 1) == count);
1142
1143 if (!last) {
1144
1145 bh = (struct ice_buf_hdr *)(bufs + i + 1);
1146
1147
1148
1149
1150
1151 if (le16_to_cpu(bh->section_count))
1152 if (le32_to_cpu(bh->section_entry[0].type) &
1153 ICE_METADATA_BUF)
1154 last = true;
1155 }
1156
1157 bh = (struct ice_buf_hdr *)(bufs + i);
1158
1159 status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
1160 &offset, &info, NULL);
1161
1162
1163 if (status) {
1164 ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
1165 status, offset, info);
1166 err = hw->adminq.sq_last_status;
1167 state = ice_map_aq_err_to_ddp_state(err);
1168 break;
1169 }
1170
1171 if (last)
1172 break;
1173 }
1174
1175 if (!status) {
1176 status = ice_set_vlan_mode(hw);
1177 if (status)
1178 ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
1179 status);
1180 }
1181
1182 ice_release_global_cfg_lock(hw);
1183
1184 return state;
1185 }
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196 static int
1197 ice_aq_get_pkg_info_list(struct ice_hw *hw,
1198 struct ice_aqc_get_pkg_info_resp *pkg_info,
1199 u16 buf_size, struct ice_sq_cd *cd)
1200 {
1201 struct ice_aq_desc desc;
1202
1203 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
1204
1205 return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
1206 }
1207
1208
1209
1210
1211
1212
1213
1214
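/**
 * ice_download_pkg - download the package segment to the device
 * @hw: pointer to the HW structure
 * @ice_seg: pointer to the ice segment to download
 *
 * Downloads the segment's buffer table and then lets the VLAN mode
 * configuration act on the result.
 */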
1215 static enum ice_ddp_state
1216 ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
1217 {
1218 struct ice_buf_table *ice_buf_tbl;
1219 int status;
1220
1221 ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
1222 ice_seg->hdr.seg_format_ver.major,
1223 ice_seg->hdr.seg_format_ver.minor,
1224 ice_seg->hdr.seg_format_ver.update,
1225 ice_seg->hdr.seg_format_ver.draft);
1226
1227 ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
1228 le32_to_cpu(ice_seg->hdr.seg_type),
1229 le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
1230
1231 ice_buf_tbl = ice_find_buf_table(ice_seg);
1232
1233 ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
1234 le32_to_cpu(ice_buf_tbl->buf_count));
1235
1236 status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
1237 le32_to_cpu(ice_buf_tbl->buf_count));
1238
1239 ice_post_pkg_dwnld_vlan_mode_cfg(hw);
1240
1241 return status;
1242 }
1243
1244
1245
1246
1247
1248
1249
1250
1251 static enum ice_ddp_state
1252 ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
1253 {
1254 struct ice_generic_seg_hdr *seg_hdr;
1255
1256 if (!pkg_hdr)
1257 return ICE_DDP_PKG_ERR;
1258
1259 seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
1260 if (seg_hdr) {
1261 struct ice_meta_sect *meta;
1262 struct ice_pkg_enum state;
1263
1264 memset(&state, 0, sizeof(state));
1265
1266
1267 meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
1268 ICE_SID_METADATA);
1269 if (!meta) {
1270 ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
1271 return ICE_DDP_PKG_INVALID_FILE;
1272 }
1273
1274 hw->pkg_ver = meta->ver;
1275 memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
1276
1277 ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
1278 meta->ver.major, meta->ver.minor, meta->ver.update,
1279 meta->ver.draft, meta->name);
1280
1281 hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
1282 memcpy(hw->ice_seg_id, seg_hdr->seg_id,
1283 sizeof(hw->ice_seg_id));
1284
1285 ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
1286 seg_hdr->seg_format_ver.major,
1287 seg_hdr->seg_format_ver.minor,
1288 seg_hdr->seg_format_ver.update,
1289 seg_hdr->seg_format_ver.draft,
1290 seg_hdr->seg_id);
1291 } else {
1292 ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
1293 return ICE_DDP_PKG_INVALID_FILE;
1294 }
1295
1296 return ICE_DDP_PKG_SUCCESS;
1297 }
1298
1299
1300
1301
1302
1303
1304
1305 static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
1306 {
1307 enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
1308 struct ice_aqc_get_pkg_info_resp *pkg_info;
1309 u16 size;
1310 u32 i;
1311
1312 size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
1313 pkg_info = kzalloc(size, GFP_KERNEL);
1314 if (!pkg_info)
1315 return ICE_DDP_PKG_ERR;
1316
1317 if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
1318 state = ICE_DDP_PKG_ERR;
1319 goto init_pkg_free_alloc;
1320 }
1321
1322 for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
1323 #define ICE_PKG_FLAG_COUNT 4
1324 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
1325 u8 place = 0;
1326
1327 if (pkg_info->pkg_info[i].is_active) {
1328 flags[place++] = 'A';
1329 hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
1330 hw->active_track_id =
1331 le32_to_cpu(pkg_info->pkg_info[i].track_id);
1332 memcpy(hw->active_pkg_name,
1333 pkg_info->pkg_info[i].name,
1334 sizeof(pkg_info->pkg_info[i].name));
1335 hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
1336 }
1337 if (pkg_info->pkg_info[i].is_active_at_boot)
1338 flags[place++] = 'B';
1339 if (pkg_info->pkg_info[i].is_modified)
1340 flags[place++] = 'M';
1341 if (pkg_info->pkg_info[i].is_in_nvm)
1342 flags[place++] = 'N';
1343
1344 ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
1345 i, pkg_info->pkg_info[i].ver.major,
1346 pkg_info->pkg_info[i].ver.minor,
1347 pkg_info->pkg_info[i].ver.update,
1348 pkg_info->pkg_info[i].ver.draft,
1349 pkg_info->pkg_info[i].name, flags);
1350 }
1351
1352 init_pkg_free_alloc:
1353 kfree(pkg_info);
1354
1355 return state;
1356 }
1357
1358
1359
1360
1361
1362
1363
1364
1365
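/**
 * ice_verify_pkg - sanity-check a package file before use
 * @pkg: pointer to the package header
 * @len: size of the entire package file in bytes
 *
 * Verifies the package format version, the segment count, and that every
 * segment offset and segment size fits within @len.
 */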
1366 static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
1367 {
1368 u32 seg_count;
1369 u32 i;
1370
1371 if (len < struct_size(pkg, seg_offset, 1))
1372 return ICE_DDP_PKG_INVALID_FILE;
1373
1374 if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
1375 pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
1376 pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
1377 pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
1378 return ICE_DDP_PKG_INVALID_FILE;
1379
1380
1381 seg_count = le32_to_cpu(pkg->seg_count);
1382 if (seg_count < 1)
1383 return ICE_DDP_PKG_INVALID_FILE;
1384
1385
1386 if (len < struct_size(pkg, seg_offset, seg_count))
1387 return ICE_DDP_PKG_INVALID_FILE;
1388
1389
1390 for (i = 0; i < seg_count; i++) {
1391 u32 off = le32_to_cpu(pkg->seg_offset[i]);
1392 struct ice_generic_seg_hdr *seg;
1393
1394
1395 if (len < off + sizeof(*seg))
1396 return ICE_DDP_PKG_INVALID_FILE;
1397
1398 seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
1399
1400
1401 if (len < off + le32_to_cpu(seg->seg_size))
1402 return ICE_DDP_PKG_INVALID_FILE;
1403 }
1404
1405 return ICE_DDP_PKG_SUCCESS;
1406 }
1407
1408
1409
1410
1411
1412
1413
1414
1415 void ice_free_seg(struct ice_hw *hw)
1416 {
1417 if (hw->pkg_copy) {
1418 devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
1419 hw->pkg_copy = NULL;
1420 hw->pkg_size = 0;
1421 }
1422 hw->seg = NULL;
1423 }
1424
1425
1426
1427
1428
1429 static void ice_init_pkg_regs(struct ice_hw *hw)
1430 {
1431 #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
1432 #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
1433 #define ICE_SW_BLK_IDX 0
1434
1435
1436 wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
1437 wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
1438 }
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449 static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
1450 {
1451 if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
1452 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1453 pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
1454 return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
1455 else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
1456 (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
1457 pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
1458 return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
1459
1460 return ICE_DDP_PKG_SUCCESS;
1461 }
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471 static enum ice_ddp_state
1472 ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
1473 struct ice_seg **seg)
1474 {
1475 struct ice_aqc_get_pkg_info_resp *pkg;
1476 enum ice_ddp_state state;
1477 u16 size;
1478 u32 i;
1479
1480
1481 state = ice_chk_pkg_version(&hw->pkg_ver);
1482 if (state) {
1483 ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
1484 return state;
1485 }
1486
1487
1488 *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
1489 ospkg);
1490 if (!*seg) {
1491 ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
1492 return ICE_DDP_PKG_INVALID_FILE;
1493 }
1494
1495
1496 size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
1497 pkg = kzalloc(size, GFP_KERNEL);
1498 if (!pkg)
1499 return ICE_DDP_PKG_ERR;
1500
1501 if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
1502 state = ICE_DDP_PKG_LOAD_ERROR;
1503 goto fw_ddp_compat_free_alloc;
1504 }
1505
1506 for (i = 0; i < le32_to_cpu(pkg->count); i++) {
1507
1508 if (!pkg->pkg_info[i].is_in_nvm)
1509 continue;
1510 if ((*seg)->hdr.seg_format_ver.major !=
1511 pkg->pkg_info[i].ver.major ||
1512 (*seg)->hdr.seg_format_ver.minor >
1513 pkg->pkg_info[i].ver.minor) {
1514 state = ICE_DDP_PKG_FW_MISMATCH;
1515 ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
1516 }
1517
1518 break;
1519 }
1520 fw_ddp_compat_free_alloc:
1521 kfree(pkg);
1522 return state;
1523 }
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536 static void *
1537 ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
1538 {
1539 struct ice_sw_fv_section *fv_section = section;
1540
1541 if (!section || sect_type != ICE_SID_FLD_VEC_SW)
1542 return NULL;
1543 if (index >= le16_to_cpu(fv_section->count))
1544 return NULL;
1545 if (offset)
1546
1547
1548
1549
1550
1551 *offset = le16_to_cpu(fv_section->base_offset) + index;
1552 return fv_section->fv + index;
1553 }
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563 static int ice_get_prof_index_max(struct ice_hw *hw)
1564 {
1565 u16 prof_index = 0, j, max_prof_index = 0;
1566 struct ice_pkg_enum state;
1567 struct ice_seg *ice_seg;
1568 bool flag = false;
1569 struct ice_fv *fv;
1570 u32 offset;
1571
1572 memset(&state, 0, sizeof(state));
1573
1574 if (!hw->seg)
1575 return -EINVAL;
1576
1577 ice_seg = hw->seg;
1578
1579 do {
1580 fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1581 &offset, ice_sw_fv_handler);
1582 if (!fv)
1583 break;
1584 ice_seg = NULL;
1585
1586
1587
1588
1589 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1590 if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
1591 fv->ew[j].off != ICE_FV_OFFSET_INVAL)
1592 flag = true;
1593 if (flag && prof_index > max_prof_index)
1594 max_prof_index = prof_index;
1595
1596 prof_index++;
1597 flag = false;
1598 } while (fv);
1599
1600 hw->switch_info->max_used_prof_index = max_prof_index;
1601
1602 return 0;
1603 }
1604
1605
1606
1607
1608
1609
1610 static enum ice_ddp_state
1611 ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
1612 {
1613 if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
1614 hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
1615 hw->pkg_ver.update == hw->active_pkg_ver.update &&
1616 hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
1617 !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
1618 if (already_loaded)
1619 return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
1620 else
1621 return ICE_DDP_PKG_SUCCESS;
1622 } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
1623 hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
1624 return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
1625 } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
1626 hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
1627 return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
1628 } else {
1629 return ICE_DDP_PKG_ERR;
1630 }
1631 }
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
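/**
 * ice_init_pkg - initialize the device with the supplied DDP package
 * @hw: pointer to the HW structure
 * @buf: pointer to the package data
 * @len: size of the package data in bytes
 *
 * Verifies, checks compatibility of, and downloads the package, then
 * initializes the package hints, hardware block tables, PTYPE bitmap, and
 * profile bookkeeping when the download (or an already-loaded compatible
 * package) succeeds.
 */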
1658 enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
1659 {
1660 bool already_loaded = false;
1661 enum ice_ddp_state state;
1662 struct ice_pkg_hdr *pkg;
1663 struct ice_seg *seg;
1664
1665 if (!buf || !len)
1666 return ICE_DDP_PKG_ERR;
1667
1668 pkg = (struct ice_pkg_hdr *)buf;
1669 state = ice_verify_pkg(pkg, len);
1670 if (state) {
1671 ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
1672 state);
1673 return state;
1674 }
1675
1676
1677 state = ice_init_pkg_info(hw, pkg);
1678 if (state)
1679 return state;
1680
1681
1682
1683
1684 state = ice_chk_pkg_compat(hw, pkg, &seg);
1685 if (state)
1686 return state;
1687
1688
1689 ice_init_pkg_hints(hw, seg);
1690 state = ice_download_pkg(hw, seg);
1691 if (state == ICE_DDP_PKG_ALREADY_LOADED) {
1692 ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
1693 already_loaded = true;
1694 }
1695
1696
1697
1698
1699 if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
1700 state = ice_get_pkg_info(hw);
1701 if (!state)
1702 state = ice_get_ddp_pkg_state(hw, already_loaded);
1703 }
1704
1705 if (ice_is_init_pkg_successful(state)) {
1706 hw->seg = seg;
1707
1708
1709
1710
1711 ice_init_pkg_regs(hw);
1712 ice_fill_blk_tbls(hw);
1713 ice_fill_hw_ptype(hw);
1714 ice_get_prof_index_max(hw);
1715 } else {
1716 ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
1717 state);
1718 }
1719
1720 return state;
1721 }
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746 enum ice_ddp_state
1747 ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
1748 {
1749 enum ice_ddp_state state;
1750 u8 *buf_copy;
1751
1752 if (!buf || !len)
1753 return ICE_DDP_PKG_ERR;
1754
1755 buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
1756
1757 state = ice_init_pkg(hw, buf_copy, len);
1758 if (!ice_is_init_pkg_successful(state)) {
1759
1760 devm_kfree(ice_hw_to_dev(hw), buf_copy);
1761 } else {
1762
1763 hw->pkg_copy = buf_copy;
1764 hw->pkg_size = len;
1765 }
1766
1767 return state;
1768 }
1769
1770
1771
1772
1773
1774 bool ice_is_init_pkg_successful(enum ice_ddp_state state)
1775 {
1776 switch (state) {
1777 case ICE_DDP_PKG_SUCCESS:
1778 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
1779 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
1780 return true;
1781 default:
1782 return false;
1783 }
1784 }
1785
1786
1787
1788
1789
1790
1791
1792
1793 static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
1794 {
1795 struct ice_buf_build *bld;
1796 struct ice_buf_hdr *buf;
1797
1798 bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
1799 if (!bld)
1800 return NULL;
1801
1802 buf = (struct ice_buf_hdr *)bld;
1803 buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
1804 section_entry));
1805 return bld;
1806 }
1807
1808 static bool ice_is_gtp_u_profile(u16 prof_idx)
1809 {
1810 return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
1811 prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
1812 prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
1813 }
1814
1815 static bool ice_is_gtp_c_profile(u16 prof_idx)
1816 {
1817 switch (prof_idx) {
1818 case ICE_PROFID_IPV4_GTPC_TEID:
1819 case ICE_PROFID_IPV4_GTPC_NO_TEID:
1820 case ICE_PROFID_IPV6_GTPC_TEID:
1821 case ICE_PROFID_IPV6_GTPC_NO_TEID:
1822 return true;
1823 default:
1824 return false;
1825 }
1826 }
1827
1828
1829
1830
1831
1832
1833
1834 static enum ice_prof_type
1835 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
1836 {
1837 u16 i;
1838
1839 if (ice_is_gtp_c_profile(prof_idx))
1840 return ICE_PROF_TUN_GTPC;
1841
1842 if (ice_is_gtp_u_profile(prof_idx))
1843 return ICE_PROF_TUN_GTPU;
1844
1845 for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
1846
1847 if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
1848 fv->ew[i].off == ICE_VNI_OFFSET)
1849 return ICE_PROF_TUN_UDP;
1850
1851
1852 if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
1853 return ICE_PROF_TUN_GRE;
1854 }
1855
1856 return ICE_PROF_NON_TUN;
1857 }
1858
1859
1860
1861
1862
1863
1864
1865 void
1866 ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
1867 unsigned long *bm)
1868 {
1869 struct ice_pkg_enum state;
1870 struct ice_seg *ice_seg;
1871 struct ice_fv *fv;
1872
1873 if (req_profs == ICE_PROF_ALL) {
1874 bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
1875 return;
1876 }
1877
1878 memset(&state, 0, sizeof(state));
1879 bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
1880 ice_seg = hw->seg;
1881 do {
1882 enum ice_prof_type prof_type;
1883 u32 offset;
1884
1885 fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1886 &offset, ice_sw_fv_handler);
1887 ice_seg = NULL;
1888
1889 if (fv) {
1890
1891 prof_type = ice_get_sw_prof_type(hw, fv, offset);
1892
1893 if (req_profs & prof_type)
1894 set_bit((u16)offset, bm);
1895 }
1896 } while (fv);
1897 }
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913 int
1914 ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
1915 unsigned long *bm, struct list_head *fv_list)
1916 {
1917 struct ice_sw_fv_list_entry *fvl;
1918 struct ice_sw_fv_list_entry *tmp;
1919 struct ice_pkg_enum state;
1920 struct ice_seg *ice_seg;
1921 struct ice_fv *fv;
1922 u32 offset;
1923
1924 memset(&state, 0, sizeof(state));
1925
1926 if (!lkups->n_val_words || !hw->seg)
1927 return -EINVAL;
1928
1929 ice_seg = hw->seg;
1930 do {
1931 u16 i;
1932
1933 fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
1934 &offset, ice_sw_fv_handler);
1935 if (!fv)
1936 break;
1937 ice_seg = NULL;
1938
1939
1940
1941
1942 if (!test_bit((u16)offset, bm))
1943 continue;
1944
1945 for (i = 0; i < lkups->n_val_words; i++) {
1946 int j;
1947
1948 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
1949 if (fv->ew[j].prot_id ==
1950 lkups->fv_words[i].prot_id &&
1951 fv->ew[j].off == lkups->fv_words[i].off)
1952 break;
1953 if (j >= hw->blk[ICE_BLK_SW].es.fvw)
1954 break;
1955 if (i + 1 == lkups->n_val_words) {
1956 fvl = devm_kzalloc(ice_hw_to_dev(hw),
1957 sizeof(*fvl), GFP_KERNEL);
1958 if (!fvl)
1959 goto err;
1960 fvl->fv_ptr = fv;
1961 fvl->profile_id = offset;
1962 list_add(&fvl->list_entry, fv_list);
1963 break;
1964 }
1965 }
1966 } while (fv);
1967 if (list_empty(fv_list)) {
1968 dev_warn(ice_hw_to_dev(hw), "Required profiles not found in currently loaded DDP package");
1969 return -EIO;
1970 }
1971
1972 return 0;
1973
1974 err:
1975 list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
1976 list_del(&fvl->list_entry);
1977 devm_kfree(ice_hw_to_dev(hw), fvl);
1978 }
1979
1980 return -ENOMEM;
1981 }
1982
1983
1984
1985
1986
1987 void ice_init_prof_result_bm(struct ice_hw *hw)
1988 {
1989 struct ice_pkg_enum state;
1990 struct ice_seg *ice_seg;
1991 struct ice_fv *fv;
1992
1993 memset(&state, 0, sizeof(state));
1994
1995 if (!hw->seg)
1996 return;
1997
1998 ice_seg = hw->seg;
1999 do {
2000 u32 off;
2001 u16 i;
2002
2003 fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
2004 &off, ice_sw_fv_handler);
2005 ice_seg = NULL;
2006 if (!fv)
2007 break;
2008
2009 bitmap_zero(hw->switch_info->prof_res_bm[off],
2010 ICE_MAX_FV_WORDS);
2011
2012
2013
2014
2015
2016 for (i = 1; i < ICE_MAX_FV_WORDS; i++)
2017 if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
2018 fv->ew[i].off == ICE_FV_OFFSET_INVAL)
2019 set_bit(i, hw->switch_info->prof_res_bm[off]);
2020 } while (fv);
2021 }
2022
2023
2024
2025
2026
2027
2028
2029
2030 void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
2031 {
2032 devm_kfree(ice_hw_to_dev(hw), bld);
2033 }
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
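/**
 * ice_pkg_buf_reserve_section - reserve section table entries in a build buffer
 * @bld: pointer to the buffer build structure
 * @count: number of section table entries to reserve
 *
 * Must be called before any section data is added; reserving entries moves
 * the data end marker past the enlarged section table.
 */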
2048 static int
2049 ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
2050 {
2051 struct ice_buf_hdr *buf;
2052 u16 section_count;
2053 u16 data_end;
2054
2055 if (!bld)
2056 return -EINVAL;
2057
2058 buf = (struct ice_buf_hdr *)&bld->buf;
2059
2060
2061 section_count = le16_to_cpu(buf->section_count);
2062 if (section_count > 0)
2063 return -EIO;
2064
2065 if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
2066 return -EIO;
2067 bld->reserved_section_table_entries += count;
2068
2069 data_end = le16_to_cpu(buf->data_end) +
2070 flex_array_size(buf, section_entry, count);
2071 buf->data_end = cpu_to_le16(data_end);
2072
2073 return 0;
2074 }
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088 static void *
2089 ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
2090 {
2091 struct ice_buf_hdr *buf;
2092 u16 sect_count;
2093 u16 data_end;
2094
2095 if (!bld || !type || !size)
2096 return NULL;
2097
2098 buf = (struct ice_buf_hdr *)&bld->buf;
2099
2100
2101 data_end = le16_to_cpu(buf->data_end);
2102
2103
2104 data_end = ALIGN(data_end, 4);
2105
2106 if ((data_end + size) > ICE_MAX_S_DATA_END)
2107 return NULL;
2108
2109
2110 sect_count = le16_to_cpu(buf->section_count);
2111 if (sect_count < bld->reserved_section_table_entries) {
2112 void *section_ptr = ((u8 *)buf) + data_end;
2113
2114 buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
2115 buf->section_entry[sect_count].size = cpu_to_le16(size);
2116 buf->section_entry[sect_count].type = cpu_to_le32(type);
2117
2118 data_end += size;
2119 buf->data_end = cpu_to_le16(data_end);
2120
2121 buf->section_count = cpu_to_le16(sect_count + 1);
2122 return section_ptr;
2123 }
2124
2125
2126 return NULL;
2127 }
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139 struct ice_buf_build *
2140 ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
2141 void **section)
2142 {
2143 struct ice_buf_build *buf;
2144
2145 if (!section)
2146 return NULL;
2147
2148 buf = ice_pkg_buf_alloc(hw);
2149 if (!buf)
2150 return NULL;
2151
2152 if (ice_pkg_buf_reserve_section(buf, 1))
2153 goto ice_pkg_buf_alloc_single_section_err;
2154
2155 *section = ice_pkg_buf_alloc_section(buf, type, size);
2156 if (!*section)
2157 goto ice_pkg_buf_alloc_single_section_err;
2158
2159 return buf;
2160
2161 ice_pkg_buf_alloc_single_section_err:
2162 ice_pkg_buf_free(hw, buf);
2163 return NULL;
2164 }
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176 static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
2177 {
2178 struct ice_buf_hdr *buf;
2179
2180 if (!bld)
2181 return 0;
2182
2183 buf = (struct ice_buf_hdr *)&bld->buf;
2184 return le16_to_cpu(buf->section_count);
2185 }
2186
2187
2188
2189
2190
2191
2192
2193 struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
2194 {
2195 if (!bld)
2196 return NULL;
2197
2198 return &bld->buf;
2199 }
2200
2201
2202
2203
2204
2205
2206
2207 bool
2208 ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
2209 enum ice_tunnel_type type)
2210 {
2211 bool res = false;
2212 u16 i;
2213
2214 mutex_lock(&hw->tnl_lock);
2215
2216 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2217 if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
2218 (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
2219 *port = hw->tnl.tbl[i].port;
2220 res = true;
2221 break;
2222 }
2223
2224 mutex_unlock(&hw->tnl_lock);
2225
2226 return res;
2227 }
2228
2229
2230
2231
2232
2233
2234 static int
2235 ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
2236 {
2237 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2238 int status = -ENOSPC;
2239 struct ice_buf_build *bld;
2240 u8 val, dc, nm;
2241
2242 bld = ice_pkg_buf_alloc(hw);
2243 if (!bld)
2244 return -ENOMEM;
2245
2246
2247 if (ice_pkg_buf_reserve_section(bld, 2))
2248 goto ice_upd_dvm_boost_entry_err;
2249
2250 sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2251 struct_size(sect_rx, tcam, 1));
2252 if (!sect_rx)
2253 goto ice_upd_dvm_boost_entry_err;
2254 sect_rx->count = cpu_to_le16(1);
2255
2256 sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2257 struct_size(sect_tx, tcam, 1));
2258 if (!sect_tx)
2259 goto ice_upd_dvm_boost_entry_err;
2260 sect_tx->count = cpu_to_le16(1);
2261
2262
2263 memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam));
2264
2265
2266 if (entry->enable) {
2267
2268 val = 0x00;
2269 dc = 0xFF;
2270 nm = 0x00;
2271 } else {
2272
2273 val = 0x00;
2274 dc = 0xF7;
2275 nm = 0x08;
2276 }
2277
2278 ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2279 &val, NULL, &dc, &nm, 0, sizeof(u8));
2280
2281
2282 memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
2283
2284 status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
2285
2286 ice_upd_dvm_boost_entry_err:
2287 ice_pkg_buf_free(hw, bld);
2288
2289 return status;
2290 }
2291
2292
2293
2294
2295
2296
2297
2298 int ice_set_dvm_boost_entries(struct ice_hw *hw)
2299 {
2300 int status;
2301 u16 i;
2302
2303 for (i = 0; i < hw->dvm_upd.count; i++) {
2304 status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
2305 if (status)
2306 return status;
2307 }
2308
2309 return 0;
2310 }
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322 static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
2323 u16 idx)
2324 {
2325 u16 i;
2326
2327 for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
2328 if (hw->tnl.tbl[i].valid &&
2329 hw->tnl.tbl[i].type == type &&
2330 idx-- == 0)
2331 return i;
2332
2333 WARN_ON_ONCE(1);
2334 return 0;
2335 }
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
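/**
 * ice_create_tunnel - program a UDP tunnel port into the boost TCAM
 * @hw: pointer to the HW structure
 * @index: index of the tunnel table entry to use
 * @type: tunnel type (VXLAN or GENEVE)
 * @port: UDP port for the new tunnel
 *
 * Copies the package's boost TCAM entry into an update buffer, overwrites the
 * destination-port key, and issues the package update for both the RX and TX
 * parser sections.
 */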
2348 static int
2349 ice_create_tunnel(struct ice_hw *hw, u16 index,
2350 enum ice_tunnel_type type, u16 port)
2351 {
2352 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2353 struct ice_buf_build *bld;
2354 int status = -ENOSPC;
2355
2356 mutex_lock(&hw->tnl_lock);
2357
2358 bld = ice_pkg_buf_alloc(hw);
2359 if (!bld) {
2360 status = -ENOMEM;
2361 goto ice_create_tunnel_end;
2362 }
2363
2364
2365 if (ice_pkg_buf_reserve_section(bld, 2))
2366 goto ice_create_tunnel_err;
2367
2368 sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2369 struct_size(sect_rx, tcam, 1));
2370 if (!sect_rx)
2371 goto ice_create_tunnel_err;
2372 sect_rx->count = cpu_to_le16(1);
2373
2374 sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2375 struct_size(sect_tx, tcam, 1));
2376 if (!sect_tx)
2377 goto ice_create_tunnel_err;
2378 sect_tx->count = cpu_to_le16(1);
2379
2380
2381 memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2382 sizeof(*sect_rx->tcam));
2383
2384
2385
2386
2387 ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
2388 (u8 *)&port, NULL, NULL, NULL,
2389 (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
2390 sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
2391
2392
2393 memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
2394
2395 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2396 if (!status)
2397 hw->tnl.tbl[index].port = port;
2398
2399 ice_create_tunnel_err:
2400 ice_pkg_buf_free(hw, bld);
2401
2402 ice_create_tunnel_end:
2403 mutex_unlock(&hw->tnl_lock);
2404
2405 return status;
2406 }
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419 static int
2420 ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
2421 u16 port)
2422 {
2423 struct ice_boost_tcam_section *sect_rx, *sect_tx;
2424 struct ice_buf_build *bld;
2425 int status = -ENOSPC;
2426
2427 mutex_lock(&hw->tnl_lock);
2428
2429 if (WARN_ON(!hw->tnl.tbl[index].valid ||
2430 hw->tnl.tbl[index].type != type ||
2431 hw->tnl.tbl[index].port != port)) {
2432 status = -EIO;
2433 goto ice_destroy_tunnel_end;
2434 }
2435
2436 bld = ice_pkg_buf_alloc(hw);
2437 if (!bld) {
2438 status = -ENOMEM;
2439 goto ice_destroy_tunnel_end;
2440 }
2441
2442
2443 if (ice_pkg_buf_reserve_section(bld, 2))
2444 goto ice_destroy_tunnel_err;
2445
2446 sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
2447 struct_size(sect_rx, tcam, 1));
2448 if (!sect_rx)
2449 goto ice_destroy_tunnel_err;
2450 sect_rx->count = cpu_to_le16(1);
2451
2452 sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
2453 struct_size(sect_tx, tcam, 1));
2454 if (!sect_tx)
2455 goto ice_destroy_tunnel_err;
2456 sect_tx->count = cpu_to_le16(1);
2457
2458
2459
2460
2461 memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
2462 sizeof(*sect_rx->tcam));
2463 memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
2464 sizeof(*sect_tx->tcam));
2465
2466 status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
2467 if (!status)
2468 hw->tnl.tbl[index].port = 0;
2469
2470 ice_destroy_tunnel_err:
2471 ice_pkg_buf_free(hw, bld);
2472
2473 ice_destroy_tunnel_end:
2474 mutex_unlock(&hw->tnl_lock);
2475
2476 return status;
2477 }
2478
2479 int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
2480 unsigned int idx, struct udp_tunnel_info *ti)
2481 {
2482 struct ice_netdev_priv *np = netdev_priv(netdev);
2483 struct ice_vsi *vsi = np->vsi;
2484 struct ice_pf *pf = vsi->back;
2485 enum ice_tunnel_type tnl_type;
2486 int status;
2487 u16 index;
2488
2489 tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
2490 index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
2491
2492 status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
2493 if (status) {
2494 netdev_err(netdev, "Error adding UDP tunnel - %d\n",
2495 status);
2496 return -EIO;
2497 }
2498
2499 udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
2500 return 0;
2501 }
2502
2503 int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
2504 unsigned int idx, struct udp_tunnel_info *ti)
2505 {
2506 struct ice_netdev_priv *np = netdev_priv(netdev);
2507 struct ice_vsi *vsi = np->vsi;
2508 struct ice_pf *pf = vsi->back;
2509 enum ice_tunnel_type tnl_type;
2510 int status;
2511
2512 tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
2513
2514 status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
2515 ntohs(ti->port));
2516 if (status) {
2517 netdev_err(netdev, "Error removing UDP tunnel - %d\n",
2518 status);
2519 return -EIO;
2520 }
2521
2522 return 0;
2523 }
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534 int
2535 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
2536 u8 *prot, u16 *off)
2537 {
2538 struct ice_fv_word *fv_ext;
2539
2540 if (prof >= hw->blk[blk].es.count)
2541 return -EINVAL;
2542
2543 if (fv_idx >= hw->blk[blk].es.fvw)
2544 return -EINVAL;
2545
2546 fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
2547
2548 *prot = fv_ext[fv_idx].prot_id;
2549 *off = fv_ext[fv_idx].off;
2550
2551 return 0;
2552 }
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567 static int
2568 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
2569 {
2570 if (ptype >= ICE_XLT1_CNT || !ptg)
2571 return -EINVAL;
2572
2573 *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
2574 return 0;
2575 }
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
2587 {
2588 hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
2589 }
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601 static int
2602 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2603 {
2604 struct ice_ptg_ptype **ch;
2605 struct ice_ptg_ptype *p;
2606
2607 if (ptype > ICE_XLT1_CNT - 1)
2608 return -EINVAL;
2609
2610 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
2611 return -ENOENT;
2612
2613
2614 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
2615 return -EIO;
2616
2617
2618 p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2619 ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2620 while (p) {
2621 if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
2622 *ch = p->next_ptype;
2623 break;
2624 }
2625
2626 ch = &p->next_ptype;
2627 p = p->next_ptype;
2628 }
2629
2630 hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
2631 hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
2632
2633 return 0;
2634 }
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648 static int
2649 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
2650 {
2651 u8 original_ptg;
2652 int status;
2653
2654 if (ptype > ICE_XLT1_CNT - 1)
2655 return -EINVAL;
2656
2657 if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
2658 return -ENOENT;
2659
2660 status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
2661 if (status)
2662 return status;
2663
2664
2665 if (original_ptg == ptg)
2666 return 0;
2667
2668
2669 if (original_ptg != ICE_DEFAULT_PTG)
2670 ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
2671
2672
2673 if (ptg == ICE_DEFAULT_PTG)
2674 return 0;
2675
2676
2677 hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
2678 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
2679 hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
2680 &hw->blk[blk].xlt1.ptypes[ptype];
2681
2682 hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
2683 hw->blk[blk].xlt1.t[ptype] = ptg;
2684
2685 return 0;
2686 }
2687
2688
2689 struct ice_blk_size_details {
2690 u16 xlt1;
2691 u16 xlt2;
2692 u16 prof_tcam;
2693 u16 prof_id;
2694 u8 prof_cdid_bits;
2695 u16 prof_redir;
2696 u16 es;
2697 u16 fvw;
2698 u8 overwrite;
2699 u8 reverse;
2700 };
2701
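/* Per-block table sizes. Columns: XLT1 entries, XLT2 entries, profile TCAM
 * entries, profile IDs, CDID bits, profile redirection entries, extraction
 * sequence entries, field vector width, overwrite allowed, reverse field
 * vector order. Rows follow enum ice_block: SWITCH, ACL, FD, RSS, PE.
 */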
2702 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717 { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
2718 false, false },
2719 { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
2720 false, false },
2721 { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2722 false, true },
2723 { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
2724 true, true },
2725 { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
2726 false, false },
2727 };
2728
2729 enum ice_sid_all {
2730 ICE_SID_XLT1_OFF = 0,
2731 ICE_SID_XLT2_OFF,
2732 ICE_SID_PR_OFF,
2733 ICE_SID_PR_REDIR_OFF,
2734 ICE_SID_ES_OFF,
2735 ICE_SID_OFF_COUNT,
2736 };
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
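/**
 * ice_match_prop_lst - compare two lists of VSIG profile entries
 * @list1: first list of profiles
 * @list2: second list of profiles
 *
 * Returns true when both lists have the same, non-zero number of entries and
 * the profile cookies match in order.
 */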
2747 static bool
2748 ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
2749 {
2750 struct ice_vsig_prof *tmp1;
2751 struct ice_vsig_prof *tmp2;
2752 u16 chk_count = 0;
2753 u16 count = 0;
2754
2755
2756 list_for_each_entry(tmp1, list1, list)
2757 count++;
2758 list_for_each_entry(tmp2, list2, list)
2759 chk_count++;
2760
2761 if (!count || count != chk_count)
2762 return false;
2763
2764 tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
2765 tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
2766
2767
2768
2769
2770 while (count--) {
2771 if (tmp2->profile_cookie != tmp1->profile_cookie)
2772 return false;
2773
2774 tmp1 = list_next_entry(tmp1, list);
2775 tmp2 = list_next_entry(tmp2, list);
2776 }
2777
2778 return true;
2779 }
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793 static int
2794 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
2795 {
2796 if (!vsig || vsi >= ICE_MAX_VSI)
2797 return -EINVAL;
2798
2799
2800
2801
2802
2803 *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
2804
2805 return 0;
2806 }
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2817 {
2818 u16 idx = vsig & ICE_VSIG_IDX_M;
2819
2820 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
2821 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2822 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
2823 }
2824
2825 return ICE_VSIG_VALUE(idx, hw->pf_id);
2826 }
2827
2828
2829
2830
2831
2832
2833
2834
2835
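/**
 * ice_vsig_alloc - allocate the first available VSIG
 * @hw: pointer to the HW structure
 * @blk: HW block
 *
 * Scans the VSIG table for an unused entry (index 0 is reserved as the
 * default VSIG) and allocates it; returns ICE_DEFAULT_VSIG if none is free.
 */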
2836 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
2837 {
2838 u16 i;
2839
2840 for (i = 1; i < ICE_MAX_VSIGS; i++)
2841 if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2842 return ice_vsig_alloc_val(hw, blk, i);
2843
2844 return ICE_DEFAULT_VSIG;
2845 }
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
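/**
 * ice_find_dup_props_vsig - find a VSIG with a matching characteristic list
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @chs: characteristic list to match against
 * @vsig: returns the matching VSIG, if found
 *
 * Returns 0 and the VSIG whose property list matches @chs, or -ENOENT if no
 * in-use VSIG has an identical list.
 */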
2862 static int
2863 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
2864 struct list_head *chs, u16 *vsig)
2865 {
2866 struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
2867 u16 i;
2868
2869 for (i = 0; i < xlt2->count; i++)
2870 if (xlt2->vsig_tbl[i].in_use &&
2871 ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
2872 *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
2873 return 0;
2874 }
2875
2876 return -ENOENT;
2877 }
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
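/**
 * ice_vsig_free - free a VSI group
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @vsig: VSIG to remove
 *
 * Marks the group unused, moves every member VSI back to the default VSIG
 * (flagging it as changed), and frees the group's characteristic list.
 */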
2888 static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
2889 {
2890 struct ice_vsig_prof *dtmp, *del;
2891 struct ice_vsig_vsi *vsi_cur;
2892 u16 idx;
2893
2894 idx = vsig & ICE_VSIG_IDX_M;
2895 if (idx >= ICE_MAX_VSIGS)
2896 return -EINVAL;
2897
2898 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2899 return -ENOENT;
2900
2901 hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
2902
2903 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2904
2905
2906
2907 if (vsi_cur) {
2908
2909 do {
2910 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
2911
2912 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2913 vsi_cur->changed = 1;
2914 vsi_cur->next_vsi = NULL;
2915 vsi_cur = tmp;
2916 } while (vsi_cur);
2917
2918
2919 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
2920 }
2921
2922
2923 list_for_each_entry_safe(del, dtmp,
2924 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2925 list) {
2926 list_del(&del->list);
2927 devm_kfree(ice_hw_to_dev(hw), del);
2928 }
2929
2930
2931
2932
2933 INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
2934
2935 return 0;
2936 }
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
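/**
 * ice_vsig_remove_vsi - remove a VSI from a VSIG
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @vsi: VSI to remove
 * @vsig: VSIG the VSI currently belongs to
 *
 * Unlinks the VSI from the group's VSI list and points it back at the
 * default VSIG. Removing from the default VSIG is a no-op.
 */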
2948 static int
2949 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
2950 {
2951 struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
2952 u16 idx;
2953
2954 idx = vsig & ICE_VSIG_IDX_M;
2955
2956 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
2957 return -EINVAL;
2958
2959 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2960 return -ENOENT;
2961
2962
2963 if (idx == ICE_DEFAULT_VSIG)
2964 return 0;
2965
2966 vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2967 if (!(*vsi_head))
2968 return -EIO;
2969
2970 vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
2971 vsi_cur = (*vsi_head);
2972
2973
2974 while (vsi_cur) {
2975 if (vsi_tgt == vsi_cur) {
2976 (*vsi_head) = vsi_cur->next_vsi;
2977 break;
2978 }
2979 vsi_head = &vsi_cur->next_vsi;
2980 vsi_cur = vsi_cur->next_vsi;
2981 }
2982
2983
2984 if (!vsi_cur)
2985 return -ENOENT;
2986
2987 vsi_cur->vsig = ICE_DEFAULT_VSIG;
2988 vsi_cur->changed = 1;
2989 vsi_cur->next_vsi = NULL;
2990
2991 return 0;
2992 }
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
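/**
 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @vsi: VSI to move
 * @vsig: destination VSI group
 *
 * Removes the VSI from its current group if necessary, then links it into
 * the destination group and records the new mapping in the XLT2 table. A
 * destination of the default VSIG only removes the VSI from its old group.
 */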
3006 static int
3007 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
3008 {
3009 struct ice_vsig_vsi *tmp;
3010 u16 orig_vsig, idx;
3011 int status;
3012
3013 idx = vsig & ICE_VSIG_IDX_M;
3014
3015 if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
3016 return -EINVAL;
3017
3018
3019
3020
3021 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
3022 vsig != ICE_DEFAULT_VSIG)
3023 return -ENOENT;
3024
3025 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3026 if (status)
3027 return status;
3028
3029
3030 if (orig_vsig == vsig)
3031 return 0;
3032
3033 if (orig_vsig != ICE_DEFAULT_VSIG) {
3034
3035 status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
3036 if (status)
3037 return status;
3038 }
3039
3040 if (idx == ICE_DEFAULT_VSIG)
3041 return 0;
3042
3043
3044 hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
3045 hw->blk[blk].xlt2.vsis[vsi].changed = 1;
3046
3047
3048 tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3049 hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
3050 &hw->blk[blk].xlt2.vsis[vsi];
3051 hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
3052 hw->blk[blk].xlt2.t[vsi] = vsig;
3053
3054 return 0;
3055 }
3056
3057
3058
3059
3060
3061
3062
3063
3064
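/**
 * ice_prof_has_mask_idx - check a profile's mask at a given index
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @prof: profile to check
 * @idx: profile index to check
 * @mask: mask to match
 *
 * A mask of 0 or 0xffff means no mask is expected at this index; otherwise
 * the profile must have an in-use mask entry for @idx with exactly the
 * requested mask value.
 */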
3065 static bool
3066 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
3067 u16 mask)
3068 {
3069 bool expect_no_mask = false;
3070 bool found = false;
3071 bool match = false;
3072 u16 i;
3073
3074
3075 if (mask == 0 || mask == 0xffff)
3076 expect_no_mask = true;
3077
3078
3079 for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
3080 hw->blk[blk].masks.count; i++)
3081 if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
3082 if (hw->blk[blk].masks.masks[i].in_use &&
3083 hw->blk[blk].masks.masks[i].idx == idx) {
3084 found = true;
3085 if (hw->blk[blk].masks.masks[i].mask == mask)
3086 match = true;
3087 break;
3088 }
3089
3090 if (expect_no_mask) {
3091 if (found)
3092 return false;
3093 } else {
3094 if (!match)
3095 return false;
3096 }
3097
3098 return true;
3099 }
3100
3101
3102
3103
3104
3105
3106
3107
3108 static bool
3109 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
3110 {
3111 u16 i;
3112
3113
3114 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3115 if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
3116 return false;
3117
3118 return true;
3119 }
3120
3121
3122
3123
3124
3125
3126
3127
3128
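/**
 * ice_find_prof_id_with_mask - find a profile matching a field vector and masks
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @fv: field vector to search for
 * @masks: masks for the field vector
 * @prof_id: receives the matching profile ID, if found
 *
 * Searches the existing field vectors for one with identical contents and
 * masks. Flow Director profiles are never shared, so the search always
 * returns -ENOENT for ICE_BLK_FD and the caller allocates a fresh profile.
 */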
3129 static int
3130 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
3131 struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
3132 {
3133 struct ice_es *es = &hw->blk[blk].es;
3134 u8 i;
3135
3136
3137
3138
3139 if (blk == ICE_BLK_FD)
3140 return -ENOENT;
3141
3142 for (i = 0; i < (u8)es->count; i++) {
3143 u16 off = i * es->fvw;
3144
3145 if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
3146 continue;
3147
3148
3149 if (masks && !ice_prof_has_mask(hw, blk, i, masks))
3150 continue;
3151
3152 *prof_id = i;
3153 return 0;
3154 }
3155
3156 return -ENOENT;
3157 }
3158
3159
3160
3161
3162
3163
3164 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3165 {
3166 switch (blk) {
3167 case ICE_BLK_FD:
3168 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
3169 break;
3170 case ICE_BLK_RSS:
3171 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
3172 break;
3173 default:
3174 return false;
3175 }
3176 return true;
3177 }
3178
3179
3180
3181
3182
3183
3184 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
3185 {
3186 switch (blk) {
3187 case ICE_BLK_FD:
3188 *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
3189 break;
3190 case ICE_BLK_RSS:
3191 *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
3192 break;
3193 default:
3194 return false;
3195 }
3196 return true;
3197 }
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
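/**
 * ice_alloc_tcam_ent - allocate a hardware TCAM entry
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @btm: true to allocate from the bottom of the resource, false from the top
 * @tcam_idx: pointer to variable to receive the TCAM entry index
 *
 * The allocation goes through the firmware resource manager via
 * ice_alloc_hw_res().
 */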
3209 static int
3210 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
3211 u16 *tcam_idx)
3212 {
3213 u16 res_type;
3214
3215 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3216 return -EINVAL;
3217
3218 return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
3219 }
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229 static int
3230 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
3231 {
3232 u16 res_type;
3233
3234 if (!ice_tcam_ent_rsrc_type(blk, &res_type))
3235 return -EINVAL;
3236
3237 return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
3238 }
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
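/**
 * ice_alloc_prof_id - allocate a profile ID
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @prof_id: pointer to variable to receive the profile ID
 *
 * Allocates a new profile ID through the firmware resource manager; only
 * the FD and RSS blocks have allocatable profile IDs.
 */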
3249 static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
3250 {
3251 u16 res_type;
3252 u16 get_prof;
3253 int status;
3254
3255 if (!ice_prof_id_rsrc_type(blk, &res_type))
3256 return -EINVAL;
3257
3258 status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
3259 if (!status)
3260 *prof_id = (u8)get_prof;
3261
3262 return status;
3263 }
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273 static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3274 {
3275 u16 tmp_prof_id = (u16)prof_id;
3276 u16 res_type;
3277
3278 if (!ice_prof_id_rsrc_type(blk, &res_type))
3279 return -EINVAL;
3280
3281 return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
3282 }
3283
3284
3285
3286
3287
3288
3289
3290 static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3291 {
3292 if (prof_id > hw->blk[blk].es.count)
3293 return -EINVAL;
3294
3295 hw->blk[blk].es.ref_count[prof_id]++;
3296
3297 return 0;
3298 }
3299
3300
3301
3302
3303
3304
3305
3306
3307
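/**
 * ice_write_prof_mask_reg - write a profile mask register
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @mask_idx: mask register index to program
 * @idx: field vector index the mask applies to
 * @mask: the 16-bit mask value
 *
 * Programs GLQF_HMASK (RSS) or GLQF_FDMASK (FD); other blocks have no
 * profile masks.
 */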
3308 static void
3309 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
3310 u16 idx, u16 mask)
3311 {
3312 u32 offset;
3313 u32 val;
3314
3315 switch (blk) {
3316 case ICE_BLK_RSS:
3317 offset = GLQF_HMASK(mask_idx);
3318 val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
3319 val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
3320 break;
3321 case ICE_BLK_FD:
3322 offset = GLQF_FDMASK(mask_idx);
3323 val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
3324 val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
3325 break;
3326 default:
3327 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3328 blk);
3329 return;
3330 }
3331
3332 wr32(hw, offset, val);
3333 ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
3334 blk, idx, offset, val);
3335 }
3336
3337
3338
3339
3340
3341
3342
3343
3344 static void
3345 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
3346 u16 prof_id, u32 enable_mask)
3347 {
3348 u32 offset;
3349
3350 switch (blk) {
3351 case ICE_BLK_RSS:
3352 offset = GLQF_HMASK_SEL(prof_id);
3353 break;
3354 case ICE_BLK_FD:
3355 offset = GLQF_FDMASK_SEL(prof_id);
3356 break;
3357 default:
3358 ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
3359 blk);
3360 return;
3361 }
3362
3363 wr32(hw, offset, enable_mask);
3364 ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
3365 blk, prof_id, offset, enable_mask);
3366 }
3367
3368
3369
3370
3371
3372
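/**
 * ice_init_prof_masks - initialize this PF's profile mask range for a block
 * @hw: pointer to the HW structure
 * @blk: HW block
 *
 * Divides the available mask registers evenly among the PFs and clears the
 * registers in this PF's share.
 */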
3373 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
3374 {
3375 u16 per_pf;
3376 u16 i;
3377
3378 mutex_init(&hw->blk[blk].masks.lock);
3379
3380 per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
3381
3382 hw->blk[blk].masks.count = per_pf;
3383 hw->blk[blk].masks.first = hw->pf_id * per_pf;
3384
3385 memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
3386
3387 for (i = hw->blk[blk].masks.first;
3388 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3389 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3390 }
3391
3392
3393
3394
3395
3396 static void ice_init_all_prof_masks(struct ice_hw *hw)
3397 {
3398 ice_init_prof_masks(hw, ICE_BLK_RSS);
3399 ice_init_prof_masks(hw, ICE_BLK_FD);
3400 }
3401
3402
3403
3404
3405
3406
3407
3408
3409
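/**
 * ice_alloc_prof_mask - allocate or reuse a profile mask
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @idx: field vector index the mask applies to
 * @mask: the 16-bit mask value
 * @mask_idx: receives the index of the allocated mask register
 *
 * If an identical in-use mask already exists its reference count is bumped;
 * otherwise the first unused register in this PF's range is programmed.
 * Returns -ENOSPC when no register is available.
 */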
3410 static int
3411 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
3412 u16 *mask_idx)
3413 {
3414 bool found_unused = false, found_copy = false;
3415 u16 unused_idx = 0, copy_idx = 0;
3416 int status = -ENOSPC;
3417 u16 i;
3418
3419 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3420 return -EINVAL;
3421
3422 mutex_lock(&hw->blk[blk].masks.lock);
3423
3424 for (i = hw->blk[blk].masks.first;
3425 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
3426 if (hw->blk[blk].masks.masks[i].in_use) {
3427
3428
3429
3430 if (hw->blk[blk].masks.masks[i].mask == mask &&
3431 hw->blk[blk].masks.masks[i].idx == idx) {
3432 found_copy = true;
3433 copy_idx = i;
3434 break;
3435 }
3436 } else {
3437
3438
3439
3440 if (!found_unused) {
3441 found_unused = true;
3442 unused_idx = i;
3443 }
3444 }
3445
3446 if (found_copy)
3447 i = copy_idx;
3448 else if (found_unused)
3449 i = unused_idx;
3450 else
3451 goto err_ice_alloc_prof_mask;
3452
3453
3454 if (found_unused) {
3455 hw->blk[blk].masks.masks[i].in_use = true;
3456 hw->blk[blk].masks.masks[i].mask = mask;
3457 hw->blk[blk].masks.masks[i].idx = idx;
3458 hw->blk[blk].masks.masks[i].ref = 0;
3459 ice_write_prof_mask_reg(hw, blk, i, idx, mask);
3460 }
3461
3462 hw->blk[blk].masks.masks[i].ref++;
3463 *mask_idx = i;
3464 status = 0;
3465
3466 err_ice_alloc_prof_mask:
3467 mutex_unlock(&hw->blk[blk].masks.lock);
3468
3469 return status;
3470 }
3471
3472
3473
3474
3475
3476
3477
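/**
 * ice_free_prof_mask - release a reference to a profile mask
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @mask_idx: index of the mask register to release
 *
 * Drops one reference; when the last reference goes away the register is
 * cleared and marked unused.
 */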
3478 static int
3479 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
3480 {
3481 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3482 return -EINVAL;
3483
3484 if (!(mask_idx >= hw->blk[blk].masks.first &&
3485 mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
3486 return -ENOENT;
3487
3488 mutex_lock(&hw->blk[blk].masks.lock);
3489
3490 if (!hw->blk[blk].masks.masks[mask_idx].in_use)
3491 goto exit_ice_free_prof_mask;
3492
3493 if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
3494 hw->blk[blk].masks.masks[mask_idx].ref--;
3495 goto exit_ice_free_prof_mask;
3496 }
3497
3498
3499 hw->blk[blk].masks.masks[mask_idx].in_use = false;
3500 hw->blk[blk].masks.masks[mask_idx].mask = 0;
3501 hw->blk[blk].masks.masks[mask_idx].idx = 0;
3502
3503
3504 ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
3505 mask_idx);
3506 ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
3507
3508 exit_ice_free_prof_mask:
3509 mutex_unlock(&hw->blk[blk].masks.lock);
3510
3511 return 0;
3512 }
3513
3514
3515
3516
3517
3518
3519
3520 static int
3521 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
3522 {
3523 u32 mask_bm;
3524 u16 i;
3525
3526 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3527 return -EINVAL;
3528
3529 mask_bm = hw->blk[blk].es.mask_ena[prof_id];
3530 for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
3531 if (mask_bm & BIT(i))
3532 ice_free_prof_mask(hw, blk, i);
3533
3534 return 0;
3535 }
3536
3537
3538
3539
3540
3541
3542
3543
3544 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
3545 {
3546 u16 i;
3547
3548 mutex_lock(&hw->blk[blk].masks.lock);
3549
3550 for (i = hw->blk[blk].masks.first;
3551 i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
3552 ice_write_prof_mask_reg(hw, blk, i, 0, 0);
3553
3554 hw->blk[blk].masks.masks[i].in_use = false;
3555 hw->blk[blk].masks.masks[i].idx = 0;
3556 hw->blk[blk].masks.masks[i].mask = 0;
3557 }
3558
3559 mutex_unlock(&hw->blk[blk].masks.lock);
3560 mutex_destroy(&hw->blk[blk].masks.lock);
3561 }
3562
3563
3564
3565
3566
3567
3568
3569 static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
3570 {
3571 ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
3572 ice_shutdown_prof_masks(hw, ICE_BLK_FD);
3573 }
3574
3575
3576
3577
3578
3579
3580
3581
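/**
 * ice_update_prof_masking - set up masking for a profile
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @prof_id: profile ID to configure
 * @masks: per-word masks for the profile's field vector
 *
 * Allocates a mask register for every non-trivial mask (anything other than
 * 0 or 0xFFFF), writes the resulting enable bitmap to hardware, and records
 * it in the shadow state. On failure all masks allocated so far are freed.
 */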
3582 static int
3583 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
3584 u16 *masks)
3585 {
3586 bool err = false;
3587 u32 ena_mask = 0;
3588 u16 idx;
3589 u16 i;
3590
3591
3592 if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
3593 return 0;
3594
3595 for (i = 0; i < hw->blk[blk].es.fvw; i++)
3596 if (masks[i] && masks[i] != 0xFFFF) {
3597 if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
3598 ena_mask |= BIT(idx);
3599 } else {
3600
3601 err = true;
3602 break;
3603 }
3604 }
3605
3606 if (err) {
3607
3608 for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
3609 if (ena_mask & BIT(i))
3610 ice_free_prof_mask(hw, blk, i);
3611
3612 return -EIO;
3613 }
3614
3615
3616 ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
3617
3618
3619 hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
3620
3621 return 0;
3622 }
3623
3624
3625
3626
3627
3628
3629
3630
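/**
 * ice_write_es - write an extraction sequence to the shadow table
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @prof_id: profile ID to write
 * @fv: field vector to copy, or NULL to zero the entry
 *
 * A NULL @fv clears the entry and marks the profile as not written to HW.
 */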
3631 static void
3632 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
3633 struct ice_fv_word *fv)
3634 {
3635 u16 off;
3636
3637 off = prof_id * hw->blk[blk].es.fvw;
3638 if (!fv) {
3639 memset(&hw->blk[blk].es.t[off], 0,
3640 hw->blk[blk].es.fvw * sizeof(*fv));
3641 hw->blk[blk].es.written[prof_id] = false;
3642 } else {
3643 memcpy(&hw->blk[blk].es.t[off], fv,
3644 hw->blk[blk].es.fvw * sizeof(*fv));
3645 }
3646 }
3647
3648
3649
3650
3651
3652
3653
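/**
 * ice_prof_dec_ref - decrement a profile's reference count
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @prof_id: profile ID
 *
 * When the count reaches zero the extraction sequence is cleared, the
 * profile's masks are released, and the profile ID itself is freed.
 */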
3654 static int
3655 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
3656 {
3657 if (prof_id > hw->blk[blk].es.count)
3658 return -EINVAL;
3659
3660 if (hw->blk[blk].es.ref_count[prof_id] > 0) {
3661 if (!--hw->blk[blk].es.ref_count[prof_id]) {
3662 ice_write_es(hw, blk, prof_id, NULL);
3663 ice_free_prof_masks(hw, blk, prof_id);
3664 return ice_free_prof_id(hw, blk, prof_id);
3665 }
3666 }
3667
3668 return 0;
3669 }
3670
3671
3672 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
3673
3674 { ICE_SID_XLT1_SW,
3675 ICE_SID_XLT2_SW,
3676 ICE_SID_PROFID_TCAM_SW,
3677 ICE_SID_PROFID_REDIR_SW,
3678 ICE_SID_FLD_VEC_SW
3679 },
3680
3681
3682 { ICE_SID_XLT1_ACL,
3683 ICE_SID_XLT2_ACL,
3684 ICE_SID_PROFID_TCAM_ACL,
3685 ICE_SID_PROFID_REDIR_ACL,
3686 ICE_SID_FLD_VEC_ACL
3687 },
3688
3689
3690 { ICE_SID_XLT1_FD,
3691 ICE_SID_XLT2_FD,
3692 ICE_SID_PROFID_TCAM_FD,
3693 ICE_SID_PROFID_REDIR_FD,
3694 ICE_SID_FLD_VEC_FD
3695 },
3696
3697
3698 { ICE_SID_XLT1_RSS,
3699 ICE_SID_XLT2_RSS,
3700 ICE_SID_PROFID_TCAM_RSS,
3701 ICE_SID_PROFID_REDIR_RSS,
3702 ICE_SID_FLD_VEC_RSS
3703 },
3704
3705
3706 { ICE_SID_XLT1_PE,
3707 ICE_SID_XLT2_PE,
3708 ICE_SID_PROFID_TCAM_PE,
3709 ICE_SID_PROFID_REDIR_PE,
3710 ICE_SID_FLD_VEC_PE
3711 }
3712 };
3713
3714
3715
3716
3717
3718
3719 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
3720 {
3721 u16 pt;
3722
3723 for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
3724 u8 ptg;
3725
3726 ptg = hw->blk[blk].xlt1.t[pt];
3727 if (ptg != ICE_DEFAULT_PTG) {
3728 ice_ptg_alloc_val(hw, blk, ptg);
3729 ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
3730 }
3731 }
3732 }
3733
3734
3735
3736
3737
3738
3739 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
3740 {
3741 u16 vsi;
3742
3743 for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
3744 u16 vsig;
3745
3746 vsig = hw->blk[blk].xlt2.t[vsi];
3747 if (vsig) {
3748 ice_vsig_alloc_val(hw, blk, vsig);
3749 ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3750
3751
3752
3753 hw->blk[blk].xlt2.vsis[vsi].changed = 0;
3754 }
3755 }
3756 }
3757
3758
3759
3760
3761
3762 static void ice_init_sw_db(struct ice_hw *hw)
3763 {
3764 u16 i;
3765
3766 for (i = 0; i < ICE_BLK_COUNT; i++) {
3767 ice_init_sw_xlt1_db(hw, (enum ice_block)i);
3768 ice_init_sw_xlt2_db(hw, (enum ice_block)i);
3769 }
3770 }
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
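/**
 * ice_fill_tbl - fill a block's shadow table from the package segment
 * @hw: pointer to the HW structure
 * @block_id: block to copy into
 * @sid: section ID of the table to copy
 *
 * Walks every package section with the given section ID and copies its
 * contents into the matching XLT1, XLT2, profile TCAM, profile redirection,
 * or field vector shadow table, clamping the copy to the destination size.
 * Nothing is copied if no package segment has been loaded.
 */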
3784 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
3785 {
3786 u32 dst_len, sect_len, offset = 0;
3787 struct ice_prof_redir_section *pr;
3788 struct ice_prof_id_section *pid;
3789 struct ice_xlt1_section *xlt1;
3790 struct ice_xlt2_section *xlt2;
3791 struct ice_sw_fv_section *es;
3792 struct ice_pkg_enum state;
3793 u8 *src, *dst;
3794 void *sect;
3795
3796
3797
3798
3799
3800 if (!hw->seg) {
3801 ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
3802 return;
3803 }
3804
3805 memset(&state, 0, sizeof(state));
3806
3807 sect = ice_pkg_enum_section(hw->seg, &state, sid);
3808
3809 while (sect) {
3810 switch (sid) {
3811 case ICE_SID_XLT1_SW:
3812 case ICE_SID_XLT1_FD:
3813 case ICE_SID_XLT1_RSS:
3814 case ICE_SID_XLT1_ACL:
3815 case ICE_SID_XLT1_PE:
3816 xlt1 = sect;
3817 src = xlt1->value;
3818 sect_len = le16_to_cpu(xlt1->count) *
3819 sizeof(*hw->blk[block_id].xlt1.t);
3820 dst = hw->blk[block_id].xlt1.t;
3821 dst_len = hw->blk[block_id].xlt1.count *
3822 sizeof(*hw->blk[block_id].xlt1.t);
3823 break;
3824 case ICE_SID_XLT2_SW:
3825 case ICE_SID_XLT2_FD:
3826 case ICE_SID_XLT2_RSS:
3827 case ICE_SID_XLT2_ACL:
3828 case ICE_SID_XLT2_PE:
3829 xlt2 = sect;
3830 src = (__force u8 *)xlt2->value;
3831 sect_len = le16_to_cpu(xlt2->count) *
3832 sizeof(*hw->blk[block_id].xlt2.t);
3833 dst = (u8 *)hw->blk[block_id].xlt2.t;
3834 dst_len = hw->blk[block_id].xlt2.count *
3835 sizeof(*hw->blk[block_id].xlt2.t);
3836 break;
3837 case ICE_SID_PROFID_TCAM_SW:
3838 case ICE_SID_PROFID_TCAM_FD:
3839 case ICE_SID_PROFID_TCAM_RSS:
3840 case ICE_SID_PROFID_TCAM_ACL:
3841 case ICE_SID_PROFID_TCAM_PE:
3842 pid = sect;
3843 src = (u8 *)pid->entry;
3844 sect_len = le16_to_cpu(pid->count) *
3845 sizeof(*hw->blk[block_id].prof.t);
3846 dst = (u8 *)hw->blk[block_id].prof.t;
3847 dst_len = hw->blk[block_id].prof.count *
3848 sizeof(*hw->blk[block_id].prof.t);
3849 break;
3850 case ICE_SID_PROFID_REDIR_SW:
3851 case ICE_SID_PROFID_REDIR_FD:
3852 case ICE_SID_PROFID_REDIR_RSS:
3853 case ICE_SID_PROFID_REDIR_ACL:
3854 case ICE_SID_PROFID_REDIR_PE:
3855 pr = sect;
3856 src = pr->redir_value;
3857 sect_len = le16_to_cpu(pr->count) *
3858 sizeof(*hw->blk[block_id].prof_redir.t);
3859 dst = hw->blk[block_id].prof_redir.t;
3860 dst_len = hw->blk[block_id].prof_redir.count *
3861 sizeof(*hw->blk[block_id].prof_redir.t);
3862 break;
3863 case ICE_SID_FLD_VEC_SW:
3864 case ICE_SID_FLD_VEC_FD:
3865 case ICE_SID_FLD_VEC_RSS:
3866 case ICE_SID_FLD_VEC_ACL:
3867 case ICE_SID_FLD_VEC_PE:
3868 es = sect;
3869 src = (u8 *)es->fv;
3870 sect_len = (u32)(le16_to_cpu(es->count) *
3871 hw->blk[block_id].es.fvw) *
3872 sizeof(*hw->blk[block_id].es.t);
3873 dst = (u8 *)hw->blk[block_id].es.t;
3874 dst_len = (u32)(hw->blk[block_id].es.count *
3875 hw->blk[block_id].es.fvw) *
3876 sizeof(*hw->blk[block_id].es.t);
3877 break;
3878 default:
3879 return;
3880 }
3881
3882
3883
3884
3885 if (offset > dst_len)
3886 return;
3887
3888
3889
3890
3891
3892
3893 if ((offset + sect_len) > dst_len)
3894 sect_len = dst_len - offset;
3895
3896 memcpy(dst + offset, src, sect_len);
3897 offset += sect_len;
3898 sect = ice_pkg_enum_section(NULL, &state, sid);
3899 }
3900 }
3901
3902
3903
3904
3905
3906
3907
3908
3909
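/**
 * ice_fill_blk_tbls - fill all block shadow tables from the package
 * @hw: pointer to the HW structure
 *
 * Copies the package sections for every block and table into the shadow
 * tables, then builds the software PTG and VSIG databases from the freshly
 * copied XLT tables.
 */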
3910 void ice_fill_blk_tbls(struct ice_hw *hw)
3911 {
3912 u8 i;
3913
3914 for (i = 0; i < ICE_BLK_COUNT; i++) {
3915 enum ice_block blk_id = (enum ice_block)i;
3916
3917 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
3918 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
3919 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
3920 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
3921 ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
3922 }
3923
3924 ice_init_sw_db(hw);
3925 }
3926
3927
3928
3929
3930
3931
3932 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
3933 {
3934 struct ice_es *es = &hw->blk[blk_idx].es;
3935 struct ice_prof_map *del, *tmp;
3936
3937 mutex_lock(&es->prof_map_lock);
3938 list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
3939 list_del(&del->list);
3940 devm_kfree(ice_hw_to_dev(hw), del);
3941 }
3942 INIT_LIST_HEAD(&es->prof_map);
3943 mutex_unlock(&es->prof_map_lock);
3944 }
3945
3946
3947
3948
3949
3950
3951 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
3952 {
3953 struct ice_flow_prof *p, *tmp;
3954
3955 mutex_lock(&hw->fl_profs_locks[blk_idx]);
3956 list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
3957 struct ice_flow_entry *e, *t;
3958
3959 list_for_each_entry_safe(e, t, &p->entries, l_entry)
3960 ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
3961 ICE_FLOW_ENTRY_HNDL(e));
3962
3963 list_del(&p->l_entry);
3964
3965 mutex_destroy(&p->entries_lock);
3966 devm_kfree(ice_hw_to_dev(hw), p);
3967 }
3968 mutex_unlock(&hw->fl_profs_locks[blk_idx]);
3969
3970
3971
3972
3973 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
3974 }
3975
3976
3977
3978
3979
3980
3981 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
3982 {
3983 u16 i;
3984
3985 if (!hw->blk[blk].xlt2.vsig_tbl)
3986 return;
3987
3988 for (i = 1; i < ICE_MAX_VSIGS; i++)
3989 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
3990 ice_vsig_free(hw, blk, i);
3991 }
3992
3993
3994
3995
3996
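/**
 * ice_free_hw_tbls - free the hardware shadow tables
 * @hw: pointer to the HW structure
 *
 * Releases the profile map and flow profile lists, the per-block XLT,
 * profile, and extraction sequence allocations, the RSS configuration list,
 * and the profile mask state, then clears the block bookkeeping.
 */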
3997 void ice_free_hw_tbls(struct ice_hw *hw)
3998 {
3999 struct ice_rss_cfg *r, *rt;
4000 u8 i;
4001
4002 for (i = 0; i < ICE_BLK_COUNT; i++) {
4003 if (hw->blk[i].is_list_init) {
4004 struct ice_es *es = &hw->blk[i].es;
4005
4006 ice_free_prof_map(hw, i);
4007 mutex_destroy(&es->prof_map_lock);
4008
4009 ice_free_flow_profs(hw, i);
4010 mutex_destroy(&hw->fl_profs_locks[i]);
4011
4012 hw->blk[i].is_list_init = false;
4013 }
4014 ice_free_vsig_tbl(hw, (enum ice_block)i);
4015 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
4016 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
4017 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
4018 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
4019 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
4020 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
4021 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
4022 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
4023 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
4024 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
4025 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
4026 devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
4027 }
4028
4029 list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
4030 list_del(&r->l_entry);
4031 devm_kfree(ice_hw_to_dev(hw), r);
4032 }
4033 mutex_destroy(&hw->rss_locks);
4034 ice_shutdown_all_prof_masks(hw);
4035 memset(hw->blk, 0, sizeof(hw->blk));
4036 }
4037
4038
4039
4040
4041
4042
4043 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
4044 {
4045 mutex_init(&hw->fl_profs_locks[blk_idx]);
4046 INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
4047 }
4048
4049
4050
4051
4052
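/**
 * ice_clear_hw_tbls - clear the HW shadow tables without freeing them
 * @hw: pointer to the HW structure
 *
 * Empties the profile map, flow profile, and VSIG state and zeroes every
 * shadow table while keeping the allocations for reuse.
 */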
4053 void ice_clear_hw_tbls(struct ice_hw *hw)
4054 {
4055 u8 i;
4056
4057 for (i = 0; i < ICE_BLK_COUNT; i++) {
4058 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
4059 struct ice_prof_tcam *prof = &hw->blk[i].prof;
4060 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
4061 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
4062 struct ice_es *es = &hw->blk[i].es;
4063
4064 if (hw->blk[i].is_list_init) {
4065 ice_free_prof_map(hw, i);
4066 ice_free_flow_profs(hw, i);
4067 }
4068
4069 ice_free_vsig_tbl(hw, (enum ice_block)i);
4070
4071 memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
4072 memset(xlt1->ptg_tbl, 0,
4073 ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
4074 memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
4075
4076 memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
4077 memset(xlt2->vsig_tbl, 0,
4078 xlt2->count * sizeof(*xlt2->vsig_tbl));
4079 memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
4080
4081 memset(prof->t, 0, prof->count * sizeof(*prof->t));
4082 memset(prof_redir->t, 0,
4083 prof_redir->count * sizeof(*prof_redir->t));
4084
4085 memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
4086 memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
4087 memset(es->written, 0, es->count * sizeof(*es->written));
4088 memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
4089 }
4090 }
4091
4092
4093
4094
4095
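/**
 * ice_init_hw_tbls - initialize the hardware shadow tables
 * @hw: pointer to the HW structure
 *
 * Sets up the RSS list, profile masks, and per-block locks, then allocates
 * the XLT1/XLT2, profile TCAM, profile redirection, and extraction sequence
 * shadow tables according to blk_sizes. On any allocation failure everything
 * is freed and -ENOMEM is returned.
 */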
4096 int ice_init_hw_tbls(struct ice_hw *hw)
4097 {
4098 u8 i;
4099
4100 mutex_init(&hw->rss_locks);
4101 INIT_LIST_HEAD(&hw->rss_list_head);
4102 ice_init_all_prof_masks(hw);
4103 for (i = 0; i < ICE_BLK_COUNT; i++) {
4104 struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
4105 struct ice_prof_tcam *prof = &hw->blk[i].prof;
4106 struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
4107 struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
4108 struct ice_es *es = &hw->blk[i].es;
4109 u16 j;
4110
4111 if (hw->blk[i].is_list_init)
4112 continue;
4113
4114 ice_init_flow_profs(hw, i);
4115 mutex_init(&es->prof_map_lock);
4116 INIT_LIST_HEAD(&es->prof_map);
4117 hw->blk[i].is_list_init = true;
4118
4119 hw->blk[i].overwrite = blk_sizes[i].overwrite;
4120 es->reverse = blk_sizes[i].reverse;
4121
4122 xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
4123 xlt1->count = blk_sizes[i].xlt1;
4124
4125 xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
4126 sizeof(*xlt1->ptypes), GFP_KERNEL);
4127
4128 if (!xlt1->ptypes)
4129 goto err;
4130
4131 xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
4132 sizeof(*xlt1->ptg_tbl),
4133 GFP_KERNEL);
4134
4135 if (!xlt1->ptg_tbl)
4136 goto err;
4137
4138 xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
4139 sizeof(*xlt1->t), GFP_KERNEL);
4140 if (!xlt1->t)
4141 goto err;
4142
4143 xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
4144 xlt2->count = blk_sizes[i].xlt2;
4145
4146 xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
4147 sizeof(*xlt2->vsis), GFP_KERNEL);
4148
4149 if (!xlt2->vsis)
4150 goto err;
4151
4152 xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
4153 sizeof(*xlt2->vsig_tbl),
4154 GFP_KERNEL);
4155 if (!xlt2->vsig_tbl)
4156 goto err;
4157
4158 for (j = 0; j < xlt2->count; j++)
4159 INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
4160
4161 xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
4162 sizeof(*xlt2->t), GFP_KERNEL);
4163 if (!xlt2->t)
4164 goto err;
4165
4166 prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
4167 prof->count = blk_sizes[i].prof_tcam;
4168 prof->max_prof_id = blk_sizes[i].prof_id;
4169 prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
4170 prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
4171 sizeof(*prof->t), GFP_KERNEL);
4172
4173 if (!prof->t)
4174 goto err;
4175
4176 prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
4177 prof_redir->count = blk_sizes[i].prof_redir;
4178 prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
4179 prof_redir->count,
4180 sizeof(*prof_redir->t),
4181 GFP_KERNEL);
4182
4183 if (!prof_redir->t)
4184 goto err;
4185
4186 es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
4187 es->count = blk_sizes[i].es;
4188 es->fvw = blk_sizes[i].fvw;
4189 es->t = devm_kcalloc(ice_hw_to_dev(hw),
4190 (u32)(es->count * es->fvw),
4191 sizeof(*es->t), GFP_KERNEL);
4192 if (!es->t)
4193 goto err;
4194
4195 es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
4196 sizeof(*es->ref_count),
4197 GFP_KERNEL);
4198 if (!es->ref_count)
4199 goto err;
4200
4201 es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
4202 sizeof(*es->written), GFP_KERNEL);
4203 if (!es->written)
4204 goto err;
4205
4206 es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
4207 sizeof(*es->mask_ena), GFP_KERNEL);
4208 if (!es->mask_ena)
4209 goto err;
4210 }
4211 return 0;
4212
4213 err:
4214 ice_free_hw_tbls(hw);
4215 return -ENOMEM;
4216 }
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
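/**
 * ice_prof_gen_key - generate a profile ID TCAM key
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @ptg: packet type group (XLT1) entry
 * @vsig: VSI group (XLT2) entry
 * @cdid: CDID portion of the key
 * @flags: flag portion of the key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 * @key: output of the generated key
 *
 * Packs the PTG, VSIG, CDID (whose width depends on the block), and flags
 * into a TCAM key using the supplied masks.
 */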
4231 static int
4232 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
4233 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4234 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
4235 u8 key[ICE_TCAM_KEY_SZ])
4236 {
4237 struct ice_prof_id_key inkey;
4238
4239 inkey.xlt1 = ptg;
4240 inkey.xlt2_cdid = cpu_to_le16(vsig);
4241 inkey.flags = cpu_to_le16(flags);
4242
4243 switch (hw->blk[blk].prof.cdid_bits) {
4244 case 0:
4245 break;
4246 case 2:
4247 #define ICE_CD_2_M 0xC000U
4248 #define ICE_CD_2_S 14
4249 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
4250 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
4251 break;
4252 case 4:
4253 #define ICE_CD_4_M 0xF000U
4254 #define ICE_CD_4_S 12
4255 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
4256 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
4257 break;
4258 case 8:
4259 #define ICE_CD_8_M 0xFF00U
4260 #define ICE_CD_8_S 16
4261 inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
4262 inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
4263 break;
4264 default:
4265 ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
4266 break;
4267 }
4268
4269 return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
4270 nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
4271 }
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
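/**
 * ice_tcam_write_entry - write a TCAM entry into the shadow profile table
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @idx: TCAM index of the entry
 * @prof_id: profile ID for the entry
 * @ptg: packet type group
 * @vsig: VSI group
 * @cdid: CDID portion of the key
 * @flags: flag portion of the key
 * @vl_msk: valid mask
 * @dc_msk: don't care mask
 * @nm_msk: never match mask
 *
 * Generates the key and records it, along with the entry address and
 * profile ID, in the shadow copy of the profile TCAM.
 */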
4287 static int
4288 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
4289 u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
4290 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
4291 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
4292 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
4293 {
4294 struct ice_prof_tcam_entry;
4295 int status;
4296
4297 status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
4298 dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
4299 if (!status) {
4300 hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
4301 hw->blk[blk].prof.t[idx].prof_id = prof_id;
4302 }
4303
4304 return status;
4305 }
4306
4307
4308
4309
4310
4311
4312
4313
4314 static int
4315 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
4316 {
4317 u16 idx = vsig & ICE_VSIG_IDX_M;
4318 struct ice_vsig_vsi *ptr;
4319
4320 *refs = 0;
4321
4322 if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
4323 return -ENOENT;
4324
4325 ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
4326 while (ptr) {
4327 (*refs)++;
4328 ptr = ptr->next_vsi;
4329 }
4330
4331 return 0;
4332 }
4333
4334
4335
4336
4337
4338
4339
4340
4341 static bool
4342 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
4343 {
4344 u16 idx = vsig & ICE_VSIG_IDX_M;
4345 struct ice_vsig_prof *ent;
4346
4347 list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
4348 list)
4349 if (ent->profile_cookie == hdl)
4350 return true;
4351
4352 ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
4353 vsig);
4354 return false;
4355 }
4356
4357
4358
4359
4360
4361
4362
4363
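/**
 * ice_prof_bld_es - build the field vector sections of an update buffer
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @bld: package build buffer
 * @chgs: list of pending changes
 *
 * Adds one field vector table section per profile-add change, copying the
 * field vector from the shadow extraction sequence table.
 */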
4364 static int
4365 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
4366 struct ice_buf_build *bld, struct list_head *chgs)
4367 {
4368 u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
4369 struct ice_chs_chg *tmp;
4370
4371 list_for_each_entry(tmp, chgs, list_entry)
4372 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
4373 u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
4374 struct ice_pkg_es *p;
4375 u32 id;
4376
4377 id = ice_sect_id(blk, ICE_VEC_TBL);
4378 p = ice_pkg_buf_alloc_section(bld, id,
4379 struct_size(p, es, 1) +
4380 vec_size -
4381 sizeof(p->es[0]));
4382
4383 if (!p)
4384 return -ENOSPC;
4385
4386 p->count = cpu_to_le16(1);
4387 p->offset = cpu_to_le16(tmp->prof_id);
4388
4389 memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
4390 }
4391
4392 return 0;
4393 }
4394
4395
4396
4397
4398
4399
4400
4401
4402 static int
4403 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
4404 struct ice_buf_build *bld, struct list_head *chgs)
4405 {
4406 struct ice_chs_chg *tmp;
4407
4408 list_for_each_entry(tmp, chgs, list_entry)
4409 if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
4410 struct ice_prof_id_section *p;
4411 u32 id;
4412
4413 id = ice_sect_id(blk, ICE_PROF_TCAM);
4414 p = ice_pkg_buf_alloc_section(bld, id,
4415 struct_size(p, entry, 1));
4416
4417 if (!p)
4418 return -ENOSPC;
4419
4420 p->count = cpu_to_le16(1);
4421 p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
4422 p->entry[0].prof_id = tmp->prof_id;
4423
4424 memcpy(p->entry[0].key,
4425 &hw->blk[blk].prof.t[tmp->tcam_idx].key,
4426 sizeof(hw->blk[blk].prof.t->key));
4427 }
4428
4429 return 0;
4430 }
4431
4432
4433
4434
4435
4436
4437
4438 static int
4439 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
4440 struct list_head *chgs)
4441 {
4442 struct ice_chs_chg *tmp;
4443
4444 list_for_each_entry(tmp, chgs, list_entry)
4445 if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
4446 struct ice_xlt1_section *p;
4447 u32 id;
4448
4449 id = ice_sect_id(blk, ICE_XLT1);
4450 p = ice_pkg_buf_alloc_section(bld, id,
4451 struct_size(p, value, 1));
4452
4453 if (!p)
4454 return -ENOSPC;
4455
4456 p->count = cpu_to_le16(1);
4457 p->offset = cpu_to_le16(tmp->ptype);
4458 p->value[0] = tmp->ptg;
4459 }
4460
4461 return 0;
4462 }
4463
4464
4465
4466
4467
4468
4469
4470 static int
4471 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
4472 struct list_head *chgs)
4473 {
4474 struct ice_chs_chg *tmp;
4475
4476 list_for_each_entry(tmp, chgs, list_entry) {
4477 struct ice_xlt2_section *p;
4478 u32 id;
4479
4480 switch (tmp->type) {
4481 case ICE_VSIG_ADD:
4482 case ICE_VSI_MOVE:
4483 case ICE_VSIG_REM:
4484 id = ice_sect_id(blk, ICE_XLT2);
4485 p = ice_pkg_buf_alloc_section(bld, id,
4486 struct_size(p, value, 1));
4487
4488 if (!p)
4489 return -ENOSPC;
4490
4491 p->count = cpu_to_le16(1);
4492 p->offset = cpu_to_le16(tmp->vsi);
4493 p->value[0] = cpu_to_le16(tmp->vsig);
4494 break;
4495 default:
4496 break;
4497 }
4498 }
4499
4500 return 0;
4501 }
4502
4503
4504
4505
4506
4507
4508
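/**
 * ice_upd_prof_hw - commit a list of profile changes to hardware
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @chgs: list of pending changes
 *
 * Counts the sections the change list requires, builds a package update
 * buffer containing the extraction sequence, TCAM, XLT1, and XLT2 sections,
 * and sends it to firmware via ice_update_pkg().
 */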
4509 static int
4510 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
4511 struct list_head *chgs)
4512 {
4513 struct ice_buf_build *b;
4514 struct ice_chs_chg *tmp;
4515 u16 pkg_sects;
4516 u16 xlt1 = 0;
4517 u16 xlt2 = 0;
4518 u16 tcam = 0;
4519 u16 es = 0;
4520 int status;
4521 u16 sects;
4522
4523
4524 list_for_each_entry(tmp, chgs, list_entry) {
4525 switch (tmp->type) {
4526 case ICE_PTG_ES_ADD:
4527 if (tmp->add_ptg)
4528 xlt1++;
4529 if (tmp->add_prof)
4530 es++;
4531 break;
4532 case ICE_TCAM_ADD:
4533 tcam++;
4534 break;
4535 case ICE_VSIG_ADD:
4536 case ICE_VSI_MOVE:
4537 case ICE_VSIG_REM:
4538 xlt2++;
4539 break;
4540 default:
4541 break;
4542 }
4543 }
4544 sects = xlt1 + xlt2 + tcam + es;
4545
4546 if (!sects)
4547 return 0;
4548
4549
4550 b = ice_pkg_buf_alloc(hw);
4551 if (!b)
4552 return -ENOMEM;
4553
4554 status = ice_pkg_buf_reserve_section(b, sects);
4555 if (status)
4556 goto error_tmp;
4557
4558
4559 if (es) {
4560 status = ice_prof_bld_es(hw, blk, b, chgs);
4561 if (status)
4562 goto error_tmp;
4563 }
4564
4565 if (tcam) {
4566 status = ice_prof_bld_tcam(hw, blk, b, chgs);
4567 if (status)
4568 goto error_tmp;
4569 }
4570
4571 if (xlt1) {
4572 status = ice_prof_bld_xlt1(blk, b, chgs);
4573 if (status)
4574 goto error_tmp;
4575 }
4576
4577 if (xlt2) {
4578 status = ice_prof_bld_xlt2(blk, b, chgs);
4579 if (status)
4580 goto error_tmp;
4581 }
4582
4583
4584
4585
4586
4587 pkg_sects = ice_pkg_buf_get_active_sections(b);
4588 if (!pkg_sects || pkg_sects != sects) {
4589 status = -EINVAL;
4590 goto error_tmp;
4591 }
4592
4593
4594 status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
4595 if (status == -EIO)
4596 ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
4597
4598 error_tmp:
4599 ice_pkg_buf_free(hw, b);
4600 return status;
4601 }
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
4613 {
4614 wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
4615
4616 ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
4617 GLQF_FDMASK_SEL(prof_id), mask_sel);
4618 }
4619
4620 struct ice_fd_src_dst_pair {
4621 u8 prot_id;
4622 u8 count;
4623 u16 off;
4624 };
4625
4626 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
4627
4628 { ICE_PROT_IPV4_OF_OR_S, 2, 12 },
4629 { ICE_PROT_IPV4_OF_OR_S, 2, 16 },
4630
4631 { ICE_PROT_IPV4_IL, 2, 12 },
4632 { ICE_PROT_IPV4_IL, 2, 16 },
4633
4634 { ICE_PROT_IPV6_OF_OR_S, 8, 8 },
4635 { ICE_PROT_IPV6_OF_OR_S, 8, 24 },
4636
4637 { ICE_PROT_IPV6_IL, 8, 8 },
4638 { ICE_PROT_IPV6_IL, 8, 24 },
4639
4640 { ICE_PROT_TCP_IL, 1, 0 },
4641 { ICE_PROT_TCP_IL, 1, 2 },
4642
4643 { ICE_PROT_UDP_OF, 1, 0 },
4644 { ICE_PROT_UDP_OF, 1, 2 },
4645
4646 { ICE_PROT_UDP_IL_OR_S, 1, 0 },
4647 { ICE_PROT_UDP_IL_OR_S, 1, 2 },
4648
4649 { ICE_PROT_SCTP_IL, 1, 0 },
4650 { ICE_PROT_SCTP_IL, 1, 2 }
4651 };
4652
4653 #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs)
4654
4655
4656
4657
4658
4659
4660
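/**
 * ice_update_fd_swap - program the Flow Director swap and inset registers
 * @hw: pointer to the HW structure
 * @prof_id: profile ID
 * @es: extraction sequence to modify
 *
 * Flow Director fields are tracked as source/destination pairs. For every
 * pair where only one half is present in the extraction sequence, the
 * missing half is written into the unused words at the start of the
 * sequence, and GLQF_FDSWAP and GLQF_FDINSET are then programmed from the
 * resulting swap indexes.
 */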
4661 static int
4662 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
4663 {
4664 DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4665 u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
4666 #define ICE_FD_FV_NOT_FOUND (-2)
4667 s8 first_free = ICE_FD_FV_NOT_FOUND;
4668 u8 used[ICE_MAX_FV_WORDS] = { 0 };
4669 s8 orig_free, si;
4670 u32 mask_sel = 0;
4671 u8 i, j, k;
4672
4673 bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
4674
4675
4676
4677
4678
4679
4680
4681
4682 for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
4683
4684
4685
4686 if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
4687 ICE_PROT_INVALID)
4688 first_free = i - 1;
4689
4690 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4691 if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
4692 es[i].off == ice_fd_pairs[j].off) {
4693 __set_bit(j, pair_list);
4694 pair_start[j] = i;
4695 }
4696 }
4697
4698 orig_free = first_free;
4699
4700
4701 for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
4702 u8 bit1 = test_bit(i + 1, pair_list);
4703 u8 bit0 = test_bit(i, pair_list);
4704
4705 if (bit0 ^ bit1) {
4706 u8 index;
4707
4708
4709 if (!bit0)
4710 index = i;
4711 else
4712 index = i + 1;
4713
4714
4715 if (first_free + 1 < (s8)ice_fd_pairs[index].count)
4716 return -ENOSPC;
4717
4718
4719 for (k = 0; k < ice_fd_pairs[index].count; k++) {
4720 es[first_free - k].prot_id =
4721 ice_fd_pairs[index].prot_id;
4722 es[first_free - k].off =
4723 ice_fd_pairs[index].off + (k * 2);
4724
4725 if (k > first_free)
4726 return -EIO;
4727
4728
4729 mask_sel |= BIT(first_free - k);
4730 }
4731
4732 pair_start[index] = first_free;
4733 first_free -= ice_fd_pairs[index].count;
4734 }
4735 }
4736
4737
4738 si = hw->blk[ICE_BLK_FD].es.fvw - 1;
4739 while (si >= 0) {
4740 u8 indexes_used = 1;
4741
4742
4743 #define ICE_SWAP_VALID 0x80
4744 used[si] = si | ICE_SWAP_VALID;
4745
4746 if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
4747 si -= indexes_used;
4748 continue;
4749 }
4750
4751
4752 for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
4753 if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
4754 es[si].off == ice_fd_pairs[j].off) {
4755 u8 idx;
4756
4757
4758 idx = j + ((j % 2) ? -1 : 1);
4759
4760 indexes_used = ice_fd_pairs[idx].count;
4761 for (k = 0; k < indexes_used; k++) {
4762 used[si - k] = (pair_start[idx] - k) |
4763 ICE_SWAP_VALID;
4764 }
4765
4766 break;
4767 }
4768
4769 si -= indexes_used;
4770 }
4771
4772
4773
4774
4775 for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
4776 u32 raw_swap = 0;
4777 u32 raw_in = 0;
4778
4779 for (k = 0; k < 4; k++) {
4780 u8 idx;
4781
4782 idx = (j * 4) + k;
4783 if (used[idx] && !(mask_sel & BIT(idx))) {
4784 raw_swap |= used[idx] << (k * BITS_PER_BYTE);
4785 #define ICE_INSET_DFLT 0x9f
4786 raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
4787 }
4788 }
4789
4790
4791 wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
4792
4793 ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
4794 prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
4795
4796
4797 wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
4798
4799 ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
4800 prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
4801 }
4802
4803
4804 ice_update_fd_mask(hw, prof_id, 0);
4805
4806 return 0;
4807 }
4808
4809
4810 static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
4811 { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
4812 { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
4813 { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
4814 { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
4815 };
4816
4817
4818
4819
4820
4821
4822 static void
4823 ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
4824 struct ice_ptype_attrib_info *info)
4825 {
4826 *info = ice_ptype_attributes[type];
4827 }
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837 static int
4838 ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
4839 const struct ice_ptype_attributes *attr, u16 attr_cnt)
4840 {
4841 bool found = false;
4842 u16 i;
4843
4844 for (i = 0; i < attr_cnt; i++)
4845 if (attr[i].ptype == ptype) {
4846 found = true;
4847
4848 prof->ptg[prof->ptg_cnt] = ptg;
4849 ice_get_ptype_attrib_info(attr[i].attrib,
4850 &prof->attr[prof->ptg_cnt]);
4851
4852 if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
4853 return -ENOSPC;
4854 }
4855
4856 if (!found)
4857 return -ENOENT;
4858
4859 return 0;
4860 }
4861
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
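/**
 * ice_add_prof - add a profile
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @id: profile tracking ID
 * @ptypes: byte bitmap of packet types the profile applies to
 * @attr: array of ptype attributes
 * @attr_cnt: number of elements in @attr
 * @es: extraction sequence for the profile
 * @masks: masks for the extraction sequence
 *
 * Reuses an existing profile with the same extraction sequence and masks if
 * one exists; otherwise a new profile ID is allocated, the FD swap/inset
 * registers and profile masks are configured, and the extraction sequence
 * is written. A tracking entry mapping @id to the profile and its PTGs is
 * then added to the block's profile map.
 */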
4878 int
4879 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
4880 const struct ice_ptype_attributes *attr, u16 attr_cnt,
4881 struct ice_fv_word *es, u16 *masks)
4882 {
4883 u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
4884 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
4885 struct ice_prof_map *prof;
4886 u8 byte = 0;
4887 u8 prof_id;
4888 int status;
4889
4890 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
4891
4892 mutex_lock(&hw->blk[blk].es.prof_map_lock);
4893
4894
4895 status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
4896 if (status) {
4897
4898 status = ice_alloc_prof_id(hw, blk, &prof_id);
4899 if (status)
4900 goto err_ice_add_prof;
4901 if (blk == ICE_BLK_FD) {
4902
4903
4904
4905
4906
4907
4908
4909 status = ice_update_fd_swap(hw, prof_id, es);
4910 if (status)
4911 goto err_ice_add_prof;
4912 }
4913 status = ice_update_prof_masking(hw, blk, prof_id, masks);
4914 if (status)
4915 goto err_ice_add_prof;
4916
4917
4918 ice_write_es(hw, blk, prof_id, es);
4919 }
4920
4921 ice_prof_inc_ref(hw, blk, prof_id);
4922
4923
4924 prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
4925 if (!prof) {
4926 status = -ENOMEM;
4927 goto err_ice_add_prof;
4928 }
4929
4930 prof->profile_cookie = id;
4931 prof->prof_id = prof_id;
4932 prof->ptg_cnt = 0;
4933 prof->context = 0;
4934
4935
4936 while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
4937 u8 bit;
4938
4939 if (!ptypes[byte]) {
4940 bytes--;
4941 byte++;
4942 continue;
4943 }
4944
4945
4946 for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
4947 BITS_PER_BYTE) {
4948 u16 ptype;
4949 u8 ptg;
4950
4951 ptype = byte * BITS_PER_BYTE + bit;
4952
4953
4954
4955
4956 if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
4957 continue;
4958
4959
4960 if (test_bit(ptg, ptgs_used))
4961 continue;
4962
4963 __set_bit(ptg, ptgs_used);
4964
4965
4966
4967 status = ice_add_prof_attrib(prof, ptg, ptype,
4968 attr, attr_cnt);
4969 if (status == -ENOSPC)
4970 break;
4971 if (status) {
4972
4973
4974
4975 prof->ptg[prof->ptg_cnt] = ptg;
4976 prof->attr[prof->ptg_cnt].flags = 0;
4977 prof->attr[prof->ptg_cnt].mask = 0;
4978
4979 if (++prof->ptg_cnt >=
4980 ICE_MAX_PTG_PER_PROFILE)
4981 break;
4982 }
4983 }
4984
4985 bytes--;
4986 byte++;
4987 }
4988
4989 list_add(&prof->list, &hw->blk[blk].es.prof_map);
4990 status = 0;
4991
4992 err_ice_add_prof:
4993 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
4994 return status;
4995 }
4996
4997
4998
4999
5000
5001
5002
5003
5004
5005
5006 static struct ice_prof_map *
5007 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
5008 {
5009 struct ice_prof_map *entry = NULL;
5010 struct ice_prof_map *map;
5011
5012 list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
5013 if (map->profile_cookie == id) {
5014 entry = map;
5015 break;
5016 }
5017
5018 return entry;
5019 }
5020
5021
5022
5023
5024
5025
5026
5027 static u16
5028 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
5029 {
5030 u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
5031 struct ice_vsig_prof *p;
5032
5033 list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5034 list)
5035 count++;
5036
5037 return count;
5038 }
5039
5040
5041
5042
5043
5044
5045
5046 static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
5047 {
5048
5049 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5050 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
5051 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
5052 int status;
5053
5054
5055 status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
5056 dc_msk, nm_msk);
5057 if (status)
5058 return status;
5059
5060
5061 status = ice_free_tcam_ent(hw, blk, idx);
5062
5063 return status;
5064 }
5065
5066
5067
5068
5069
5070
5071
5072 static int
5073 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
5074 struct ice_vsig_prof *prof)
5075 {
5076 int status;
5077 u16 i;
5078
5079 for (i = 0; i < prof->tcam_count; i++)
5080 if (prof->tcam[i].in_use) {
5081 prof->tcam[i].in_use = false;
5082 status = ice_rel_tcam_idx(hw, blk,
5083 prof->tcam[i].tcam_idx);
5084 if (status)
5085 return -EIO;
5086 }
5087
5088 return 0;
5089 }
5090
5091
5092
5093
5094
5095
5096
5097
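/**
 * ice_rem_vsig - remove a VSIG and its profile associations
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @vsig: the VSIG to remove
 * @chg: the change list
 *
 * Releases every TCAM entry referenced by the group's profiles, queues a
 * change moving each member VSI back to the default VSIG, and frees the
 * group.
 */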
5098 static int
5099 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5100 struct list_head *chg)
5101 {
5102 u16 idx = vsig & ICE_VSIG_IDX_M;
5103 struct ice_vsig_vsi *vsi_cur;
5104 struct ice_vsig_prof *d, *t;
5105 int status;
5106
5107
5108 list_for_each_entry_safe(d, t,
5109 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5110 list) {
5111 status = ice_rem_prof_id(hw, blk, d);
5112 if (status)
5113 return status;
5114
5115 list_del(&d->list);
5116 devm_kfree(ice_hw_to_dev(hw), d);
5117 }
5118
5119
5120 vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
5121
5122
5123
5124 if (vsi_cur)
5125 do {
5126 struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
5127 struct ice_chs_chg *p;
5128
5129 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
5130 GFP_KERNEL);
5131 if (!p)
5132 return -ENOMEM;
5133
5134 p->type = ICE_VSIG_REM;
5135 p->orig_vsig = vsig;
5136 p->vsig = ICE_DEFAULT_VSIG;
5137 p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
5138
5139 list_add(&p->list_entry, chg);
5140
5141 vsi_cur = tmp;
5142 } while (vsi_cur);
5143
5144 return ice_vsig_free(hw, blk, vsig);
5145 }
5146
5147
5148
5149
5150
5151
5152
5153
5154
5155 static int
5156 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5157 struct list_head *chg)
5158 {
5159 u16 idx = vsig & ICE_VSIG_IDX_M;
5160 struct ice_vsig_prof *p, *t;
5161 int status;
5162
5163 list_for_each_entry_safe(p, t,
5164 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5165 list)
5166 if (p->profile_cookie == hdl) {
5167 if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
5168
5169 return ice_rem_vsig(hw, blk, vsig, chg);
5170
5171 status = ice_rem_prof_id(hw, blk, p);
5172 if (!status) {
5173 list_del(&p->list);
5174 devm_kfree(ice_hw_to_dev(hw), p);
5175 }
5176 return status;
5177 }
5178
5179 return -ENOENT;
5180 }
5181
5182
5183
5184
5185
5186
5187
5188 static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
5189 {
5190 struct ice_chs_chg *del, *tmp;
5191 struct list_head chg;
5192 int status;
5193 u16 i;
5194
5195 INIT_LIST_HEAD(&chg);
5196
5197 for (i = 1; i < ICE_MAX_VSIGS; i++)
5198 if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
5199 if (ice_has_prof_vsig(hw, blk, i, id)) {
5200 status = ice_rem_prof_id_vsig(hw, blk, i, id,
5201 &chg);
5202 if (status)
5203 goto err_ice_rem_flow_all;
5204 }
5205 }
5206
5207 status = ice_upd_prof_hw(hw, blk, &chg);
5208
5209 err_ice_rem_flow_all:
5210 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5211 list_del(&del->list_entry);
5212 devm_kfree(ice_hw_to_dev(hw), del);
5213 }
5214
5215 return status;
5216 }
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
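/**
 * ice_rem_prof - remove a profile
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @id: profile tracking ID
 *
 * Removes all VSIG associations that use the profile, drops the profile
 * reference (freeing the HW resources when it reaches zero), and deletes
 * the tracking entry from the profile map.
 */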
5228 int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
5229 {
5230 struct ice_prof_map *pmap;
5231 int status;
5232
5233 mutex_lock(&hw->blk[blk].es.prof_map_lock);
5234
5235 pmap = ice_search_prof_id(hw, blk, id);
5236 if (!pmap) {
5237 status = -ENOENT;
5238 goto err_ice_rem_prof;
5239 }
5240
5241
5242 status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
5243 if (status)
5244 goto err_ice_rem_prof;
5245
5246
5247 ice_prof_dec_ref(hw, blk, pmap->prof_id);
5248
5249 list_del(&pmap->list);
5250 devm_kfree(ice_hw_to_dev(hw), pmap);
5251
5252 err_ice_rem_prof:
5253 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5254 return status;
5255 }
5256
5257
5258
5259
5260
5261
5262
5263
5264 static int
5265 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
5266 struct list_head *chg)
5267 {
5268 struct ice_prof_map *map;
5269 struct ice_chs_chg *p;
5270 int status = 0;
5271 u16 i;
5272
5273 mutex_lock(&hw->blk[blk].es.prof_map_lock);
5274
5275 map = ice_search_prof_id(hw, blk, hdl);
5276 if (!map) {
5277 status = -ENOENT;
5278 goto err_ice_get_prof;
5279 }
5280
5281 for (i = 0; i < map->ptg_cnt; i++)
5282 if (!hw->blk[blk].es.written[map->prof_id]) {
5283
5284 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
5285 GFP_KERNEL);
5286 if (!p) {
5287 status = -ENOMEM;
5288 goto err_ice_get_prof;
5289 }
5290
5291 p->type = ICE_PTG_ES_ADD;
5292 p->ptype = 0;
5293 p->ptg = map->ptg[i];
5294 p->add_ptg = 0;
5295
5296 p->add_prof = 1;
5297 p->prof_id = map->prof_id;
5298
5299 hw->blk[blk].es.written[map->prof_id] = true;
5300
5301 list_add(&p->list_entry, chg);
5302 }
5303
5304 err_ice_get_prof:
5305 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5306
5307 return status;
5308 }
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319 static int
5320 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5321 struct list_head *lst)
5322 {
5323 struct ice_vsig_prof *ent1, *ent2;
5324 u16 idx = vsig & ICE_VSIG_IDX_M;
5325
5326 list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5327 list) {
5328 struct ice_vsig_prof *p;
5329
5330
5331 p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
5332 GFP_KERNEL);
5333 if (!p)
5334 goto err_ice_get_profs_vsig;
5335
5336 list_add_tail(&p->list, lst);
5337 }
5338
5339 return 0;
5340
5341 err_ice_get_profs_vsig:
5342 list_for_each_entry_safe(ent1, ent2, lst, list) {
5343 list_del(&ent1->list);
5344 devm_kfree(ice_hw_to_dev(hw), ent1);
5345 }
5346
5347 return -ENOMEM;
5348 }
5349
5350
5351
5352
5353
5354
5355
5356
5357 static int
5358 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
5359 struct list_head *lst, u64 hdl)
5360 {
5361 struct ice_prof_map *map;
5362 struct ice_vsig_prof *p;
5363 int status = 0;
5364 u16 i;
5365
5366 mutex_lock(&hw->blk[blk].es.prof_map_lock);
5367 map = ice_search_prof_id(hw, blk, hdl);
5368 if (!map) {
5369 status = -ENOENT;
5370 goto err_ice_add_prof_to_lst;
5371 }
5372
5373 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5374 if (!p) {
5375 status = -ENOMEM;
5376 goto err_ice_add_prof_to_lst;
5377 }
5378
5379 p->profile_cookie = map->profile_cookie;
5380 p->prof_id = map->prof_id;
5381 p->tcam_count = map->ptg_cnt;
5382
5383 for (i = 0; i < map->ptg_cnt; i++) {
5384 p->tcam[i].prof_id = map->prof_id;
5385 p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
5386 p->tcam[i].ptg = map->ptg[i];
5387 }
5388
5389 list_add(&p->list, lst);
5390
5391 err_ice_add_prof_to_lst:
5392 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5393 return status;
5394 }
5395
5396
5397
5398
5399
5400
5401
5402
5403
5404 static int
5405 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
5406 struct list_head *chg)
5407 {
5408 struct ice_chs_chg *p;
5409 u16 orig_vsig;
5410 int status;
5411
5412 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5413 if (!p)
5414 return -ENOMEM;
5415
5416 status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
5417 if (!status)
5418 status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
5419
5420 if (status) {
5421 devm_kfree(ice_hw_to_dev(hw), p);
5422 return status;
5423 }
5424
5425 p->type = ICE_VSI_MOVE;
5426 p->vsi = vsi;
5427 p->orig_vsig = orig_vsig;
5428 p->vsig = vsig;
5429
5430 list_add(&p->list_entry, chg);
5431
5432 return 0;
5433 }
5434
5435
5436
5437
5438
5439
5440
5441 static void
5442 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
5443 {
5444 struct ice_chs_chg *pos, *tmp;
5445
5446 list_for_each_entry_safe(tmp, pos, chg, list_entry)
5447 if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
5448 list_del(&tmp->list_entry);
5449 devm_kfree(ice_hw_to_dev(hw), tmp);
5450 }
5451 }
5452
5453
5454
5455
5456
5457
5458
5459
5460
5461
5462
5463
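/**
 * ice_prof_tcam_ena_dis - enable or disable a profile's TCAM entry
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @enable: true to enable, false to disable
 * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the entry
 * @chg: the change list
 *
 * Disabling releases the TCAM index and removes any pending add for it from
 * the change list. Enabling allocates a new index, writes the entry keyed
 * on PTG and VSIG, and queues a TCAM-add change.
 */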
5464 static int
5465 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
5466 u16 vsig, struct ice_tcam_inf *tcam,
5467 struct list_head *chg)
5468 {
5469 struct ice_chs_chg *p;
5470 int status;
5471
5472 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5473 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5474 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5475
5476
5477 if (!enable) {
5478 status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
5479
5480
5481
5482
5483
5484 ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
5485 tcam->tcam_idx = 0;
5486 tcam->in_use = 0;
5487 return status;
5488 }
5489
5490
5491
5492
5493
5494
5495 status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
5496 &tcam->tcam_idx);
5497 if (status)
5498 return status;
5499
5500
5501 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5502 if (!p)
5503 return -ENOMEM;
5504
5505 status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
5506 tcam->ptg, vsig, 0, tcam->attr.flags,
5507 vl_msk, dc_msk, nm_msk);
5508 if (status)
5509 goto err_ice_prof_tcam_ena_dis;
5510
5511 tcam->in_use = 1;
5512
5513 p->type = ICE_TCAM_ADD;
5514 p->add_tcam_idx = true;
5515 p->prof_id = tcam->prof_id;
5516 p->ptg = tcam->ptg;
5517 p->vsig = 0;
5518 p->tcam_idx = tcam->tcam_idx;
5519
5520
5521 list_add(&p->list_entry, chg);
5522
5523 return 0;
5524
5525 err_ice_prof_tcam_ena_dis:
5526 devm_kfree(ice_hw_to_dev(hw), p);
5527 return status;
5528 }
5529
5530
5531
5532
5533
5534
5535
5536
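/**
 * ice_adj_prof_priorities - adjust profile priorities within a VSIG
 * @hw: pointer to the HW structure
 * @blk: HW block
 * @vsig: the VSIG whose profiles are being adjusted
 * @chg: the change list
 *
 * Walks the group's profile list in order; for each PTG, only the first
 * profile encountered keeps (or gains) an enabled TCAM entry, and entries
 * for the same PTG in later, lower-priority profiles are disabled.
 */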
5537 static int
5538 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
5539 struct list_head *chg)
5540 {
5541 DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
5542 struct ice_vsig_prof *t;
5543 int status;
5544 u16 idx;
5545
5546 bitmap_zero(ptgs_used, ICE_XLT1_CNT);
5547 idx = vsig & ICE_VSIG_IDX_M;
5548
5549 /* Priority is based on the order in which the profiles are added.
5550  * The newest added profile has the highest priority and the oldest
5551  * added profile has the lowest priority. Since the profile property
5552  * list for a VSIG is sorted from newest to oldest, this code walks
5553  * the list in order and enables the first instance of each PTG that
5554  * it finds (that is not already enabled); it also disables any
5555  * duplicate PTGs that it finds in the older profiles (those that are
5556  * currently enabled).
5557  */
5558 list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
5559 list) {
5560 u16 i;
5561
5562 for (i = 0; i < t->tcam_count; i++) {
5563 /* Scan the priorities from newest to oldest.
5564  * Make sure that the newest profiles take priority.
5565  */
5566 if (test_bit(t->tcam[i].ptg, ptgs_used) &&
5567 t->tcam[i].in_use) {
5568 /* need to mark this PTG as never match, as it
5569  * was already in use and therefore duplicate
5570  * (and lower priority)
5571  */
5572 status = ice_prof_tcam_ena_dis(hw, blk, false,
5573 vsig,
5574 &t->tcam[i],
5575 chg);
5576 if (status)
5577 return status;
5578 } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
5579 !t->tcam[i].in_use) {
5580 /* need to enable this PTG, as it is not in use
5581  * and not enabled (highest priority)
5582  */
5583 status = ice_prof_tcam_ena_dis(hw, blk, true,
5584 vsig,
5585 &t->tcam[i],
5586 chg);
5587 if (status)
5588 return status;
5589 }
5590
5591
5592 __set_bit(t->tcam[i].ptg, ptgs_used);
5593 }
5594 }
5595
5596 return 0;
5597 }
5598
5599 /**
5600  * ice_add_prof_id_vsig - add a profile to a VSIG
5601  * @hw: pointer to the HW struct
5602  * @blk: hardware block
5603  * @vsig: the VSIG to which this profile is to be added
5604  * @hdl: the profile handle indicating the profile to add
5605  * @rev: true to add entries to the end of the list
5606  * @chg: the change list
5607  */
5608 static int
5609 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
5610 bool rev, struct list_head *chg)
5611 {
5612
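/* Masks that ignore flags */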
5613 u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5614 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
5615 u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
5616 struct ice_prof_map *map;
5617 struct ice_vsig_prof *t;
5618 struct ice_chs_chg *p;
5619 u16 vsig_idx, i;
5620 int status = 0;
5621
5622
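/* Error, if this VSIG already has this profile */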
5623 if (ice_has_prof_vsig(hw, blk, vsig, hdl))
5624 return -EEXIST;
5625
5626
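/* new VSIG profile structure */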
5627 t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
5628 if (!t)
5629 return -ENOMEM;
5630
5631 mutex_lock(&hw->blk[blk].es.prof_map_lock);
5632
5633 map = ice_search_prof_id(hw, blk, hdl);
5634 if (!map) {
5635 status = -ENOENT;
5636 goto err_ice_add_prof_id_vsig;
5637 }
5638
5639 t->profile_cookie = map->profile_cookie;
5640 t->prof_id = map->prof_id;
5641 t->tcam_count = map->ptg_cnt;
5642
5643
5644 for (i = 0; i < map->ptg_cnt; i++) {
5645 u16 tcam_idx;
5646
5647
5648 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5649 if (!p) {
5650 status = -ENOMEM;
5651 goto err_ice_add_prof_id_vsig;
5652 }
5653
5654 /* allocate the TCAM entry index */
5655 /* for entries with empty attribute masks, allocate an entry from
5656  * the bottom of the TCAM table; otherwise, allocate from the top
5657  * of the table so that the entry receives higher priority
5658  */
5659 status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
5660 &tcam_idx);
5661 if (status) {
5662 devm_kfree(ice_hw_to_dev(hw), p);
5663 goto err_ice_add_prof_id_vsig;
5664 }
5665
5666 t->tcam[i].ptg = map->ptg[i];
5667 t->tcam[i].prof_id = map->prof_id;
5668 t->tcam[i].tcam_idx = tcam_idx;
5669 t->tcam[i].attr = map->attr[i];
5670 t->tcam[i].in_use = true;
5671
5672 p->type = ICE_TCAM_ADD;
5673 p->add_tcam_idx = true;
5674 p->prof_id = t->tcam[i].prof_id;
5675 p->ptg = t->tcam[i].ptg;
5676 p->vsig = vsig;
5677 p->tcam_idx = t->tcam[i].tcam_idx;
5678
5679
5680 status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
5681 t->tcam[i].prof_id,
5682 t->tcam[i].ptg, vsig, 0, 0,
5683 vl_msk, dc_msk, nm_msk);
5684 if (status) {
5685 devm_kfree(ice_hw_to_dev(hw), p);
5686 goto err_ice_add_prof_id_vsig;
5687 }
5688
5689
5690 list_add(&p->list_entry, chg);
5691 }
5692
5693
5694 vsig_idx = vsig & ICE_VSIG_IDX_M;
5695 if (rev)
5696 list_add_tail(&t->list,
5697 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5698 else
5699 list_add(&t->list,
5700 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
5701
5702 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5703 return status;
5704
5705 err_ice_add_prof_id_vsig:
5706 mutex_unlock(&hw->blk[blk].es.prof_map_lock);
5707 /* let caller clean up the change list */
5708 devm_kfree(ice_hw_to_dev(hw), t);
5709 return status;
5710 }
5711
5712 /**
5713  * ice_create_prof_id_vsig - add a new VSIG with a single profile
5714  * @hw: pointer to the HW struct
5715  * @blk: hardware block
5716  * @vsi: the initial VSI that will be in the VSIG
5717  * @hdl: the profile handle of the profile that will be added to the VSIG
5718  * @chg: the change list
5719  */
5720 static int
5721 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
5722 struct list_head *chg)
5723 {
5724 struct ice_chs_chg *p;
5725 u16 new_vsig;
5726 int status;
5727
5728 p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
5729 if (!p)
5730 return -ENOMEM;
5731
5732 new_vsig = ice_vsig_alloc(hw, blk);
5733 if (!new_vsig) {
5734 status = -EIO;
5735 goto err_ice_create_prof_id_vsig;
5736 }
5737
5738 status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
5739 if (status)
5740 goto err_ice_create_prof_id_vsig;
5741
5742 status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
5743 if (status)
5744 goto err_ice_create_prof_id_vsig;
5745
5746 p->type = ICE_VSIG_ADD;
5747 p->vsi = vsi;
5748 p->orig_vsig = ICE_DEFAULT_VSIG;
5749 p->vsig = new_vsig;
5750
5751 list_add(&p->list_entry, chg);
5752
5753 return 0;
5754
5755 err_ice_create_prof_id_vsig:
5756 /* let caller clean up the change list */
5757 devm_kfree(ice_hw_to_dev(hw), p);
5758 return status;
5759 }
5760
5761 /**
5762  * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
5763  * @hw: pointer to the HW struct
5764  * @blk: hardware block
5765  * @vsi: the initial VSI that will be in the VSIG
5766  * @lst: the list of profiles that will be added to the VSIG
5767  * @new_vsig: return of new VSIG
5768  * @chg: the change list
5769  */
5770 static int
5771 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
5772 struct list_head *lst, u16 *new_vsig,
5773 struct list_head *chg)
5774 {
5775 struct ice_vsig_prof *t;
5776 int status;
5777 u16 vsig;
5778
5779 vsig = ice_vsig_alloc(hw, blk);
5780 if (!vsig)
5781 return -EIO;
5782
5783 status = ice_move_vsi(hw, blk, vsi, vsig, chg);
5784 if (status)
5785 return status;
5786
5787 list_for_each_entry(t, lst, list) {
5788 /* Reverse the order here since we are copying the list */
5789 status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
5790 true, chg);
5791 if (status)
5792 return status;
5793 }
5794
5795 *new_vsig = vsig;
5796
5797 return 0;
5798 }
5799
5800 /**
5801  * ice_find_prof_vsig - find a VSIG with a specific profile handle
5802  * @hw: pointer to the HW struct
5803  * @blk: hardware block
5804  * @hdl: the profile handle of the profile to search for
5805  * @vsig: returns the VSIG with the matching profile
5806  */
5807 static bool
5808 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
5809 {
5810 struct ice_vsig_prof *t;
5811 struct list_head lst;
5812 int status;
5813
5814 INIT_LIST_HEAD(&lst);
5815
5816 t = kzalloc(sizeof(*t), GFP_KERNEL);
5817 if (!t)
5818 return false;
5819
5820 t->profile_cookie = hdl;
5821 list_add(&t->list, &lst);
5822
5823 status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
5824
5825 list_del(&t->list);
5826 kfree(t);
5827
5828 return !status;
5829 }
5830
5831 /**
5832  * ice_add_prof_id_flow - add a profile to the flow for a VSI
5833  * @hw: pointer to the HW struct
5834  * @blk: hardware block
5835  * @vsi: the VSI to enable with the profile specified by @hdl
5836  * @hdl: profile handle
5837  *
5838  * Calling this function will update the hardware tables to enable the
5839  * profile indicated by @hdl for the VSI specified by @vsi. Once
5840  * successfully called, the flow will be enabled.
5841  */
5842 int
5843 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
5844 {
5845 struct ice_vsig_prof *tmp1, *del1;
5846 struct ice_chs_chg *tmp, *del;
5847 struct list_head union_lst;
5848 struct list_head chg;
5849 int status;
5850 u16 vsig;
5851
5852 INIT_LIST_HEAD(&union_lst);
5853 INIT_LIST_HEAD(&chg);
5854
5855
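/* Get the profile and its change entries */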
5856 status = ice_get_prof(hw, blk, hdl, &chg);
5857 if (status)
5858 return status;
5859
5860
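/* determine if the VSI is already part of a VSIG */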
5861 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
5862 if (!status && vsig) {
5863 bool only_vsi;
5864 u16 or_vsig;
5865 u16 ref;
5866
5867
5868 or_vsig = vsig;
5869
5870 /* make sure that there is no overlap/conflict between the new
5871  * characteristics and the existing ones; we don't support that
5872  * scenario
5873  */
5874 if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
5875 status = -EEXIST;
5876 goto err_ice_add_prof_id_flow;
5877 }
5878
5879
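/* check whether the requesting VSI is the only VSI in the VSIG */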
5880 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
5881 if (status)
5882 goto err_ice_add_prof_id_flow;
5883 only_vsi = (ref == 1);
5884
5885 /* create a union of the current profiles and the one being
5886  * added
5887  */
5888 status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
5889 if (status)
5890 goto err_ice_add_prof_id_flow;
5891
5892 status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
5893 if (status)
5894 goto err_ice_add_prof_id_flow;
5895
5896
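/* search for an existing VSIG with an exact characteristic match */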
5897 status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
5898 if (!status) {
5899
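/* found a match, so move the VSI to that VSIG */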
5900 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5901 if (status)
5902 goto err_ice_add_prof_id_flow;
5903
5904 /* VSI has been moved out of or_vsig. If or_vsig had only
5905  * that one VSI, it is now empty and can be removed.
5906  */
5907 if (only_vsi) {
5908 status = ice_rem_vsig(hw, blk, or_vsig, &chg);
5909 if (status)
5910 goto err_ice_add_prof_id_flow;
5911 }
5912 } else if (only_vsi) {
5913 /* If the original VSIG only contains one VSI, then it
5914  * will be the requesting VSI. In this case the VSI is
5915  * not sharing entries and we can simply add the new
5916  * profile to the VSIG.
5917  */
5918 status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
5919 &chg);
5920 if (status)
5921 goto err_ice_add_prof_id_flow;
5922
5923
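/* Adjust priorities */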
5924 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5925 if (status)
5926 goto err_ice_add_prof_id_flow;
5927 } else {
5928
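/* No match, so a new VSIG is needed */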
5929 status = ice_create_vsig_from_lst(hw, blk, vsi,
5930 &union_lst, &vsig,
5931 &chg);
5932 if (status)
5933 goto err_ice_add_prof_id_flow;
5934
5935
5936 status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
5937 if (status)
5938 goto err_ice_add_prof_id_flow;
5939 }
5940 } else {
5941 /* VSI is not yet in a non-default VSIG; need to find or add one */
5942 /* search for an existing VSIG with an exact characteristic match */
5943 if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
5944 /* found an exact match */
5945 /* add or move the VSI to the VSIG that matches */
5946 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
5947 if (status)
5948 goto err_ice_add_prof_id_flow;
5949 } else {
5950 /* did not find an exact match */
5951 /* so we need to create a new VSIG for this profile */
5952 status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
5953 &chg);
5954 if (status)
5955 goto err_ice_add_prof_id_flow;
5956 }
5957 }
5958
5959
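/* update hardware */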
5960 if (!status)
5961 status = ice_upd_prof_hw(hw, blk, &chg);
5962
5963 err_ice_add_prof_id_flow:
5964 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
5965 list_del(&del->list_entry);
5966 devm_kfree(ice_hw_to_dev(hw), del);
5967 }
5968
5969 list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
5970 list_del(&del1->list);
5971 devm_kfree(ice_hw_to_dev(hw), del1);
5972 }
5973
5974 return status;
5975 }
5976
5977 /**
5978  * ice_rem_prof_from_list - remove a profile from a list
5979  * @hw: pointer to the HW struct
5980  * @lst: the list from which to remove the profile
5981  * @hdl: the profile handle indicating the profile to remove
5982  */
5983 static int
5984 ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
5985 {
5986 struct ice_vsig_prof *ent, *tmp;
5987
5988 list_for_each_entry_safe(ent, tmp, lst, list)
5989 if (ent->profile_cookie == hdl) {
5990 list_del(&ent->list);
5991 devm_kfree(ice_hw_to_dev(hw), ent);
5992 return 0;
5993 }
5994
5995 return -ENOENT;
5996 }
5997
5998 /**
5999  * ice_rem_prof_id_flow - remove a profile from the flow for a VSI
6000  * @hw: pointer to the HW struct
6001  * @blk: hardware block
6002  * @vsi: the VSI from which to remove the profile specified by @hdl
6003  * @hdl: profile tracking handle
6004  *
6005  * Calling this function will update the hardware tables to remove the
6006  * profile indicated by @hdl for the VSI specified by @vsi. Once
6007  * successfully called, the flow will be disabled.
6008  */
6009 int
6010 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
6011 {
6012 struct ice_vsig_prof *tmp1, *del1;
6013 struct ice_chs_chg *tmp, *del;
6014 struct list_head chg, copy;
6015 int status;
6016 u16 vsig;
6017
6018 INIT_LIST_HEAD(&copy);
6019 INIT_LIST_HEAD(&chg);
6020
6021
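/* determine if the VSI is already part of a VSIG */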
6022 status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
6023 if (!status && vsig) {
6024 bool last_profile;
6025 bool only_vsi;
6026 u16 ref;
6027
6028
6029 last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
6030 status = ice_vsig_get_ref(hw, blk, vsig, &ref);
6031 if (status)
6032 goto err_ice_rem_prof_id_flow;
6033 only_vsi = (ref == 1);
6034
6035 if (only_vsi) {
6036 /* If the original VSIG only contains one reference,
6037  * which will be the requesting VSI, then the VSI is not
6038  * sharing entries and we can simply remove the specific
6039  * characteristics from the VSIG.
6040  */
6041
6042 if (last_profile) {
6043 /* If there are no profiles left for this VSIG,
6044  * then simply remove the VSIG.
6045  */
6046 status = ice_rem_vsig(hw, blk, vsig, &chg);
6047 if (status)
6048 goto err_ice_rem_prof_id_flow;
6049 } else {
6050 status = ice_rem_prof_id_vsig(hw, blk, vsig,
6051 hdl, &chg);
6052 if (status)
6053 goto err_ice_rem_prof_id_flow;
6054
6055
6056 status = ice_adj_prof_priorities(hw, blk, vsig,
6057 &chg);
6058 if (status)
6059 goto err_ice_rem_prof_id_flow;
6060 }
6061
6062 } else {
6063
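/* Make a copy of the VSIG's list of profiles */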
6064 status = ice_get_profs_vsig(hw, blk, vsig, &copy);
6065 if (status)
6066 goto err_ice_rem_prof_id_flow;
6067
6068
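/* Remove the specified profile entry from the copy */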
6069 status = ice_rem_prof_from_list(hw, &copy, hdl);
6070 if (status)
6071 goto err_ice_rem_prof_id_flow;
6072
6073 if (list_empty(&copy)) {
6074 status = ice_move_vsi(hw, blk, vsi,
6075 ICE_DEFAULT_VSIG, &chg);
6076 if (status)
6077 goto err_ice_rem_prof_id_flow;
6078
6079 } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
6080 &vsig)) {
6081 /* The copied profile list (with the specified profile
6082  * removed) exactly matches the profile list of an existing
6083  * VSIG. There is no need to create a new VSIG with a new
6084  * set of TCAM entries in this case; simply move the VSI to
6085  * the VSIG whose characteristics already match the new,
6086  * reduced profile set.
6087  */
6088 status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
6089 if (status)
6090 goto err_ice_rem_prof_id_flow;
6091 } else {
6092 /* since no existing VSIG supports this
6093  * characteristic pattern, we need to create a
6094  * new VSIG and TCAM entries
6095  */
6096 status = ice_create_vsig_from_lst(hw, blk, vsi,
6097 &copy, &vsig,
6098 &chg);
6099 if (status)
6100 goto err_ice_rem_prof_id_flow;
6101
6102
6103 status = ice_adj_prof_priorities(hw, blk, vsig,
6104 &chg);
6105 if (status)
6106 goto err_ice_rem_prof_id_flow;
6107 }
6108 }
6109 } else {
6110 status = -ENOENT;
6111 }
6112
6113
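/* update hardware tables */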
6114 if (!status)
6115 status = ice_upd_prof_hw(hw, blk, &chg);
6116
6117 err_ice_rem_prof_id_flow:
6118 list_for_each_entry_safe(del, tmp, &chg, list_entry) {
6119 list_del(&del->list_entry);
6120 devm_kfree(ice_hw_to_dev(hw), del);
6121 }
6122
6123 list_for_each_entry_safe(del1, tmp1, &copy, list) {
6124 list_del(&del1->list);
6125 devm_kfree(ice_hw_to_dev(hw), del1);
6126 }
6127
6128 return status;
6129 }