// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_sched.h"
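
/**
 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: Scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * to the SW DB.
 */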
static int
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return -EINVAL;

	hw = pi->hw;

	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	/* coverity[suspicious_sizeof] */
	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
				      sizeof(*root), GFP_KERNEL);
	if (!root->children) {
		devm_kfree(ice_hw_to_dev(hw), root);
		return -ENOMEM;
	}

	memcpy(&root->info, info, sizeof(*info));
	pi->root = root;
	return 0;
}
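
/**
 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
 * This function needs to be called when holding the port_info->sched_lock
 */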
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}
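
/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements response
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc)
 */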
static int
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
			    u16 elems_req, void *buf, u16 buf_size,
			    u16 *elems_resp, struct ice_sq_cd *cd)
{
	struct ice_aqc_sched_elem_cmd *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.sched_elem_cmd;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
	cmd->num_elem_req = cpu_to_le16(elems_req);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_resp)
		*elems_resp = le16_to_cpu(cmd->num_elem_resp);

	return status;
}
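
/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements (0x0404)
 */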
int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}
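
/**
 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: Scheduler layer of the node
 * @info: Scheduler element information from firmware
 *
 * This function inserts a scheduler node to the SW DB.
 */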
int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data elem;
	struct ice_sched_node *parent;
	struct ice_sched_node *node;
	struct ice_hw *hw;
	int status;

	if (!pi)
		return -EINVAL;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     le32_to_cpu(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
			  le32_to_cpu(info->parent_teid));
		return -EINVAL;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
	if (status)
		return status;

	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		node->children = devm_kcalloc(ice_hw_to_dev(hw),
					      hw->max_children[layer],
					      sizeof(*node), GFP_KERNEL);
		if (!node->children) {
			devm_kfree(ice_hw_to_dev(hw), node);
			return -ENOMEM;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	node->info = elem;
	return 0;
}
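
/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements (0x040F)
 */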
static int
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}
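
/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function removes nodes from HW
 */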
static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	u16 buf_size;
	int status;

	buf_size = struct_size(buf, teid, num_nodes);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
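
/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */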
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	return pi->sib_head[parent->tc_num][layer];
}
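
/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */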
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}
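
/**
 * ice_free_sched_node - Free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */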
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node
	 * The parent array is updated below and that shifts the nodes
	 * in the array. So always pick the first child if num children > 0
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);

		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent's child list */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* unlink the node from its previous sibling, if any */
		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		devm_kfree(ice_hw_to_dev(hw), node->children);
	devm_kfree(ice_hw_to_dev(hw), node);
}
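
/**
 * ice_aq_get_dflt_topo - gets default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get default scheduler topology (0x400)
 */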
static int
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}
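
/**
 * ice_aq_add_sched_elems - adds scheduling element
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements (0x0401)
 */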
static int
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}
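
/**
 * ice_aq_cfg_sched_elems - configures scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to configure
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_cfgd: returns total number of elements configured
 * @cd: pointer to command details structure or NULL
 *
 * Configure scheduling elements (0x0403)
 */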
static int
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
		       struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
		       u16 *elems_cfgd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_cfgd, cd);
}
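
/**
 * ice_aq_move_sched_elems - move scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to move
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_movd: returns total number of groups moved
 * @cd: pointer to command details structure or NULL
 *
 * Move scheduling elements (0x0408)
 */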
static int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
			struct ice_aqc_move_elem *buf, u16 buf_size,
			u16 *grps_movd, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_movd, cd);
}
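
/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements (0x0409)
 */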
static int
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}
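
/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * resume scheduling elements (0x040A)
 */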
static int
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}
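
/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation (0x0412)
 */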
static int
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
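
/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes
 */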
static int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	u16 i, buf_size, num_elem_ret = 0;
	__le32 *buf;
	int status;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < num_nodes; i++)
		buf[i] = cpu_to_le32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
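
/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */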
static int
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return -EINVAL;
	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
						      new_numqs,
						      sizeof(*q_ctx),
						      GFP_KERNEL);
		if (!vsi_ctx->lan_q_ctx[tc])
			return -ENOMEM;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return 0;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return -ENOMEM;
		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
		       prev_num * sizeof(*q_ctx));
		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return 0;
}
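
/**
 * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */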
static int
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return -EINVAL;
	/* allocate RDMA queue contexts */
	if (!vsi_ctx->rdma_q_ctx[tc]) {
		vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
						       new_numqs,
						       sizeof(*q_ctx),
						       GFP_KERNEL);
		if (!vsi_ctx->rdma_q_ctx[tc])
			return -ENOMEM;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
		return 0;
	}
	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];

		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return -ENOMEM;
		memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
		       prev_num * sizeof(*q_ctx));
		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
		vsi_ctx->rdma_q_ctx[tc] = q_ctx;
		vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
	}
	return 0;
}
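
/**
 * ice_aq_rl_profile - performs a rate limiting task
 * @hw: pointer to the HW struct
 * @opcode: opcode for add, query, or remove profile(s)
 * @num_profiles: the number of profiles
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_processed: number of processed add or remove profile(s) to return
 * @cd: pointer to command details structure
 *
 * RL profile function to add, query, or remove profile(s)
 */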
static int
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
		  u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
	struct ice_aqc_rl_profile *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.rl_profile;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->num_profiles = cpu_to_le16(num_profiles);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_processed)
		*num_processed = le16_to_cpu(cmd->num_processed);
	return status;
}
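
/**
 * ice_aq_add_rl_profile - adds rate limiting profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to be add
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_added: total number of profiles added to return
 * @cd: pointer to command details structure
 *
 * Add RL profile (0x0410)
 */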
static int
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
		      struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
		      u16 *num_profiles_added, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
				 buf, buf_size, num_profiles_added, cd);
}
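
/**
 * ice_aq_remove_rl_profile - removes RL profile(s)
 * @hw: pointer to the HW struct
 * @num_profiles: the number of profile(s) to remove
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_profiles_removed: total number of profiles removed to return
 * @cd: pointer to command details structure or NULL
 *
 * Remove RL profile (0x0415)
 */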
static int
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
			 struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
			 u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
				 num_profiles, buf, buf_size,
				 num_profiles_removed, cd);
}
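
/**
 * ice_sched_del_rl_profile - remove RL profile
 * @hw: pointer to the HW struct
 * @rl_info: rate limit profile information
 *
 * If the profile ID is not referenced anymore, it removes profile ID with
 * its associated data from HW DB, and locally. The caller needs to hold
 * scheduler lock.
 */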
static int
ice_sched_del_rl_profile(struct ice_hw *hw,
			 struct ice_aqc_rl_profile_info *rl_info)
{
	struct ice_aqc_rl_profile_elem *buf;
	u16 num_profiles_removed;
	u16 num_profiles = 1;
	int status;

	if (rl_info->prof_id_ref != 0)
		return -EBUSY;

	/* Safe to remove profile ID */
	buf = &rl_info->profile;
	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
					  &num_profiles_removed, NULL);
	if (status || num_profiles_removed != num_profiles)
		return -EIO;

	/* Delete stale entry now */
	list_del(&rl_info->list_entry);
	devm_kfree(ice_hw_to_dev(hw), rl_info);
	return status;
}
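
/**
 * ice_sched_clear_rl_prof - clears RL prof entries
 * @pi: port information structure
 *
 * This function removes all RL profiles from HW as well as from SW DB.
 */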
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
	u16 ln;

	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
					 &pi->rl_prof_list[ln], list_entry) {
			struct ice_hw *hw = pi->hw;
			int status;

			rl_prof_elem->prof_id_ref = 0;
			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
			if (status) {
				ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
				/* On error, free mem required */
				list_del(&rl_prof_elem->list_entry);
				devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
			}
		}
	}
}
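
/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes aggregator list and frees up aggregator related memory
 * previously allocated.
 */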
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
		list_del(&agg_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), agg_info);
	}
}
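
/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */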
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	if (!pi)
		return;

	ice_sched_clear_rl_prof(pi);
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}
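
/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */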
void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}
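
/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */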
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	if (hw->layer_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
		hw->layer_info = NULL;
	}

	ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}
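
/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 *
 * This function adds nodes to HW as well as to SW DB for a given layer
 */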
static int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	struct ice_hw *hw = pi->hw;
	size_t buf_size;
	int status = 0;
	u32 teid;

	buf_size = struct_size(buf, generic, num_nodes);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		devm_kfree(ice_hw_to_dev(hw), buf);
		return -EIO;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling before the node is added */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
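
/**
 * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * Add nodes into specific HW layer.
 */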
static int
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
				struct ice_sched_node *tc_node,
				struct ice_sched_node *parent, u8 layer,
				u16 num_nodes, u32 *first_node_teid,
				u16 *num_nodes_added)
{
	u16 max_child_nodes;

	*num_nodes_added = 0;

	if (!num_nodes)
		return 0;

	if (!parent || layer < pi->hw->sw_entry_point_layer)
		return -EINVAL;

	/* max children per node per layer */
	max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];

	/* current number of children + required nodes exceed max children */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return -EIO;
		return -ENOSPC;
	}

	return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				   num_nodes_added, first_node_teid);
}
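
/**
 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer.
 */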
static int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes = num_nodes;
	int status = 0;

	*num_nodes_added = 0;
	while (*num_nodes_added < num_nodes) {
		u16 max_child_nodes, num_added = 0;
		/* cppcheck-suppress unusedVariable */
		u32 temp;

		status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
							 layer, new_num_nodes,
							 first_teid_ptr,
							 &num_added);
		if (!status)
			*num_nodes_added += num_added;
		/* added more nodes than requested ? */
		if (*num_nodes_added > num_nodes) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
				  *num_nodes_added);
			status = -EIO;
			break;
		}
		/* break if all the nodes are added successfully */
		if (!status && (*num_nodes_added == num_nodes))
			break;
		/* break if the error is not max limit */
		if (status && status != -ENOSPC)
			break;
		/* Exceeded the max children */
		max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
		} else {
			/* This parent is full, try the next sibling */
			parent = parent->sibling;
			/* Don't modify the first node TEID memory if the
			 * first node was added already in the above call.
			 * Instead send some temp memory for all other
			 * recursive calls.
			 */
			if (num_added)
				first_teid_ptr = &temp;

			new_num_nodes = num_nodes - *num_nodes_added;
		}
	}
	return status;
}
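
/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */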
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}
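
/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */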
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9               6
	 *     7               4
	 *     5 or less       sw_entry_point_layer
	 */
	/* calculate the VSI layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}
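
/**
 * ice_sched_get_agg_layer - get the current aggregator layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current aggregator layer number
 */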
static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
	/* Num Layers       aggregator layer
	 *     9               4
	 *     7 or less       sw_entry_point_layer
	 */
	/* calculate the aggregator layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}
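
/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */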
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	node = pi->root;
	while (node) {
		if (!node->num_children)
			break;
		node = node->children[0];
	}
	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		int status;

		/* remove the default leaf node */
		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
		if (!status)
			ice_free_sched_node(pi, node);
	}
}
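
/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */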
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			ice_free_sched_node(pi, node);
			break;
		}

		if (!node->num_children)
			break;
		node = node->children[0];
	}
}
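
/**
 * ice_sched_init_port - Initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB.
 */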
int ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	int status;
	u8 i, j;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = -EINVAL;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = -EINVAL;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(&pi->rl_prof_list[i]);

err_init_port:
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
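
/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */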
int ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	__le16 max_sibl;
	int status = 0;
	u16 i;

	if (hw->layer_info)
		return status;

	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		le16_to_cpu(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
		hw->max_children[i] = le16_to_cpu(max_sibl);
	}

	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
				      (hw->num_tx_sched_layers *
				       sizeof(*hw->layer_info)),
				      GFP_KERNEL);
	if (!hw->layer_info) {
		status = -ENOMEM;
		goto sched_query_out;
	}

sched_query_out:
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
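
/**
 * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
 * @hw: pointer to the HW struct
 *
 * Determine the PSM clock frequency and store in HW struct
 */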
void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
{
	u32 val, clk_src;

	val = rd32(hw, GLGEN_CLKSTAT_SRC);
	clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
		GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;

#define PSM_CLK_SRC_367_MHZ 0x0
#define PSM_CLK_SRC_416_MHZ 0x1
#define PSM_CLK_SRC_446_MHZ 0x2
#define PSM_CLK_SRC_390_MHZ 0x3

	switch (clk_src) {
	case PSM_CLK_SRC_367_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_416_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_446_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
		break;
	case PSM_CLK_SRC_390_MHZ:
		hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
		break;
	default:
		ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
			  clk_src);
		/* fall back to a safe default */
		hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
	}
}
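
/**
 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
 * @hw: pointer to the HW struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base node
 * subtree or not
 */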
static bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional, and wouldn't
		 * go more than 8 calls
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}
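
/**
 * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
 * @pi: port information structure
 * @vsi_node: software VSI handle
 * @qgrp_node: first queue group node identified for scanning
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node by scanning
 * qgrp_node siblings for the queue group with the fewest number of queues
 * currently assigned.
 */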
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
			struct ice_sched_node *vsi_node,
			struct ice_sched_node *qgrp_node, u8 owner)
{
	struct ice_sched_node *min_qgrp;
	u8 min_children;

	if (!qgrp_node)
		return qgrp_node;
	min_children = qgrp_node->num_children;
	if (!min_children)
		return qgrp_node;
	min_qgrp = qgrp_node;
	/* scan all queue groups until find a node which has less than the
	 * minimum number of queues
	 */
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < min_children &&
			    qgrp_node->owner == owner) {
				/* replace the new min queue group node */
				min_qgrp = qgrp_node;
				min_children = min_qgrp->num_children;
				/* break if it has no children, */
				if (!min_children)
					break;
			}
		qgrp_node = qgrp_node->sibling;
	}
	return min_qgrp;
}
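
/**
 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node
 */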
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node;
	struct ice_vsi_ctx *vsi_ctx;
	u16 max_children;
	u8 qgrp_layer;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* validate invalid VSI ID */
	if (!vsi_node)
		return NULL;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

	/* Select the best queue group */
	return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
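
/**
 * ice_sched_get_vsi_node - Get a VSI node based on VSI handle
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @vsi_handle: software VSI handle
 *
 * This function retrieves a VSI node for a given VSI handle from a given
 * TC branch
 */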
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u16 vsi_handle)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(pi->hw);
	node = ice_sched_get_first_node(pi, tc_node, vsi_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->vsi_handle == vsi_handle)
			return node;
		node = node->sibling;
	}

	return node;
}
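
/**
 * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
 * @pi: pointer to the port information structure
 * @tc_node: pointer to the TC node
 * @agg_id: aggregator ID
 *
 * This function retrieves an aggregator node for a given aggregator ID from
 * a given TC branch
 */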
static struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		       u32 agg_id)
{
	struct ice_sched_node *node;
	struct ice_hw *hw = pi->hw;
	u8 agg_layer;

	if (!hw)
		return NULL;
	agg_layer = ice_sched_get_agg_layer(hw);
	node = ice_sched_get_first_node(pi, tc_node, agg_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->agg_id == agg_id)
			return node;
		node = node->sibling;
	}

	return node;
}
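
/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the HW struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */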
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from queue group to VSI layer */
	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIV_ROUND_UP(num, hw->max_children[i]);

		/* need at least one node */
		num_nodes[i] = num ? num : 1;
	}
}
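
/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * LAN and RDMA separately.
 */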
static int
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;
	int status;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	for (i = vsil + 1; i <= qgl; i++) {
		if (!parent)
			return -EIO;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return -EIO;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			parent = parent->children[0];
		}
	}

	return 0;
}
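
/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @pi: pointer to the port info structure
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 * layers
 */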
static void
ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(pi, tc_node, (u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children < pi->hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* tree has one intermediate node to add this new VSI.
			 * So no need to calculate supported nodes for below
			 * layers.
			 */
			if (node)
				break;
			/* all the nodes are full, allocate a new one */
			num_nodes[i]++;
		}
}
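
/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */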
static int
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;
	int status;

	if (!pi)
		return -EINVAL;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return -EIO;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return -EIO;

		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return 0;
}
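
/**
 * ice_sched_add_vsi_to_topo - add a new VSI into tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 *
 * This function adds a new VSI into scheduler tree
 */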
static int
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *tc_node;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EINVAL;

	/* calculate number of supported nodes needed for this VSI */
	ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);

	/* add VSI supported nodes to TC subtree */
	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
					       num_nodes);
}
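
/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues
 */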
static int
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;
	int status = 0;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EIO;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return -EIO;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return -EINVAL;

	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		prev_numqs = vsi_ctx->sched.max_lanq[tc];
	else
		prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
	if (owner == ICE_SCHED_NODE_OWNER_LAN) {
		status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	} else {
		status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
		if (status)
			return status;
	}

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the max number of queue configuration all the time. Update the
	 * tree only if number of queues > previous number of queues. This may
	 * leave some extra nodes in the tree if number of queues < previous
	 * number but that wouldn't harm anything. Removing those extra nodes
	 * may complicate the code if those nodes are part of SRL or
	 * individually rate limited.
	 */
	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
					       new_num_nodes, owner);
	if (status)
		return status;
	if (owner == ICE_SCHED_NODE_OWNER_LAN)
		vsi_ctx->sched.max_lanq[tc] = new_numqs;
	else
		vsi_ctx->sched.max_rdmaq[tc] = new_numqs;

	return 0;
}
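
/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: LAN or RDMA
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 */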
int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_hw *hw = pi->hw;
	int status = 0;

	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EINVAL;
	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return -EINVAL;
	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);

	/* suspend the VSI if TC is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = le32_to_cpu(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			return -EIO;

		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
		vsi_ctx->sched.max_rdmaq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = le32_to_cpu(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}
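
/**
 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes single aggregator VSI info entry from
 * aggregator list.
 */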
static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				list_del(&agg_vsi_info->list_entry);
				devm_kfree(ice_hw_to_dev(pi->hw),
					   agg_vsi_info);
				return;
			}
	}
}
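
/**
 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
 * @node: pointer to the sub-tree node
 *
 * This function checks for a leaf node presence in a given sub-tree node.
 */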
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < node->num_children; i++)
		if (ice_sched_is_leaf_node_present(node->children[i]))
			return true;
	/* check for a leaf node */
	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
}
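
/**
 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @owner: LAN or RDMA
 *
 * This function removes the VSI and its LAN or RDMA children nodes from the
 * scheduler tree.
 */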
static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
	struct ice_vsi_ctx *vsi_ctx;
	int status = -EINVAL;
	u8 i;

	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return status;
	mutex_lock(&pi->sched_lock);
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		goto exit_sched_rm_vsi_cfg;

	ice_for_each_traffic_class(i) {
		struct ice_sched_node *vsi_node, *tc_node;
		u8 j = 0;

		tc_node = ice_sched_get_tc_node(pi, i);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		if (ice_sched_is_leaf_node_present(vsi_node)) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
			status = -EBUSY;
			goto exit_sched_rm_vsi_cfg;
		}
		while (j < vsi_node->num_children) {
			if (vsi_node->children[j]->owner == owner) {
				ice_free_sched_node(pi, vsi_node->children[j]);

				/* reset the counter again since the num
				 * children will be updated after node removal
				 */
				j = 0;
			} else {
				j++;
			}
		}
		/* remove the VSI if it has no children */
		if (!vsi_node->num_children) {
			ice_free_sched_node(pi, vsi_node);
			vsi_ctx->sched.vsi_node[i] = NULL;

			/* clean up aggregator related VSI info if any */
			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
		}
		if (owner == ICE_SCHED_NODE_OWNER_LAN)
			vsi_ctx->sched.max_lanq[i] = 0;
		else
			vsi_ctx->sched.max_rdmaq[i] = 0;
	}
	status = 0;

exit_sched_rm_vsi_cfg:
	mutex_unlock(&pi->sched_lock);
	return status;
}
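
/**
 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its LAN children nodes from scheduler tree
 * for all TCs.
 */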
int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
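
/**
 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its RDMA children nodes from scheduler tree
 * for all TCs.
 */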
int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
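
/**
 * ice_get_agg_info - get the aggregator ID
 * @hw: pointer to the hardware structure
 * @agg_id: aggregator ID
 *
 * This function validates aggregator ID. The function returns info if
 * aggregator ID is present in list otherwise it returns null.
 */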
static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
{
	struct ice_sched_agg_info *agg_info;

	list_for_each_entry(agg_info, &hw->agg_list, list_entry)
		if (agg_info->agg_id == agg_id)
			return agg_info;

	return NULL;
}
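
/**
 * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
 * @hw: pointer to the HW struct
 * @node: pointer to a child node
 * @num_nodes: num nodes count array
 *
 * This function walks through the aggregator subtree to find a free parent
 * node
 */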
static struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
			      u16 *num_nodes)
{
	u8 l = node->tx_sched_layer;
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(hw);

	/* Is it VSI parent layer ? */
	if (l == vsil - 1)
		return (node->num_children < hw->max_children[l]) ? node : NULL;

	/* We have intermediate nodes. Let's walk through the subtree. If the
	 * intermediate node has space to add a new node then clear the count
	 */
	if (node->num_children < hw->max_children[l])
		num_nodes[l] = 0;
	/* The below recursive call is intentional and wouldn't go more than
	 * 2 or 3 iterations.
	 */
	for (i = 0; i < node->num_children; i++) {
		struct ice_sched_node *parent;

		parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
						       num_nodes);
		if (parent)
			return parent;
	}

	return NULL;
}
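
/**
 * ice_sched_update_parent - update the new parent in SW DB
 * @new_parent: pointer to a new parent node
 * @node: pointer to a child node
 *
 * This function removes the child from the old parent and adds it to a new
 * parent
 */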
static void
ice_sched_update_parent(struct ice_sched_node *new_parent,
			struct ice_sched_node *node)
{
	struct ice_sched_node *old_parent;
	u8 i, j;

	old_parent = node->parent;

	/* update the old parent children */
	for (i = 0; i < old_parent->num_children; i++)
		if (old_parent->children[i] == node) {
			for (j = i + 1; j < old_parent->num_children; j++)
				old_parent->children[j - 1] =
					old_parent->children[j];
			old_parent->num_children--;
			break;
		}

	/* now move the node to a new parent */
	new_parent->children[new_parent->num_children++] = node;
	node->parent = new_parent;
	node->info.parent_teid = new_parent->info.node_teid;
}
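
/**
 * ice_sched_move_nodes - move child nodes to a given parent
 * @pi: port information structure
 * @parent: pointer to parent node
 * @num_items: number of child nodes to be moved
 * @list: pointer to child node teids
 *
 * This function moves the child nodes to a given parent.
 */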
static int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
		     u16 num_items, u32 *list)
{
	struct ice_aqc_move_elem *buf;
	struct ice_sched_node *node;
	u16 i, grps_movd = 0;
	struct ice_hw *hw;
	int status = 0;
	u16 buf_len;

	hw = pi->hw;

	if (!parent || !num_items)
		return -EINVAL;

	/* Does parent have enough space */
	if (parent->num_children + num_items >
	    hw->max_children[parent->tx_sched_layer])
		return -ENOSPC;

	buf_len = struct_size(buf, teid, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < num_items; i++) {
		node = ice_sched_find_node_by_teid(pi->root, list[i]);
		if (!node) {
			status = -EINVAL;
			goto move_err_exit;
		}

		buf->hdr.src_parent_teid = node->info.parent_teid;
		buf->hdr.dest_parent_teid = parent->info.node_teid;
		buf->teid[0] = node->info.node_teid;
		buf->hdr.num_elems = cpu_to_le16(1);
		status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
						 &grps_movd, NULL);
		if (status || grps_movd != 1) {
			status = -EIO;
			goto move_err_exit;
		}

		/* update the SW DB */
		ice_sched_update_parent(parent, node);
	}

move_err_exit:
	kfree(buf);
	return status;
}
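
/**
 * ice_sched_move_vsi_to_agg - move VSI to aggregator node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function moves a VSI to an aggregator node or its subtree.
 * Intermediate nodes may be created if required.
 */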
static int
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
			  u8 tc)
{
	struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	u32 first_node_teid, vsi_teid;
	u16 num_nodes_added;
	u8 aggl, vsil, i;
	int status;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EIO;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return -ENOENT;

	vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
	if (!vsi_node)
		return -ENOENT;

	/* Is this VSI already part of given aggregator? */
	if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
		return 0;

	aggl = ice_sched_get_agg_layer(pi->hw);
	vsil = ice_sched_get_vsi_layer(pi->hw);

	/* set intermediate node count to 1 between aggregator and VSI layers */
	for (i = aggl + 1; i < vsil; i++)
		num_nodes[i] = 1;

	/* Check if the aggregator subtree has any free node to add the VSI */
	for (i = 0; i < agg_node->num_children; i++) {
		parent = ice_sched_get_free_vsi_parent(pi->hw,
						       agg_node->children[i],
						       num_nodes);
		if (parent)
			goto move_nodes;
	}

	/* add new nodes */
	parent = agg_node;
	for (i = aggl + 1; i < vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status || num_nodes[i] != num_nodes_added)
			return -EIO;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return -EIO;
	}

move_nodes:
	vsi_teid = le32_to_cpu(vsi_node->info.node_teid);
	return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
}
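
/**
 * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: traffic class number
 * @rm_vsi_info: true or false
 *
 * This function move all the VSI(s) to the default aggregator and delete
 * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The
 * caller holds the scheduler lock.
 */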
static int
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
			     struct ice_sched_agg_info *agg_info, u8 tc,
			     bool rm_vsi_info)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_vsi_info *tmp;
	int status = 0;

	list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
				 list_entry) {
		u16 vsi_handle = agg_vsi_info->vsi_handle;

		/* Move VSI to default aggregator */
		if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
			continue;

		status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
						   ICE_DFLT_AGG_ID, tc);
		if (status)
			break;

		clear_bit(tc, agg_vsi_info->tc_bitmap);
		if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(pi->hw), agg_vsi_info);
		}
	}

	return status;
}
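
/**
 * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
 * @pi: port information structure
 * @node: node pointer
 *
 * This function checks whether the aggregator is attached with any VSI or not.
 */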
static bool
ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
{
	u8 vsil, i;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	if (node->tx_sched_layer < vsil - 1) {
		for (i = 0; i < node->num_children; i++)
			if (ice_sched_is_agg_inuse(pi, node->children[i]))
				return true;
		return false;
	} else {
		return node->num_children ? true : false;
	}
}
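
/**
 * ice_sched_rm_agg_cfg - remove the aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function removes the aggregator node and intermediate nodes if any
 * from the given TC
 */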
static int
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *tc_node, *agg_node;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EIO;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	if (!agg_node)
		return -ENOENT;

	/* Can't remove the aggregator node if it has children */
	if (ice_sched_is_agg_inuse(pi, agg_node))
		return -EBUSY;

	/* need to remove the whole subtree if aggregator node is the
	 * only child.
	 */
	while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
		struct ice_sched_node *parent = agg_node->parent;

		if (!parent)
			return -EIO;

		if (parent->num_children > 1)
			break;

		agg_node = parent;
	}

	ice_free_sched_node(pi, agg_node);
	return 0;
}
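
/**
 * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
 * @pi: port information structure
 * @agg_info: aggregator info
 * @tc: TC number
 * @rm_vsi_info: bool value true or false
 *
 * This function removes aggregator reference to VSI of given TC. It removes
 * the aggregator configuration completely for requested TC. The caller needs
 * to hold the scheduler lock.
 */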
static int
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
		  u8 tc, bool rm_vsi_info)
{
	int status = 0;

	/* If nothing to remove - return success */
	if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
		goto exit_rm_agg_cfg_tc;

	status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
	if (status)
		goto exit_rm_agg_cfg_tc;

	/* Delete aggregator node(s) */
	status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
	if (status)
		goto exit_rm_agg_cfg_tc;

	clear_bit(tc, agg_info->tc_bitmap);
exit_rm_agg_cfg_tc:
	return status;
}
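
/**
 * ice_save_agg_tc_bitmap - save aggregator TC bitmap
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc_bitmap: 8 bits TC bitmap
 *
 * Save aggregator TC bitmap. This function needs to be called with scheduler
 * lock held.
 */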
static int
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
		       unsigned long *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return -EINVAL;
	bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
		    ICE_MAX_TRAFFIC_CLASS);
	return 0;
}
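
/**
 * ice_sched_add_agg_cfg - create an aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @tc: TC number
 *
 * This function creates an aggregator node and intermediate nodes if required
 * for the given TC
 */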
static int
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	struct ice_sched_node *parent, *agg_node, *tc_node;
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_hw *hw = pi->hw;
	u32 first_node_teid;
	u16 num_nodes_added;
	int status = 0;
	u8 i, aggl;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return -EIO;

	agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
	/* Does Agg node already exist ? */
	if (agg_node)
		return status;

	aggl = ice_sched_get_agg_layer(hw);

	/* need one node in Agg layer */
	num_nodes[aggl] = 1;

	/* Check whether the intermediate nodes have space to add the
	 * new aggregator. If they are full, then SW needs to allocate a new
	 * intermediate node on those layers
	 */
	for (i = hw->sw_entry_point_layer; i < aggl; i++) {
		parent = ice_sched_get_first_node(pi, tc_node, i);

		/* scan all the siblings */
		while (parent) {
			if (parent->num_children < hw->max_children[i])
				break;
			parent = parent->sibling;
		}

		/* all the nodes are full, reserve one for this layer */
		if (!parent)
			num_nodes[i]++;
	}

	/* add the aggregator node */
	parent = tc_node;
	for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
		if (!parent)
			return -EIO;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_nodes_added);
		if (status || num_nodes[i] != num_nodes_added)
			return -EIO;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_nodes_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			/* register aggregator ID with the aggregator node */
			if (parent && i == aggl)
				parent->agg_id = agg_id;
		} else {
			parent = parent->children[0];
		}
	}

	return 0;
}
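
/**
 * ice_sched_cfg_agg - configure aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * It registers a unique aggregator node into scheduler services. It
 * allows a user to register with a unique ID to track its resources.
 * The aggregator type determines if this is a queue group, VSI group
 * or aggregator group. It then creates the aggregator node(s) for requested
 * TC(s) or removes an existing aggregator node including its configuration
 * if indicated via tc_bitmap. This function needs to be called with scheduler
 * lock held.
 */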
static int
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
		  enum ice_agg_type agg_type, unsigned long *tc_bitmap)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_hw *hw = pi->hw;
	int status = 0;
	u8 tc;

	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info) {
		/* Create new entry for new aggregator ID */
		agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
					GFP_KERNEL);
		if (!agg_info)
			return -ENOMEM;

		agg_info->agg_id = agg_id;
		agg_info->agg_type = agg_type;
		agg_info->tc_bitmap[0] = 0;

		/* Initialize the aggregator VSI list head */
		INIT_LIST_HEAD(&agg_info->agg_vsi_list);

		/* Add new entry in aggregator list */
		list_add(&agg_info->list_entry, &hw->agg_list);
	}
	/* Create aggregator node(s) for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc)) {
			/* Delete aggregator cfg TC if it exists previously */
			status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
			if (status)
				break;
			continue;
		}

		/* Check if aggregator node for TC already exists */
		if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
			continue;

		/* Create new aggregator node for TC */
		status = ice_sched_add_agg_cfg(pi, agg_id, tc);
		if (status)
			break;

		/* Save aggregator node's TC information */
		set_bit(tc, agg_info->tc_bitmap);
	}

	return status;
}
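
/**
 * ice_cfg_agg - config aggregator node
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @agg_type: aggregator type queue, VSI, or aggregator group
 * @tc_bitmap: bits TC bitmap
 *
 * This function configures aggregator node(s).
 */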
int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
	    u8 tc_bitmap)
{
	unsigned long bitmap = tc_bitmap;
	int status;

	mutex_lock(&pi->sched_lock);
	status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap);
	if (!status)
		status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap);
	mutex_unlock(&pi->sched_lock);
	return status;
}
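
/**
 * ice_get_agg_vsi_info - get the aggregator VSI info
 * @agg_info: aggregator info
 * @vsi_handle: software VSI handle
 *
 * The function returns aggregator VSI info based on VSI handle. This function
 * needs to be called with scheduler lock held.
 */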
static struct ice_sched_agg_vsi_info *
ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;

	list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry)
		if (agg_vsi_info->vsi_handle == vsi_handle)
			return agg_vsi_info;

	return NULL;
}
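
/**
 * ice_get_vsi_agg_info - get the aggregator info of VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: Sw VSI handle
 *
 * The function returns aggregator info of VSI represented via vsi_handle. The
 * VSI has in this case a different aggregator than the default one. This
 * function needs to be called with scheduler lock held.
 */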
static struct ice_sched_agg_info *
ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;

	list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;

		agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
		if (agg_vsi_info)
			return agg_info;
	}
	return NULL;
}
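
/**
 * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * Save VSI to aggregator TC bitmap. This function needs to be called with
 * scheduler lock held.
 */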
static int
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
			   unsigned long *tc_bitmap)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_sched_agg_info *agg_info;

	agg_info = ice_get_agg_info(pi->hw, agg_id);
	if (!agg_info)
		return -EINVAL;
	/* check if entry already exist */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info)
		return -EINVAL;
	bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
		    ICE_MAX_TRAFFIC_CLASS);
	return 0;
}
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
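/**
 * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * This function moves the VSI to a new or default aggregator node. If the
 * VSI is already associated with the aggregator node, then no operation is
 * performed on the tree. The caller must hold the scheduler lock.
 */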
static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
			   u16 vsi_handle, unsigned long *tc_bitmap)
{
	struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
	struct ice_sched_agg_info *agg_info, *old_agg_info;
	struct ice_hw *hw = pi->hw;
	int status = 0;
	u8 tc;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;
	agg_info = ice_get_agg_info(hw, agg_id);
	if (!agg_info)
		return -EINVAL;
	/* If the VSI is already part of another aggregator then update
	 * its VSI info list
	 */
	old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
	if (old_agg_info && old_agg_info != agg_info) {
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(old_agg_vsi_info, vtmp,
					 &old_agg_info->agg_vsi_list,
					 list_entry)
			if (old_agg_vsi_info->vsi_handle == vsi_handle)
				break;
	}

	/* check if entry already exists */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info) {
		/* Create new entry for VSI under aggregator list */
		agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
					    sizeof(*agg_vsi_info), GFP_KERNEL);
		if (!agg_vsi_info)
			return -ENOMEM;

		/* add VSI ID into the aggregator list */
		agg_vsi_info->vsi_handle = vsi_handle;
		list_add(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
	}
	/* Move VSI node to new aggregator node for requested TC(s) */
	ice_for_each_traffic_class(tc) {
		if (!ice_is_tc_ena(*tc_bitmap, tc))
			continue;

		/* Move VSI to new aggregator */
		status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
		if (status)
			break;

		set_bit(tc, agg_vsi_info->tc_bitmap);
		if (old_agg_vsi_info)
			clear_bit(tc, old_agg_vsi_info->tc_bitmap);
	}
	if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
		list_del(&old_agg_vsi_info->list_entry);
		devm_kfree(ice_hw_to_dev(pi->hw), old_agg_vsi_info);
	}
	return status;
}

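/**
 * ice_sched_rm_unused_rl_prof - remove unused RL profile
 * @pi: port information structure
 *
 * This function removes unused rate limit profiles from the HW and SW DB.
 * The caller must hold the scheduler lock.
 */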
static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
{
	u16 ln;

	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
		struct ice_aqc_rl_profile_info *rl_prof_elem;
		struct ice_aqc_rl_profile_info *rl_prof_tmp;

		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
					 &pi->rl_prof_list[ln], list_entry) {
			if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
				ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n");
		}
	}
}

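/**
 * ice_sched_update_elem - update element
 * @hw: pointer to the HW struct
 * @node: pointer to node
 * @info: node info to update
 *
 * Update the HW DB and local SW DB of a node. Updates the scheduling
 * parameters of the node from the info data buffer and returns success or
 * error on config sched element failure. The caller must hold the
 * scheduler lock.
 */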
static int
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
		      struct ice_aqc_txsched_elem_data *info)
{
	struct ice_aqc_txsched_elem_data buf;
	u16 elem_cfgd = 0;
	u16 num_elems = 1;
	int status;

	buf = *info;
	/* Parent TEID is reserved field in this aq call */
	buf.parent_teid = 0;
	/* Element type is reserved field in this aq call */
	buf.data.elem_type = 0;
	/* Flags is reserved field in this aq call */
	buf.data.flags = 0;

	/* Update HW DB */
	/* Configure element node */
	status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
					&elem_cfgd, NULL);
	if (status || elem_cfgd != num_elems) {
		ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
		return -EIO;
	}

	/* Config success case */
	/* Now update local SW DB */
	/* Only copy the data portion of info buffer */
	node->info.data = info->data;
	return status;
}

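/**
 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @bw_alloc: BW weight/allocation
 *
 * This function configures node element's BW allocation.
 */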
static int
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
			    enum ice_rl_type rl_type, u16 bw_alloc)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	buf = node->info;
	data = &buf.data;
	if (rl_type == ICE_MIN_BW) {
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
	} else if (rl_type == ICE_MAX_BW) {
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
	} else {
		return -EINVAL;
	}

	/* Configure element */
	return ice_sched_update_elem(hw, node, &buf);
}

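/**
 * ice_move_vsi_to_agg - moves VSI to new or default aggregator
 * @pi: port information structure
 * @agg_id: aggregator ID
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap of enabled TC(s)
 *
 * Move or associate VSI to a new or default aggregator node.
 */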
int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
		    u8 tc_bitmap)
{
	unsigned long bitmap = tc_bitmap;
	int status;

	mutex_lock(&pi->sched_lock);
	status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, &bitmap);
	if (!status)
		status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
						    &bitmap);
	mutex_unlock(&pi->sched_lock);
	return status;
}

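/**
 * ice_set_clear_cir_bw - set or clear CIR BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
 */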
static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
	if (bw == ICE_SCHED_DFLT_BW) {
		clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
		bw_t_info->cir_bw.bw = 0;
	} else {
		/* Save type of BW information */
		set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
		bw_t_info->cir_bw.bw = bw;
	}
}

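/**
 * ice_set_clear_eir_bw - set or clear EIR BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
 */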
static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
	if (bw == ICE_SCHED_DFLT_BW) {
		clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
		bw_t_info->eir_bw.bw = 0;
	} else {
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element.
		 * First clear earlier saved shared BW information.
		 */
		clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
		bw_t_info->shared_bw = 0;
		/* save EIR BW information */
		set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
		bw_t_info->eir_bw.bw = bw;
	}
}

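/**
 * ice_set_clear_shared_bw - set or clear shared BW
 * @bw_t_info: bandwidth type information structure
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
 */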
static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
	if (bw == ICE_SCHED_DFLT_BW) {
		clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
		bw_t_info->shared_bw = 0;
	} else {
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element.
		 * First clear earlier saved EIR BW information.
		 */
		clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
		bw_t_info->eir_bw.bw = 0;
		/* save shared BW information */
		set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
		bw_t_info->shared_bw = bw;
	}
}

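/**
 * ice_sched_save_vsi_bw - save VSI node's BW information
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save BW information of VSI type node for post replay use.
 */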
static int
ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      enum ice_rl_type rl_type, u32 bw)
{
	struct ice_vsi_ctx *vsi_ctx;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return -EINVAL;
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

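/**
 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 *
 * This function calculates the wakeup parameter of the RL profile.
 */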
static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
{
	s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
	s32 wakeup_f_int;
	u16 wakeup = 0;

	/* Get the wakeup integer value */
	bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
	wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec);
	if (wakeup_int > 63) {
		wakeup = (u16)((1 << 15) | wakeup_int);
	} else {
		/* Calculate fraction value up to 4 decimals
		 * Convert Integer value to a constant multiplier
		 */
		wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
		wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
				      hw->psm_clk_freq, bytes_per_sec);

		/* Get Fraction value */
		wakeup_f = wakeup_a - wakeup_b;

		/* Round up the Fractional value via Ceil(Fractional value) */
		if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
			wakeup_f += 1;

		wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
					       ICE_RL_PROF_MULTIPLIER);
		wakeup |= (u16)(wakeup_int << 9);
		wakeup |= (u16)(0x1ff & wakeup_f_int);
	}

	return wakeup;
}

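/**
 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
 * @hw: pointer to the HW struct
 * @bw: bandwidth in Kbps
 * @profile: profile parameters to return
 *
 * This function converts the BW to profile structure format.
 */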
static int
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
			   struct ice_aqc_rl_profile_elem *profile)
{
	s64 bytes_per_sec, ts_rate, mv_tmp;
	int status = -EINVAL;
	bool found = false;
	s32 encode = 0;
	s64 mv = 0;
	s32 i;

	/* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
	if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
		return status;

	/* Bytes per second from Kbps */
	bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);

	/* encode is 6 bits but really useful are 5 bits */
	for (i = 0; i < 64; i++) {
		u64 pow_result = BIT_ULL(i);

		ts_rate = div64_long((s64)hw->psm_clk_freq,
				     pow_result * ICE_RL_PROF_TS_MULTIPLIER);
		if (ts_rate <= 0)
			continue;

		/* Multiplier value */
		mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
				    ts_rate);

		/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
		mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);

		/* First multiplier value greater than the given
		 * accuracy bytes
		 */
		if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
			encode = i;
			found = true;
			break;
		}
	}
	if (found) {
		u16 wm;

		wm = ice_sched_calc_wakeup(hw, bw);
		profile->rl_multiply = cpu_to_le16(mv);
		profile->wake_up_calc = cpu_to_le16(wm);
		profile->rl_encode = cpu_to_le16(encode);
		status = 0;
	} else {
		status = -ENOENT;
	}

	return status;
}

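/**
 * ice_sched_add_rl_profile - add RL profile
 * @pi: port information structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: specifies in which layer to create profile
 *
 * This function first checks the existing list for a matching BW parameter.
 * If one exists, it returns the associated profile, otherwise it creates a
 * new rate limit profile for the requested BW and adds it to the HW DB and
 * local list. It returns the new profile or NULL on error. The caller must
 * hold the scheduler lock.
 */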
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_port_info *pi,
			 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	u16 profiles_added = 0, num_profiles = 1;
	struct ice_aqc_rl_profile_elem *buf;
	struct ice_hw *hw;
	u8 profile_type;
	int status;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return NULL;
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		break;
	default:
		return NULL;
	}

	if (!pi)
		return NULL;
	hw = pi->hw;
	list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
			    list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type && rl_prof_elem->bw == bw)
			/* Return existing profile ID info */
			return rl_prof_elem;

	/* Create new profile ID */
	rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
				    GFP_KERNEL);
	if (!rl_prof_elem)
		return NULL;

	status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
	if (status)
		goto exit_add_rl_prof;

	rl_prof_elem->bw = bw;
	/* layer_num is zero relative, and fw expects level from 1 to 9 */
	rl_prof_elem->profile.level = layer_num + 1;
	rl_prof_elem->profile.flags = profile_type;
	rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);

	/* Create new entry in HW DB */
	buf = &rl_prof_elem->profile;
	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
				       &profiles_added, NULL);
	if (status || profiles_added != num_profiles)
		goto exit_add_rl_prof;

	/* Good entry - add in the list */
	rl_prof_elem->prof_id_ref = 0;
	list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
	return rl_prof_elem;

exit_add_rl_prof:
	devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
	return NULL;
}

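/**
 * ice_sched_cfg_node_bw_lmt - configure node sched params
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @rl_type: rate limit type CIR, EIR, or shared
 * @rl_prof_id: rate limit profile ID
 *
 * This function configures node element's BW limit.
 */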
static int
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u16 rl_prof_id)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;

	buf = node->info;
	data = &buf.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
		data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
		break;
	case ICE_MAX_BW:
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element
		 */
		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
			return -EIO;
		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
		data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
		break;
	case ICE_SHARED_BW:
		/* Check for removing shared BW */
		if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
			/* remove shared profile */
			data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
			data->srl_id = 0; /* clear SRL field */

			/* enable back EIR to default profile */
			data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
			data->eir_bw.bw_profile_idx =
				cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
			break;
		}
		/* EIR BW and Shared BW profiles are mutually exclusive and
		 * hence only one of them may be set for any given element
		 */
		if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
		    (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
			    ICE_SCHED_DFLT_RL_PROF_ID))
			return -EIO;
		/* EIR BW is set to default, disable it */
		data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
		/* Okay to enable shared BW now */
		data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
		data->srl_id = cpu_to_le16(rl_prof_id);
		break;
	default:
		/* Unknown rate limit type */
		return -EINVAL;
	}

	/* Configure element */
	return ice_sched_update_elem(hw, node, &buf);
}

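/**
 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
 * @node: sched node
 * @rl_type: rate limit type
 *
 * If a profile exists, it returns the corresponding rate limit profile ID,
 * otherwise it returns an invalid ID as error.
 */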
static u16
ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
			      enum ice_rl_type rl_type)
{
	u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
	struct ice_aqc_txsched_elem *data;

	data = &node->info.data;
	switch (rl_type) {
	case ICE_MIN_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
			rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
		break;
	case ICE_MAX_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
			rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
		break;
	case ICE_SHARED_BW:
		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
			rl_prof_id = le16_to_cpu(data->srl_id);
		break;
	default:
		break;
	}

	return rl_prof_id;
}

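/**
 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
 * @pi: port information structure
 * @rl_type: type of rate limit BW - min, max, or shared
 * @layer_index: layer index
 *
 * This function returns the requested profile creation layer.
 */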
static u8
ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
			    u8 layer_index)
{
	struct ice_hw *hw = pi->hw;

	if (layer_index >= hw->num_tx_sched_layers)
		return ICE_SCHED_INVAL_LAYER_NUM;
	switch (rl_type) {
	case ICE_MIN_BW:
		if (hw->layer_info[layer_index].max_cir_rl_profiles)
			return layer_index;
		break;
	case ICE_MAX_BW:
		if (hw->layer_info[layer_index].max_eir_rl_profiles)
			return layer_index;
		break;
	case ICE_SHARED_BW:
		/* if current layer doesn't support SRL profile creation
		 * then try a layer up or down.
		 */
		if (hw->layer_info[layer_index].max_srl_profiles)
			return layer_index;
		else if (layer_index < hw->num_tx_sched_layers - 1 &&
			 hw->layer_info[layer_index + 1].max_srl_profiles)
			return layer_index + 1;
		else if (layer_index > 0 &&
			 hw->layer_info[layer_index - 1].max_srl_profiles)
			return layer_index - 1;
		break;
	default:
		break;
	}
	return ICE_SCHED_INVAL_LAYER_NUM;
}

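/**
 * ice_sched_get_srl_node - get shared rate limit node
 * @node: tree node
 * @srl_layer: shared rate limit layer
 *
 * This function returns the SRL node to be used for shared rate limit
 * purposes. The caller must hold the scheduler lock.
 */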
static struct ice_sched_node *
ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
{
	if (srl_layer > node->tx_sched_layer)
		return node->children[0];
	else if (srl_layer < node->tx_sched_layer)
		/* Node can't be created without a parent. It will always
		 * have a valid parent except root node.
		 */
		return node->parent;
	else
		return node;
}

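/**
 * ice_sched_rm_rl_profile - remove RL profile ID
 * @pi: port information structure
 * @layer_num: layer number where profiles are saved
 * @profile_type: profile type like EIR, CIR, or SRL
 * @profile_id: profile ID to remove
 *
 * This function removes the rate limit profile from layer 'layer_num' of
 * type 'profile_type' with profile ID 'profile_id'. The caller must hold
 * the scheduler lock.
 */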
static int
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
			u16 profile_id)
{
	struct ice_aqc_rl_profile_info *rl_prof_elem;
	int status = 0;

	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
		return -EINVAL;
	/* Check the existing list for RL profile */
	list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
			    list_entry)
		if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
		    profile_type &&
		    le16_to_cpu(rl_prof_elem->profile.profile_id) ==
		    profile_id) {
			if (rl_prof_elem->prof_id_ref)
				rl_prof_elem->prof_id_ref--;

			/* Remove old profile ID from database */
			status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
			if (status && status != -EBUSY)
				ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
			break;
		}
	if (status == -EBUSY)
		status = 0;
	return status;
}

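/**
 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
 * @pi: port information structure
 * @node: pointer to node struct
 * @rl_type: rate limit type min, max, or shared
 * @layer_num: layer number to remove profile
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. The caller must hold the scheduler
 * lock.
 */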
static int
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
			   struct ice_sched_node *node,
			   enum ice_rl_type rl_type, u8 layer_num)
{
	struct ice_hw *hw;
	u8 profile_type;
	u16 rl_prof_id;
	u16 old_id;
	int status;

	hw = pi->hw;
	switch (rl_type) {
	case ICE_MIN_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
		break;
	case ICE_MAX_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
		break;
	case ICE_SHARED_BW:
		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
		/* No SRL is configured for default case */
		rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
		break;
	default:
		return -EINVAL;
	}
	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* Remove stale RL profile ID */
	if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
	    old_id == ICE_SCHED_INVAL_PROF_ID)
		return 0;

	return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
}

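/**
 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
 * @pi: port information structure
 * @node: pointer to node structure
 * @layer_num: layer number where rate limit profiles are saved
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth value
 *
 * This function prepares node element's bandwidth to SRL or EIR exclusively.
 * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
 * them may be set for any given element. The caller must hold the scheduler
 * lock.
 */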
static int
ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
			   struct ice_sched_node *node,
			   u8 layer_num, enum ice_rl_type rl_type, u32 bw)
{
	if (rl_type == ICE_SHARED_BW) {
		/* SRL node passed in this case, it may be different node */
		if (bw == ICE_SCHED_DFLT_BW)
			/* SRL being removed, ice_sched_cfg_node_bw_lmt()
			 * enables EIR to default. EIR is not set in this
			 * case, so no additional action is required.
			 */
			return 0;

		/* SRL being configured, set EIR to default here.
		 * ice_sched_cfg_node_bw_lmt() disables EIR when it
		 * configures SRL
		 */
		return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
						  layer_num);
	} else if (rl_type == ICE_MAX_BW &&
		   node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
		/* Remove Shared profile. Set default shared BW call
		 * removes shared profile for a node.
		 */
		return ice_sched_set_node_bw_dflt(pi, node,
						  ICE_SHARED_BW,
						  layer_num);
	}
	return 0;
}

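/**
 * ice_sched_set_node_bw - set node's bandwidth
 * @pi: port information structure
 * @node: tree node
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 * @layer_num: layer number
 *
 * This function adds a new profile corresponding to the requested BW,
 * configures the node's RL profile ID of type CIR, EIR, or SRL, and removes
 * the old profile ID from the local database. The caller must hold the
 * scheduler lock.
 */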
static int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
	struct ice_aqc_rl_profile_info *rl_prof_info;
	struct ice_hw *hw = pi->hw;
	u16 old_id, rl_prof_id;
	int status = -EINVAL;

	rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
	if (!rl_prof_info)
		return status;

	rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);

	/* Save existing RL prof ID for later clean up */
	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
	/* Configure BW scheduling parameters */
	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
	if (status)
		return status;

	/* New changes has been applied */
	/* Increment the profile ID reference count */
	rl_prof_info->prof_id_ref++;

	/* Check for old ID removal */
	if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
	    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
		return 0;

	return ice_sched_rm_rl_profile(pi, layer_num,
				       rl_prof_info->profile.flags &
				       ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}

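/**
 * ice_sched_set_node_bw_lmt - set node's BW limit
 * @pi: port information structure
 * @node: tree node
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * It updates node's BW limit configuration by enabling a new, disabling the
 * old, or keeping the existing profile. The caller must hold the scheduler
 * lock.
 */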
static int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
			  enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_node *cfg_node = node;
	struct ice_hw *hw;
	u8 layer_num;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	/* Remove unused RL profile IDs from HW and SW DB */
	ice_sched_rm_unused_rl_prof(pi);
	layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
						node->tx_sched_layer);
	if (layer_num >= hw->num_tx_sched_layers)
		return -EINVAL;

	if (rl_type == ICE_SHARED_BW) {
		/* SRL node may be different */
		cfg_node = ice_sched_get_srl_node(node, layer_num);
		if (!cfg_node)
			return -EIO;
	}
	/* EIR BW and Shared BW profiles are mutually exclusive and
	 * hence only one of them may be set for any given element
	 */
	status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
					    bw);
	if (status)
		return status;
	if (bw == ICE_SCHED_DFLT_BW)
		return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
						  layer_num);
	return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
}

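/**
 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
 * @pi: port information structure
 * @node: pointer to node structure
 * @rl_type: rate limit type min, max, or shared
 *
 * This function configures node element's BW rate limit profile ID of
 * type CIR, EIR, or SRL to default. The caller must hold the scheduler
 * lock.
 */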
static int
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
			       struct ice_sched_node *node,
			       enum ice_rl_type rl_type)
{
	return ice_sched_set_node_bw_lmt(pi, node, rl_type,
					 ICE_SCHED_DFLT_BW);
}

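/**
 * ice_sched_validate_srl_node - check node for SRL applicability
 * @node: sched node to configure
 * @sel_layer: selected SRL layer
 *
 * This function checks if the SRL can be applied to a selected layer node on
 * behalf of the requested node (first argument). The caller must hold the
 * scheduler lock.
 */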
static int
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
	/* SRL profiles are not available on all layers. Check if the
	 * SRL profile can be applied to a node above or below the
	 * requested node. SRL configuration is possible only if the
	 * selected layer's node has single child.
	 */
	if (sel_layer == node->tx_sched_layer ||
	    ((sel_layer == node->tx_sched_layer + 1) &&
	    node->num_children == 1) ||
	    ((sel_layer == node->tx_sched_layer - 1) &&
	    (node->parent && node->parent->num_children == 1)))
		return 0;

	return -EIO;
}

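/**
 * ice_sched_save_q_bw - save queue node's BW information
 * @q_ctx: queue context structure
 * @rl_type: rate limit type min, max, or shared
 * @bw: bandwidth in Kbps - Kilo bits per sec
 *
 * Save BW information of queue type node for post replay use.
 */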
static int
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
	switch (rl_type) {
	case ICE_MIN_BW:
		ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_MAX_BW:
		ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
		break;
	case ICE_SHARED_BW:
		ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

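/**
 * ice_sched_set_q_bw_lmt - sets queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function sets BW limit of queue scheduling node.
 */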
static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_node *node;
	struct ice_q_ctx *q_ctx;
	int status = -EINVAL;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;
	mutex_lock(&pi->sched_lock);
	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
	if (!q_ctx)
		goto exit_q_bw_lmt;
	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
		goto exit_q_bw_lmt;
	}

	/* Return error if it is not a leaf node */
	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
		goto exit_q_bw_lmt;

	/* SRL bandwidth layer selection */
	if (rl_type == ICE_SHARED_BW) {
		u8 sel_layer; /* selected layer */

		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
							node->tx_sched_layer);
		if (sel_layer >= pi->hw->num_tx_sched_layers) {
			status = -EINVAL;
			goto exit_q_bw_lmt;
		}
		status = ice_sched_validate_srl_node(node, sel_layer);
		if (status)
			goto exit_q_bw_lmt;
	}

	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

	if (!status)
		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);

exit_q_bw_lmt:
	mutex_unlock(&pi->sched_lock);
	return status;
}

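/**
 * ice_cfg_q_bw_lmt - configure queue BW limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of queue scheduling node.
 */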
int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
				      bw);
}

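/**
 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
 * @pi: port information structure
 * @vsi_handle: sw VSI handle
 * @tc: traffic class
 * @q_handle: software queue handle
 * @rl_type: min, max, or shared
 *
 * This function configures the default BW limit of queue scheduling node.
 */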
int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 q_handle, enum ice_rl_type rl_type)
{
	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
				      ICE_SCHED_DFLT_BW);
}

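/**
 * ice_sched_get_node_by_id_type - get node from ID type
 * @pi: port information structure
 * @id: identifier
 * @agg_type: type of aggregator
 * @tc: traffic class
 *
 * This function returns the node identified by ID of type aggregator, based
 * on traffic class (TC). The caller must hold the scheduler lock.
 */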
static struct ice_sched_node *
ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
			      enum ice_agg_type agg_type, u8 tc)
{
	struct ice_sched_node *node = NULL;

	switch (agg_type) {
	case ICE_AGG_TYPE_VSI: {
		struct ice_vsi_ctx *vsi_ctx;
		u16 vsi_handle = (u16)id;

		if (!ice_is_vsi_valid(pi->hw, vsi_handle))
			break;
		/* Get sched_vsi_info */
		vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
		if (!vsi_ctx)
			break;
		node = vsi_ctx->sched.vsi_node[tc];
		break;
	}

	case ICE_AGG_TYPE_AGG: {
		struct ice_sched_node *tc_node;

		tc_node = ice_sched_get_tc_node(pi, tc);
		if (tc_node)
			node = ice_sched_get_agg_node(pi, tc_node, id);
		break;
	}

	default:
		break;
	}

	return node;
}

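/**
 * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
 * @pi: port information structure
 * @id: ID (software VSI handle or AGG ID)
 * @agg_type: aggregator type (VSI or AGG type node)
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function sets BW limit of VSI or Aggregator scheduling node
 * based on TC information from passed in argument BW.
 */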
int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
				 enum ice_agg_type agg_type, u8 tc,
				 enum ice_rl_type rl_type, u32 bw)
{
	struct ice_sched_node *node;
	int status = -EINVAL;

	if (!pi)
		return status;

	if (rl_type == ICE_UNKNOWN_BW)
		return status;

	mutex_lock(&pi->sched_lock);
	node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
	if (!node) {
		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
		goto exit_set_node_bw_lmt_per_tc;
	}
	if (bw == ICE_SCHED_DFLT_BW)
		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
	else
		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);

exit_set_node_bw_lmt_per_tc:
	mutex_unlock(&pi->sched_lock);
	return status;
}

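/**
 * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 * @bw: bandwidth in Kbps
 *
 * This function configures BW limit of VSI scheduling node based on TC
 * information.
 */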
int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			  enum ice_rl_type rl_type, u32 bw)
{
	int status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type, bw);
	if (!status) {
		mutex_lock(&pi->sched_lock);
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
		mutex_unlock(&pi->sched_lock);
	}
	return status;
}

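/**
 * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: traffic class
 * @rl_type: min or max
 *
 * This function configures default BW limit of VSI scheduling node based on
 * TC information.
 */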
int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			       enum ice_rl_type rl_type)
{
	int status;

	status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
						  ICE_AGG_TYPE_VSI,
						  tc, rl_type,
						  ICE_SCHED_DFLT_BW);
	if (!status) {
		mutex_lock(&pi->sched_lock);
		status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
					       ICE_SCHED_DFLT_BW);
		mutex_unlock(&pi->sched_lock);
	}
	return status;
}

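/**
 * ice_cfg_rl_burst_size - Set burst size value
 * @hw: pointer to the HW struct
 * @bytes: burst size in bytes
 *
 * This function configures/set the burst size to requested new value. The new
 * burst size value is used for future rate limit calls. It doesn't change the
 * existing or previously created RL profiles.
 */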
int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
{
	u16 burst_size_to_prog;

	if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
	    bytes > ICE_MAX_BURST_SIZE_ALLOWED)
		return -EINVAL;
	if (ice_round_to_num(bytes, 64) <=
	    ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
		/* 64 byte granularity case */
		/* Set MSB granularity bit */
		burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
		/* round number to nearest 64 byte granularity */
		bytes = ice_round_to_num(bytes, 64);
		/* The value is in 64 byte chunks */
		burst_size_to_prog |= (u16)(bytes / 64);
	} else {
		/* k bytes granularity case */
		/* Clear MSB granularity bit */
		burst_size_to_prog = ICE_KBYTE_GRANULARITY;
		/* round number to nearest 1024 granularity */
		bytes = ice_round_to_num(bytes, 1024);
		/* check rounding doesn't go above allowed */
		if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
			bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
		/* The value is in k bytes */
		burst_size_to_prog |= (u16)(bytes / 1024);
	}
	hw->max_burst_size = burst_size_to_prog;
	return 0;
}

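/**
 * ice_sched_replay_node_prio - re-configure node priority
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @priority: priority value
 *
 * This function configures node element's priority value. The caller must
 * hold the scheduler lock.
 */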
static int
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
			   u8 priority)
{
	struct ice_aqc_txsched_elem_data buf;
	struct ice_aqc_txsched_elem *data;
	int status;

	buf = node->info;
	data = &buf.data;
	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
	data->generic = priority;

	/* Configure element */
	status = ice_sched_update_elem(hw, node, &buf);
	return status;
}

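/**
 * ice_sched_replay_node_bw - replay node(s) BW
 * @hw: pointer to the HW struct
 * @node: sched node to configure
 * @bw_t_info: BW type information
 *
 * This function restores node's BW from bw_t_info. The caller must hold the
 * scheduler lock.
 */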
static int
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
			 struct ice_bw_type_info *bw_t_info)
{
	struct ice_port_info *pi = hw->port_info;
	int status = -EINVAL;
	u16 bw_alloc;

	if (!node)
		return status;
	if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
		return 0;
	if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
		status = ice_sched_replay_node_prio(hw, node,
						    bw_t_info->generic);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
						   bw_t_info->cir_bw.bw);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
		bw_alloc = bw_t_info->cir_bw.bw_alloc;
		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
						     bw_alloc);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
						   bw_t_info->eir_bw.bw);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
		bw_alloc = bw_t_info->eir_bw.bw_alloc;
		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
						     bw_alloc);
		if (status)
			return status;
	}
	if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
		status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
						   bw_t_info->shared_bw);
	return status;
}

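/**
 * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
 * @pi: port info struct
 * @tc_bitmap: 8 bits TC bitmap to check
 * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
 *
 * This function returns the enabled TC bitmap in variable ena_tc_bitmap. Some
 * TCs may be disabled, so only the enabled ones are returned.
 */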
static void
ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi,
			    unsigned long *tc_bitmap,
			    unsigned long *ena_tc_bitmap)
{
	u8 tc;

	/* Some TC(s) may be disabled after reset, adjust for replay */
	ice_for_each_traffic_class(tc)
		if (ice_is_tc_ena(*tc_bitmap, tc) &&
		    (ice_sched_get_tc_node(pi, tc)))
			set_bit(tc, ena_tc_bitmap);
}

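/**
 * ice_sched_replay_agg - recreate aggregator node(s)
 * @hw: pointer to the HW struct
 *
 * This function recreates aggregator type nodes which were not replayed
 * earlier. These aggregator nodes are not yet associated with VSI type
 * nodes.
 */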
void ice_sched_replay_agg(struct ice_hw *hw)
{
	struct ice_port_info *pi = hw->port_info;
	struct ice_sched_agg_info *agg_info;

	mutex_lock(&pi->sched_lock);
	list_for_each_entry(agg_info, &hw->agg_list, list_entry)
		/* replay aggregator (re-create aggregator node) */
		if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
				  ICE_MAX_TRAFFIC_CLASS)) {
			DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
			int status;

			bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
			ice_sched_get_ena_tc_bitmap(pi,
						    agg_info->replay_tc_bitmap,
						    replay_bitmap);
			status = ice_sched_cfg_agg(hw->port_info,
						   agg_info->agg_id,
						   ICE_AGG_TYPE_AGG,
						   replay_bitmap);
			if (status) {
				dev_info(ice_hw_to_dev(hw),
					 "Replay agg id[%d] failed\n",
					 agg_info->agg_id);
				/* Move on to next one */
				continue;
			}
		}
	mutex_unlock(&pi->sched_lock);
}

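/**
 * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
 * @hw: pointer to the HW struct
 *
 * This function initializes aggregator(s) TC bitmap to zero. A required
 * preinit step for replaying aggregators.
 */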
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
{
	struct ice_port_info *pi = hw->port_info;
	struct ice_sched_agg_info *agg_info;

	mutex_lock(&pi->sched_lock);
	list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;

		agg_info->tc_bitmap[0] = 0;
		list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list,
				    list_entry)
			agg_vsi_info->tc_bitmap[0] = 0;
	}
	mutex_unlock(&pi->sched_lock);
}

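/**
 * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 *
 * This function replays the aggregator node and VSI to aggregator type
 * nodes. The caller must hold the scheduler lock.
 */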
static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
	DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
	struct ice_sched_agg_vsi_info *agg_vsi_info;
	struct ice_port_info *pi = hw->port_info;
	struct ice_sched_agg_info *agg_info;
	int status;

	bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;
	agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
	if (!agg_info)
		return 0; /* Not present in list - default Agg case */
	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
	if (!agg_vsi_info)
		return 0; /* Not present in list - default Agg case */
	ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
				    replay_bitmap);
	/* Replay aggregator node associated to vsi_handle */
	status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
				   ICE_AGG_TYPE_AGG, replay_bitmap);
	if (status)
		return status;

	bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
	ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
				    replay_bitmap);
	/* Move this VSI (vsi_handle) to above aggregator */
	return ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
					  replay_bitmap);
}

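/**
 * ice_replay_vsi_agg - replay VSI to aggregator node
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 *
 * This function replays the association of the VSI to aggregator type nodes,
 * taking the scheduler lock.
 */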
int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_port_info *pi = hw->port_info;
	int status;

	mutex_lock(&pi->sched_lock);
	status = ice_sched_replay_vsi_agg(hw, vsi_handle);
	mutex_unlock(&pi->sched_lock);
	return status;
}

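/**
 * ice_sched_replay_q_bw - replay queue type node BW
 * @pi: port information structure
 * @q_ctx: queue context structure
 *
 * This function replays queue type node bandwidth. The caller must hold the
 * scheduler lock.
 */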
int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
	struct ice_sched_node *q_node;

	/* Following also checks the presence of node in tree */
	q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
	if (!q_node)
		return -EINVAL;
	return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
}