0001
0002
0003
0004 #include "vf.h"
0005 #include "ixgbevf.h"
0006
0007
0008
0009
0010
0011 #define IXGBE_HV_RESET_OFFSET 0x201
0012
0013 static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
0014 u32 *retmsg, u16 size)
0015 {
0016 s32 retval = ixgbevf_write_mbx(hw, msg, size);
0017
0018 if (retval)
0019 return retval;
0020
0021 return ixgbevf_poll_mbx(hw, retmsg, size);
0022 }
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
0034 {
0035
0036 hw->adapter_stopped = false;
0037
0038 return 0;
0039 }
0040
0041
0042
0043
0044
0045
0046
0047
0048 static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
0049 {
0050 s32 status = hw->mac.ops.start_hw(hw);
0051
0052 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
0053
0054 return status;
0055 }
0056
0057
0058
0059
0060
0061
0062
0063
/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Stops the adapter, issues a VF reset through VFCTRL, waits for the PF
 * to acknowledge the reset, and then retrieves the permanent MAC address
 * and multicast filter type from the PF's IXGBE_VF_RESET reply.
 *
 * Returns 0 on success, IXGBE_ERR_RESET_FAILED if the PF never clears the
 * reset indication, IXGBE_ERR_INVALID_MAC_ADDR on an unexpected reply, or
 * a mailbox poll error.
 */
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);
	s32 ret_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* Drop back to the legacy 1.0 mailbox API and ops until a newer
	 * version is renegotiated after the reset.
	 */
	hw->api_version = ixgbe_mbox_api_10;
	hw->mbx.ops.init_params(hw);
	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
	       sizeof(struct ixgbe_mbx_operations));

	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* Wait (up to IXGBE_VF_INIT_TIMEOUT * 5us) for the PF to signal
	 * that the reset has completed.
	 */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	ixgbevf_write_mbx(hw, msgbuf, 1);

	mdelay(10);

	/* Set our "perm_addr" based on info provided by the PF.  The
	 * mc_filter_type is piggy-backed in the same reply message
	 * (word IXGBE_VF_MC_TYPE_WORD).
	 */
	ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* The PF may NACK the reset message to indicate that no MAC
	 * address has yet been assigned to the VF; anything other than
	 * an ACK or NACK of our request is an error.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* Only a SUCCESS reply carries a valid permanent address. */
	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
0124
0125
0126
0127
0128
0129
0130
0131
0132 static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
0133 {
0134 #if IS_ENABLED(CONFIG_PCI_MMCONFIG)
0135 struct ixgbevf_adapter *adapter = hw->back;
0136 int i;
0137
0138 for (i = 0; i < 6; i++)
0139 pci_read_config_byte(adapter->pdev,
0140 (i + IXGBE_HV_RESET_OFFSET),
0141 &hw->mac.perm_addr[i]);
0142 return 0;
0143 #else
0144 pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
0145 return -EOPNOTSUPP;
0146 #endif
0147 }
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within the ixgbe_hw struct, disables all
 * receive queues, masks and clears interrupts, and disables all transmit
 * queues.  Always returns 0.
 */
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop
	 * touching the hardware from now on.
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	/* Make sure the Rx disables have posted before touching interrupts */
	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop interrupts from being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts (read-to-clear) */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212 static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
0213 {
0214 u32 vector = 0;
0215
0216 switch (hw->mac.mc_filter_type) {
0217 case 0:
0218 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
0219 break;
0220 case 1:
0221 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
0222 break;
0223 case 2:
0224 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
0225 break;
0226 case 3:
0227 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
0228 break;
0229 default:
0230 break;
0231 }
0232
0233
0234 vector &= 0xFFF;
0235 return vector;
0236 }
0237
0238
0239
0240
0241
0242
0243 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
0244 {
0245 ether_addr_copy(mac_addr, hw->mac.perm_addr);
0246
0247 return 0;
0248 }
0249
0250 static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
0251 {
0252 u32 msgbuf[3], msgbuf_chk;
0253 u8 *msg_addr = (u8 *)(&msgbuf[1]);
0254 s32 ret_val;
0255
0256 memset(msgbuf, 0, sizeof(msgbuf));
0257
0258
0259
0260
0261
0262 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
0263 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
0264 msgbuf_chk = msgbuf[0];
0265
0266 if (addr)
0267 ether_addr_copy(msg_addr, addr);
0268
0269 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
0270 ARRAY_SIZE(msgbuf));
0271 if (!ret_val) {
0272 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
0273
0274 if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
0275 return -ENOMEM;
0276 }
0277
0278 return ret_val;
0279 }
0280
0281 static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
0282 {
0283 return -EOPNOTSUPP;
0284 }
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents
 * @num_rx_queues: number of Rx queues configured for this port
 *
 * Queries the PF for the RETA over the mailbox.  The caller must hold the
 * mailbox lock (hence "_locked").
 *
 * Returns: 0 on success, -EOPNOTSUPP if the negotiated API or device does
 * not support the query, -EPERM if the PF refused it, IXGBE_ERR_MBX on an
 * unexpected reply, or a mailbox transport error.
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* The PF packs 16 two-bit RETA entries per DWORD, so the reply
	 * payload is IXGBEVF_82599_RETA_SIZE / 16 DWORDs.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* RETA querying is only supported on mailbox API 1.2+ and only
	 * for device types below X550 (82599/x540 VFs).
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_15:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = ixgbevf_write_mbx(hw, msgbuf, 1);

	if (err)
		return err;

	/* Reply is the opcode word plus the packed RETA DWORDs */
	err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by the PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;

	/* If we didn't get an ACK there must have been some sort of
	 * mailbox error so we should treat it as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
		return IXGBE_ERR_MBX;

	/* With a single Rx queue every entry maps to queue 0 (mask 0);
	 * otherwise keep one bit of each two-bit entry.
	 */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* Unpack 16 two-bit entries from each reply DWORD */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371
0372
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with the RSS hash key
 *           (IXGBEVF_RSS_HASH_KEY_SIZE bytes)
 *
 * Queries the PF for the RSS key over the mailbox.  The caller must hold
 * the mailbox lock (hence "_locked").
 *
 * Returns: 0 on success, -EOPNOTSUPP if the negotiated API or device does
 * not support the query, -EPERM if the PF refused it, IXGBE_ERR_MBX on an
 * unexpected reply, or a mailbox transport error.
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* RSS key querying is only supported on mailbox API 1.2+ and only
	 * for device types below X550 (82599/x540 VFs).
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_15:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = ixgbevf_write_mbx(hw, msgbuf, 1);

	if (err)
		return err;

	/* Reply is the opcode word plus 10 DWORDs of key material */
	err = ixgbevf_poll_mbx(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by the PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;

	/* If we didn't get an ACK there must have been some sort of
	 * mailbox error so we should treat it as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}
0424
0425
0426
0427
0428
0429
0430
0431
0432 static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
0433 u32 vmdq)
0434 {
0435 u32 msgbuf[3];
0436 u8 *msg_addr = (u8 *)(&msgbuf[1]);
0437 s32 ret_val;
0438
0439 memset(msgbuf, 0, sizeof(msgbuf));
0440 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
0441 ether_addr_copy(msg_addr, addr);
0442
0443 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
0444 ARRAY_SIZE(msgbuf));
0445 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
0446
0447
0448 if (!ret_val &&
0449 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
0450 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
0451 return IXGBE_ERR_MBX;
0452 }
0453
0454 return ret_val;
0455 }
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467
0468 static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
0469 u32 vmdq)
0470 {
0471 if (ether_addr_equal(addr, hw->mac.perm_addr))
0472 return 0;
0473
0474 return -EOPNOTSUPP;
0475 }
0476
0477
0478
0479
0480
0481
0482
0483
/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: pointer to the net device structure
 *
 * Sends the PF an IXGBE_VF_SET_MULTICAST message carrying up to 30
 * 12-bit MTA hash vectors (one u16 each) computed from the netdev's
 * multicast list.
 */
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16-bit word.  We have 30 16-bit
	 * words available in the HW msg buffer (minus 1 word for the msg
	 * type).  If there are more than 30 MC addresses the extras are
	 * simply dropped.
	 */
	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	/* The entry count rides in the message-info bits of the opcode */
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		/* NOTE(review): link-local addresses are skipped here but
		 * were still counted in cnt above, so the count reported to
		 * the PF can exceed the vectors actually written — confirm
		 * the PF tolerates trailing stale entries.
		 */
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					  IXGBE_VFMAILBOX_SIZE);
}
0520
0521
0522
0523
0524
0525
0526
0527
0528 static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
0529 struct net_device *netdev)
0530 {
0531 return -EOPNOTSUPP;
0532 }
0533
0534
0535
0536
0537
0538
0539
0540
/**
 * ixgbevf_update_xcast_mode - Update Multicast mode
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast mode
 *
 * Requests a new multi/unicast receive mode from the PF.  Requires
 * mailbox API 1.2+; promiscuous mode additionally requires API 1.3+.
 *
 * Returns 0 on success, -EOPNOTSUPP if the API version does not support
 * the request, -EPERM if the PF refused it, or a mailbox error.
 */
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		fallthrough;
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_15:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;

	return 0;
}
0574
0575
0576
0577
0578
0579
0580
0581
0582 static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
0583 {
0584 return -EOPNOTSUPP;
0585 }
0586
0587
0588
0589
0590
0591
0592
0593
0594 static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
0595 {
0596 u32 msgbuf[2];
0597 s32 ret_val;
0598 s32 err;
0599
0600 msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
0601 msgbuf[1] = 0x0;
0602
0603 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
0604
0605 if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
0606 ret_val = IXGBE_ERR_MBX;
0607 } else {
0608 ret_val = 0;
0609 *link_state = msgbuf[1];
0610 }
0611
0612 return ret_val;
0613 }
0614
0615
0616
0617
0618
0619
0620
0621
0622 static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
0623 {
0624 return -EOPNOTSUPP;
0625 }
0626
0627
0628
0629
0630
0631
0632
0633
0634 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
0635 bool vlan_on)
0636 {
0637 u32 msgbuf[2];
0638 s32 err;
0639
0640 msgbuf[0] = IXGBE_VF_SET_VLAN;
0641 msgbuf[1] = vlan;
0642
0643 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
0644
0645 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
0646 ARRAY_SIZE(msgbuf));
0647 if (err)
0648 goto mbx_err;
0649
0650
0651 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
0652 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
0653
0654 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
0655 err = IXGBE_ERR_INVALID_ARGUMENT;
0656
0657 mbx_err:
0658 return err;
0659 }
0660
0661
0662
0663
0664
0665
0666
0667
0668 static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
0669 bool vlan_on)
0670 {
0671 return -EOPNOTSUPP;
0672 }
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684 static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
0685 ixgbe_link_speed speed, bool autoneg,
0686 bool autoneg_wait_to_complete)
0687 {
0688 return 0;
0689 }
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Reads the VFLINKS register to determine whether link is up and at what
 * speed, and additionally checks the mailbox to confirm the PF is still
 * responsive before declaring link up.
 */
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset, drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	/* Link already known to be up: nothing to re-check */
	if (!mac->get_link_status)
		goto out;

	/* If link status is down, no point in checking whether the PF is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* On 82599 VFs re-sample the link bit 5 times, 100us apart, and
	 * require it to stay up each time before trusting it.
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* Translate the hardware speed field into the driver's enum */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* If the mailbox read failed it could just be a collision with the
	 * PF; with API 1.5+ the link can be trusted anyway, otherwise wait
	 * until we are called again and don't report an error.
	 */
	if (mbx->ops.read(hw, &in_msg, 1)) {
		if (hw->api_version >= ixgbe_mbox_api_15)
			mac->get_link_status = false;
		goto out;
	}

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS; if it is FAILURE we must have lost CTS */
		if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
			ret_val = -1;
		goto out;
	}

	/* The PF is talking; if we timed out in the past, report an error
	 * so the caller reinitializes.
	 */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* All checks passed: link is up and we no longer need to poll it */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
0781
0782
0783
0784
0785
0786
0787
0788
0789
0790
/**
 * ixgbevf_hv_check_mac_link_vf - Get link/speed status (Hyper-V variant)
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Same as ixgbevf_check_mac_link_vf but without the PF mailbox
 * confirmation step, since Hyper-V provides no PF mailbox.  Always
 * returns 0.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset, drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	/* Link already known to be up: nothing to re-check */
	if (!mac->get_link_status)
		goto out;

	/* If link status is down, no further checks are needed */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* On 82599 VFs re-sample the link bit 5 times, 100us apart, and
	 * require it to stay up each time before trusting it.
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* Translate the hardware speed field into the driver's enum */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* Link is stable: stop polling until the next reset */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}
0848
0849
0850
0851
0852
0853
0854 static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
0855 {
0856 u32 msgbuf[2];
0857 s32 ret_val;
0858
0859 msgbuf[0] = IXGBE_VF_SET_LPE;
0860 msgbuf[1] = max_size;
0861
0862 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
0863 ARRAY_SIZE(msgbuf));
0864 if (ret_val)
0865 return ret_val;
0866 if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
0867 (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
0868 return IXGBE_ERR_MBX;
0869
0870 return 0;
0871 }
0872
0873
0874
0875
0876
0877
0878
/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 *
 * Hyper-V variant: programs the RLPML field of RXDCTL(0) directly instead
 * of going through the (unavailable) PF mailbox.  Always returns 0.
 */
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* +4 accounts for the CRC appended to the frame.
	 * NOTE(review): the previous RLPML value is OR-ed over rather than
	 * cleared first — confirm the configured length only ever grows
	 * across calls, otherwise stale bits could inflate the limit.
	 */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}
0893
0894
0895
0896
0897
0898
0899 static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
0900 {
0901 int err;
0902 u32 msg[3];
0903
0904
0905 msg[0] = IXGBE_VF_API_NEGOTIATE;
0906 msg[1] = api;
0907 msg[2] = 0;
0908
0909 err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
0910 if (!err) {
0911 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
0912
0913
0914 if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
0915 IXGBE_VT_MSGTYPE_SUCCESS)) {
0916 hw->api_version = api;
0917 return 0;
0918 }
0919
0920 err = IXGBE_ERR_INVALID_ARGUMENT;
0921 }
0922
0923 return err;
0924 }
0925
0926
0927
0928
0929
0930
0931
0932 static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
0933 {
0934
0935 if (api != ixgbe_mbox_api_10)
0936 return IXGBE_ERR_INVALID_ARGUMENT;
0937
0938 return 0;
0939 }
0940
/**
 * ixgbevf_get_queues - Fetch queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: filled with the number of traffic classes reported by the PF
 * @default_tc: filled with the default traffic class / queue
 *
 * Sends IXGBE_VF_GET_QUEUE (mailbox API 1.1+) and records the Tx/Rx queue
 * counts in hw->mac, clamping each reply field to a sane range.  Silently
 * succeeds (returns 0) on API versions that do not support the query.
 */
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if the API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_15:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* If we didn't get an ACK there must have been some sort
		 * of mailbox error so we should treat it as such.
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
			return IXGBE_ERR_MBX;

		/* Record and validate values from the reply message;
		 * out-of-range queue counts fall back to the maximum.
		 */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* an implausible TC count falls back to a single TC */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* an out-of-range default queue falls back to queue 0 */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}
0998
/* MAC operations for a VF that talks to the PF over the hardware mailbox. */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.get_link_state		= ixgbevf_get_link_state_vf,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};
1016
/* MAC operations for a VF running under Hyper-V, where no PF mailbox is
 * available: mailbox-dependent operations use the *_hv_* stubs.
 */
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.get_link_state		= ixgbevf_hv_get_link_state_vf,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};
1034
/* Per-device info tables: one native and one Hyper-V entry per MAC type,
 * pairing the MAC type enum with the matching operations table.
 */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};