0001
0002
0003
0004 #include <linux/pci.h>
0005 #include <linux/delay.h>
0006 #include "ixgbe.h"
0007 #include "ixgbe_mbx.h"
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018 s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
0019 {
0020 struct ixgbe_mbx_info *mbx = &hw->mbx;
0021
0022
0023 if (size > mbx->size)
0024 size = mbx->size;
0025
0026 if (!mbx->ops)
0027 return IXGBE_ERR_MBX;
0028
0029 return mbx->ops->read(hw, msg, size, mbx_id);
0030 }
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041 s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
0042 {
0043 struct ixgbe_mbx_info *mbx = &hw->mbx;
0044
0045 if (size > mbx->size)
0046 return IXGBE_ERR_MBX;
0047
0048 if (!mbx->ops)
0049 return IXGBE_ERR_MBX;
0050
0051 return mbx->ops->write(hw, msg, size, mbx_id);
0052 }
0053
0054
0055
0056
0057
0058
0059
0060
0061 s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
0062 {
0063 struct ixgbe_mbx_info *mbx = &hw->mbx;
0064
0065 if (!mbx->ops)
0066 return IXGBE_ERR_MBX;
0067
0068 return mbx->ops->check_for_msg(hw, mbx_id);
0069 }
0070
0071
0072
0073
0074
0075
0076
0077
0078 s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
0079 {
0080 struct ixgbe_mbx_info *mbx = &hw->mbx;
0081
0082 if (!mbx->ops)
0083 return IXGBE_ERR_MBX;
0084
0085 return mbx->ops->check_for_ack(hw, mbx_id);
0086 }
0087
0088
0089
0090
0091
0092
0093
0094
0095 s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
0096 {
0097 struct ixgbe_mbx_info *mbx = &hw->mbx;
0098
0099 if (!mbx->ops)
0100 return IXGBE_ERR_MBX;
0101
0102 return mbx->ops->check_for_rst(hw, mbx_id);
0103 }
0104
0105
0106
0107
0108
0109
0110
0111
0112 static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
0113 {
0114 struct ixgbe_mbx_info *mbx = &hw->mbx;
0115 int countdown = mbx->timeout;
0116
0117 if (!countdown || !mbx->ops)
0118 return IXGBE_ERR_MBX;
0119
0120 while (mbx->ops->check_for_msg(hw, mbx_id)) {
0121 countdown--;
0122 if (!countdown)
0123 return IXGBE_ERR_MBX;
0124 udelay(mbx->usec_delay);
0125 }
0126
0127 return 0;
0128 }
0129
0130
0131
0132
0133
0134
0135
0136
0137 static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
0138 {
0139 struct ixgbe_mbx_info *mbx = &hw->mbx;
0140 int countdown = mbx->timeout;
0141
0142 if (!countdown || !mbx->ops)
0143 return IXGBE_ERR_MBX;
0144
0145 while (mbx->ops->check_for_ack(hw, mbx_id)) {
0146 countdown--;
0147 if (!countdown)
0148 return IXGBE_ERR_MBX;
0149 udelay(mbx->usec_delay);
0150 }
0151
0152 return 0;
0153 }
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165 static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
0166 u16 mbx_id)
0167 {
0168 struct ixgbe_mbx_info *mbx = &hw->mbx;
0169 s32 ret_val;
0170
0171 if (!mbx->ops)
0172 return IXGBE_ERR_MBX;
0173
0174 ret_val = ixgbe_poll_for_msg(hw, mbx_id);
0175 if (ret_val)
0176 return ret_val;
0177
0178
0179 return mbx->ops->read(hw, msg, size, mbx_id);
0180 }
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192 static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
0193 u16 mbx_id)
0194 {
0195 struct ixgbe_mbx_info *mbx = &hw->mbx;
0196 s32 ret_val;
0197
0198
0199 if (!mbx->ops || !mbx->timeout)
0200 return IXGBE_ERR_MBX;
0201
0202
0203 ret_val = mbx->ops->write(hw, msg, size, mbx_id);
0204 if (ret_val)
0205 return ret_val;
0206
0207
0208 return ixgbe_poll_for_ack(hw, mbx_id);
0209 }
0210
0211 static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
0212 {
0213 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
0214
0215 if (mbvficr & mask) {
0216 IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
0217 return 0;
0218 }
0219
0220 return IXGBE_ERR_MBX;
0221 }
0222
0223
0224
0225
0226
0227
0228
0229
0230 static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
0231 {
0232 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
0233 u32 vf_bit = vf_number % 16;
0234
0235 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
0236 index)) {
0237 hw->mbx.stats.reqs++;
0238 return 0;
0239 }
0240
0241 return IXGBE_ERR_MBX;
0242 }
0243
0244
0245
0246
0247
0248
0249
0250
0251 static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
0252 {
0253 s32 index = IXGBE_MBVFICR_INDEX(vf_number);
0254 u32 vf_bit = vf_number % 16;
0255
0256 if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
0257 index)) {
0258 hw->mbx.stats.acks++;
0259 return 0;
0260 }
0261
0262 return IXGBE_ERR_MBX;
0263 }
0264
0265
0266
0267
0268
0269
0270
0271
/**
 *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
 *  @hw: pointer to the HW structure
 *  @vf_number: the VF index
 *
 *  Reads the VF Link Reset register for the generation of MAC in use
 *  (VFLRE on 82599, VFLREC on X540 and later) and, if the VF's bit is
 *  set, clears it and bumps the reset stat.  Returns 0 if a reset was
 *  detected, else IXGBE_ERR_MBX.
 **/
static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
	/* 32 VF bits per register: pick the register and bit position */
	u32 reg_offset = (vf_number < 32) ? 0 : 1;
	u32 vf_shift = vf_number % 32;
	u32 vflre = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
		break;
	default:
		/* unsupported MAC: vflre stays 0, so no reset is reported */
		break;
	}

	if (vflre & BIT(vf_shift)) {
		/* NOTE(review): the clear is written to VFLREC even on
		 * 82599, which reads VFLRE above - presumably VFLREC is
		 * the write-1-to-clear alias; confirm against datasheet.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
		hw->mbx.stats.rsts++;
		return 0;
	}

	return IXGBE_ERR_MBX;
}
0300
0301
0302
0303
0304
0305
0306
0307
0308 static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
0309 {
0310 u32 p2v_mailbox;
0311
0312
0313 IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
0314
0315
0316 p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
0317 if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
0318 return 0;
0319
0320 return IXGBE_ERR_MBX;
0321 }
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
/**
 *  ixgbe_write_mbx_pf - Places a message in the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: message to write
 *  @size: length of the message, in dwords
 *  @mbx_id: the VF index
 *
 *  Returns 0 if it successfully copied the message into the mailbox
 *  memory and signalled the VF, else the lock-acquisition error.
 **/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
			      u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		return ret_val;

	/* flush any stale msg/ack bits so we wait on fresh ones;
	 * return values are deliberately ignored here
	 */
	ixgbe_check_for_msg_pf(hw, vf_number);
	ixgbe_check_for_ack_pf(hw, vf_number);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);

	/* interrupt the VF to let it know a message is pending
	 * (this also releases the PFU lock taken above)
	 */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

	return 0;
}
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
/**
 *  ixgbe_read_mbx_pf - Read a message from the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: buffer the message is copied into
 *  @size: length of the buffer, in dwords
 *  @mbx_id: the VF index
 *
 *  This function copies a message from the mailbox buffer to the
 *  caller's memory buffer.  The presence of a message is not verified
 *  here; the caller is expected to have checked first (e.g. via the
 *  posted-read path).  Returns 0 on success, else the lock error.
 **/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
			     u16 vf_number)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		return ret_val;

	/* copy the message from the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);

	/* acknowledge the message to the VF
	 * (this also releases the PFU lock taken above)
	 */
	IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

	return 0;
}
0394
0395 #ifdef CONFIG_PCI_IOV
0396
0397
0398
0399
0400
0401
0402 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
0403 {
0404 struct ixgbe_mbx_info *mbx = &hw->mbx;
0405
0406 if (hw->mac.type != ixgbe_mac_82599EB &&
0407 hw->mac.type != ixgbe_mac_X550 &&
0408 hw->mac.type != ixgbe_mac_X550EM_x &&
0409 hw->mac.type != ixgbe_mac_x550em_a &&
0410 hw->mac.type != ixgbe_mac_X540)
0411 return;
0412
0413 mbx->timeout = 0;
0414 mbx->usec_delay = 0;
0415
0416 mbx->stats.msgs_tx = 0;
0417 mbx->stats.msgs_rx = 0;
0418 mbx->stats.reqs = 0;
0419 mbx->stats.acks = 0;
0420 mbx->stats.rsts = 0;
0421
0422 mbx->size = IXGBE_VFMAILBOX_SIZE;
0423 }
0424 #endif
0425
/* PF-side mailbox operations shared by all supported MAC types */
const struct ixgbe_mbx_operations mbx_ops_generic = {
	.read = ixgbe_read_mbx_pf,
	.write = ixgbe_write_mbx_pf,
	.read_posted = ixgbe_read_posted_mbx,
	.write_posted = ixgbe_write_posted_mbx,
	.check_for_msg = ixgbe_check_for_msg_pf,
	.check_for_ack = ixgbe_check_for_ack_pf,
	.check_for_rst = ixgbe_check_for_rst_pf,
};
0435