// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "mbx.h"
#include "ixgbevf.h"

/**
 *  ixgbevf_poll_for_msg - Wait for message notification
 *  @hw: pointer to the HW structure
 *
 *  returns 0 if it successfully received a message notification
 **/
static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	if (!countdown || !mbx->ops.check_for_msg)
		return IXGBE_ERR_CONFIG;

	while (countdown && mbx->ops.check_for_msg(hw)) {
		countdown--;
		udelay(mbx->udelay);
	}

	return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}

/**
 *  ixgbevf_poll_for_ack - Wait for message acknowledgment
 *  @hw: pointer to the HW structure
 *
 *  returns 0 if it successfully received a message acknowledgment
 **/
static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	if (!countdown || !mbx->ops.check_for_ack)
		return IXGBE_ERR_CONFIG;

	while (countdown && mbx->ops.check_for_ack(hw)) {
		countdown--;
		udelay(mbx->udelay);
	}

	return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}

/**
 *  ixgbevf_read_mailbox_vf - read VF's mailbox register
 *  @hw: pointer to the HW structure
 *
 *  This function reads the mailbox register dedicated to the VF without
 *  losing the read-to-clear status bits, which are cached in hw->mbx.
 **/
static u32 ixgbevf_read_mailbox_vf(struct ixgbe_hw *hw)
{
	u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);

	vf_mailbox |= hw->mbx.vf_mailbox;
	hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;

	return vf_mailbox;
}

/**
 *  ixgbevf_clear_msg_vf - clear PF status bit
 *  @hw: pointer to the HW structure
 *
 *  This function clears the cached PFSTS bit of the VFMAILBOX register.
 **/
static void ixgbevf_clear_msg_vf(struct ixgbe_hw *hw)
{
	u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);

	if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
		hw->mbx.stats.reqs++;
		hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
	}
}

/**
 *  ixgbevf_clear_ack_vf - clear PF ACK bit
 *  @hw: pointer to the HW structure
 *
 *  This function clears the cached PFACK bit of the VFMAILBOX register.
 **/
static void ixgbevf_clear_ack_vf(struct ixgbe_hw *hw)
{
	u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);

	if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
		hw->mbx.stats.acks++;
		hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
	}
}

/**
 *  ixgbevf_clear_rst_vf - clear PF reset bits
 *  @hw: pointer to the HW structure
 *
 *  This function clears the cached reset indication (RSTI) and reset
 *  done (RSTD) bits of the VFMAILBOX register.
 **/
static void ixgbevf_clear_rst_vf(struct ixgbe_hw *hw)
{
	u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);

	if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
		hw->mbx.stats.rsts++;
		hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
					IXGBE_VFMAILBOX_RSTD);
	}
}

/**
 *  ixgbevf_check_for_bit_vf - Determine if a status bit was set
 *  @hw: pointer to the HW structure
 *  @mask: bitmask of bits to be tested
 *
 *  This function is used to check for the read-to-clear bits within
 *  the VF mailbox.
 **/
static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
	u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
	s32 ret_val = IXGBE_ERR_MBX;

	if (vf_mailbox & mask)
		ret_val = 0;

	return ret_val;
}

/**
 *  ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
 *  @hw: pointer to the HW structure
 *
 *  returns 0 if the PF has set the status bit or else IXGBE_ERR_MBX
 **/
static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_MBX;

	if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
		ret_val = 0;
		hw->mbx.stats.reqs++;
	}

	return ret_val;
}

/**
 *  ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
 *  @hw: pointer to the HW structure
 *
 *  returns 0 if the PF has set the ACK bit or else IXGBE_ERR_MBX
 **/
static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_MBX;

	if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
		ret_val = 0;
		ixgbevf_clear_ack_vf(hw);
		hw->mbx.stats.acks++;
	}

	return ret_val;
}

/**
 *  ixgbevf_check_for_rst_vf - checks to see if the PF has reset
 *  @hw: pointer to the HW structure
 *
 *  returns 0 if the PF has indicated a reset (RSTI or RSTD set), else IXGBE_ERR_MBX
 **/
static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_MBX;

	if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
					   IXGBE_VFMAILBOX_RSTI))) {
		ret_val = 0;
		ixgbevf_clear_rst_vf(hw);
		hw->mbx.stats.rsts++;
	}

	return ret_val;
}

/**
 *  ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
 *  @hw: pointer to the HW structure
 *
 *  return 0 if we obtained the mailbox lock
 **/
static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = IXGBE_ERR_CONFIG;
	int countdown = mbx->timeout;
	u32 vf_mailbox;

	if (!mbx->timeout)
		return ret_val;

	while (countdown--) {
		/* Reserve mailbox for VF use */
		vf_mailbox = ixgbevf_read_mailbox_vf(hw);
		vf_mailbox |= IXGBE_VFMAILBOX_VFU;
		IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);

		/* Verify the VF has taken ownership of the mailbox */
		if (ixgbevf_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
			ret_val = 0;
			break;
		}

		/* Wait a bit before trying again */
		udelay(mbx->udelay);
	}

	if (ret_val)
		ret_val = IXGBE_ERR_TIMEOUT;

	return ret_val;
}

/**
 *  ixgbevf_release_mbx_lock_vf - release mailbox lock
 *  @hw: pointer to the HW structure
 **/
static void ixgbevf_release_mbx_lock_vf(struct ixgbe_hw *hw)
{
	u32 vf_mailbox;

	/* Return ownership of the buffer */
	vf_mailbox = ixgbevf_read_mailbox_vf(hw);
	vf_mailbox &= ~IXGBE_VFMAILBOX_VFU;
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
}

/**
 *  ixgbevf_release_mbx_lock_vf_legacy - release mailbox lock
 *  @hw: pointer to the HW structure
 **/
static void ixgbevf_release_mbx_lock_vf_legacy(struct ixgbe_hw *__always_unused hw)
{
}

/**
 *  ixgbevf_write_mbx_vf - Write a message to the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *
 *  returns 0 if it successfully copied message into the buffer
 **/
static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
	u32 vf_mailbox;
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent PF/VF race condition */
	ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
	if (ret_val)
		goto out_no_write;

	/* flush msg and acks as we are overwriting the message buffer */
	ixgbevf_clear_msg_vf(hw);
	ixgbevf_clear_ack_vf(hw);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

	/* interrupt the PF to tell it a message has been sent */
	vf_mailbox = ixgbevf_read_mailbox_vf(hw);
	vf_mailbox |= IXGBE_VFMAILBOX_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);

	/* if msg sent wait until we receive an ack */
	ret_val = ixgbevf_poll_for_ack(hw);

out_no_write:
	hw->mbx.ops.release(hw);

	return ret_val;
}

/**
 *  ixgbevf_write_mbx_vf_legacy - Write a message to the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *
 *  returns 0 if it successfully copied message into the buffer
 **/
static s32 ixgbevf_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
	s32 ret_val;
	u16 i;

	/* lock the mailbox to prevent PF/VF race condition */
	ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
	if (ret_val)
		goto out_no_write;

	/* flush msg and acks as we are overwriting the message buffer */
	ixgbevf_check_for_msg_vf(hw);
	ixgbevf_clear_msg_vf(hw);
	ixgbevf_check_for_ack_vf(hw);
	ixgbevf_clear_ack_vf(hw);

	/* copy the caller specified message to the mailbox memory buffer */
	for (i = 0; i < size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);

	/* update stats */
	hw->mbx.stats.msgs_tx++;

	/* interrupt the PF to tell it a message has been sent */
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);

out_no_write:
	return ret_val;
}

/**
 *  ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *
 *  returns 0 if it successfully read message from buffer
 **/
static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
	u32 vf_mailbox;
	s32 ret_val;
	u16 i;

	/* check if there is a message from the PF */
	ret_val = ixgbevf_check_for_msg_vf(hw);
	if (ret_val)
		return ret_val;

	ixgbevf_clear_msg_vf(hw);

	/* copy the message from the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);

	/* Acknowledge receipt */
	vf_mailbox = ixgbevf_read_mailbox_vf(hw);
	vf_mailbox |= IXGBE_VFMAILBOX_ACK;
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

	return ret_val;
}

/**
 *  ixgbevf_read_mbx_vf_legacy - Reads a message from the inbox intended for VF
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *
 *  returns 0 if it successfully read message from buffer
 **/
static s32 ixgbevf_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
	s32 ret_val = 0;
	u16 i;

	/* lock the mailbox to prevent PF/VF race condition */
	ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
	if (ret_val)
		goto out_no_read;

	/* copy the message from the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);

	/* Acknowledge receipt and release mailbox, then we're done */
	IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

out_no_read:
	return ret_val;
}

/**
 *  ixgbevf_init_mbx_params_vf - set initial values for VF mailbox
 *  @hw: pointer to hardware structure
 *
 *  Initializes the hw->mbx struct to correct values for VF mailbox
 **/
static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;

	/* set the polling delay and timeout used while waiting for
	 * the PF to respond
	 */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
	mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;

	mbx->size = IXGBE_VFMAILBOX_SIZE;

	mbx->stats.msgs_tx = 0;
	mbx->stats.msgs_rx = 0;
	mbx->stats.reqs = 0;
	mbx->stats.acks = 0;
	mbx->stats.rsts = 0;

	return 0;
}

/**
 *  ixgbevf_poll_mbx - Wait for message and read it from the mailbox
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *
 *  returns 0 if it successfully read message from buffer
 **/
s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = IXGBE_ERR_CONFIG;

	if (!mbx->ops.read || !mbx->ops.check_for_msg || !mbx->timeout)
		return ret_val;

	/* limit read to size of mailbox */
	if (size > mbx->size)
		size = mbx->size;

	ret_val = ixgbevf_poll_for_msg(hw);
	/* if a message arrived read it, otherwise we timed out */
	if (!ret_val)
		ret_val = mbx->ops.read(hw, msg, size);

	return ret_val;
}
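
/* Usage sketch (illustration only, compiled out below): a caller that has
 * already posted a request could wait for the PF reply like this.  The
 * helper name and buffer handling are assumptions made for the example,
 * not part of this driver.
 */
#if 0
static s32 example_wait_for_pf_reply(struct ixgbe_hw *hw, u32 *reply, u16 len)
{
	s32 err;

	/* polls (with udelay granularity) until the PF posts a message or
	 * the mailbox timeout expires, then copies it into reply
	 */
	err = ixgbevf_poll_mbx(hw, reply, len);
	if (err)
		return err;

	/* reply[0] carries the echoed opcode plus the PF's status bits */
	return 0;
}
#endif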

/**
 *  ixgbevf_write_mbx - Write a message to the mailbox and wait for ACK
 *  @hw: pointer to the HW structure
 *  @msg: The message buffer
 *  @size: Length of buffer
 *
 *  returns 0 if it successfully copied message into the buffer and
 *  received an ACK to that message within the specified period
 **/
s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	s32 ret_val = IXGBE_ERR_CONFIG;

	/* exit if we can't write, can't check for an ACK, can't release
	 * the lock, or there is no timeout defined
	 */
	if (!mbx->ops.write || !mbx->ops.check_for_ack || !mbx->ops.release ||
	    !mbx->timeout)
		return ret_val;

	if (size > mbx->size)
		ret_val = IXGBE_ERR_PARAM;
	else
		ret_val = mbx->ops.write(hw, msg, size);

	return ret_val;
}
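
/* Round-trip sketch (illustration only, compiled out below): the typical
 * request/reply pattern is to post a message with ixgbevf_write_mbx() and
 * then collect the PF's reply with ixgbevf_poll_mbx().  The helper below
 * is hypothetical and not part of this driver.
 */
#if 0
static s32 example_mbx_round_trip(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
	s32 err;

	/* post the request; with ixgbevf_write_mbx_vf() as the write op
	 * this also waits for the PF ACK before returning
	 */
	err = ixgbevf_write_mbx(hw, msg, size);
	if (err)
		return err;

	/* wait for the PF reply and copy it back into msg */
	return ixgbevf_poll_mbx(hw, msg, size);
}
#endif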

const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
	.init_params	= ixgbevf_init_mbx_params_vf,
	.release	= ixgbevf_release_mbx_lock_vf,
	.read		= ixgbevf_read_mbx_vf,
	.write		= ixgbevf_write_mbx_vf,
	.check_for_msg	= ixgbevf_check_for_msg_vf,
	.check_for_ack	= ixgbevf_check_for_ack_vf,
	.check_for_rst	= ixgbevf_check_for_rst_vf,
};

const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
	.init_params	= ixgbevf_init_mbx_params_vf,
	.release	= ixgbevf_release_mbx_lock_vf_legacy,
	.read		= ixgbevf_read_mbx_vf_legacy,
	.write		= ixgbevf_write_mbx_vf_legacy,
	.check_for_msg	= ixgbevf_check_for_msg_vf,
	.check_for_ack	= ixgbevf_check_for_ack_vf,
	.check_for_rst	= ixgbevf_check_for_rst_vf,
};

/* Mailbox operations when running on Hyper-V.
 * On Hyper-V, PF/VF communication is not done through the hardware
 * mailbox; it goes through the VF device driver via the host channel
 * (VMBus) instead, so only parameter init and reset detection are
 * provided here.
 */
const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
	.init_params	= ixgbevf_init_mbx_params_vf,
	.check_for_rst	= ixgbevf_check_for_rst_vf,
};
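
/* Selection sketch (illustration only, compiled out below): the rest of the
 * driver publishes exactly one of the tables above through hw->mbx.ops.
 * The actual wiring lives outside this file; the helper and conditions
 * below are assumptions made for the example, not the driver's logic.
 */
#if 0
static void example_select_mbx_ops(struct ixgbe_hw *hw, bool on_hyperv,
				   bool pf_supports_mbx_lock)
{
	if (on_hyperv)
		hw->mbx.ops = ixgbevf_hv_mbx_ops;	/* no PCI mailbox traffic */
	else if (pf_supports_mbx_lock)
		hw->mbx.ops = ixgbevf_mbx_ops;		/* lock-aware read/write */
	else
		hw->mbx.ops = ixgbevf_mbx_ops_legacy;	/* original protocol */
}
#endif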