// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

enum {NETDEV_STATS, IXGBEVF_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

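/* Each table entry records whether a statistic lives in struct
 * ixgbevf_adapter or in net_device_stats, together with its size and
 * offset, so ixgbevf_get_ethtool_stats() can copy the values generically.
 */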
#define IXGBEVF_STAT(_name, _stat) { \
	.stat_string = _name, \
	.type = IXGBEVF_STATS, \
	.sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \
	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.type = NETDEV_STATS, \
	.sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
	IXGBEVF_NETDEV_STAT(rx_packets),
	IXGBEVF_NETDEV_STAT(tx_packets),
	IXGBEVF_NETDEV_STAT(rx_bytes),
	IXGBEVF_NETDEV_STAT(tx_bytes),
	IXGBEVF_STAT("tx_busy", tx_busy),
	IXGBEVF_STAT("tx_restart_queue", restart_queue),
	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
	IXGBEVF_NETDEV_STAT(multicast),
	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
	IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
	IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
	IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	IXGBEVF_STAT("tx_ipsec", tx_ipsec),
	IXGBEVF_STAT("rx_ipsec", rx_ipsec),
};

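/* Per-queue statistics are a packets and a bytes counter for every Tx, XDP
 * and Rx ring; the ring count is multiplied by the number of u64 fields in
 * struct ixgbevf_stats.
 */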
#define IXGBEVF_QUEUE_STATS_LEN ( \
	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
	 (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Link test (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBEVF_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)

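/* VFs have no PHY to query; advertise a fixed 10GbaseT full-duplex capability
 * and report the link state and speed cached in the adapter structure.
 */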
static int ixgbevf_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = -1;

	if (adapter->link_up) {
		__u32 speed = SPEED_10000;

		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			speed = SPEED_100;
			break;
		}

		cmd->base.speed = speed;
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
	return IXGBE_REGS_LEN * sizeof(u32);
}

static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	/* generate a number suitable for ethtool's register version */
	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

	/* Interrupt */
	/* don't read EICR because reading it can clear interrupt causes;
	 * read the EICS shadow register instead, which doesn't clear EICR
	 */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}

static void ixgbevf_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IXGBEVF_MAX_RXD;
	ring->tx_max_pending = IXGBEVF_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}

static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, j, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

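	/* When the interface is running, allocate and set up the resized rings
	 * first so that an allocation failure leaves the current rings fully
	 * intact; the swap itself happens further down with the device down.
	 */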
	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc(array_size(sizeof(*tx_ring),
					     adapter->num_tx_queues +
					     adapter->num_xdp_queues));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->xdp_ring[j];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(array_size(sizeof(*rx_ring),
					     adapter->num_rx_queues));
		if (!rx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];

			/* Clear copied XDP RX-queue info */
			memset(&rx_ring[i].xdp_rxq, 0,
			       sizeof(rx_ring[i].xdp_rxq));

			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* bring interface down to prepare for update */
	ixgbevf_down(adapter);

	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
			ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
			*adapter->xdp_ring[j] = tx_ring[i];
		}
		adapter->xdp_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbevf_up(adapter);

clear_reset:
	/* free Tx resources if Rx error is encountered */
	if (tx_ring) {
		for (i = 0;
		     i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
			ixgbevf_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}

static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
	switch (stringset) {
	case ETH_SS_TEST:
		return IXGBEVF_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBEVF_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBEVF_PRIV_FLAGS_STR_LEN;
	default:
		return -EINVAL;
	}
}

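/* Statistics are emitted in the same order as the strings reported by
 * ixgbevf_get_strings(): the global table first, then packets/bytes pairs
 * for every Tx, XDP and Rx ring.
 */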
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbevf_ring *ring;
	int i, j;
	char *p;

	ixgbevf_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		switch (ixgbevf_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *)net_stats +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		case IXGBEVF_STATS:
			p = (char *)adapter +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate Tx queue data */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate XDP queue data */
	for (j = 0; j < adapter->num_xdp_queues; j++) {
		ring = adapter->xdp_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate Rx queue data */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
}

static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_xdp_queues; i++) {
			sprintf(p, "xdp_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "xdp_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbevf_priv_flags_strings,
		       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
		break;
	}
}

static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;

	return *data;
}

/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;
	u8 array_len;
	u8 test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6

/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ .reg = 0 }
};

static const u32 register_test_patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

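/* Write each test pattern (masked by the entry's write mask) to the register,
 * verify that the value read back matches under the read mask, and restore
 * the original register contents afterwards.
 */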
static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	u32 pat, val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
		before = ixgbevf_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg,
				register_test_patterns[pat] & write);
		val = ixgbevf_read_reg(&adapter->hw, reg);
		if (val != (register_test_patterns[pat] & write & mask)) {
			hw_dbg(&adapter->hw,
			       "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			       reg, val,
			       register_test_patterns[pat] & write & mask);
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbevf_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbevf_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		       reg, (val & mask), write & mask);
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}

static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	const struct ixgbevf_reg_test *test;
	u32 i;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	test = reg_test_vf;

	/* Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     test->reg + 4 + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return *data;
}

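/* ethtool self-test: data[0] carries the register test result and data[1] the
 * link test result, matching the order of ixgbe_gstrings_test[].  The register
 * test only runs in offline mode, which temporarily closes a running
 * interface and resets the VF.
 */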
static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbevf_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			ixgbevf_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");

		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* register test isn't run online; report pass by default */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}

static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

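/* rx/tx_itr_setting values of 0 and 1 are stored and reported unscaled; a
 * value of 1 selects the driver default rates (IXGBE_20K_ITR for Rx,
 * IXGBE_12K_ITR for Tx) when the vectors are programmed.  Larger values
 * store the requested interval as microseconds shifted left by two.
 */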
static int ixgbevf_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}

static int ixgbevf_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_q_vector *q_vector;
	int num_vectors, i;
	u16 tx_itr_param, rx_itr_param;

	/* don't accept Tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count &&
	    adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* Tx only */
			q_vector->itr = tx_itr_param;
		else
			/* Rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbevf_write_eitr(q_vector);
	}

	return 0;
}

static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			     u32 *rules __always_unused)
{
	struct ixgbevf_adapter *adapter = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		return 0;
	default:
		hw_dbg(&adapter->hw, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}

static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		return IXGBEVF_X550_VFRETA_SIZE;

	return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBEVF_RSS_HASH_KEY_SIZE;
}

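/* X550-class VFs keep a local copy of the RSS key and indirection table;
 * older (82599-class) VFs have to fetch both from the PF over the mailbox,
 * which is why the mailbox lock is taken in that path.
 */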
static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
		if (key)
			memcpy(key, adapter->rss_key,
			       ixgbevf_get_rxfh_key_size(netdev));

		if (indir) {
			int i;

			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
				indir[i] = adapter->rss_indir_tbl[i];
		}
	} else {
		/* If neither indirection table nor hash key was requested,
		 * just return success without taking any locks.
		 */
		if (!indir && !key)
			return 0;

		spin_lock_bh(&adapter->mbx_lock);
		if (indir)
			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
						      adapter->num_rx_queues);

		if (!err && key)
			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

		spin_unlock_bh(&adapter->mbx_lock);
	}

	return err;
}

static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
		priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}

static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int flags = adapter->flags;

	flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
	if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
		flags |= IXGBEVF_FLAGS_LEGACY_RX;

	if (flags != adapter->flags) {
		adapter->flags = flags;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbevf_reinit_locked(adapter);
	}

	return 0;
}

static const struct ethtool_ops ixgbevf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo		= ixgbevf_get_drvinfo,
	.get_regs_len		= ixgbevf_get_regs_len,
	.get_regs		= ixgbevf_get_regs,
	.nway_reset		= ixgbevf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= ixgbevf_get_ringparam,
	.set_ringparam		= ixgbevf_set_ringparam,
	.get_msglevel		= ixgbevf_get_msglevel,
	.set_msglevel		= ixgbevf_set_msglevel,
	.self_test		= ixgbevf_diag_test,
	.get_sset_count		= ixgbevf_get_sset_count,
	.get_strings		= ixgbevf_get_strings,
	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
	.get_coalesce		= ixgbevf_get_coalesce,
	.set_coalesce		= ixgbevf_set_coalesce,
	.get_rxnfc		= ixgbevf_get_rxnfc,
	.get_rxfh_indir_size	= ixgbevf_get_rxfh_indir_size,
	.get_rxfh_key_size	= ixgbevf_get_rxfh_key_size,
	.get_rxfh		= ixgbevf_get_rxfh,
	.get_link_ksettings	= ixgbevf_get_link_ksettings,
	.get_priv_flags		= ixgbevf_get_priv_flags,
	.set_priv_flags		= ixgbevf_set_priv_flags,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}