/* Ethtool support for the Solarflare Falcon (ef4) network driver. */
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include "net_driver.h"
#include "workarounds.h"
#include "selftest.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"

struct ef4_sw_stat_desc {
	const char *name;
	enum {
		EF4_ETHTOOL_STAT_SOURCE_nic,
		EF4_ETHTOOL_STAT_SOURCE_channel,
		EF4_ETHTOOL_STAT_SOURCE_tx_queue
	} source;
	unsigned int offset;
	u64 (*get_stat)(void *field);
};

/* Initialiser for a struct ef4_sw_stat_desc with type-checking */
#define EF4_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
			 get_stat_function) { \
	.name = #stat_name, \
	.source = EF4_ETHTOOL_STAT_SOURCE_##source_name, \
	.offset = ((((field_type *) 0) == \
		      &((struct ef4_##source_name *)0)->field) ? \
		     offsetof(struct ef4_##source_name, field) : \
		     offsetof(struct ef4_##source_name, field)), \
	.get_stat = get_stat_function, \
}
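/* Note on the .offset initialiser above: both arms of the ?: are identical,
 * so it always evaluates to offsetof(); the pointer comparison exists only
 * so the compiler verifies that @field really has type @field_type within
 * the named source structure.
 */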

static u64 ef4_get_uint_stat(void *field)
{
	return *(unsigned int *)field;
}

static u64 ef4_get_atomic_stat(void *field)
{
	return atomic_read((atomic_t *) field);
}

#define EF4_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
	EF4_ETHTOOL_STAT(field, nic, field, \
			 atomic_t, ef4_get_atomic_stat)

#define EF4_ETHTOOL_UINT_CHANNEL_STAT(field) \
	EF4_ETHTOOL_STAT(field, channel, n_##field, \
			 unsigned int, ef4_get_uint_stat)

#define EF4_ETHTOOL_UINT_TXQ_STAT(field) \
	EF4_ETHTOOL_STAT(tx_##field, tx_queue, field, \
			 unsigned int, ef4_get_uint_stat)

static const struct ef4_sw_stat_desc ef4_sw_stat_desc[] = {
	EF4_ETHTOOL_UINT_TXQ_STAT(merge_events),
	EF4_ETHTOOL_UINT_TXQ_STAT(pushes),
	EF4_ETHTOOL_UINT_TXQ_STAT(cb_packets),
	EF4_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
};

#define EF4_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(ef4_sw_stat_desc)

#define EF4_ETHTOOL_EEPROM_MAGIC 0xEFAB

/**************************************************************************
 *
 * Ethtool operations
 *
 **************************************************************************
 */

/* Identify device by flashing LEDs */
static int ef4_ethtool_phys_id(struct net_device *net_dev,
			       enum ethtool_phys_id_state state)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	enum ef4_led_mode mode = EF4_LED_DEFAULT;

	switch (state) {
	case ETHTOOL_ID_ON:
		mode = EF4_LED_ON;
		break;
	case ETHTOOL_ID_OFF:
		mode = EF4_LED_OFF;
		break;
	case ETHTOOL_ID_INACTIVE:
		mode = EF4_LED_DEFAULT;
		break;
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */
	}

	efx->type->set_id_led(efx, mode);
	return 0;
}

/* This must be called with rtnl_lock held. */
static int
ef4_ethtool_get_link_ksettings(struct net_device *net_dev,
			       struct ethtool_link_ksettings *cmd)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_link_state *link_state = &efx->link_state;

	mutex_lock(&efx->mac_lock);
	efx->phy_op->get_link_ksettings(efx, cmd);
	mutex_unlock(&efx->mac_lock);

	/* Both MACs support pause frames (bidirectional and respond-only) */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);

	if (LOOPBACK_INTERNAL(efx)) {
		cmd->base.speed = link_state->speed;
		cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
	}

	return 0;
}

/* This must be called with rtnl_lock held. */
static int
ef4_ethtool_set_link_ksettings(struct net_device *net_dev,
			       const struct ethtool_link_ksettings *cmd)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int rc;

	/* GMAC does not support 1000Mbps HD */
	if ((cmd->base.speed == SPEED_1000) &&
	    (cmd->base.duplex != DUPLEX_FULL)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "rejecting unsupported 1000Mbps HD setting\n");
		return -EINVAL;
	}

	mutex_lock(&efx->mac_lock);
	rc = efx->phy_op->set_link_ksettings(efx, cmd);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void ef4_ethtool_get_drvinfo(struct net_device *net_dev,
				    struct ethtool_drvinfo *info)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}

static int ef4_ethtool_get_regs_len(struct net_device *net_dev)
{
	return ef4_nic_get_regs_len(netdev_priv(net_dev));
}

static void ef4_ethtool_get_regs(struct net_device *net_dev,
				 struct ethtool_regs *regs, void *buf)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	regs->version = efx->type->revision;
	ef4_nic_get_regs(efx, buf);
}

static u32 ef4_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	return efx->msg_enable;
}

static void ef4_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	efx->msg_enable = msg_enable;
}

/**
 * ef4_fill_test - fill in an individual self-test entry
 * @test_index:	Index of the test
 * @strings:	Ethtool strings, or %NULL
 * @data:	Ethtool test results, or %NULL
 * @test:	Pointer to test result (used only if data != %NULL)
 * @unit_format:	Unit name format (e.g. "chan\%d")
 * @unit_id:	Unit id (e.g. 0 for "chan0")
 * @test_format:	Test name format (e.g. "loopback.\%s.tx.sent")
 * @test_id:	Test id (e.g. "PHY" or "offline")
 *
 * Fill in an individual self-test entry.
 */
static void ef4_fill_test(unsigned int test_index, u8 *strings, u64 *data,
			  int *test, const char *unit_format, int unit_id,
			  const char *test_format, const char *test_id)
{
	char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];

	/* Fill data value, if applicable */
	if (data)
		data[test_index] = *test;

	/* Fill string, if applicable */
	if (strings) {
		if (strchr(unit_format, '%'))
			snprintf(unit_str, sizeof(unit_str),
				 unit_format, unit_id);
		else
			strcpy(unit_str, unit_format);
		snprintf(test_str, sizeof(test_str), test_format, test_id);
		snprintf(strings + test_index * ETH_GSTRING_LEN,
			 ETH_GSTRING_LEN,
			 "%-6s %-24s", unit_str, test_str);
	}
}

#define EF4_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EF4_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EF4_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EF4_LOOPBACK_NAME(_mode, _counter) \
	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, ef4_loopback_mode)

/**
 * ef4_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx:		Efx NIC
 * @lb_tests:		Efx loopback self-test results structure
 * @mode:		Loopback test mode
 * @test_index:		Starting index of the test
 * @strings:		Ethtool strings, or %NULL
 * @data:		Ethtool test results, or %NULL
 *
 * Fill in a block of loopback self-test entries.  Return new test
 * index.
 */
static int ef4_fill_loopback_test(struct ef4_nic *efx,
				  struct ef4_loopback_self_tests *lb_tests,
				  enum ef4_loopback_mode mode,
				  unsigned int test_index,
				  u8 *strings, u64 *data)
{
	struct ef4_channel *channel =
		ef4_get_channel(efx, efx->tx_channel_offset);
	struct ef4_tx_queue *tx_queue;

	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		ef4_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->queue],
			      EF4_TX_QUEUE_NAME(tx_queue),
			      EF4_LOOPBACK_NAME(mode, "tx_sent"));
		ef4_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->queue],
			      EF4_TX_QUEUE_NAME(tx_queue),
			      EF4_LOOPBACK_NAME(mode, "tx_done"));
	}
	ef4_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EF4_LOOPBACK_NAME(mode, "rx_good"));
	ef4_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EF4_LOOPBACK_NAME(mode, "rx_bad"));

	return test_index;
}

/**
 * ef4_ethtool_fill_self_tests - get self-test details
 * @efx:		Efx NIC
 * @tests:		Efx self-test results structure, or %NULL
 * @strings:		Ethtool strings, or %NULL
 * @data:		Ethtool test results, or %NULL
 *
 * Get self-test number of strings, strings, and/or test results.
 * Return number of strings (== number of test results).
 *
 * The string, count and result handling are merged into one function
 * so that they can never become inconsistent with each other.
 */
static int ef4_ethtool_fill_self_tests(struct ef4_nic *efx,
				       struct ef4_self_tests *tests,
				       u8 *strings, u64 *data)
{
	struct ef4_channel *channel;
	unsigned int n = 0, i;
	enum ef4_loopback_mode mode;

	ef4_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	ef4_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	ef4_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	ef4_for_each_channel(channel, efx) {
		ef4_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EF4_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		ef4_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EF4_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
	}

	ef4_fill_test(n++, strings, data, &tests->memory,
		      "core", 0, "memory", NULL);
	ef4_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	if (efx->phy_op->run_tests != NULL) {
		EF4_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);

		for (i = 0; true; ++i) {
			const char *name;

			EF4_BUG_ON_PARANOID(i >= EF4_MAX_PHY_TESTS);
			name = efx->phy_op->test_name(efx, i);
			if (name == NULL)
				break;

			ef4_fill_test(n++, strings, data, &tests->phy_ext[i],
				      "phy", 0, name, NULL);
		}
	}

	/* Loopback tests */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = ef4_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}

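/* Describe the per-queue statistics strings: one "tx-<n>.tx_packets" entry
 * for each channel that has TX queues, followed by one "rx-<n>.rx_packets"
 * entry for each channel with an RX queue.  Returns the number of entries;
 * strings are written only if @strings is non-NULL.
 */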
static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
{
	size_t n_stats = 0;
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx) {
		if (ef4_channel_has_tx_queues(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-%u.tx_packets",
					 channel->tx_queue[0].queue /
					 EF4_TXQ_TYPES);

				strings += ETH_GSTRING_LEN;
			}
		}
	}
	ef4_for_each_channel(channel, efx) {
		if (ef4_channel_has_rx_queue(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "rx-%d.rx_packets", channel->channel);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	return n_stats;
}

static int ef4_ethtool_get_sset_count(struct net_device *net_dev,
				      int string_set)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	switch (string_set) {
	case ETH_SS_STATS:
		return efx->type->describe_stats(efx, NULL) +
		       EF4_ETHTOOL_SW_STAT_COUNT +
		       ef4_describe_per_queue_stats(efx, NULL);
	case ETH_SS_TEST:
		return ef4_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
	default:
		return -EINVAL;
	}
}

static void ef4_ethtool_get_strings(struct net_device *net_dev,
				    u32 string_set, u8 *strings)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int i;

	switch (string_set) {
	case ETH_SS_STATS:
		strings += (efx->type->describe_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
			strlcpy(strings + i * ETH_GSTRING_LEN,
				ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
		strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
		strings += (ef4_describe_per_queue_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		break;
	case ETH_SS_TEST:
		ef4_ethtool_fill_self_tests(efx, NULL, strings, NULL);
		break;
	default:
		/* No other string sets */
		break;
	}
}

static void ef4_ethtool_get_stats(struct net_device *net_dev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	const struct ef4_sw_stat_desc *stat;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int i;

	spin_lock_bh(&efx->stats_lock);

	/* Get NIC statistics */
	data += efx->type->update_stats(efx, data, NULL);

	/* Get software statistics */
	for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++) {
		stat = &ef4_sw_stat_desc[i];
		switch (stat->source) {
		case EF4_ETHTOOL_STAT_SOURCE_nic:
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EF4_ETHTOOL_STAT_SOURCE_channel:
			data[i] = 0;
			ef4_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EF4_ETHTOOL_STAT_SOURCE_tx_queue:
			data[i] = 0;
			ef4_for_each_channel(channel, efx) {
				ef4_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}
	data += EF4_ETHTOOL_SW_STAT_COUNT;

	spin_unlock_bh(&efx->stats_lock);

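	/* Per-queue packet counters come last, in the same order as the
	 * strings emitted by ef4_describe_per_queue_stats(); they are read
	 * without holding stats_lock.
	 */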
	ef4_for_each_channel(channel, efx) {
		if (ef4_channel_has_tx_queues(channel)) {
			*data = 0;
			ef4_for_each_channel_tx_queue(tx_queue, channel) {
				*data += tx_queue->tx_packets;
			}
			data++;
		}
	}
	ef4_for_each_channel(channel, efx) {
		if (ef4_channel_has_rx_queue(channel)) {
			*data = 0;
			ef4_for_each_channel_rx_queue(rx_queue, channel) {
				*data += rx_queue->rx_packets;
			}
			data++;
		}
	}
}

static void ef4_ethtool_self_test(struct net_device *net_dev,
				  struct ethtool_test *test, u64 *data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_self_tests *ef4_tests;
	bool already_up;
	int rc = -ENOMEM;

	ef4_tests = kzalloc(sizeof(*ef4_tests), GFP_KERNEL);
	if (!ef4_tests)
		goto fail;

	if (efx->state != STATE_READY) {
		rc = -EBUSY;
		goto out;
	}

	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

	/* We need rx buffers and interrupts. */
	already_up = (efx->net_dev->flags & IFF_UP);
	if (!already_up) {
		rc = dev_open(efx->net_dev, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed opening device.\n");
			goto out;
		}
	}

	rc = ef4_selftest(efx, ef4_tests, test->flags);

	if (!already_up)
		dev_close(efx->net_dev);

	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
		   rc == 0 ? "passed" : "failed",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

out:
	ef4_ethtool_fill_self_tests(efx, ef4_tests, NULL, data);
	kfree(ef4_tests);
fail:
	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;
}

/* Restart autonegotiation */
static int ef4_ethtool_nway_reset(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	return mdio45_nway_restart(&efx->mdio);
}

/* Each channel has a single IRQ and moderation timer, started by any
 * completion (or other event), so IRQ moderation may be shared between
 * the RX and TX completions of a channel.  When moderation is shared,
 * an explicit change to the RX setting is allowed to override the TX
 * setting, but only if the TX setting itself was left unchanged; see
 * ef4_ethtool_set_coalesce() below.
 */
static int ef4_ethtool_get_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	unsigned int tx_usecs, rx_usecs;
	bool rx_adaptive;

	ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);

	coalesce->tx_coalesce_usecs = tx_usecs;
	coalesce->tx_coalesce_usecs_irq = tx_usecs;
	coalesce->rx_coalesce_usecs = rx_usecs;
	coalesce->rx_coalesce_usecs_irq = rx_usecs;
	coalesce->use_adaptive_rx_coalesce = rx_adaptive;

	return 0;
}

static int ef4_ethtool_set_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce,
				    struct kernel_ethtool_coalesce *kernel_coal,
				    struct netlink_ext_ack *extack)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;
	unsigned int tx_usecs, rx_usecs;
	bool adaptive, rx_may_override_tx;
	int rc;

	ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);

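	/* A change may be requested through either the normal field or the
	 * legacy *_irq field; whichever one differs from the current value
	 * takes effect.
	 */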
	if (coalesce->rx_coalesce_usecs != rx_usecs)
		rx_usecs = coalesce->rx_coalesce_usecs;
	else
		rx_usecs = coalesce->rx_coalesce_usecs_irq;

	adaptive = coalesce->use_adaptive_rx_coalesce;

	/* If channels are shared, TX IRQ moderation can be quietly
	 * overridden unless it is changed from its current value.
	 */
	rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
			      coalesce->tx_coalesce_usecs_irq == tx_usecs);
	if (coalesce->tx_coalesce_usecs != tx_usecs)
		tx_usecs = coalesce->tx_coalesce_usecs;
	else
		tx_usecs = coalesce->tx_coalesce_usecs_irq;

	rc = ef4_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
				     rx_may_override_tx);
	if (rc != 0)
		return rc;

	ef4_for_each_channel(channel, efx)
		efx->type->push_irq_moderation(channel);

	return 0;
}

static void
ef4_ethtool_get_ringparam(struct net_device *net_dev,
			  struct ethtool_ringparam *ring,
			  struct kernel_ethtool_ringparam *kernel_ring,
			  struct netlink_ext_ack *extack)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	ring->rx_max_pending = EF4_MAX_DMAQ_SIZE;
	ring->tx_max_pending = EF4_MAX_DMAQ_SIZE;
	ring->rx_pending = efx->rxq_entries;
	ring->tx_pending = efx->txq_entries;
}

static int
ef4_ethtool_set_ringparam(struct net_device *net_dev,
			  struct ethtool_ringparam *ring,
			  struct kernel_ethtool_ringparam *kernel_ring,
			  struct netlink_ext_ack *extack)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	u32 txq_entries;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
	    ring->rx_pending > EF4_MAX_DMAQ_SIZE ||
	    ring->tx_pending > EF4_MAX_DMAQ_SIZE)
		return -EINVAL;

	if (ring->rx_pending < EF4_RXQ_MIN_ENT) {
		netif_err(efx, drv, efx->net_dev,
			  "RX queues cannot be smaller than %u\n",
			  EF4_RXQ_MIN_ENT);
		return -EINVAL;
	}

	txq_entries = max(ring->tx_pending, EF4_TXQ_MIN_ENT(efx));
	if (txq_entries != ring->tx_pending)
		netif_warn(efx, drv, efx->net_dev,
			   "increasing TX queue size to minimum of %u\n",
			   txq_entries);

	return ef4_realloc_channels(efx, ring->rx_pending, txq_entries);
}

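/* Translate the requested ethtool pause settings into EF4_FC_* flags,
 * reconfigure the PHY if the advertised capabilities change, and then
 * reconfigure the MAC.
 */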
static int ef4_ethtool_set_pauseparam(struct net_device *net_dev,
				      struct ethtool_pauseparam *pause)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	u8 wanted_fc, old_fc;
	u32 old_adv;
	int rc = 0;

	mutex_lock(&efx->mac_lock);

	wanted_fc = ((pause->rx_pause ? EF4_FC_RX : 0) |
		     (pause->tx_pause ? EF4_FC_TX : 0) |
		     (pause->autoneg ? EF4_FC_AUTO : 0));

	if ((wanted_fc & EF4_FC_TX) && !(wanted_fc & EF4_FC_RX)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Flow control unsupported: tx ON rx OFF\n");
		rc = -EINVAL;
		goto out;
	}

	if ((wanted_fc & EF4_FC_AUTO) && !efx->link_advertising) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Autonegotiation is disabled\n");
		rc = -EINVAL;
		goto out;
	}

	/* Give the NIC type a chance to prepare before TX flow control
	 * is first enabled.
	 */
	if (efx->type->prepare_enable_fc_tx &&
	    (wanted_fc & EF4_FC_TX) && !(efx->wanted_fc & EF4_FC_TX))
		efx->type->prepare_enable_fc_tx(efx);

	old_adv = efx->link_advertising;
	old_fc = efx->wanted_fc;
	ef4_link_set_wanted_fc(efx, wanted_fc);
	if (efx->link_advertising != old_adv ||
	    (efx->wanted_fc ^ old_fc) & EF4_FC_AUTO) {
		rc = efx->phy_op->reconfigure(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "Unable to advertise requested flow "
				  "control setting\n");
			goto out;
		}
	}

	/* Reconfigure the MAC.  The PHY *may* generate a link state change
	 * event if the user just changed the advertised capabilities, but
	 * there is no harm in doing this twice.
	 */
	ef4_mac_reconfigure(efx);

out:
	mutex_unlock(&efx->mac_lock);

	return rc;
}

static void ef4_ethtool_get_pauseparam(struct net_device *net_dev,
				       struct ethtool_pauseparam *pause)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	pause->rx_pause = !!(efx->wanted_fc & EF4_FC_RX);
	pause->tx_pause = !!(efx->wanted_fc & EF4_FC_TX);
	pause->autoneg = !!(efx->wanted_fc & EF4_FC_AUTO);
}

static void ef4_ethtool_get_wol(struct net_device *net_dev,
				struct ethtool_wolinfo *wol)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	return efx->type->get_wol(efx, wol);
}

static int ef4_ethtool_set_wol(struct net_device *net_dev,
			       struct ethtool_wolinfo *wol)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	return efx->type->set_wol(efx, wol->wolopts);
}

static int ef4_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx->type->map_reset_flags(flags);
	if (rc < 0)
		return rc;

	return ef4_reset(efx, rc);
}

/* MAC address mask including only I/G bit */
static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};

#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
#define IP_PROTO_FULL_MASK	0xFF
#define PORT_FULL_MASK		((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK	((__force __be16)~0)

static inline void ip6_fill_mask(__be32 *mask)
{
	mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
}

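/* Convert the filter stored at @rule->location back into the
 * ethtool_rx_flow_spec representation expected by ETHTOOL_GRXCLSRULE.
 */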
static int ef4_ethtool_get_class_rule(struct ef4_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct ef4_filter_spec spec;
	int rc;

	rc = ef4_filter_get_filter_safe(efx, EF4_FILTER_PRI_MANUAL,
					rule->location, &spec);
	if (rc)
		return rc;

	if (spec.dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
		rule->ring_cookie = RX_CLS_FLOW_DISC;
	else
		rule->ring_cookie = spec.dmaq_id;

	if ((spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) &&
	    spec.ether_type == htons(ETH_P_IP) &&
	    (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) &&
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
	    !(spec.match_flags &
	      ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
		EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
		EF4_FILTER_MATCH_IP_PROTO |
		EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V4_FLOW : UDP_V4_FLOW);
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
			ip_entry->ip4dst = spec.loc_host[0];
			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
			ip_entry->ip4src = spec.rem_host[0];
			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_PORT) {
			ip_entry->pdst = spec.loc_port;
			ip_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_PORT) {
			ip_entry->psrc = spec.rem_port;
			ip_mask->psrc = PORT_FULL_MASK;
		}
	} else if ((spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) &&
		   spec.ether_type == htons(ETH_P_IPV6) &&
		   (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) &&
		   (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
		   !(spec.match_flags &
		     ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
		       EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
		       EF4_FILTER_MATCH_IP_PROTO |
		       EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V6_FLOW : UDP_V6_FLOW);
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
			memcpy(ip6_entry->ip6dst, spec.loc_host,
			       sizeof(ip6_entry->ip6dst));
			ip6_fill_mask(ip6_mask->ip6dst);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
			memcpy(ip6_entry->ip6src, spec.rem_host,
			       sizeof(ip6_entry->ip6src));
			ip6_fill_mask(ip6_mask->ip6src);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_PORT) {
			ip6_entry->pdst = spec.loc_port;
			ip6_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_PORT) {
			ip6_entry->psrc = spec.rem_port;
			ip6_mask->psrc = PORT_FULL_MASK;
		}
	} else if (!(spec.match_flags &
		     ~(EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG |
		       EF4_FILTER_MATCH_REM_MAC | EF4_FILTER_MATCH_ETHER_TYPE |
		       EF4_FILTER_MATCH_OUTER_VID))) {
		rule->flow_type = ETHER_FLOW;
		if (spec.match_flags &
		    (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG)) {
			ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
			if (spec.match_flags & EF4_FILTER_MATCH_LOC_MAC)
				eth_broadcast_addr(mac_mask->h_dest);
			else
				ether_addr_copy(mac_mask->h_dest,
						mac_addr_ig_mask);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_MAC) {
			ether_addr_copy(mac_entry->h_source, spec.rem_mac);
			eth_broadcast_addr(mac_mask->h_source);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) {
			mac_entry->h_proto = spec.ether_type;
			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
		}
	} else if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IP) &&
		   !(spec.match_flags &
		     ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
		       EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
		       EF4_FILTER_MATCH_IP_PROTO))) {
		rule->flow_type = IPV4_USER_FLOW;
		uip_entry->ip_ver = ETH_RX_NFC_IP4;
		if (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) {
			uip_mask->proto = IP_PROTO_FULL_MASK;
			uip_entry->proto = spec.ip_proto;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
			uip_entry->ip4dst = spec.loc_host[0];
			uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
			uip_entry->ip4src = spec.rem_host[0];
			uip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
	} else if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IPV6) &&
		   !(spec.match_flags &
		     ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
		       EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
		       EF4_FILTER_MATCH_IP_PROTO))) {
		rule->flow_type = IPV6_USER_FLOW;
		if (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) {
			uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
			uip6_entry->l4_proto = spec.ip_proto;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
			memcpy(uip6_entry->ip6dst, spec.loc_host,
			       sizeof(uip6_entry->ip6dst));
			ip6_fill_mask(uip6_mask->ip6dst);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
			memcpy(uip6_entry->ip6src, spec.rem_host,
			       sizeof(uip6_entry->ip6src));
			ip6_fill_mask(uip6_mask->ip6src);
		}
	} else {
		/* The above should handle all filters that we insert */
		WARN_ON(1);
		return -EINVAL;
	}

	if (spec.match_flags & EF4_FILTER_MATCH_OUTER_VID) {
		rule->flow_type |= FLOW_EXT;
		rule->h_ext.vlan_tci = spec.outer_vid;
		rule->m_ext.vlan_tci = htons(0xfff);
	}

	return rc;
}

static int
ef4_ethtool_get_rxnfc(struct net_device *net_dev,
		      struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = efx->n_rx_channels;
		return 0;

	case ETHTOOL_GRXFH: {
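		/* Report which packet header fields contribute to the RX
		 * flow hash; the result is reported as zero on hardware
		 * older than Falcon B0.
		 */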
		unsigned int min_revision = 0;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			fallthrough;
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			min_revision = EF4_REV_FALCON_B0;
			break;
		default:
			break;
		}
		if (ef4_nic_rev(efx) < min_revision)
			info->data = 0;
		return 0;
	}

	case ETHTOOL_GRXCLSRLCNT:
		info->data = ef4_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		info->data |= RX_CLS_LOC_SPECIAL;
		info->rule_cnt =
			ef4_filter_count_rx_used(efx, EF4_FILTER_PRI_MANUAL);
		return 0;

	case ETHTOOL_GRXCLSRULE:
		if (ef4_filter_get_rx_id_limit(efx) == 0)
			return -EOPNOTSUPP;
		return ef4_ethtool_get_class_rule(efx, &info->fs);

	case ETHTOOL_GRXCLSRLALL: {
		s32 rc;
		info->data = ef4_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		rc = ef4_filter_get_rx_ids(efx, EF4_FILTER_PRI_MANUAL,
					   rule_locs, info->rule_cnt);
		if (rc < 0)
			return rc;
		info->rule_cnt = rc;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}

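/* The flow-spec code below accepts an IPv6 address mask only if it is
 * all-zeroes (field ignored) or all-ones (match the full address); these
 * helpers test for those two cases.
 */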
static inline bool ip6_mask_is_full(__be32 mask[4])
{
	return !~(mask[0] & mask[1] & mask[2] & mask[3]);
}

static inline bool ip6_mask_is_empty(__be32 mask[4])
{
	return !(mask[0] | mask[1] | mask[2] | mask[3]);
}

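/* Build an ef4_filter_spec from an ethtool_rx_flow_spec and insert it.
 * On success the filter ID chosen by the driver is returned to userspace
 * via rule->location.
 */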
static int ef4_ethtool_set_class_rule(struct ef4_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct ef4_filter_spec spec;
	int rc;

	/* Check that user wants us to choose the location */
	if (rule->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	/* Range-check ring_cookie */
	if (rule->ring_cookie >= efx->n_rx_channels &&
	    rule->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	/* Check for unsupported extensions */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
	     rule->m_ext.data[1]))
		return -EINVAL;

	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_MANUAL,
			   efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
			   EF4_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		spec.match_flags = (EF4_FILTER_MATCH_ETHER_TYPE |
				    EF4_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IP);
		spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
				 IPPROTO_TCP : IPPROTO_UDP);
		if (ip_mask->ip4dst) {
			if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = ip_entry->ip4dst;
		}
		if (ip_mask->ip4src) {
			if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = ip_entry->ip4src;
		}
		if (ip_mask->pdst) {
			if (ip_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip_entry->pdst;
		}
		if (ip_mask->psrc) {
			if (ip_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip_entry->psrc;
		}
		if (ip_mask->tos)
			return -EINVAL;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		spec.match_flags = (EF4_FILTER_MATCH_ETHER_TYPE |
				    EF4_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IPV6);
		spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ?
				 IPPROTO_TCP : IPPROTO_UDP);
		if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(ip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
			if (!ip6_mask_is_full(ip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (ip6_mask->pdst) {
			if (ip6_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip6_entry->pdst;
		}
		if (ip6_mask->psrc) {
			if (ip6_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip6_entry->psrc;
		}
		if (ip6_mask->tclass)
			return -EINVAL;
		break;

	case IPV4_USER_FLOW:
		if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
		    uip_entry->ip_ver != ETH_RX_NFC_IP4)
			return -EINVAL;
		spec.match_flags = EF4_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IP);
		if (uip_mask->ip4dst) {
			if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = uip_entry->ip4dst;
		}
		if (uip_mask->ip4src) {
			if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = uip_entry->ip4src;
		}
		if (uip_mask->proto) {
			if (uip_mask->proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip_entry->proto;
		}
		break;

	case IPV6_USER_FLOW:
		if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
			return -EINVAL;
		spec.match_flags = EF4_FILTER_MATCH_ETHER_TYPE;
		spec.ether_type = htons(ETH_P_IPV6);
		if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
			if (!ip6_mask_is_full(uip6_mask->ip6dst))
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
			memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
		}
		if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
			if (!ip6_mask_is_full(uip6_mask->ip6src))
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
			memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
		}
		if (uip6_mask->l4_proto) {
			if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_IP_PROTO;
			spec.ip_proto = uip6_entry->l4_proto;
		}
		break;

	case ETHER_FLOW:
		if (!is_zero_ether_addr(mac_mask->h_dest)) {
			if (ether_addr_equal(mac_mask->h_dest,
					     mac_addr_ig_mask))
				spec.match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
			else if (is_broadcast_ether_addr(mac_mask->h_dest))
				spec.match_flags |= EF4_FILTER_MATCH_LOC_MAC;
			else
				return -EINVAL;
			ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
		}
		if (!is_zero_ether_addr(mac_mask->h_source)) {
			if (!is_broadcast_ether_addr(mac_mask->h_source))
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_REM_MAC;
			ether_addr_copy(spec.rem_mac, mac_entry->h_source);
		}
		if (mac_mask->h_proto) {
			if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EF4_FILTER_MATCH_ETHER_TYPE;
			spec.ether_type = mac_entry->h_proto;
		}
		break;

	default:
		return -EINVAL;
	}

	if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
		if (rule->m_ext.vlan_tci != htons(0xfff))
			return -EINVAL;
		spec.match_flags |= EF4_FILTER_MATCH_OUTER_VID;
		spec.outer_vid = rule->h_ext.vlan_tci;
	}

	rc = ef4_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	rule->location = rc;
	return 0;
}

static int ef4_ethtool_set_rxnfc(struct net_device *net_dev,
				 struct ethtool_rxnfc *info)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	if (ef4_filter_get_rx_id_limit(efx) == 0)
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		return ef4_ethtool_set_class_rule(efx, &info->fs);

	case ETHTOOL_SRXCLSRLDEL:
		return ef4_filter_remove_id_safe(efx, EF4_FILTER_PRI_MANUAL,
						 info->fs.location);

	default:
		return -EOPNOTSUPP;
	}
}

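/* RSS is only usable on Falcon B0 hardware and only when more than one RX
 * channel exists; report an indirection table size of zero otherwise so
 * that ethtool treats the feature as unavailable.
 */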
static u32 ef4_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	return ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0 ||
		 efx->n_rx_channels == 1) ?
		0 : ARRAY_SIZE(efx->rx_indir_table));
}

static int ef4_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
				u8 *hfunc)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (indir)
		memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
	return 0;
}

static int ef4_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
				const u8 *key, const u8 hfunc)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	/* We do not allow change in unsupported parameters */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!indir)
		return 0;

	return efx->type->rx_push_rss_config(efx, true, indir);
}

static int ef4_ethtool_get_module_eeprom(struct net_device *net_dev,
					 struct ethtool_eeprom *ee,
					 u8 *data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_eeprom(efx, ee, data);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

static int ef4_ethtool_get_module_info(struct net_device *net_dev,
				       struct ethtool_modinfo *modinfo)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int ret;

	if (!efx->phy_op || !efx->phy_op->get_module_info)
		return -EOPNOTSUPP;

	mutex_lock(&efx->mac_lock);
	ret = efx->phy_op->get_module_info(efx, modinfo);
	mutex_unlock(&efx->mac_lock);

	return ret;
}

const struct ethtool_ops ef4_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = ef4_ethtool_get_drvinfo,
	.get_regs_len = ef4_ethtool_get_regs_len,
	.get_regs = ef4_ethtool_get_regs,
	.get_msglevel = ef4_ethtool_get_msglevel,
	.set_msglevel = ef4_ethtool_set_msglevel,
	.nway_reset = ef4_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_coalesce = ef4_ethtool_get_coalesce,
	.set_coalesce = ef4_ethtool_set_coalesce,
	.get_ringparam = ef4_ethtool_get_ringparam,
	.set_ringparam = ef4_ethtool_set_ringparam,
	.get_pauseparam = ef4_ethtool_get_pauseparam,
	.set_pauseparam = ef4_ethtool_set_pauseparam,
	.get_sset_count = ef4_ethtool_get_sset_count,
	.self_test = ef4_ethtool_self_test,
	.get_strings = ef4_ethtool_get_strings,
	.set_phys_id = ef4_ethtool_phys_id,
	.get_ethtool_stats = ef4_ethtool_get_stats,
	.get_wol = ef4_ethtool_get_wol,
	.set_wol = ef4_ethtool_set_wol,
	.reset = ef4_ethtool_reset,
	.get_rxnfc = ef4_ethtool_get_rxnfc,
	.set_rxnfc = ef4_ethtool_set_rxnfc,
	.get_rxfh_indir_size = ef4_ethtool_get_rxfh_indir_size,
	.get_rxfh = ef4_ethtool_get_rxfh,
	.set_rxfh = ef4_ethtool_set_rxfh,
	.get_module_info = ef4_ethtool_get_module_info,
	.get_module_eeprom = ef4_ethtool_get_module_eeprom,
	.get_link_ksettings = ef4_ethtool_get_link_ksettings,
	.set_link_ksettings = ef4_ethtool_set_link_ksettings,
};