0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include "cna.h"
0013
0014 #include <linux/netdevice.h>
0015 #include <linux/skbuff.h>
0016 #include <linux/ethtool.h>
0017 #include <linux/rtnetlink.h>
0018
0019 #include "bna.h"
0020
0021 #include "bnad.h"
0022
0023 #define BNAD_NUM_TXF_COUNTERS 12
0024 #define BNAD_NUM_RXF_COUNTERS 10
0025 #define BNAD_NUM_CQ_COUNTERS (3 + 5)
0026 #define BNAD_NUM_RXQ_COUNTERS 7
0027 #define BNAD_NUM_TXQ_COUNTERS 5
0028
/*
 * Names for the fixed (non-per-queue) ethtool statistics, in the exact
 * order the values are written by bnad_get_ethtool_stats(): netdev
 * counters first, then driver (struct bnad_drv_stats) counters, then
 * firmware/MAC hardware counters up to the per-function stats.
 * Reordering or renaming entries changes the user-visible ethtool ABI.
 */
static const char *bnad_net_stats_strings[] = {
	/* rtnl_link_stats64-derived netdev counters */
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",
	"rx_length_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"tx_fifo_errors",

	/* driver counters (struct bnad_drv_stats) */
	"netif_queue_stop",
	"netif_queue_wakeup",
	"netif_queue_stopped",
	"tso4",
	"tso6",
	"tso_err",
	"tcpcsum_offload",
	"udpcsum_offload",
	"csum_help",
	"tx_skb_too_short",
	"tx_skb_stopping",
	"tx_skb_max_vectors",
	"tx_skb_mss_too_long",
	"tx_skb_tso_too_short",
	"tx_skb_tso_prepare",
	"tx_skb_non_tso_too_long",
	"tx_skb_tcp_hdr",
	"tx_skb_udp_hdr",
	"tx_skb_csum_err",
	"tx_skb_headlen_too_long",
	"tx_skb_headlen_zero",
	"tx_skb_frag_zero",
	"tx_skb_len_mismatch",
	"tx_skb_map_failed",
	"hw_stats_updates",
	"netif_rx_dropped",

	"link_toggle",
	"cee_toggle",

	/* driver resource-allocation failure counters */
	"rxp_info_alloc_failed",
	"mbox_intr_disabled",
	"mbox_intr_enabled",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"rxbuf_alloc_failed",
	"rxbuf_map_failed",

	/* MAC hardware rx counters */
	"mac_stats_clr_cnt",
	"mac_frame_64",
	"mac_frame_65_127",
	"mac_frame_128_255",
	"mac_frame_256_511",
	"mac_frame_512_1023",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_bytes",
	"mac_rx_packets",
	"mac_rx_fcs_error",
	"mac_rx_multicast",
	"mac_rx_broadcast",
	"mac_rx_control_frames",
	"mac_rx_pause",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_code_error",
	"mac_rx_carrier_sense_error",
	"mac_rx_undersize",
	"mac_rx_oversize",
	"mac_rx_fragments",
	"mac_rx_jabber",
	"mac_rx_drop",

	/* MAC hardware tx counters */
	"mac_tx_bytes",
	"mac_tx_packets",
	"mac_tx_multicast",
	"mac_tx_broadcast",
	"mac_tx_pause",
	"mac_tx_deferral",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
	/* NOTE(review): "muliple" is a historical typo, but this string is
	 * user-visible ethtool ABI — renaming it would break scripts. */
	"mac_tx_muliple_collision",
	"mac_tx_late_collision",
	"mac_tx_excessive_collision",
	"mac_tx_total_collision",
	"mac_tx_pause_honored",
	"mac_tx_drop",
	"mac_tx_jabber",
	"mac_tx_fcs_error",
	"mac_tx_control_frame",
	"mac_tx_oversize",
	"mac_tx_undersize",
	"mac_tx_fragments",

	/* BPC per-priority tx pause counters */
	"bpc_tx_pause_0",
	"bpc_tx_pause_1",
	"bpc_tx_pause_2",
	"bpc_tx_pause_3",
	"bpc_tx_pause_4",
	"bpc_tx_pause_5",
	"bpc_tx_pause_6",
	"bpc_tx_pause_7",
	"bpc_tx_zero_pause_0",
	"bpc_tx_zero_pause_1",
	"bpc_tx_zero_pause_2",
	"bpc_tx_zero_pause_3",
	"bpc_tx_zero_pause_4",
	"bpc_tx_zero_pause_5",
	"bpc_tx_zero_pause_6",
	"bpc_tx_zero_pause_7",
	"bpc_tx_first_pause_0",
	"bpc_tx_first_pause_1",
	"bpc_tx_first_pause_2",
	"bpc_tx_first_pause_3",
	"bpc_tx_first_pause_4",
	"bpc_tx_first_pause_5",
	"bpc_tx_first_pause_6",
	"bpc_tx_first_pause_7",

	/* BPC per-priority rx pause counters */
	"bpc_rx_pause_0",
	"bpc_rx_pause_1",
	"bpc_rx_pause_2",
	"bpc_rx_pause_3",
	"bpc_rx_pause_4",
	"bpc_rx_pause_5",
	"bpc_rx_pause_6",
	"bpc_rx_pause_7",
	"bpc_rx_zero_pause_0",
	"bpc_rx_zero_pause_1",
	"bpc_rx_zero_pause_2",
	"bpc_rx_zero_pause_3",
	"bpc_rx_zero_pause_4",
	"bpc_rx_zero_pause_5",
	"bpc_rx_zero_pause_6",
	"bpc_rx_zero_pause_7",
	"bpc_rx_first_pause_0",
	"bpc_rx_first_pause_1",
	"bpc_rx_first_pause_2",
	"bpc_rx_first_pause_3",
	"bpc_rx_first_pause_4",
	"bpc_rx_first_pause_5",
	"bpc_rx_first_pause_6",
	"bpc_rx_first_pause_7",

	/* RAD (rx admission/demux) counters */
	"rad_rx_frames",
	"rad_rx_octets",
	"rad_rx_vlan_frames",
	"rad_rx_ucast",
	"rad_rx_ucast_octets",
	"rad_rx_ucast_vlan",
	"rad_rx_mcast",
	"rad_rx_mcast_octets",
	"rad_rx_mcast_vlan",
	"rad_rx_bcast",
	"rad_rx_bcast_octets",
	"rad_rx_bcast_vlan",
	"rad_rx_drops",

	/* RLB RAD counters */
	"rlb_rad_rx_frames",
	"rlb_rad_rx_octets",
	"rlb_rad_rx_vlan_frames",
	"rlb_rad_rx_ucast",
	"rlb_rad_rx_ucast_octets",
	"rlb_rad_rx_ucast_vlan",
	"rlb_rad_rx_mcast",
	"rlb_rad_rx_mcast_octets",
	"rlb_rad_rx_mcast_vlan",
	"rlb_rad_rx_bcast",
	"rlb_rad_rx_bcast_octets",
	"rlb_rad_rx_bcast_vlan",
	"rlb_rad_rx_drops",

	/* FC rx counters */
	"fc_rx_ucast_octets",
	"fc_rx_ucast",
	"fc_rx_ucast_vlan",
	"fc_rx_mcast_octets",
	"fc_rx_mcast",
	"fc_rx_mcast_vlan",
	"fc_rx_bcast_octets",
	"fc_rx_bcast",
	"fc_rx_bcast_vlan",

	/* FC tx counters */
	"fc_tx_ucast_octets",
	"fc_tx_ucast",
	"fc_tx_ucast_vlan",
	"fc_tx_mcast_octets",
	"fc_tx_mcast",
	"fc_tx_mcast_vlan",
	"fc_tx_bcast_octets",
	"fc_tx_bcast",
	"fc_tx_bcast_vlan",
	"fc_tx_parity_errors",
	"fc_tx_timeout",
	"fc_tx_fid_parity_errors",
};
0231
0232 #define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
0233
0234 static int
0235 bnad_get_link_ksettings(struct net_device *netdev,
0236 struct ethtool_link_ksettings *cmd)
0237 {
0238 ethtool_link_ksettings_zero_link_mode(cmd, supported);
0239 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
0240
0241 ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseCR_Full);
0242 ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);
0243 ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full);
0244 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseCR_Full);
0245 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full);
0246 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full);
0247 cmd->base.autoneg = AUTONEG_DISABLE;
0248 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
0249 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
0250 cmd->base.port = PORT_FIBRE;
0251 cmd->base.phy_address = 0;
0252
0253 if (netif_carrier_ok(netdev)) {
0254 cmd->base.speed = SPEED_10000;
0255 cmd->base.duplex = DUPLEX_FULL;
0256 } else {
0257 cmd->base.speed = SPEED_UNKNOWN;
0258 cmd->base.duplex = DUPLEX_UNKNOWN;
0259 }
0260
0261 return 0;
0262 }
0263
0264 static int
0265 bnad_set_link_ksettings(struct net_device *netdev,
0266 const struct ethtool_link_ksettings *cmd)
0267 {
0268
0269 if (cmd->base.autoneg == AUTONEG_ENABLE)
0270 return -EOPNOTSUPP;
0271
0272 if ((cmd->base.speed == SPEED_10000) &&
0273 (cmd->base.duplex == DUPLEX_FULL))
0274 return 0;
0275
0276 return -EOPNOTSUPP;
0277 }
0278
/*
 * ethtool get_drvinfo: report driver name, firmware version (queried
 * from the IOC under bna_lock) and PCI bus info.
 */
static void
bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bfa_ioc_attr *ioc_attr;
	unsigned long flags;

	/* NOTE(review): strlcpy() was removed from recent kernels in favor
	 * of strscpy() — confirm against the target kernel version. */
	strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));

	/* bfa_ioc_attr is large; allocate rather than put it on the stack.
	 * On allocation failure fw_version is simply left empty. */
	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (ioc_attr) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
			sizeof(drvinfo->fw_version));
		kfree(ioc_attr);
	}

	strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
		sizeof(drvinfo->bus_info));
}
0302
0303 static void
0304 bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
0305 {
0306 wolinfo->supported = 0;
0307 wolinfo->wolopts = 0;
0308 }
0309
0310 static int bnad_get_coalesce(struct net_device *netdev,
0311 struct ethtool_coalesce *coalesce,
0312 struct kernel_ethtool_coalesce *kernel_coal,
0313 struct netlink_ext_ack *extack)
0314 {
0315 struct bnad *bnad = netdev_priv(netdev);
0316 unsigned long flags;
0317
0318
0319 spin_lock_irqsave(&bnad->bna_lock, flags);
0320 coalesce->use_adaptive_rx_coalesce =
0321 (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
0322 spin_unlock_irqrestore(&bnad->bna_lock, flags);
0323
0324 coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
0325 BFI_COALESCING_TIMER_UNIT;
0326 coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
0327 BFI_COALESCING_TIMER_UNIT;
0328 coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
0329
0330 return 0;
0331 }
0332
0333 static int bnad_set_coalesce(struct net_device *netdev,
0334 struct ethtool_coalesce *coalesce,
0335 struct kernel_ethtool_coalesce *kernel_coal,
0336 struct netlink_ext_ack *extack)
0337 {
0338 struct bnad *bnad = netdev_priv(netdev);
0339 unsigned long flags;
0340 int to_del = 0;
0341
0342 if (coalesce->rx_coalesce_usecs == 0 ||
0343 coalesce->rx_coalesce_usecs >
0344 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
0345 return -EINVAL;
0346
0347 if (coalesce->tx_coalesce_usecs == 0 ||
0348 coalesce->tx_coalesce_usecs >
0349 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
0350 return -EINVAL;
0351
0352 mutex_lock(&bnad->conf_mutex);
0353
0354
0355
0356
0357
0358 spin_lock_irqsave(&bnad->bna_lock, flags);
0359 if (coalesce->use_adaptive_rx_coalesce) {
0360 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
0361 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
0362 bnad_dim_timer_start(bnad);
0363 }
0364 } else {
0365 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
0366 bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
0367 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
0368 test_bit(BNAD_RF_DIM_TIMER_RUNNING,
0369 &bnad->run_flags)) {
0370 clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
0371 &bnad->run_flags);
0372 to_del = 1;
0373 }
0374 spin_unlock_irqrestore(&bnad->bna_lock, flags);
0375 if (to_del)
0376 del_timer_sync(&bnad->dim_timer);
0377 spin_lock_irqsave(&bnad->bna_lock, flags);
0378 bnad_rx_coalescing_timeo_set(bnad);
0379 }
0380 }
0381 if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
0382 BFI_COALESCING_TIMER_UNIT) {
0383 bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
0384 BFI_COALESCING_TIMER_UNIT;
0385 bnad_tx_coalescing_timeo_set(bnad);
0386 }
0387
0388 if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
0389 BFI_COALESCING_TIMER_UNIT) {
0390 bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
0391 BFI_COALESCING_TIMER_UNIT;
0392
0393 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
0394 bnad_rx_coalescing_timeo_set(bnad);
0395
0396 }
0397
0398
0399
0400 spin_unlock_irqrestore(&bnad->bna_lock, flags);
0401
0402 mutex_unlock(&bnad->conf_mutex);
0403 return 0;
0404 }
0405
0406 static void
0407 bnad_get_ringparam(struct net_device *netdev,
0408 struct ethtool_ringparam *ringparam,
0409 struct kernel_ethtool_ringparam *kernel_ringparam,
0410 struct netlink_ext_ack *extack)
0411 {
0412 struct bnad *bnad = netdev_priv(netdev);
0413
0414 ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
0415 ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;
0416
0417 ringparam->rx_pending = bnad->rxq_depth;
0418 ringparam->tx_pending = bnad->txq_depth;
0419 }
0420
0421 static int
0422 bnad_set_ringparam(struct net_device *netdev,
0423 struct ethtool_ringparam *ringparam,
0424 struct kernel_ethtool_ringparam *kernel_ringparam,
0425 struct netlink_ext_ack *extack)
0426 {
0427 int i, current_err, err = 0;
0428 struct bnad *bnad = netdev_priv(netdev);
0429 unsigned long flags;
0430
0431 mutex_lock(&bnad->conf_mutex);
0432 if (ringparam->rx_pending == bnad->rxq_depth &&
0433 ringparam->tx_pending == bnad->txq_depth) {
0434 mutex_unlock(&bnad->conf_mutex);
0435 return 0;
0436 }
0437
0438 if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
0439 ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
0440 !is_power_of_2(ringparam->rx_pending)) {
0441 mutex_unlock(&bnad->conf_mutex);
0442 return -EINVAL;
0443 }
0444 if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
0445 ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
0446 !is_power_of_2(ringparam->tx_pending)) {
0447 mutex_unlock(&bnad->conf_mutex);
0448 return -EINVAL;
0449 }
0450
0451 if (ringparam->rx_pending != bnad->rxq_depth) {
0452 bnad->rxq_depth = ringparam->rx_pending;
0453 if (!netif_running(netdev)) {
0454 mutex_unlock(&bnad->conf_mutex);
0455 return 0;
0456 }
0457
0458 for (i = 0; i < bnad->num_rx; i++) {
0459 if (!bnad->rx_info[i].rx)
0460 continue;
0461 bnad_destroy_rx(bnad, i);
0462 current_err = bnad_setup_rx(bnad, i);
0463 if (current_err && !err)
0464 err = current_err;
0465 }
0466
0467 if (!err && bnad->rx_info[0].rx) {
0468
0469 bnad_restore_vlans(bnad, 0);
0470 bnad_enable_default_bcast(bnad);
0471 spin_lock_irqsave(&bnad->bna_lock, flags);
0472 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
0473 spin_unlock_irqrestore(&bnad->bna_lock, flags);
0474 bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
0475 BNAD_CF_PROMISC);
0476 bnad_set_rx_mode(netdev);
0477 }
0478 }
0479 if (ringparam->tx_pending != bnad->txq_depth) {
0480 bnad->txq_depth = ringparam->tx_pending;
0481 if (!netif_running(netdev)) {
0482 mutex_unlock(&bnad->conf_mutex);
0483 return 0;
0484 }
0485
0486 for (i = 0; i < bnad->num_tx; i++) {
0487 if (!bnad->tx_info[i].tx)
0488 continue;
0489 bnad_destroy_tx(bnad, i);
0490 current_err = bnad_setup_tx(bnad, i);
0491 if (current_err && !err)
0492 err = current_err;
0493 }
0494 }
0495
0496 mutex_unlock(&bnad->conf_mutex);
0497 return err;
0498 }
0499
0500 static void
0501 bnad_get_pauseparam(struct net_device *netdev,
0502 struct ethtool_pauseparam *pauseparam)
0503 {
0504 struct bnad *bnad = netdev_priv(netdev);
0505
0506 pauseparam->autoneg = 0;
0507 pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
0508 pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
0509 }
0510
0511 static int
0512 bnad_set_pauseparam(struct net_device *netdev,
0513 struct ethtool_pauseparam *pauseparam)
0514 {
0515 struct bnad *bnad = netdev_priv(netdev);
0516 struct bna_pause_config pause_config;
0517 unsigned long flags;
0518
0519 if (pauseparam->autoneg == AUTONEG_ENABLE)
0520 return -EINVAL;
0521
0522 mutex_lock(&bnad->conf_mutex);
0523 if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
0524 pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
0525 pause_config.rx_pause = pauseparam->rx_pause;
0526 pause_config.tx_pause = pauseparam->tx_pause;
0527 spin_lock_irqsave(&bnad->bna_lock, flags);
0528 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
0529 spin_unlock_irqrestore(&bnad->bna_lock, flags);
0530 }
0531 mutex_unlock(&bnad->conf_mutex);
0532 return 0;
0533 }
0534
0535 static void bnad_get_txf_strings(u8 **string, int f_num)
0536 {
0537 ethtool_sprintf(string, "txf%d_ucast_octets", f_num);
0538 ethtool_sprintf(string, "txf%d_ucast", f_num);
0539 ethtool_sprintf(string, "txf%d_ucast_vlan", f_num);
0540 ethtool_sprintf(string, "txf%d_mcast_octets", f_num);
0541 ethtool_sprintf(string, "txf%d_mcast", f_num);
0542 ethtool_sprintf(string, "txf%d_mcast_vlan", f_num);
0543 ethtool_sprintf(string, "txf%d_bcast_octets", f_num);
0544 ethtool_sprintf(string, "txf%d_bcast", f_num);
0545 ethtool_sprintf(string, "txf%d_bcast_vlan", f_num);
0546 ethtool_sprintf(string, "txf%d_errors", f_num);
0547 ethtool_sprintf(string, "txf%d_filter_vlan", f_num);
0548 ethtool_sprintf(string, "txf%d_filter_mac_sa", f_num);
0549 }
0550
0551 static void bnad_get_rxf_strings(u8 **string, int f_num)
0552 {
0553 ethtool_sprintf(string, "rxf%d_ucast_octets", f_num);
0554 ethtool_sprintf(string, "rxf%d_ucast", f_num);
0555 ethtool_sprintf(string, "rxf%d_ucast_vlan", f_num);
0556 ethtool_sprintf(string, "rxf%d_mcast_octets", f_num);
0557 ethtool_sprintf(string, "rxf%d_mcast", f_num);
0558 ethtool_sprintf(string, "rxf%d_mcast_vlan", f_num);
0559 ethtool_sprintf(string, "rxf%d_bcast_octets", f_num);
0560 ethtool_sprintf(string, "rxf%d_bcast", f_num);
0561 ethtool_sprintf(string, "rxf%d_bcast_vlan", f_num);
0562 ethtool_sprintf(string, "rxf%d_frame_drops", f_num);
0563 }
0564
0565 static void bnad_get_cq_strings(u8 **string, int q_num)
0566 {
0567 ethtool_sprintf(string, "cq%d_producer_index", q_num);
0568 ethtool_sprintf(string, "cq%d_consumer_index", q_num);
0569 ethtool_sprintf(string, "cq%d_hw_producer_index", q_num);
0570 ethtool_sprintf(string, "cq%d_intr", q_num);
0571 ethtool_sprintf(string, "cq%d_poll", q_num);
0572 ethtool_sprintf(string, "cq%d_schedule", q_num);
0573 ethtool_sprintf(string, "cq%d_keep_poll", q_num);
0574 ethtool_sprintf(string, "cq%d_complete", q_num);
0575 }
0576
0577 static void bnad_get_rxq_strings(u8 **string, int q_num)
0578 {
0579 ethtool_sprintf(string, "rxq%d_packets", q_num);
0580 ethtool_sprintf(string, "rxq%d_bytes", q_num);
0581 ethtool_sprintf(string, "rxq%d_packets_with_error", q_num);
0582 ethtool_sprintf(string, "rxq%d_allocbuf_failed", q_num);
0583 ethtool_sprintf(string, "rxq%d_mapbuf_failed", q_num);
0584 ethtool_sprintf(string, "rxq%d_producer_index", q_num);
0585 ethtool_sprintf(string, "rxq%d_consumer_index", q_num);
0586 }
0587
0588 static void bnad_get_txq_strings(u8 **string, int q_num)
0589 {
0590 ethtool_sprintf(string, "txq%d_packets", q_num);
0591 ethtool_sprintf(string, "txq%d_bytes", q_num);
0592 ethtool_sprintf(string, "txq%d_producer_index", q_num);
0593 ethtool_sprintf(string, "txq%d_consumer_index", q_num);
0594 ethtool_sprintf(string, "txq%d_hw_consumer_index", q_num);
0595 }
0596
0597 static void
0598 bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
0599 {
0600 struct bnad *bnad = netdev_priv(netdev);
0601 int i, j, q_num;
0602 u32 bmap;
0603
0604 if (stringset != ETH_SS_STATS)
0605 return;
0606
0607 mutex_lock(&bnad->conf_mutex);
0608
0609 for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
0610 BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN));
0611 ethtool_sprintf(&string, bnad_net_stats_strings[i]);
0612 }
0613
0614 bmap = bna_tx_rid_mask(&bnad->bna);
0615 for (i = 0; bmap; i++) {
0616 if (bmap & 1)
0617 bnad_get_txf_strings(&string, i);
0618 bmap >>= 1;
0619 }
0620
0621 bmap = bna_rx_rid_mask(&bnad->bna);
0622 for (i = 0; bmap; i++, bmap >>= 1) {
0623 if (bmap & 1)
0624 bnad_get_rxf_strings(&string, i);
0625 bmap >>= 1;
0626 }
0627
0628 q_num = 0;
0629 for (i = 0; i < bnad->num_rx; i++) {
0630 if (!bnad->rx_info[i].rx)
0631 continue;
0632 for (j = 0; j < bnad->num_rxp_per_rx; j++)
0633 bnad_get_cq_strings(&string, q_num++);
0634 }
0635
0636 q_num = 0;
0637 for (i = 0; i < bnad->num_rx; i++) {
0638 if (!bnad->rx_info[i].rx)
0639 continue;
0640 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
0641 bnad_get_rxq_strings(&string, q_num++);
0642 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
0643 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
0644 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
0645 bnad_get_rxq_strings(&string, q_num++);
0646 }
0647 }
0648
0649 q_num = 0;
0650 for (i = 0; i < bnad->num_tx; i++) {
0651 if (!bnad->tx_info[i].tx)
0652 continue;
0653 for (j = 0; j < bnad->num_txq_per_tx; j++)
0654 bnad_get_txq_strings(&string, q_num++);
0655 }
0656
0657 mutex_unlock(&bnad->conf_mutex);
0658 }
0659
0660 static int
0661 bnad_get_stats_count_locked(struct net_device *netdev)
0662 {
0663 struct bnad *bnad = netdev_priv(netdev);
0664 int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
0665 u32 bmap;
0666
0667 bmap = bna_tx_rid_mask(&bnad->bna);
0668 for (i = 0; bmap; i++) {
0669 if (bmap & 1)
0670 txf_active_num++;
0671 bmap >>= 1;
0672 }
0673 bmap = bna_rx_rid_mask(&bnad->bna);
0674 for (i = 0; bmap; i++) {
0675 if (bmap & 1)
0676 rxf_active_num++;
0677 bmap >>= 1;
0678 }
0679 count = BNAD_ETHTOOL_STATS_NUM +
0680 txf_active_num * BNAD_NUM_TXF_COUNTERS +
0681 rxf_active_num * BNAD_NUM_RXF_COUNTERS;
0682
0683 for (i = 0; i < bnad->num_rx; i++) {
0684 if (!bnad->rx_info[i].rx)
0685 continue;
0686 count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
0687 count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
0688 for (j = 0; j < bnad->num_rxp_per_rx; j++)
0689 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
0690 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
0691 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
0692 count += BNAD_NUM_RXQ_COUNTERS;
0693 }
0694
0695 for (i = 0; i < bnad->num_tx; i++) {
0696 if (!bnad->tx_info[i].tx)
0697 continue;
0698 count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
0699 }
0700 return count;
0701 }
0702
/*
 * Append the per-queue (CQ, RxQ, TxQ) counters to @buf starting at
 * index @bi, in the same order that bnad_get_strings() emits the
 * matching names.  Returns the next free buffer index.
 * Called from bnad_get_ethtool_stats() with bnad->bna_lock held.
 */
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	/* CQ counters: 8 values per completion queue, matching
	 * bnad_get_cq_strings(). */
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
			    bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				/* "cq%d_consumer_index" has no backing
				 * counter here; a constant 0 is reported. */
				buf[bi++] = 0;
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);

				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_intr_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_poll_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_schedule;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_keep_poll;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_complete;
			}
	}
	/* RxQ counters: 7 values for rcb[0], plus 7 more for rcb[1]
	 * when a second rx queue is configured (matches
	 * bnad_get_rxq_strings() emission). */
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->rxq->rxbuf_map_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
				    bnad->rx_info[i].rx_ctrl[j].ccb->
				    rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->rxq->rxbuf_map_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	/* TxQ counters: 5 values per tx queue, matching
	 * bnad_get_txq_strings(). */
	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
			    bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}

	return bi;
}
0790
/*
 * ethtool -S handler: fill @buf with every statistic value, in the
 * same order as the names produced by bnad_get_strings().
 */
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi = 0;
	unsigned long flags;
	struct rtnl_link_stats64 net_stats64;
	u64 *stats64;
	u32 bmap;

	mutex_lock(&bnad->conf_mutex);
	/* Bail out if the stat layout changed since the caller sized
	 * its buffer via get_sset_count(). */
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/* Hold bna_lock so counters and queue structures stay stable
	 * for the whole fill. */
	spin_lock_irqsave(&bnad->bna_lock, flags);

	memset(&net_stats64, 0, sizeof(net_stats64));
	bnad_netdev_qstats_fill(bnad, &net_stats64);
	bnad_netdev_hwstats_fill(bnad, &net_stats64);

	/* First 14 values: rtnl_link_stats64-style netdev counters. */
	buf[bi++] = net_stats64.rx_packets;
	buf[bi++] = net_stats64.tx_packets;
	buf[bi++] = net_stats64.rx_bytes;
	buf[bi++] = net_stats64.tx_bytes;
	buf[bi++] = net_stats64.rx_errors;
	buf[bi++] = net_stats64.tx_errors;
	buf[bi++] = net_stats64.rx_dropped;
	buf[bi++] = net_stats64.tx_dropped;
	buf[bi++] = net_stats64.multicast;
	buf[bi++] = net_stats64.collisions;
	buf[bi++] = net_stats64.rx_length_errors;
	buf[bi++] = net_stats64.rx_crc_errors;
	buf[bi++] = net_stats64.rx_frame_errors;
	buf[bi++] = net_stats64.tx_fifo_errors;

	/* Snapshot the queue-stopped state into the driver stats. */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Driver counters, dumped as a flat array of u64. */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Firmware/MAC hardware counters up to the per-function stats. */
	stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
		sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Per-TxF counters for each active tx function. */
	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Per-RxF counters for each active rx function. */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Finally the per-queue (CQ/RxQ/TxQ) counters. */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
0882
0883 static int
0884 bnad_get_sset_count(struct net_device *netdev, int sset)
0885 {
0886 switch (sset) {
0887 case ETH_SS_STATS:
0888 return bnad_get_stats_count_locked(netdev);
0889 default:
0890 return -EOPNOTSUPP;
0891 }
0892 }
0893
0894 static u32
0895 bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
0896 u32 *base_offset)
0897 {
0898 struct bfa_flash_attr *flash_attr;
0899 struct bnad_iocmd_comp fcomp;
0900 u32 i, flash_part = 0, ret;
0901 unsigned long flags = 0;
0902
0903 flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
0904 if (!flash_attr)
0905 return 0;
0906
0907 fcomp.bnad = bnad;
0908 fcomp.comp_status = 0;
0909
0910 init_completion(&fcomp.comp);
0911 spin_lock_irqsave(&bnad->bna_lock, flags);
0912 ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
0913 bnad_cb_completion, &fcomp);
0914 if (ret != BFA_STATUS_OK) {
0915 spin_unlock_irqrestore(&bnad->bna_lock, flags);
0916 kfree(flash_attr);
0917 return 0;
0918 }
0919 spin_unlock_irqrestore(&bnad->bna_lock, flags);
0920 wait_for_completion(&fcomp.comp);
0921 ret = fcomp.comp_status;
0922
0923
0924 if (ret == BFA_STATUS_OK) {
0925 for (i = 0; i < flash_attr->npart; i++) {
0926 if (offset >= flash_attr->part[i].part_off &&
0927 offset < (flash_attr->part[i].part_off +
0928 flash_attr->part[i].part_size)) {
0929 flash_part = flash_attr->part[i].part_type;
0930 *base_offset = flash_attr->part[i].part_off;
0931 break;
0932 }
0933 }
0934 }
0935 kfree(flash_attr);
0936 return flash_part;
0937 }
0938
/* ethtool get_eeprom_len: the whole flash is exposed as "EEPROM". */
static int
bnad_get_eeprom_len(struct net_device *netdev)
{
	return BFA_TOTAL_FLASH_SIZE;
}
0944
/*
 * ethtool get_eeprom: read @eeprom->len bytes from flash at
 * @eeprom->offset into @bytes.  The offset is translated to a flash
 * partition + partition-relative offset first.
 */
static int
bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
		u8 *bytes)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	u32 flash_part = 0, base_offset = 0;
	unsigned long flags = 0;
	int ret = 0;

	/* ethtool convention: magic identifies the device (vendor/device). */
	eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);

	/* Map the linear offset onto a flash partition. */
	flash_part = bnad_get_flash_partition_by_offset(bnad,
				eeprom->offset, &base_offset);
	if (flash_part == 0)
		return -EFAULT;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	/* Kick off the async read under bna_lock, then wait for the
	 * completion callback outside the lock. */
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
				     bnad->id, bytes, eeprom->len,
				     eeprom->offset - base_offset,
				     bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto done;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;
done:
	return ret;
}
0984
/*
 * ethtool set_eeprom: write @eeprom->len bytes from @bytes to flash at
 * @eeprom->offset.  The caller must supply the same magic value that
 * bnad_get_eeprom() reports.
 */
static int
bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
		u8 *bytes)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	u32 flash_part = 0, base_offset = 0;
	unsigned long flags = 0;
	int ret = 0;

	/* Reject writes without the matching device magic. */
	if (eeprom->magic != (bnad->pcidev->vendor |
			     (bnad->pcidev->device << 16)))
		return -EINVAL;

	/* Map the linear offset onto a flash partition. */
	flash_part = bnad_get_flash_partition_by_offset(bnad,
				eeprom->offset, &base_offset);
	if (flash_part == 0)
		return -EFAULT;

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	/* Kick off the async update under bna_lock, then wait for the
	 * completion callback outside the lock. */
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
				       bnad->id, bytes, eeprom->len,
				       eeprom->offset - base_offset,
				       bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto done;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&fcomp.comp);
	ret = fcomp.comp_status;
done:
	return ret;
}
1026
/*
 * ethtool flash_device: load the firmware image named in @eflash->data
 * and write it to the FWIMG flash partition.
 */
static int
bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_iocmd_comp fcomp;
	const struct firmware *fw;
	int ret = 0;

	ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
	if (ret) {
		netdev_err(netdev, "can't load firmware %s\n", eflash->data);
		/* fw is NULL here; release_firmware(NULL) below is a no-op. */
		goto out;
	}

	fcomp.bnad = bnad;
	fcomp.comp_status = 0;

	/* Start the async flash update under bna_lock, then wait for
	 * the completion callback outside the lock. */
	init_completion(&fcomp.comp);
	spin_lock_irq(&bnad->bna_lock);
	ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
				       bnad->id, (u8 *)fw->data, fw->size, 0,
				       bnad_cb_completion, &fcomp);
	if (ret != BFA_STATUS_OK) {
		netdev_warn(netdev, "flash update failed with err=%d\n", ret);
		ret = -EIO;
		spin_unlock_irq(&bnad->bna_lock);
		goto out;
	}

	spin_unlock_irq(&bnad->bna_lock);
	wait_for_completion(&fcomp.comp);
	if (fcomp.comp_status != BFA_STATUS_OK) {
		ret = -EIO;
		netdev_warn(netdev,
			    "firmware image update failed with err=%d\n",
			    fcomp.comp_status);
	}
out:
	release_firmware(fw);
	return ret;
}
1068
/* ethtool operations table for BNA-based netdevs. */
static const struct ethtool_ops bnad_ethtool_ops = {
	/* Coalesce parameters bnad_set_coalesce() actually honors. */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count,
	.get_eeprom_len = bnad_get_eeprom_len,
	.get_eeprom = bnad_get_eeprom,
	.set_eeprom = bnad_set_eeprom,
	.flash_device = bnad_flash_device,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = bnad_get_link_ksettings,
	.set_link_ksettings = bnad_set_link_ksettings,
};
1093
/* Attach the BNA ethtool operations to @netdev (called at probe time). */
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &bnad_ethtool_ops;
}