0001
0002
0003
0004
0005
0006 #include <linux/firmware.h>
0007 #include <linux/mdio.h>
0008
0009 #include "cxgb4.h"
0010 #include "t4_regs.h"
0011 #include "t4fw_api.h"
0012 #include "cxgb4_cudbg.h"
0013 #include "cxgb4_filter.h"
0014 #include "cxgb4_tc_flower.h"
0015
0016 #define EEPROM_MAGIC 0x38E2F10C
0017
0018 static u32 get_msglevel(struct net_device *dev)
0019 {
0020 return netdev2adap(dev)->msg_enable;
0021 }
0022
0023 static void set_msglevel(struct net_device *dev, u32 val)
0024 {
0025 netdev2adap(dev)->msg_enable = val;
0026 }
0027
/* Indices of the ethtool self-tests; MAX is the number of tests exposed. */
enum cxgb4_ethtool_tests {
	CXGB4_ETHTOOL_LB_TEST,
	CXGB4_ETHTOOL_MAX_TEST,
};

/* Names reported for ETH_SS_TEST; order matches enum cxgb4_ethtool_tests. */
static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = {
	"Loop back test (offline)",
};

/* Human-readable names of the flashable regions of the adapter. */
static const char * const flash_region_strings[] = {
	"All",
	"Firmware",
	"PHY Firmware",
	"Boot",
	"Boot CFG",
};
0044
/* Per-port statistic names for ETH_SS_STATS.  The layout must match the
 * data emitted by get_stats(): first the hardware port stats (struct
 * port_stats), then the software queue stats (struct queue_port_stats,
 * from "tso" onwards), in the same order.
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	"tx_octets_ok ",
	"tx_frames_ok ",
	"tx_broadcast_frames ",
	"tx_multicast_frames ",
	"tx_unicast_frames ",
	"tx_error_frames ",

	"tx_frames_64 ",
	"tx_frames_65_to_127 ",
	"tx_frames_128_to_255 ",
	"tx_frames_256_to_511 ",
	"tx_frames_512_to_1023 ",
	"tx_frames_1024_to_1518 ",
	"tx_frames_1519_to_max ",

	"tx_frames_dropped ",
	"tx_pause_frames ",
	"tx_ppp0_frames ",
	"tx_ppp1_frames ",
	"tx_ppp2_frames ",
	"tx_ppp3_frames ",
	"tx_ppp4_frames ",
	"tx_ppp5_frames ",
	"tx_ppp6_frames ",
	"tx_ppp7_frames ",

	"rx_octets_ok ",
	"rx_frames_ok ",
	"rx_broadcast_frames ",
	"rx_multicast_frames ",
	"rx_unicast_frames ",

	"rx_frames_too_long ",
	"rx_jabber_errors ",
	"rx_fcs_errors ",
	"rx_length_errors ",
	"rx_symbol_errors ",
	"rx_runt_frames ",

	"rx_frames_64 ",
	"rx_frames_65_to_127 ",
	"rx_frames_128_to_255 ",
	"rx_frames_256_to_511 ",
	"rx_frames_512_to_1023 ",
	"rx_frames_1024_to_1518 ",
	"rx_frames_1519_to_max ",

	"rx_pause_frames ",
	"rx_ppp0_frames ",
	"rx_ppp1_frames ",
	"rx_ppp2_frames ",
	"rx_ppp3_frames ",
	"rx_ppp4_frames ",
	"rx_ppp5_frames ",
	"rx_ppp6_frames ",
	"rx_ppp7_frames ",

	"rx_bg0_frames_dropped ",
	"rx_bg1_frames_dropped ",
	"rx_bg2_frames_dropped ",
	"rx_bg3_frames_dropped ",
	"rx_bg0_frames_trunc ",
	"rx_bg1_frames_trunc ",
	"rx_bg2_frames_trunc ",
	"rx_bg3_frames_trunc ",

	/* Software (queue) statistics below; see struct queue_port_stats. */
	"tso ",
	"uso ",
	"tx_csum_offload ",
	"rx_csum_good ",
	"vlan_extractions ",
	"vlan_insertions ",
	"gro_packets ",
	"gro_merged ",
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	"tx_tls_encrypted_packets",
	"tx_tls_encrypted_bytes ",
	"tx_tls_ctx ",
	"tx_tls_ooo ",
	"tx_tls_skip_no_sync_data",
	"tx_tls_drop_no_sync_data",
	"tx_tls_drop_bypass_req ",
#endif
};
0130
0131 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
0132 "db_drop ",
0133 "db_full ",
0134 "db_empty ",
0135 "write_coal_success ",
0136 "write_coal_fail ",
0137 };
0138
0139 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
0140 "-------Loopback----------- ",
0141 "octets_ok ",
0142 "frames_ok ",
0143 "bcast_frames ",
0144 "mcast_frames ",
0145 "ucast_frames ",
0146 "error_frames ",
0147 "frames_64 ",
0148 "frames_65_to_127 ",
0149 "frames_128_to_255 ",
0150 "frames_256_to_511 ",
0151 "frames_512_to_1023 ",
0152 "frames_1024_to_1518 ",
0153 "frames_1519_to_max ",
0154 "frames_dropped ",
0155 "bg0_frames_dropped ",
0156 "bg1_frames_dropped ",
0157 "bg2_frames_dropped ",
0158 "bg3_frames_dropped ",
0159 "bg0_frames_trunc ",
0160 "bg1_frames_trunc ",
0161 "bg2_frames_trunc ",
0162 "bg3_frames_trunc ",
0163 };
0164
0165 static const char cxgb4_priv_flags_strings[][ETH_GSTRING_LEN] = {
0166 [PRIV_FLAG_PORT_TX_VM_BIT] = "port_tx_vm_wr",
0167 };
0168
0169 static int get_sset_count(struct net_device *dev, int sset)
0170 {
0171 switch (sset) {
0172 case ETH_SS_STATS:
0173 return ARRAY_SIZE(stats_strings) +
0174 ARRAY_SIZE(adapter_stats_strings) +
0175 ARRAY_SIZE(loopback_stats_strings);
0176 case ETH_SS_PRIV_FLAGS:
0177 return ARRAY_SIZE(cxgb4_priv_flags_strings);
0178 case ETH_SS_TEST:
0179 return ARRAY_SIZE(cxgb4_selftest_strings);
0180 default:
0181 return -EOPNOTSUPP;
0182 }
0183 }
0184
0185 static int get_regs_len(struct net_device *dev)
0186 {
0187 struct adapter *adap = netdev2adap(dev);
0188
0189 return t4_get_regs_len(adap);
0190 }
0191
0192 static int get_eeprom_len(struct net_device *dev)
0193 {
0194 return EEPROMSIZE;
0195 }
0196
0197 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
0198 {
0199 struct adapter *adapter = netdev2adap(dev);
0200 u32 exprom_vers;
0201
0202 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
0203 strlcpy(info->bus_info, pci_name(adapter->pdev),
0204 sizeof(info->bus_info));
0205 info->regdump_len = get_regs_len(dev);
0206
0207 if (adapter->params.fw_vers)
0208 snprintf(info->fw_version, sizeof(info->fw_version),
0209 "%u.%u.%u.%u, TP %u.%u.%u.%u",
0210 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
0211 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
0212 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
0213 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
0214 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
0215 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
0216 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
0217 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
0218
0219 if (!t4_get_exprom_version(adapter, &exprom_vers))
0220 snprintf(info->erom_version, sizeof(info->erom_version),
0221 "%u.%u.%u.%u",
0222 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
0223 FW_HDR_FW_VER_MINOR_G(exprom_vers),
0224 FW_HDR_FW_VER_MICRO_G(exprom_vers),
0225 FW_HDR_FW_VER_BUILD_G(exprom_vers));
0226 info->n_priv_flags = ARRAY_SIZE(cxgb4_priv_flags_strings);
0227 }
0228
0229 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
0230 {
0231 if (stringset == ETH_SS_STATS) {
0232 memcpy(data, stats_strings, sizeof(stats_strings));
0233 data += sizeof(stats_strings);
0234 memcpy(data, adapter_stats_strings,
0235 sizeof(adapter_stats_strings));
0236 data += sizeof(adapter_stats_strings);
0237 memcpy(data, loopback_stats_strings,
0238 sizeof(loopback_stats_strings));
0239 } else if (stringset == ETH_SS_PRIV_FLAGS) {
0240 memcpy(data, cxgb4_priv_flags_strings,
0241 sizeof(cxgb4_priv_flags_strings));
0242 } else if (stringset == ETH_SS_TEST) {
0243 memcpy(data, cxgb4_selftest_strings,
0244 sizeof(cxgb4_selftest_strings));
0245 }
0246 }
0247
0248
0249
0250
/* Software statistics accumulated from a port's SGE queues.  Field order
 * must match the corresponding tail entries of stats_strings[].
 */
struct queue_port_stats {
	u64 tso;	/* TCP segmentation offloads */
	u64 uso;	/* UDP segmentation offloads */
	u64 tx_csum;	/* TX checksum offloads */
	u64 rx_csum;	/* RX good-checksum packets */
	u64 vlan_ex;	/* VLAN tag extractions */
	u64 vlan_ins;	/* VLAN tag insertions */
	u64 gro_pkts;	/* GRO/LRO packets */
	u64 gro_merged;	/* GRO/LRO merged segments */
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	/* kTLS TX offload counters (per port). */
	u64 tx_tls_encrypted_packets;
	u64 tx_tls_encrypted_bytes;
	u64 tx_tls_ctx;
	u64 tx_tls_ooo;
	u64 tx_tls_skip_no_sync_data;
	u64 tx_tls_drop_no_sync_data;
	u64 tx_tls_drop_bypass_req;
#endif
};

/* Adapter-wide statistics; field order must match adapter_stats_strings[]. */
struct adapter_stats {
	u64 db_drop;	/* doorbell drops */
	u64 db_full;	/* doorbell FIFO full events */
	u64 db_empty;	/* doorbell FIFO empty events */
	u64 wc_success;	/* write-combining successes */
	u64 wc_fail;	/* write-combining failures */
};
0278
/* Sum the software statistics of all of port @p's queue sets into @s.
 * Covers the regular Ethernet TX/RX queues, the ETHOFLD TX queues (when
 * allocated) and, if kTLS is built in, this port's kTLS debug counters.
 */
static void collect_sge_port_stats(const struct adapter *adap,
				   const struct port_info *p,
				   struct queue_port_stats *s)
{
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	const struct ch_ktls_port_stats_debug *ktls_stats;
#endif
	struct sge_eohw_txq *eohw_tx;
	unsigned int i;

	memset(s, 0, sizeof(*s));
	/* Accumulate over this port's contiguous range of queue sets. */
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->uso += tx->uso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}

	/* ETHOFLD TX queues exist only when TC-MQPRIO offload is in use. */
	if (adap->sge.eohw_txq) {
		eohw_tx = &adap->sge.eohw_txq[p->first_qset];
		for (i = 0; i < p->nqsets; i++, eohw_tx++) {
			s->tso += eohw_tx->tso;
			s->uso += eohw_tx->uso;
			s->tx_csum += eohw_tx->tx_cso;
			s->vlan_ins += eohw_tx->vlan_ins;
		}
	}
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	/* kTLS counters are kept per port, not per queue set. */
	ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
	s->tx_tls_encrypted_packets =
		atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
	s->tx_tls_encrypted_bytes =
		atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
	s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
	s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
	s->tx_tls_skip_no_sync_data =
		atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
	s->tx_tls_drop_no_sync_data =
		atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
	s->tx_tls_drop_bypass_req =
		atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
#endif
}
0328
0329 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
0330 {
0331 u64 val1, val2;
0332
0333 memset(s, 0, sizeof(*s));
0334
0335 s->db_drop = adap->db_stats.db_drop;
0336 s->db_full = adap->db_stats.db_full;
0337 s->db_empty = adap->db_stats.db_empty;
0338
0339 if (!is_t4(adap->params.chip)) {
0340 int v;
0341
0342 v = t4_read_reg(adap, SGE_STAT_CFG_A);
0343 if (STATSOURCE_T5_G(v) == 7) {
0344 val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
0345 val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
0346 s->wc_success = val1 - val2;
0347 s->wc_fail = val2;
0348 }
0349 }
0350 }
0351
/* ethtool ->get_ethtool_stats: fill @data in the exact order of the
 * ETH_SS_STATS string tables — hardware port stats (struct port_stats),
 * software queue stats (struct queue_port_stats), adapter-wide stats
 * (struct adapter_stats), then the port-id banner followed by the
 * loopback-channel stats.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct lb_port_stats s;
	int i;
	u64 *p0;

	/* Hardware port stats, offset by the snapshot taken at init time. */
	t4_get_port_stats_offset(adapter, pi->tx_chan,
				 (struct port_stats *)data,
				 &pi->stats_base);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	collect_adapter_stats(adapter, (struct adapter_stats *)data);
	data += sizeof(struct adapter_stats) / sizeof(u64);

	/* First loopback slot carries the port id (banner string entry). */
	*data++ = (u64)pi->port_id;
	memset(&s, 0, sizeof(s));
	t4_get_lb_stats(adapter, pi->port_id, &s);

	/* Copy the remaining loopback counters; treats struct lb_port_stats
	 * as a contiguous run of u64s starting at .octets — NOTE(review):
	 * relies on the struct layout matching loopback_stats_strings order.
	 */
	p0 = &s.octets;
	for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
		*data++ = (unsigned long long)*p0++;
}
0379
0380 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0381 void *buf)
0382 {
0383 struct adapter *adap = netdev2adap(dev);
0384 size_t buf_size;
0385
0386 buf_size = t4_get_regs_len(adap);
0387 regs->version = mk_adap_vers(adap);
0388 t4_get_regs(adap, buf, buf_size);
0389 }
0390
0391 static int restart_autoneg(struct net_device *dev)
0392 {
0393 struct port_info *p = netdev_priv(dev);
0394
0395 if (!netif_running(dev))
0396 return -EAGAIN;
0397 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
0398 return -EINVAL;
0399 t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
0400 return 0;
0401 }
0402
0403 static int identify_port(struct net_device *dev,
0404 enum ethtool_phys_id_state state)
0405 {
0406 unsigned int val;
0407 struct adapter *adap = netdev2adap(dev);
0408
0409 if (state == ETHTOOL_ID_ACTIVE)
0410 val = 0xffff;
0411 else if (state == ETHTOOL_ID_INACTIVE)
0412 val = 0;
0413 else
0414 return -EINVAL;
0415
0416 return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
0417 }
0418
0419
0420
0421
0422
0423
0424
0425
0426 static int from_fw_port_mod_type(enum fw_port_type port_type,
0427 enum fw_port_module_type mod_type)
0428 {
0429 if (port_type == FW_PORT_TYPE_BT_SGMII ||
0430 port_type == FW_PORT_TYPE_BT_XFI ||
0431 port_type == FW_PORT_TYPE_BT_XAUI) {
0432 return PORT_TP;
0433 } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
0434 port_type == FW_PORT_TYPE_FIBER_XAUI) {
0435 return PORT_FIBRE;
0436 } else if (port_type == FW_PORT_TYPE_SFP ||
0437 port_type == FW_PORT_TYPE_QSFP_10G ||
0438 port_type == FW_PORT_TYPE_QSA ||
0439 port_type == FW_PORT_TYPE_QSFP ||
0440 port_type == FW_PORT_TYPE_CR4_QSFP ||
0441 port_type == FW_PORT_TYPE_CR_QSFP ||
0442 port_type == FW_PORT_TYPE_CR2_QSFP ||
0443 port_type == FW_PORT_TYPE_SFP28) {
0444 if (mod_type == FW_PORT_MOD_TYPE_LR ||
0445 mod_type == FW_PORT_MOD_TYPE_SR ||
0446 mod_type == FW_PORT_MOD_TYPE_ER ||
0447 mod_type == FW_PORT_MOD_TYPE_LRM)
0448 return PORT_FIBRE;
0449 else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
0450 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
0451 return PORT_DA;
0452 else
0453 return PORT_OTHER;
0454 } else if (port_type == FW_PORT_TYPE_KR4_100G ||
0455 port_type == FW_PORT_TYPE_KR_SFP28 ||
0456 port_type == FW_PORT_TYPE_KR_XLAUI) {
0457 return PORT_NONE;
0458 }
0459
0460 return PORT_OTHER;
0461 }
0462
0463
0464
0465
0466
0467
0468
0469
0470 static unsigned int speed_to_fw_caps(int speed)
0471 {
0472 if (speed == 100)
0473 return FW_PORT_CAP32_SPEED_100M;
0474 if (speed == 1000)
0475 return FW_PORT_CAP32_SPEED_1G;
0476 if (speed == 10000)
0477 return FW_PORT_CAP32_SPEED_10G;
0478 if (speed == 25000)
0479 return FW_PORT_CAP32_SPEED_25G;
0480 if (speed == 40000)
0481 return FW_PORT_CAP32_SPEED_40G;
0482 if (speed == 50000)
0483 return FW_PORT_CAP32_SPEED_50G;
0484 if (speed == 100000)
0485 return FW_PORT_CAP32_SPEED_100G;
0486 if (speed == 200000)
0487 return FW_PORT_CAP32_SPEED_200G;
0488 if (speed == 400000)
0489 return FW_PORT_CAP32_SPEED_400G;
0490 return 0;
0491 }
0492
0493
0494
0495
0496
0497
0498
0499
0500
0501
/**
 * fw_caps_to_lmm - translate Firmware Port Capabilities into an
 *	ethtool Link Mode Mask
 * @port_type: Firmware Port Type of the port
 * @fw_caps: Firmware Port Capabilities to translate
 * @link_mode_mask: ethtool Link Mode Mask bitmap to set bits in
 *
 * Sets the connector, speed, FEC, autoneg and pause bits in
 * @link_mode_mask that correspond to @fw_caps for the given port type.
 * Bits are only ever set, never cleared — callers zero the mask first.
 */
static void fw_caps_to_lmm(enum fw_port_type port_type,
			   fw_port_cap32_t fw_caps,
			   unsigned long *link_mode_mask)
{
	/* Set one ethtool link-mode bit unconditionally. */
	#define SET_LMM(__lmm_name) \
		do { \
			__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
				  link_mode_mask); \
		} while (0)

	/* Set an ethtool link-mode bit if the firmware capability is set. */
	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
		do { \
			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
				SET_LMM(__lmm_name); \
		} while (0)

	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		SET_LMM(TP);
		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_KR:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP_AP:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP4_AP:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
		break;

	case FW_PORT_TYPE_KR_SFP28:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
		break;

	case FW_PORT_TYPE_KR_XLAUI:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
		break;

	case FW_PORT_TYPE_CR2_QSFP:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
		FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
		FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
		break;

	default:
		break;
	}

	/* Advertise the FEC modes the hardware supports; with no FEC
	 * capability at all, report FEC as off.
	 */
	if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
		FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
		FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
	} else {
		SET_LMM(FEC_NONE);
	}

	FW_CAPS_TO_LMM(ANEG, Autoneg);
	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);

	#undef FW_CAPS_TO_LMM
	#undef SET_LMM
}
0629
0630
0631
0632
0633
0634
0635
0636
0637
/**
 * lmm_to_fw_caps - translate an ethtool Link Mode Mask into Firmware
 *	Port Capabilities
 * @link_mode_mask: ethtool Link Mode Mask bitmap
 *
 * Returns the FW_PORT_CAP32_SPEED_* bits corresponding to the speed
 * link modes set in @link_mode_mask.  Only one representative link mode
 * per speed is inspected.
 */
static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
{
	unsigned int fw_caps = 0;

	/* OR in a firmware capability if the ethtool link mode bit is set. */
	#define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
		do { \
			if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
				     link_mode_mask)) \
				fw_caps |= FW_PORT_CAP32_ ## __fw_name; \
		} while (0)

	LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
	LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
	LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
	LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
	LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
	LMM_TO_FW_CAPS(50000baseCR2_Full, SPEED_50G);
	LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);

	#undef LMM_TO_FW_CAPS

	return fw_caps;
}
0661
/* ethtool ->get_link_ksettings: report connector type, MDIO addressing,
 * supported/advertised/peer link modes, speed, duplex and autoneg state.
 */
static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *link_ksettings)
{
	struct port_info *pi = netdev_priv(dev);
	struct ethtool_link_settings *base = &link_ksettings->base;

	/* If the interface isn't up, poke the firmware for fresh port info
	 * since we won't be getting asynchronous link updates.  Errors are
	 * deliberately ignored — we fall back on whatever state we have.
	 */
	if (!netif_running(dev))
		(void)t4_update_port_info(pi);

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);

	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);

	/* A negative mdio_addr means no MDIO access to the PHY. */
	if (pi->mdio_addr >= 0) {
		base->phy_address = pi->mdio_addr;
		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
				      ? ETH_MDIO_SUPPORTS_C22
				      : ETH_MDIO_SUPPORTS_C45);
	} else {
		base->phy_address = 255;
		base->mdio_support = 0;
	}

	/* Supported from the physical capabilities, advertising from the
	 * currently configured/negotiated capabilities, lp_advertising
	 * from the link partner's capabilities.
	 */
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
		       link_ksettings->link_modes.supported);
	fw_caps_to_lmm(pi->port_type,
		       t4_link_acaps(pi->adapter,
				     pi->lport,
				     &pi->link_cfg),
		       link_ksettings->link_modes.advertising);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
		       link_ksettings->link_modes.lp_advertising);

	base->speed = (netif_carrier_ok(dev)
		       ? pi->link_cfg.speed
		       : SPEED_UNKNOWN);
	base->duplex = DUPLEX_FULL;

	base->autoneg = pi->link_cfg.autoneg;
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
	if (pi->link_cfg.autoneg)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	return 0;
}
0716
/* ethtool ->set_link_ksettings: apply the requested speed/autoneg settings.
 * Only full duplex is supported.  On firmware failure the previous link
 * configuration is restored.
 */
static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *link_ksettings)
{
	struct port_info *pi = netdev_priv(dev);
	struct link_config *lc = &pi->link_cfg;
	const struct ethtool_link_settings *base = &link_ksettings->base;
	struct link_config old_lc;
	unsigned int fw_caps;
	int ret = 0;

	/* The hardware only does full duplex. */
	if (base->duplex != DUPLEX_FULL)
		return -EINVAL;

	/* Snapshot so we can roll back on firmware failure. */
	old_lc = *lc;
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) ||
	    base->autoneg == AUTONEG_DISABLE) {
		fw_caps = speed_to_fw_caps(base->speed);

		/* Forced speed must be physically supported. */
		if (!(lc->pcaps & fw_caps))
			return -EINVAL;

		lc->speed_caps = fw_caps;
		lc->acaps = fw_caps;
	} else {
		/* Autoneg: advertise the requested modes (must be a subset
		 * of the physical capabilities) and let the link negotiate.
		 */
		fw_caps =
			lmm_to_fw_caps(link_ksettings->link_modes.advertising);
		if (!(lc->pcaps & fw_caps))
			return -EINVAL;
		lc->speed_caps = 0;
		lc->acaps = fw_caps | FW_PORT_CAP32_ANEG;
	}
	lc->autoneg = base->autoneg;

	/* Push the new configuration to the firmware; restore the previous
	 * configuration if that fails.
	 */
	ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
	if (ret)
		*lc = old_lc;

	return ret;
}
0761
0762
0763 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
0764 {
0765 unsigned int eth_fec = 0;
0766
0767 if (fw_fec & FW_PORT_CAP32_FEC_RS)
0768 eth_fec |= ETHTOOL_FEC_RS;
0769 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
0770 eth_fec |= ETHTOOL_FEC_BASER;
0771
0772
0773 if (!eth_fec)
0774 eth_fec = ETHTOOL_FEC_OFF;
0775
0776 return eth_fec;
0777 }
0778
0779
0780 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
0781 {
0782 unsigned int eth_fec = 0;
0783
0784 if (cc_fec & FEC_AUTO)
0785 eth_fec |= ETHTOOL_FEC_AUTO;
0786 if (cc_fec & FEC_RS)
0787 eth_fec |= ETHTOOL_FEC_RS;
0788 if (cc_fec & FEC_BASER_RS)
0789 eth_fec |= ETHTOOL_FEC_BASER;
0790
0791
0792 if (!eth_fec)
0793 eth_fec = ETHTOOL_FEC_OFF;
0794
0795 return eth_fec;
0796 }
0797
0798
0799 static inline unsigned int eth_to_cc_fec(unsigned int eth_fec)
0800 {
0801 unsigned int cc_fec = 0;
0802
0803 if (eth_fec & ETHTOOL_FEC_OFF)
0804 return cc_fec;
0805
0806 if (eth_fec & ETHTOOL_FEC_AUTO)
0807 cc_fec |= FEC_AUTO;
0808 if (eth_fec & ETHTOOL_FEC_RS)
0809 cc_fec |= FEC_RS;
0810 if (eth_fec & ETHTOOL_FEC_BASER)
0811 cc_fec |= FEC_BASER_RS;
0812
0813 return cc_fec;
0814 }
0815
0816 static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
0817 {
0818 const struct port_info *pi = netdev_priv(dev);
0819 const struct link_config *lc = &pi->link_cfg;
0820
0821
0822
0823
0824
0825 fec->fec = fwcap_to_eth_fec(lc->pcaps);
0826 if (fec->fec != ETHTOOL_FEC_OFF)
0827 fec->fec |= ETHTOOL_FEC_AUTO;
0828
0829
0830
0831
0832 fec->active_fec = cc_to_eth_fec(lc->fec);
0833
0834 return 0;
0835 }
0836
0837 static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec)
0838 {
0839 struct port_info *pi = netdev_priv(dev);
0840 struct link_config *lc = &pi->link_cfg;
0841 struct link_config old_lc;
0842 int ret;
0843
0844
0845
0846
0847 old_lc = *lc;
0848
0849
0850
0851
0852 lc->requested_fec = eth_to_cc_fec(fec->fec);
0853 ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox,
0854 pi->tx_chan, lc);
0855 if (ret)
0856 *lc = old_lc;
0857 return ret;
0858 }
0859
0860 static void get_pauseparam(struct net_device *dev,
0861 struct ethtool_pauseparam *epause)
0862 {
0863 struct port_info *p = netdev_priv(dev);
0864
0865 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
0866 epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0;
0867 epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0;
0868 }
0869
0870 static int set_pauseparam(struct net_device *dev,
0871 struct ethtool_pauseparam *epause)
0872 {
0873 struct port_info *p = netdev_priv(dev);
0874 struct link_config *lc = &p->link_cfg;
0875
0876 if (epause->autoneg == AUTONEG_DISABLE)
0877 lc->requested_fc = 0;
0878 else if (lc->pcaps & FW_PORT_CAP32_ANEG)
0879 lc->requested_fc = PAUSE_AUTONEG;
0880 else
0881 return -EINVAL;
0882
0883 if (epause->rx_pause)
0884 lc->requested_fc |= PAUSE_RX;
0885 if (epause->tx_pause)
0886 lc->requested_fc |= PAUSE_TX;
0887 if (netif_running(dev))
0888 return t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan,
0889 lc);
0890 return 0;
0891 }
0892
0893 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
0894 struct kernel_ethtool_ringparam *kernel_e,
0895 struct netlink_ext_ack *extack)
0896 {
0897 const struct port_info *pi = netdev_priv(dev);
0898 const struct sge *s = &pi->adapter->sge;
0899
0900 e->rx_max_pending = MAX_RX_BUFFERS;
0901 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
0902 e->rx_jumbo_max_pending = 0;
0903 e->tx_max_pending = MAX_TXQ_ENTRIES;
0904
0905 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
0906 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
0907 e->rx_jumbo_pending = 0;
0908 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
0909 }
0910
0911 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
0912 struct kernel_ethtool_ringparam *kernel_e,
0913 struct netlink_ext_ack *extack)
0914 {
0915 int i;
0916 const struct port_info *pi = netdev_priv(dev);
0917 struct adapter *adapter = pi->adapter;
0918 struct sge *s = &adapter->sge;
0919
0920 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
0921 e->tx_pending > MAX_TXQ_ENTRIES ||
0922 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
0923 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
0924 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
0925 return -EINVAL;
0926
0927 if (adapter->flags & CXGB4_FULL_INIT_DONE)
0928 return -EBUSY;
0929
0930 for (i = 0; i < pi->nqsets; ++i) {
0931 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
0932 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
0933 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
0934 }
0935 return 0;
0936 }
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946 static int set_rx_intr_params(struct net_device *dev,
0947 unsigned int us, unsigned int cnt)
0948 {
0949 int i, err;
0950 struct port_info *pi = netdev_priv(dev);
0951 struct adapter *adap = pi->adapter;
0952 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
0953
0954 for (i = 0; i < pi->nqsets; i++, q++) {
0955 err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
0956 if (err)
0957 return err;
0958 }
0959 return 0;
0960 }
0961
0962 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
0963 {
0964 int i;
0965 struct port_info *pi = netdev_priv(dev);
0966 struct adapter *adap = pi->adapter;
0967 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
0968
0969 for (i = 0; i < pi->nqsets; i++, q++)
0970 q->rspq.adaptive_rx = adaptive_rx;
0971
0972 return 0;
0973 }
0974
0975 static int get_adaptive_rx_setting(struct net_device *dev)
0976 {
0977 struct port_info *pi = netdev_priv(dev);
0978 struct adapter *adap = pi->adapter;
0979 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
0980
0981 return q->rspq.adaptive_rx;
0982 }
0983
0984
0985
0986
0987 static int get_dbqtimer_tick(struct net_device *dev)
0988 {
0989 struct port_info *pi = netdev_priv(dev);
0990 struct adapter *adap = pi->adapter;
0991
0992 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
0993 return 0;
0994
0995 return adap->sge.dbqtimer_tick;
0996 }
0997
0998
0999
1000
1001 static int get_dbqtimer(struct net_device *dev)
1002 {
1003 struct port_info *pi = netdev_priv(dev);
1004 struct adapter *adap = pi->adapter;
1005 struct sge_eth_txq *txq;
1006
1007 txq = &adap->sge.ethtxq[pi->first_qset];
1008
1009 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1010 return 0;
1011
1012
1013 return adap->sge.dbqtimer_val[txq->dbqtimerix];
1014 }
1015
1016
1017
1018
1019
1020
1021
1022
1023 static int set_dbqtimer_tick(struct net_device *dev, int usecs)
1024 {
1025 struct port_info *pi = netdev_priv(dev);
1026 struct adapter *adap = pi->adapter;
1027 struct sge *s = &adap->sge;
1028 u32 param, val;
1029 int ret;
1030
1031 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1032 return 0;
1033
1034
1035 if (s->dbqtimer_tick == usecs)
1036 return 0;
1037
1038
1039 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
1040 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
1041 val = usecs;
1042 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
1043 if (ret)
1044 return ret;
1045 s->dbqtimer_tick = usecs;
1046
1047
1048 ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val),
1049 s->dbqtimer_val);
1050 return ret;
1051 }
1052
1053
1054
1055
1056
1057 static int set_dbqtimer(struct net_device *dev, int usecs)
1058 {
1059 int qix, timerix, min_timerix, delta, min_delta;
1060 struct port_info *pi = netdev_priv(dev);
1061 struct adapter *adap = pi->adapter;
1062 struct sge *s = &adap->sge;
1063 struct sge_eth_txq *txq;
1064 u32 param, val;
1065 int ret;
1066
1067 if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
1068 return 0;
1069
1070
1071
1072
1073 min_delta = INT_MAX;
1074 min_timerix = 0;
1075 for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) {
1076 delta = s->dbqtimer_val[timerix] - usecs;
1077 if (delta < 0)
1078 delta = -delta;
1079 if (delta < min_delta) {
1080 min_delta = delta;
1081 min_timerix = timerix;
1082 }
1083 }
1084
1085
1086
1087
1088
1089 txq = &s->ethtxq[pi->first_qset];
1090 if (txq->dbqtimerix == min_timerix)
1091 return 0;
1092
1093 for (qix = 0; qix < pi->nqsets; qix++, txq++) {
1094 if (adap->flags & CXGB4_FULL_INIT_DONE) {
1095 param =
1096 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1097 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
1098 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
1099 val = min_timerix;
1100 ret = t4_set_params(adap, adap->mbox, adap->pf, 0,
1101 1, ¶m, &val);
1102 if (ret)
1103 return ret;
1104 }
1105 txq->dbqtimerix = min_timerix;
1106 }
1107 return 0;
1108 }
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
/* Set the SGE Doorbell Queue Timer Tick and then the timer value for all
 * ports.  Because changing the Tick rescales every timer, the current
 * per-port timer values are captured first and re-established
 * (approximately) under the new Tick.
 */
static int set_dbqtimer_tickval(struct net_device *dev,
				int tick_usecs, int timer_usecs)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int timer[MAX_NPORTS];
	unsigned int port;
	int ret;

	/* Remember the timer to apply per port: the new value for this
	 * port, the currently configured value for every other port.
	 */
	for_each_port(adap, port)
		if (port == pi->port_id)
			timer[port] = timer_usecs;
		else
			timer[port] = get_dbqtimer(adap->port[port]);

	/* Change the global Tick first ... */
	ret = set_dbqtimer_tick(dev, tick_usecs);
	if (ret)
		return ret;

	/* ... then re-apply each port's timer under the new Tick. */
	for_each_port(adap, port) {
		ret = set_dbqtimer(adap->port[port], timer[port]);
		if (ret)
			return ret;
	}

	return 0;
}
1152
1153 static int set_coalesce(struct net_device *dev,
1154 struct ethtool_coalesce *coalesce,
1155 struct kernel_ethtool_coalesce *kernel_coal,
1156 struct netlink_ext_ack *extack)
1157 {
1158 int ret;
1159
1160 set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce);
1161
1162 ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs,
1163 coalesce->rx_max_coalesced_frames);
1164 if (ret)
1165 return ret;
1166
1167 return set_dbqtimer_tickval(dev,
1168 coalesce->tx_coalesce_usecs_irq,
1169 coalesce->tx_coalesce_usecs);
1170 }
1171
1172 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
1173 struct kernel_ethtool_coalesce *kernel_coal,
1174 struct netlink_ext_ack *extack)
1175 {
1176 const struct port_info *pi = netdev_priv(dev);
1177 const struct adapter *adap = pi->adapter;
1178 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1179
1180 c->rx_coalesce_usecs = qtimer_val(adap, rq);
1181 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
1182 adap->sge.counter_val[rq->pktcnt_idx] : 0;
1183 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
1184 c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev);
1185 c->tx_coalesce_usecs = get_dbqtimer(dev);
1186 return 0;
1187 }
1188
1189
1190
1191 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1192 {
1193 int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1194
1195 if (vaddr >= 0)
1196 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1197 return vaddr < 0 ? vaddr : 0;
1198 }
1199
1200 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1201 {
1202 int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
1203
1204 if (vaddr >= 0)
1205 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1206 return vaddr < 0 ? vaddr : 0;
1207 }
1208
1209 #define EEPROM_MAGIC 0x38E2F10C
1210
/* ethtool get_eeprom handler: read @e->len bytes of the serial EEPROM
 * starting at @e->offset into @data.
 *
 * The VPD interface reads whole aligned 32-bit words, so the request is
 * satisfied through a full-size bounce buffer and the requested byte
 * range copied out afterwards.  The ethtool core has already bounded
 * offset + len against get_eeprom_len().
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);
	u8 *buf = kvzalloc(EEPROMSIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	/* Round the start down to a word boundary and read word by word. */
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kvfree(buf);
	return err;
}
1230
/* ethtool set_eeprom handler: write @eeprom->len bytes from @data to the
 * serial EEPROM at @eeprom->offset.
 *
 * Writes go out as aligned 32-bit words.  An unaligned start or length is
 * widened to whole words via read-modify-write of the boundary words.
 * Write protection is lifted for the duration of the update and re-armed
 * only if all writes succeeded.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->pf > 0) {
		/* A non-primary PF may only write its own private VPD
		 * partition, which begins after the first 1KB.
		 */
		u32 start = 1024 + adapter->pf * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/* Unaligned request: read the first and last words into a
		 * scratch buffer, then overlay the caller's bytes at the
		 * unaligned start before writing whole words back.
		 */
		buf = kvzalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else {
		buf = data;
	}

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kvfree(buf);
	return err;
}
1287
1288 static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev,
1289 const u8 *data, u32 size)
1290 {
1291 struct adapter *adap = netdev2adap(netdev);
1292 int ret;
1293
1294 ret = t4_load_bootcfg(adap, data, size);
1295 if (ret)
1296 dev_err(adap->pdev_dev, "Failed to load boot cfg image\n");
1297
1298 return ret;
1299 }
1300
1301 static int cxgb4_ethtool_flash_boot(struct net_device *netdev,
1302 const u8 *bdata, u32 size)
1303 {
1304 struct adapter *adap = netdev2adap(netdev);
1305 unsigned int offset;
1306 u8 *data;
1307 int ret;
1308
1309 data = kmemdup(bdata, size, GFP_KERNEL);
1310 if (!data)
1311 return -ENOMEM;
1312
1313 offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A)));
1314
1315 ret = t4_load_boot(adap, data, offset, size);
1316 if (ret)
1317 dev_err(adap->pdev_dev, "Failed to load boot image\n");
1318
1319 kfree(data);
1320 return ret;
1321 }
1322
1323 #define CXGB4_PHY_SIG 0x130000ea
1324
1325 static int cxgb4_validate_phy_image(const u8 *data, u32 *size)
1326 {
1327 struct cxgb4_fw_data *header;
1328
1329 header = (struct cxgb4_fw_data *)data;
1330 if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG)
1331 return -EINVAL;
1332
1333 return 0;
1334 }
1335
/* Flash PHY firmware of @size bytes to the adapter.
 *
 * The image signature is validated, the main firmware is put into RESET,
 * and the PHY firmware is then loaded through memory window MEMWIN_NIC.
 */
static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
				   const u8 *data, u32 size)
{
	struct adapter *adap = netdev2adap(netdev);
	int ret;

	ret = cxgb4_validate_phy_image(data, NULL);
	if (ret) {
		dev_err(adap->pdev_dev, "PHY signature mismatch\n");
		return ret;
	}

	/* Reset the firmware before loading the new PHY image.
	 * NOTE(review): presumably the chip must be in an uninitialized
	 * state for the PHY image to be stored persistently rather than
	 * only in the running firmware's RAM — confirm against FW docs.
	 */
	ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
	if (ret < 0) {
		dev_err(adap->pdev_dev,
			"Set FW to RESET for flashing PHY FW failed. ret: %d\n",
			ret);
		return ret;
	}

	ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
			ret);
		return ret;
	}

	return 0;
}
1370
/* Flash main firmware of @size bytes to the adapter. */
static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
				  const u8 *data, u32 size)
{
	struct adapter *adap = netdev2adap(netdev);
	unsigned int mbox = PCIE_FW_MASTER_M + 1;
	int ret;

	/* If the adapter is fully initialized, upgrade through our own
	 * mailbox; otherwise keep the out-of-range mailbox id set above.
	 * NOTE(review): an id > PCIE_FW_MASTER_M presumably tells
	 * t4_fw_upgrade() to skip coordination with the running firmware
	 * — confirm against t4_fw_upgrade()'s contract.
	 */
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		mbox = adap->mbox;

	ret = t4_fw_upgrade(adap, mbox, data, size, 1);
	if (ret)
		dev_err(adap->pdev_dev,
			"Failed to flash firmware\n");

	return ret;
}
1393
1394 static int cxgb4_ethtool_flash_region(struct net_device *netdev,
1395 const u8 *data, u32 size, u32 region)
1396 {
1397 struct adapter *adap = netdev2adap(netdev);
1398 int ret;
1399
1400 switch (region) {
1401 case CXGB4_ETHTOOL_FLASH_FW:
1402 ret = cxgb4_ethtool_flash_fw(netdev, data, size);
1403 break;
1404 case CXGB4_ETHTOOL_FLASH_PHY:
1405 ret = cxgb4_ethtool_flash_phy(netdev, data, size);
1406 break;
1407 case CXGB4_ETHTOOL_FLASH_BOOT:
1408 ret = cxgb4_ethtool_flash_boot(netdev, data, size);
1409 break;
1410 case CXGB4_ETHTOOL_FLASH_BOOTCFG:
1411 ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size);
1412 break;
1413 default:
1414 ret = -EOPNOTSUPP;
1415 break;
1416 }
1417
1418 if (!ret)
1419 dev_info(adap->pdev_dev,
1420 "loading %s successful, reload cxgb4 driver\n",
1421 flash_region_strings[region]);
1422 return ret;
1423 }
1424
1425 #define CXGB4_FW_SIG 0x4368656c
1426 #define CXGB4_FW_SIG_OFFSET 0x160
1427
1428 static int cxgb4_validate_fw_image(const u8 *data, u32 *size)
1429 {
1430 struct cxgb4_fw_data *header;
1431
1432 header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET];
1433 if (be32_to_cpu(header->signature) != CXGB4_FW_SIG)
1434 return -EINVAL;
1435
1436 if (size)
1437 *size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512;
1438
1439 return 0;
1440 }
1441
1442 static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size)
1443 {
1444 struct cxgb4_bootcfg_data *header;
1445
1446 header = (struct cxgb4_bootcfg_data *)data;
1447 if (le16_to_cpu(header->signature) != BOOT_CFG_SIG)
1448 return -EINVAL;
1449
1450 return 0;
1451 }
1452
/* Validate a boot (PCI expansion ROM) image and, when @size is non-NULL,
 * compute the image's total length by walking its chain of ROM headers.
 * Each header records its own length in 512-byte units and the offset of
 * its PCIR data structure; the walk stops at the PCIR block carrying the
 * last-image indicator bit (CXGB4_HDR_INDI).
 *
 * NOTE(review): @size is accumulated with "+=", so callers must pass a
 * zero-initialized value; the walk also trusts the image's own headers
 * for termination and bounds — confirm callers validate image length.
 */
static int cxgb4_validate_boot_image(const u8 *data, u32 *size)
{
	struct cxgb4_pci_exp_rom_header *exp_header;
	struct cxgb4_pcir_data *pcir_header;
	struct legacy_pci_rom_hdr *header;
	const u8 *cur_header = data;
	u16 pcir_offset;

	exp_header = (struct cxgb4_pci_exp_rom_header *)data;

	/* Every expansion ROM image begins with the ROM signature. */
	if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE)
		return -EINVAL;

	if (size) {
		do {
			header = (struct legacy_pci_rom_hdr *)cur_header;
			pcir_offset = le16_to_cpu(header->pcir_offset);
			pcir_header = (struct cxgb4_pcir_data *)(cur_header +
				      pcir_offset);

			/* Advance over this sub-image. */
			*size += header->size512 * 512;
			cur_header += header->size512 * 512;
		} while (!(pcir_header->indicator & CXGB4_HDR_INDI));
	}

	return 0;
}
1480
1481 static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size)
1482 {
1483 if (!cxgb4_validate_fw_image(data, size))
1484 return CXGB4_ETHTOOL_FLASH_FW;
1485 if (!cxgb4_validate_boot_image(data, size))
1486 return CXGB4_ETHTOOL_FLASH_BOOT;
1487 if (!cxgb4_validate_phy_image(data, size))
1488 return CXGB4_ETHTOOL_FLASH_PHY;
1489 if (!cxgb4_validate_bootcfg_image(data, size))
1490 return CXGB4_ETHTOOL_FLASH_BOOTCFG;
1491
1492 return -EOPNOTSUPP;
1493 }
1494
1495 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1496 {
1497 struct adapter *adap = netdev2adap(netdev);
1498 const struct firmware *fw;
1499 unsigned int master;
1500 u8 master_vld = 0;
1501 const u8 *fw_data;
1502 size_t fw_size;
1503 u32 size = 0;
1504 u32 pcie_fw;
1505 int region;
1506 int ret;
1507
1508 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
1509 master = PCIE_FW_MASTER_G(pcie_fw);
1510 if (pcie_fw & PCIE_FW_MASTER_VLD_F)
1511 master_vld = 1;
1512
1513 if (master_vld && (master != adap->pf)) {
1514 dev_warn(adap->pdev_dev,
1515 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
1516 return -EOPNOTSUPP;
1517 }
1518
1519 ef->data[sizeof(ef->data) - 1] = '\0';
1520 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1521 if (ret < 0)
1522 return ret;
1523
1524 fw_data = fw->data;
1525 fw_size = fw->size;
1526 if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) {
1527 while (fw_size > 0) {
1528 size = 0;
1529 region = cxgb4_ethtool_get_flash_region(fw_data, &size);
1530 if (region < 0 || !size) {
1531 ret = region;
1532 goto out_free_fw;
1533 }
1534
1535 ret = cxgb4_ethtool_flash_region(netdev, fw_data, size,
1536 region);
1537 if (ret)
1538 goto out_free_fw;
1539
1540 fw_data += size;
1541 fw_size -= size;
1542 }
1543 } else {
1544 ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size,
1545 ef->region);
1546 }
1547
1548 out_free_fw:
1549 release_firmware(fw);
1550 return ret;
1551 }
1552
1553 static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
1554 {
1555 struct port_info *pi = netdev_priv(dev);
1556 struct adapter *adapter = pi->adapter;
1557
1558 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1559 SOF_TIMESTAMPING_RX_SOFTWARE |
1560 SOF_TIMESTAMPING_SOFTWARE;
1561
1562 ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
1563 SOF_TIMESTAMPING_TX_HARDWARE |
1564 SOF_TIMESTAMPING_RAW_HARDWARE;
1565
1566 ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1567 (1 << HWTSTAMP_TX_ON);
1568
1569 ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1570 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1571 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1572 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1573 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1574 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
1575
1576 if (adapter->ptp_clock)
1577 ts_info->phc_index = ptp_clock_index(adapter->ptp_clock);
1578 else
1579 ts_info->phc_index = -1;
1580
1581 return 0;
1582 }
1583
1584 static u32 get_rss_table_size(struct net_device *dev)
1585 {
1586 const struct port_info *pi = netdev_priv(dev);
1587
1588 return pi->rss_size;
1589 }
1590
1591 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
1592 {
1593 const struct port_info *pi = netdev_priv(dev);
1594 unsigned int n = pi->rss_size;
1595
1596 if (hfunc)
1597 *hfunc = ETH_RSS_HASH_TOP;
1598 if (!p)
1599 return 0;
1600 while (n--)
1601 p[n] = pi->rss[n];
1602 return 0;
1603 }
1604
1605 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
1606 const u8 hfunc)
1607 {
1608 unsigned int i;
1609 struct port_info *pi = netdev_priv(dev);
1610
1611
1612
1613
1614 if (key ||
1615 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
1616 return -EOPNOTSUPP;
1617 if (!p)
1618 return 0;
1619
1620
1621 if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
1622 for (i = 0; i < pi->rss_size; i++)
1623 pi->rss[i] = p[i];
1624
1625 return cxgb4_write_rss(pi, pi->rss);
1626 }
1627
1628 return -EPERM;
1629 }
1630
1631 static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
1632 u32 ftid)
1633 {
1634 struct tid_info *t = &adap->tids;
1635
1636 if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
1637 return &t->hpftid_tab[ftid - t->hpftid_base];
1638
1639 if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
1640 return &t->ftid_tab[ftid - t->ftid_base];
1641
1642 return lookup_tid(t, ftid);
1643 }
1644
/* Translate a hardware filter specification @dfs into the ethtool
 * flow-spec @fs reported to userspace.
 *
 * dfs->type selects the address family (non-zero means IPv6).  The
 * tcp_ip*_spec union members are used for both TCP and UDP (the ethtool
 * UAPI declares them with the same layout).  Inner-VLAN match data goes
 * through the FLOW_EXT extension fields, and the ring cookie encodes
 * either "drop" or the target ingress queue.
 */
static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
				   struct ch_filter_specification *dfs)
{
	switch (dfs->val.proto) {
	case IPPROTO_TCP:
		if (dfs->type)
			fs->flow_type = TCP_V6_FLOW;
		else
			fs->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		if (dfs->type)
			fs->flow_type = UDP_V6_FLOW;
		else
			fs->flow_type = UDP_V4_FLOW;
		break;
	}

	if (dfs->type) {
		/* IPv6: ports, addresses, and traffic class + masks. */
		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport);
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport);
		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport);
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport);
		memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0],
		       sizeof(fs->h_u.tcp_ip6_spec.ip6src));
		memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0],
		       sizeof(fs->m_u.tcp_ip6_spec.ip6src));
		memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0],
		       sizeof(fs->h_u.tcp_ip6_spec.ip6dst));
		memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0],
		       sizeof(fs->m_u.tcp_ip6_spec.ip6dst));
		fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos;
		fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos;
	} else {
		/* IPv4: ports, addresses, and TOS + masks. */
		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport);
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport);
		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport);
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport);
		memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0],
		       sizeof(fs->h_u.tcp_ip4_spec.ip4src));
		memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0],
		       sizeof(fs->m_u.tcp_ip4_spec.ip4src));
		memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0],
		       sizeof(fs->h_u.tcp_ip4_spec.ip4dst));
		memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0],
		       sizeof(fs->m_u.tcp_ip4_spec.ip4dst));
		fs->h_u.tcp_ip4_spec.tos = dfs->val.tos;
		fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos;
	}
	fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan);
	fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan);
	fs->flow_type |= FLOW_EXT;

	if (dfs->action == FILTER_DROP)
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fs->ring_cookie = dfs->iq;
}
1703
1704 static int cxgb4_ntuple_get_filter(struct net_device *dev,
1705 struct ethtool_rxnfc *cmd,
1706 unsigned int loc)
1707 {
1708 const struct port_info *pi = netdev_priv(dev);
1709 struct adapter *adap = netdev2adap(dev);
1710 struct filter_entry *f;
1711 int ftid;
1712
1713 if (!(adap->flags & CXGB4_FULL_INIT_DONE))
1714 return -EAGAIN;
1715
1716
1717 if (!adap->ethtool_filters)
1718 return -EOPNOTSUPP;
1719
1720 if (loc >= adap->ethtool_filters->nentries)
1721 return -ERANGE;
1722
1723 if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap))
1724 return -ENOENT;
1725
1726 ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc];
1727
1728
1729 f = cxgb4_get_filter_entry(adap, ftid);
1730
1731 cxgb4_fill_filter_rule(&cmd->fs, &f->fs);
1732
1733 return 0;
1734 }
1735
1736 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1737 u32 *rules)
1738 {
1739 const struct port_info *pi = netdev_priv(dev);
1740 struct adapter *adap = netdev2adap(dev);
1741 unsigned int count = 0, index = 0;
1742 int ret = 0;
1743
1744 switch (info->cmd) {
1745 case ETHTOOL_GRXFH: {
1746 unsigned int v = pi->rss_mode;
1747
1748 info->data = 0;
1749 switch (info->flow_type) {
1750 case TCP_V4_FLOW:
1751 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
1752 info->data = RXH_IP_SRC | RXH_IP_DST |
1753 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1754 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1755 info->data = RXH_IP_SRC | RXH_IP_DST;
1756 break;
1757 case UDP_V4_FLOW:
1758 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
1759 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1760 info->data = RXH_IP_SRC | RXH_IP_DST |
1761 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1762 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1763 info->data = RXH_IP_SRC | RXH_IP_DST;
1764 break;
1765 case SCTP_V4_FLOW:
1766 case AH_ESP_V4_FLOW:
1767 case IPV4_FLOW:
1768 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1769 info->data = RXH_IP_SRC | RXH_IP_DST;
1770 break;
1771 case TCP_V6_FLOW:
1772 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
1773 info->data = RXH_IP_SRC | RXH_IP_DST |
1774 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1775 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1776 info->data = RXH_IP_SRC | RXH_IP_DST;
1777 break;
1778 case UDP_V6_FLOW:
1779 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
1780 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1781 info->data = RXH_IP_SRC | RXH_IP_DST |
1782 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1783 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1784 info->data = RXH_IP_SRC | RXH_IP_DST;
1785 break;
1786 case SCTP_V6_FLOW:
1787 case AH_ESP_V6_FLOW:
1788 case IPV6_FLOW:
1789 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1790 info->data = RXH_IP_SRC | RXH_IP_DST;
1791 break;
1792 }
1793 return 0;
1794 }
1795 case ETHTOOL_GRXRINGS:
1796 info->data = pi->nqsets;
1797 return 0;
1798 case ETHTOOL_GRXCLSRLCNT:
1799 info->rule_cnt =
1800 adap->ethtool_filters->port[pi->port_id].in_use;
1801 return 0;
1802 case ETHTOOL_GRXCLSRULE:
1803 return cxgb4_ntuple_get_filter(dev, info, info->fs.location);
1804 case ETHTOOL_GRXCLSRLALL:
1805 info->data = adap->ethtool_filters->nentries;
1806 while (count < info->rule_cnt) {
1807 ret = cxgb4_ntuple_get_filter(dev, info, index);
1808 if (!ret)
1809 rules[count++] = index;
1810 index++;
1811 }
1812 return 0;
1813 }
1814
1815 return -EOPNOTSUPP;
1816 }
1817
/* Delete the ethtool n-tuple filter installed at @cmd->fs.location on
 * this port.
 *
 * The slot number is translated to the hardware TID recorded at insert
 * time; before handing it to cxgb4_flow_rule_destroy() the TID is
 * converted back to the index space that layer expects — the exact
 * inverse of the adjustment applied in cxgb4_ntuple_set_filter().
 */
static int cxgb4_ntuple_del_filter(struct net_device *dev,
				   struct ethtool_rxnfc *cmd)
{
	struct cxgb4_ethtool_filter_info *filter_info;
	struct adapter *adapter = netdev2adap(dev);
	struct port_info *pi = netdev_priv(dev);
	struct filter_entry *f;
	u32 filter_id;
	int ret;

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
		return -EAGAIN;

	if (!adapter->ethtool_filters)
		return -EOPNOTSUPP;

	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
		dev_err(adapter->pdev_dev,
			"Location must be < %u",
			adapter->ethtool_filters->nentries);
		return -ERANGE;
	}

	filter_info = &adapter->ethtool_filters->port[pi->port_id];

	if (!test_bit(cmd->fs.location, filter_info->bmap))
		return -ENOENT;

	filter_id = filter_info->loc_array[cmd->fs.location];
	f = cxgb4_get_filter_entry(adapter, filter_id);

	/* Undo the TID-base offsets added when the filter was installed. */
	if (f->fs.prio)
		filter_id -= adapter->tids.hpftid_base;
	else if (!f->fs.hash)
		filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);

	ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
	if (ret)
		goto err;

	/* Only release the tracking slot once hardware removal succeeded. */
	clear_bit(cmd->fs.location, filter_info->bmap);
	filter_info->in_use--;

err:
	return ret;
}
1864
1865
/* Install an ethtool n-tuple filter at slot @cmd->fs.location on this
 * port.
 *
 * The ethtool flow spec is converted to a flow_rule, installed through
 * the shared cxgb4 flow-rule path, and the resulting hardware TID is
 * recorded in the slot.  The returned TID is index-relative, so the
 * matching base (high-priority or normal TCAM) is added before storing —
 * cxgb4_ntuple_del_filter() performs the inverse on removal.
 */
static int cxgb4_ntuple_set_filter(struct net_device *netdev,
				   struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec_input input = {};
	struct cxgb4_ethtool_filter_info *filter_info;
	struct adapter *adapter = netdev2adap(netdev);
	struct port_info *pi = netdev_priv(netdev);
	struct ch_filter_specification fs;
	struct ethtool_rx_flow_rule *flow;
	u32 tid;
	int ret;

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
		return -EAGAIN;

	if (!adapter->ethtool_filters)
		return -EOPNOTSUPP;

	if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
		dev_err(adapter->pdev_dev,
			"Location must be < %u",
			adapter->ethtool_filters->nentries);
		return -ERANGE;
	}

	/* Each slot holds at most one rule; delete before replacing. */
	if (test_bit(cmd->fs.location,
		     adapter->ethtool_filters->port[pi->port_id].bmap))
		return -EEXIST;

	memset(&fs, 0, sizeof(fs));

	input.fs = &cmd->fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto exit;
	}

	/* Enable hit counters so the rule's activity can be read back. */
	fs.hitcnts = 1;

	ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location,
				      NULL, &fs, &tid);
	if (ret)
		goto free;

	filter_info = &adapter->ethtool_filters->port[pi->port_id];

	/* Convert the index-relative tid to an absolute hardware TID. */
	if (fs.prio)
		tid += adapter->tids.hpftid_base;
	else if (!fs.hash)
		tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);

	filter_info->loc_array[cmd->fs.location] = tid;
	set_bit(cmd->fs.location, filter_info->bmap);
	filter_info->in_use++;

free:
	ethtool_rx_flow_rule_destroy(flow);
exit:
	return ret;
}
1927
1928 static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1929 {
1930 int ret = -EOPNOTSUPP;
1931
1932 switch (cmd->cmd) {
1933 case ETHTOOL_SRXCLSRLINS:
1934 ret = cxgb4_ntuple_set_filter(dev, cmd);
1935 break;
1936 case ETHTOOL_SRXCLSRLDEL:
1937 ret = cxgb4_ntuple_del_filter(dev, cmd);
1938 break;
1939 default:
1940 break;
1941 }
1942
1943 return ret;
1944 }
1945
1946 static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
1947 {
1948 struct adapter *adapter = netdev2adap(dev);
1949 u32 len = 0;
1950
1951 len = sizeof(struct cudbg_hdr) +
1952 sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1953 len += cxgb4_get_dump_length(adapter, eth_dump->flag);
1954
1955 adapter->eth_dump.flag = eth_dump->flag;
1956 adapter->eth_dump.len = len;
1957 return 0;
1958 }
1959
1960 static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump)
1961 {
1962 struct adapter *adapter = netdev2adap(dev);
1963
1964 eth_dump->flag = adapter->eth_dump.flag;
1965 eth_dump->len = adapter->eth_dump.len;
1966 eth_dump->version = adapter->eth_dump.version;
1967 return 0;
1968 }
1969
1970 static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
1971 void *buf)
1972 {
1973 struct adapter *adapter = netdev2adap(dev);
1974 u32 len = 0;
1975 int ret = 0;
1976
1977 if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE)
1978 return -ENOENT;
1979
1980 len = sizeof(struct cudbg_hdr) +
1981 sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
1982 len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag);
1983 if (eth_dump->len < len)
1984 return -ENOMEM;
1985
1986 ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag);
1987 if (ret)
1988 return ret;
1989
1990 eth_dump->flag = adapter->eth_dump.flag;
1991 eth_dump->len = len;
1992 eth_dump->version = adapter->eth_dump.version;
1993 return 0;
1994 }
1995
1996 static bool cxgb4_fw_mod_type_info_available(unsigned int fw_mod_type)
1997 {
1998
1999
2000
2001 return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
2002 fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
2003 }
2004
/* ethtool get_module_info handler: determine the transceiver module's
 * EEPROM layout (SFF-8079/8472 for SFP-class ports, SFF-8436/8636 for
 * QSFP-class ports) by probing identification bytes over I2C.
 */
static int cxgb4_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct port_info *pi = netdev_priv(dev);
	u8 sff8472_comp, sff_diag_type, sff_rev;
	struct adapter *adapter = pi->adapter;
	int ret;

	if (!cxgb4_fw_mod_type_info_available(pi->mod_type))
		return -EINVAL;

	switch (pi->port_type) {
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSA:
	case FW_PORT_TYPE_SFP28:
		/* Read the SFF-8472 compliance and diagnostic-type bytes
		 * from the module's A0 address.
		 */
		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
				SFF_8472_COMP_LEN, &sff8472_comp);
		if (ret)
			return ret;
		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
				SFP_DIAG_TYPE_LEN, &sff_diag_type);
		if (ret)
			return ret;

		/* No 8472 compliance, or address-change-required modules,
		 * are only readable as plain SFF-8079.
		 */
		if (!sff8472_comp || (sff_diag_type & SFP_DIAG_ADDRMODE)) {
			modinfo->type = ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8472;
			/* Without the diagnostic page only half of the
			 * 8472 address space is meaningful.
			 */
			if (sff_diag_type & SFP_DIAG_IMPLEMENTED)
				modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			else
				modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2;
		}
		break;

	case FW_PORT_TYPE_QSFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_CR2_QSFP:
	case FW_PORT_TYPE_CR4_QSFP:
		/* The revision byte distinguishes SFF-8636 (rev >= 3)
		 * from the older SFF-8436 layout.
		 */
		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				I2C_DEV_ADDR_A0, SFF_REV_ADDR,
				SFF_REV_LEN, &sff_rev);
		if (ret)
			return ret;
		if (sff_rev >= 0x3) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
2071
/* ethtool get_module_eeprom handler: read transceiver EEPROM contents.
 *
 * Offsets within the first I2C page are read from device address A0;
 * anything beyond is read from address A2 (the SFF-8472 diagnostic
 * area).  A request straddling the page boundary is split in two.
 */
static int cxgb4_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *eprom, u8 *data)
{
	int ret = 0, offset = eprom->offset, len = eprom->len;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	memset(data, 0, eprom->len);
	/* Entirely within the A0 page: single read. */
	if (offset + len <= I2C_PAGE_SIZE)
		return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				 I2C_DEV_ADDR_A0, offset, len, data);

	/* Request spans the A0/A2 boundary: read the A0 remainder first. */
	if (offset <= I2C_PAGE_SIZE) {
		len = I2C_PAGE_SIZE - offset;
		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
				I2C_DEV_ADDR_A0, offset, len, data);
		if (ret)
			return ret;
		offset = I2C_PAGE_SIZE;

		/* Remaining bytes come from the A2 device. */
		len = eprom->len - len;
	}

	/* NOTE(review): the A2 read passes the absolute offset (>= page
	 * size), not one relative to the A2 page — confirm t4_i2c_rd()
	 * expects this addressing.
	 */
	return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
			 offset, len, &data[eprom->len - len]);
}
2102
2103 static u32 cxgb4_get_priv_flags(struct net_device *netdev)
2104 {
2105 struct port_info *pi = netdev_priv(netdev);
2106 struct adapter *adapter = pi->adapter;
2107
2108 return (adapter->eth_flags | pi->eth_flags);
2109 }
2110
2111
2112
2113
2114
2115
2116
2117 static inline void set_flags(u32 *cur_flags, u32 new_flags, u32 flags)
2118 {
2119 *cur_flags = (*cur_flags & ~flags) | (new_flags & flags);
2120 }
2121
2122 static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
2123 {
2124 struct port_info *pi = netdev_priv(netdev);
2125 struct adapter *adapter = pi->adapter;
2126
2127 set_flags(&adapter->eth_flags, flags, PRIV_FLAGS_ADAP);
2128 set_flags(&pi->eth_flags, flags, PRIV_FLAGS_PORT);
2129
2130 return 0;
2131 }
2132
2133 static void cxgb4_lb_test(struct net_device *netdev, u64 *lb_status)
2134 {
2135 int dev_state = netif_running(netdev);
2136
2137 if (dev_state) {
2138 netif_tx_stop_all_queues(netdev);
2139 netif_carrier_off(netdev);
2140 }
2141
2142 *lb_status = cxgb4_selftest_lb_pkt(netdev);
2143
2144 if (dev_state) {
2145 netif_tx_start_all_queues(netdev);
2146 netif_carrier_on(netdev);
2147 }
2148 }
2149
2150 static void cxgb4_self_test(struct net_device *netdev,
2151 struct ethtool_test *eth_test, u64 *data)
2152 {
2153 struct port_info *pi = netdev_priv(netdev);
2154 struct adapter *adap = pi->adapter;
2155
2156 memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST);
2157
2158 if (!(adap->flags & CXGB4_FULL_INIT_DONE) ||
2159 !(adap->flags & CXGB4_FW_OK)) {
2160 eth_test->flags |= ETH_TEST_FL_FAILED;
2161 return;
2162 }
2163
2164 if (eth_test->flags & ETH_TEST_FL_OFFLINE)
2165 cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]);
2166
2167 if (data[CXGB4_ETHTOOL_LB_TEST])
2168 eth_test->flags |= ETH_TEST_FL_FAILED;
2169 }
2170
/* ethtool operations for cxgb4 network devices; installed on each
 * netdev by cxgb4_set_ethtool_ops().
 */
static const struct ethtool_ops cxgb_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS_IRQ |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
	.get_fecparam = get_fecparam,
	.set_fecparam = set_fecparam,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.set_phys_id = identify_port,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_rxnfc = get_rxnfc,
	.set_rxnfc = set_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh = get_rss_table,
	.set_rxfh = set_rss_table,
	.self_test = cxgb4_self_test,
	.flash_device = set_flash,
	.get_ts_info = get_ts_info,
	.set_dump = set_dump,
	.get_dump_flag = get_dump_flag,
	.get_dump_data = get_dump_data,
	.get_module_info = cxgb4_get_module_info,
	.get_module_eeprom = cxgb4_get_module_eeprom,
	.get_priv_flags = cxgb4_get_priv_flags,
	.set_priv_flags = cxgb4_set_priv_flags,
};
2216
2217 void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
2218 {
2219 struct cxgb4_ethtool_filter_info *eth_filter_info;
2220 u8 i;
2221
2222 if (!adap->ethtool_filters)
2223 return;
2224
2225 eth_filter_info = adap->ethtool_filters->port;
2226
2227 if (eth_filter_info) {
2228 for (i = 0; i < adap->params.nports; i++) {
2229 kvfree(eth_filter_info[i].loc_array);
2230 bitmap_free(eth_filter_info[i].bmap);
2231 }
2232 kfree(eth_filter_info);
2233 }
2234
2235 kfree(adap->ethtool_filters);
2236 }
2237
2238 int cxgb4_init_ethtool_filters(struct adapter *adap)
2239 {
2240 struct cxgb4_ethtool_filter_info *eth_filter_info;
2241 struct cxgb4_ethtool_filter *eth_filter;
2242 struct tid_info *tids = &adap->tids;
2243 u32 nentries, i;
2244 int ret;
2245
2246 eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL);
2247 if (!eth_filter)
2248 return -ENOMEM;
2249
2250 eth_filter_info = kcalloc(adap->params.nports,
2251 sizeof(*eth_filter_info),
2252 GFP_KERNEL);
2253 if (!eth_filter_info) {
2254 ret = -ENOMEM;
2255 goto free_eth_filter;
2256 }
2257
2258 eth_filter->port = eth_filter_info;
2259
2260 nentries = tids->nhpftids + tids->nftids;
2261 if (is_hashfilter(adap))
2262 nentries += tids->nhash +
2263 (adap->tids.stid_base - adap->tids.tid_base);
2264 eth_filter->nentries = nentries;
2265
2266 for (i = 0; i < adap->params.nports; i++) {
2267 eth_filter->port[i].loc_array = kvzalloc(nentries, GFP_KERNEL);
2268 if (!eth_filter->port[i].loc_array) {
2269 ret = -ENOMEM;
2270 goto free_eth_finfo;
2271 }
2272
2273 eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
2274 if (!eth_filter->port[i].bmap) {
2275 ret = -ENOMEM;
2276 goto free_eth_finfo;
2277 }
2278 }
2279
2280 adap->ethtool_filters = eth_filter;
2281 return 0;
2282
2283 free_eth_finfo:
2284 while (i-- > 0) {
2285 bitmap_free(eth_filter->port[i].bmap);
2286 kvfree(eth_filter->port[i].loc_array);
2287 }
2288 kfree(eth_filter_info);
2289
2290 free_eth_filter:
2291 kfree(eth_filter);
2292
2293 return ret;
2294 }
2295
/* Attach the cxgb4 ethtool operations to @netdev. */
void cxgb4_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &cxgb_ethtool_ops;
}