#include <linux/module.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/notifier.h>

#include "xgbe.h"
#include "xgbe-common.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(XGBE_DRV_DESC);

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);

static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_default_config\n");

	pdata->blen = DMA_SBMR_BLEN_64;
	pdata->pbl = DMA_PBL_128;
	pdata->aal = 1;
	pdata->rd_osr_limit = 8;
	pdata->wr_osr_limit = 8;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_DISABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 1;
	pdata->rx_pause = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;

	DBGPR("<--xgbe_default_config\n");
}

static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
	xgbe_init_function_ptrs_dev(&pdata->hw_if);
	xgbe_init_function_ptrs_phy(&pdata->phy_if);
	xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	xgbe_init_function_ptrs_desc(&pdata->desc_if);

	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

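/* Allocate the net_device (with XGBE_MAX_DMA_CHANNELS queues) and its
 * embedded private data, and initialize the locks, mutexes and completions
 * used by the driver.  Returns the private data pointer on success or an
 * ERR_PTR() on allocation failure.
 */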
struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev_mq failed\n");
		return ERR_PTR(-ENOMEM);
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->dev = dev;

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->xpcs_lock);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);
	mutex_init(&pdata->i2c_mutex);
	init_completion(&pdata->i2c_complete);
	init_completion(&pdata->mdio_complete);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);
	set_bit(XGBE_STOPPED, &pdata->dev_state);

	return pdata;
}

void xgbe_free_pdata(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	free_netdev(netdev);
}

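/* Determine how many DMA channels and hardware queues to use, based on the
 * hardware feature counts, any externally imposed maximums and the number of
 * online CPUs.
 */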
void xgbe_set_counts(struct xgbe_prv_data *pdata)
{
	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created:
	 *  - Tx (DMA) channels map 1-to-1 to Tx queues, so set the number
	 *    of Tx queues to the number of Tx channels enabled
	 *  - Rx (DMA) channels do not map 1-to-1 to Rx queues, so the Rx
	 *    queue count is capped separately below
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
			pdata->tx_ring_count, pdata->rx_ring_count);
		dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
			pdata->tx_q_count, pdata->rx_q_count);
	}
}

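/* Complete device setup: software-reset the hardware, apply the default
 * configuration, size the DMA rings and queues, initialize RSS and the PHY,
 * set the net_device operations and feature flags, and finally register the
 * network device.
 */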
int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct device *dev = pdata->dev;
	int ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	eth_hw_addr_set(netdev, pdata->mac_addr);

	/* Initialize ECC timestamps */
	pdata->tx_sec_period = jiffies;
	pdata->tx_ded_period = jiffies;
	pdata->rx_sec_period = jiffies;
	pdata->rx_ded_period = jiffies;
	pdata->desc_sec_period = jiffies;
	pdata->desc_ded_period = jiffies;

	/* Issue a software reset to the device */
	ret = pdata->hw_if.exit(pdata);
	if (ret) {
		dev_err(dev, "software reset failed\n");
		return ret;
	}

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		return ret;
	}

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	/* Set and validate the number of descriptors per ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;

	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;

	/* Adjust the number of channels based on interrupts assigned */
	if (pdata->channel_irq_count) {
		pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
					     pdata->channel_irq_count);
		pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
					     pdata->channel_irq_count);

		if (netif_msg_probe(pdata))
			dev_dbg(pdata->dev,
				"adjusted TX/RX DMA channel count = %u/%u\n",
				pdata->tx_ring_count, pdata->rx_ring_count);
	}

	/* Initialize the RSS hash key and options */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call the MDIO/PHY initialization routine */
	pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
	ret = pdata->phy_if.phy_init(pdata);
	if (ret)
		return ret;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	if (pdata->hw_feat.vxn) {
		netdev->hw_enc_features = NETIF_F_SG |
					  NETIF_F_IP_CSUM |
					  NETIF_F_IPV6_CSUM |
					  NETIF_F_RXCSUM |
					  NETIF_F_TSO |
					  NETIF_F_TSO6 |
					  NETIF_F_GRO |
					  NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->udp_tunnel_nic_info = xgbe_get_udp_tunnel_info();
	}

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->min_mtu = 0;
	netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		return ret;
	}

	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
		xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
		  pdata->tx_ring_count);
	netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
		  pdata->rx_ring_count);

	return 0;
}

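/* Undo xgbe_config_netdev(): tear down debugfs and PTP support, unregister
 * the network device and shut down the PHY.
 */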
void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	xgbe_debugfs_exit(pdata);

	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
		xgbe_ptp_unregister(pdata);

	unregister_netdev(netdev);

	pdata->phy_if.phy_exit(pdata);
}

static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(data);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	if (netdev->netdev_ops != xgbe_get_netdev_ops())
		goto out;

	switch (event) {
	case NETDEV_CHANGENAME:
		xgbe_debugfs_rename(pdata);
		break;

	default:
		break;
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block xgbe_netdev_notifier = {
	.notifier_call = xgbe_netdev_event,
};

static int __init xgbe_mod_init(void)
{
	int ret;

	ret = register_netdevice_notifier(&xgbe_netdev_notifier);
	if (ret)
		return ret;

	ret = xgbe_platform_init();
	if (ret)
		goto err_platform_init;

	ret = xgbe_pci_init();
	if (ret)
		goto err_pci_init;

	return 0;

err_pci_init:
	xgbe_platform_exit();
err_platform_init:
	unregister_netdevice_notifier(&xgbe_netdev_notifier);
	return ret;
}

static void __exit xgbe_mod_exit(void)
{
	xgbe_pci_exit();

	xgbe_platform_exit();

	unregister_netdevice_notifier(&xgbe_netdev_notifier);
}

module_init(xgbe_mod_init);
module_exit(xgbe_mod_exit);