// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 */
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <net/dst.h>

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#define OCTEON_MAX_MTU 65392

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
		 "\tNumber of packet buffers to allocate and store in the\n"
		 "\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
		 "\tPOW group to receive packets from. All ethernet hardware\n"
		 "\twill be configured to send incoming packets to this POW\n"
		 "\tgroup. Also any other software can submit packets to this\n"
		 "\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
		 "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
		 "\twill be configured to send incoming packets to multiple POW\n"
		 "\tgroups. pow_receive_group parameter is ignored when multiple\n"
		 "\tgroups are taken into use and groups are allocated starting\n"
		 "\tfrom 0. By default, a single group is used.\n");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
		 "\tPOW group to send packets to other software on. This\n"
		 "\tcontrols the creation of the virtual device pow0.\n"
		 "\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
		 "\tWhen set, always send to the pow group. This will cause\n"
		 "\tpackets sent to real ethernet devices to be sent to the\n"
		 "\tPOW group instead of the hardware. Unless some other\n"
		 "\tapplication changes the config, packets will still be\n"
		 "\treceived from the low level hardware. Use this option\n"
		 "\tto allow a CVMX app to intercept all packets from the\n"
		 "\tlinux kernel. You must specify pow_send_group along with\n"
		 "\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
		 "\tComma separated list of ethernet devices that should use the\n"
		 "\tPOW for transmit instead of the actual ethernet hardware. This\n"
		 "\tis a per port version of always_use_pow. always_use_pow takes\n"
		 "\tprecedence over this list. For example, setting this to\n"
		 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
		 "\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/* Mask indicating which receive groups are in use. */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received, so cvm_oct_napi_poll could not refill it.
	 * We need to refill it outside of the receive processing
	 * thread.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
			(cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	struct cvmx_wqe *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		dev->stats.rx_packets += rx_status.inb_packets;
		dev->stats.tx_packets += tx_status.packets;
		dev->stats.rx_bytes += rx_status.inb_octets;
		dev->stats.tx_bytes += tx_status.octets;
		dev->stats.multicast += rx_status.multicast_packets;
		dev->stats.rx_crc_errors += rx_status.inb_errors;
		dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		dev->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &dev->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	int vlan_bytes = VLAN_HLEN;
#else
	int vlan_bytes = 0;
#endif
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int index = INDEX(priv->port);
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + mtu_overhead;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		int index = INDEX(priv->port);

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

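/*
 * Program dev->dev_addr into the GMX SMAC register and the receive-side
 * address CAM so the hardware accepts frames addressed to this device.
 * The port is briefly disabled while the filter is updated.
 */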
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		const u8 *ptr = dev->dev_addr;
		u64 mac = 0;
		int index = INDEX(priv->port);

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int ret;

	ret = of_get_ethdev_address(priv->of_node, dev);
	if (ret)
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1) &&
	    (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev_set_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

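/* Per network device uninitialization: detach the PHY if one is connected. */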
void cvm_oct_common_uninit(struct net_device *dev)
{
	if (dev->phydev)
		phy_disconnect(dev->phydev);
}

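/*
 * Common ndo_open helper: bring up the PHY and enable the GMX port, set
 * the initial carrier state, and save @link_poll for link monitoring on
 * ports without an attached PHY.
 */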
int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	union cvmx_helper_link_info link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

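/* Poll the hardware link state and propagate changes to the netif carrier. */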
void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_helper_link_info link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	if (cvmx_helper_link_set(priv->port, link_info))
		link_info.u64 = priv->link_info;
	else
		priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

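/*
 * Find the child node of @parent whose "reg" property matches @reg_val.
 * The caller owns the reference on the returned node; NULL if no child
 * matches.
 */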
static struct device_node *cvm_oct_of_get_child(const struct device_node *parent,
						int reg_val)
{
	struct device_node *node;
	const __be32 *addr;
	int size;

	for_each_child_of_node(parent, node) {
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

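/* Map (interface, port) to its device tree node under the pip node. */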
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

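/*
 * Select the RGMII phy_mode from the optional "rx-delay" and "tx-delay"
 * device tree properties, programming the ASX clock delay registers when
 * a value is given.
 */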
static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
				int port)
{
	struct device_node *np = priv->of_node;
	u32 delay_value;
	bool rx_delay;
	bool tx_delay;

	/*
	 * By default, both RX/TX delay is enabled in
	 * __cvmx_helper_rgmii_enable().
	 */
	rx_delay = true;
	tx_delay = true;

	if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
		rx_delay = delay_value > 0;
	}
	if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
		tx_delay = delay_value > 0;
	}

	if (!rx_delay && !tx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
	else if (!rx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
	else if (!tx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
	else
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
}

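/* Probe: configure packet I/O hardware and register a netdev per port. */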
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/*
				 * We support only 16 groups at the
				 * moment, so always disable the two
				 * additional "hidden" tag_mask bits
				 * on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
			} else {
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strscpy(dev->name, "pow%d", sizeof(dev->name));
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			priv->phy_mode = PHY_INTERFACE_MODE_NA;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strscpy(dev->name, "npi%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strscpy(dev->name, "xaui%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strscpy(dev->name, "loop%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strscpy(dev->name, "spi%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->phy_mode = PHY_INTERFACE_MODE_GMII;
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				cvm_set_rgmii_delay(priv, interface,
						    port_index);
				break;
			}

			if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
				if (of_phy_register_fixed_link(priv->of_node)) {
					netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
						   interface, priv->port);
					dev->netdev_ops = NULL;
				}
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
					cvmx_pko_get_num_queues(priv->port) *
					sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}

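/* Remove: stop packet I/O, unregister the netdevs, and free the HW pools. */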
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	cvmx_ipd_disable();

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe		= cvm_oct_probe,
	.remove		= cvm_oct_remove,
	.driver		= {
		.name		= KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_SOFTDEP("pre: mdio-cavium");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");