/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * PCIe NTB Network Linux driver
 */
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>

#define NTB_NETDEV_VER	"0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
/* Time in usecs for the tx resource reaper timer */
static unsigned int tx_time = 1;

/* Number of free tx entries required before the queue is restarted */
static unsigned int tx_start = 10;

/* Number of free tx entries below which the queue is stopped */
static unsigned int tx_stop = 5;
struct ntb_netdev {
	struct pci_dev *pdev;
	struct net_device *ndev;
	struct ntb_transport_qp *qp;
	struct timer_list tx_timer;
};

#define NTB_TX_TIMEOUT_MS	1000
#define NTB_RXQ_SIZE		100
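/*
 * Link event callback from the NTB transport: the emulated carrier is only
 * reported as up once both the NTB link and the transport queue are up.
 */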
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
	struct net_device *ndev = data;
	struct ntb_netdev *dev = netdev_priv(ndev);

	netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
		   ntb_transport_link_query(dev->qp));

	if (link_is_up) {
		if (ntb_transport_link_query(dev->qp))
			netif_carrier_on(ndev);
	} else {
		netif_carrier_off(ndev);
	}
}
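/*
 * Receive completion callback: hand the filled skb up to the network stack
 * and post a fresh buffer to the transport queue in its place.  A negative
 * length indicates a receive error, in which case the same skb is re-posted.
 */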
static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	int rc;

	skb = data;
	if (!skb)
		return;

	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);

	if (len < 0) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_length_errors++;
		goto enqueue_again;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (__netif_rx(skb) == NET_RX_DROP) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	} else {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}

	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
	if (!skb) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_frame_errors++;
		return;
	}

enqueue_again:
	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
	if (rc) {
		dev_kfree_skb(skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_fifo_errors++;
	}
}
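/*
 * Tx flow control: the queue is stopped once fewer than tx_stop free entries
 * remain on the transport queue and woken again when at least tx_start are
 * free.  The tx_timer (period tx_time usecs) re-checks for free entries in
 * case no further tx completions arrive to wake the queue.
 */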
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
				      struct ntb_transport_qp *qp, int size)
{
	struct ntb_netdev *dev = netdev_priv(netdev);

	netif_stop_queue(netdev);

	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
	 * since the queue was last started.
	 */
	smp_mb();

	if (likely(ntb_transport_tx_free_entry(qp) < size)) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
		return -EBUSY;
	}

	netif_start_queue(netdev);
	return 0;
}
static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
				    struct ntb_transport_qp *qp, int size)
{
	if (netif_queue_stopped(ndev) ||
	    (ntb_transport_tx_free_entry(qp) >= size))
		return 0;

	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
}
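/*
 * Transmit completion callback: update the stats, free the skb, and wake the
 * queue once enough tx entries have been freed.
 */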
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	struct ntb_netdev *dev = netdev_priv(ndev);

	skb = data;
	if (!skb || !ndev)
		return;

	if (len > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
		ndev->stats.tx_aborted_errors++;
	}

	dev_kfree_skb(skb);

	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
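/*
 * ndo_start_xmit: hand the skb to the NTB transport.  On failure the skb is
 * not consumed and NETDEV_TX_BUSY is returned so the stack will requeue it.
 */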
static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	int rc;

	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
	if (rc)
		goto err;

	/* check for next submit */
	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	return NETDEV_TX_OK;

err:
	ndev->stats.tx_dropped++;
	ndev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}
static void ntb_netdev_tx_timer(struct timer_list *t)
{
	struct ntb_netdev *dev = from_timer(dev, t, tx_timer);
	struct net_device *ndev = dev->ndev;

	if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
	} else {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
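/*
 * ndo_open: pre-post NTB_RXQ_SIZE empty receive buffers, set up the tx reaper
 * timer, and bring the transport link up.  The carrier stays off until the
 * link-event callback reports that both sides of the queue are ready.
 */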
static int ntb_netdev_open(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int rc, i, len;

	/* Add some empty rx bufs */
	for (i = 0; i < NTB_RXQ_SIZE; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb) {
			rc = -ENOMEM;
			goto err;
		}

		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			dev_kfree_skb(skb);
			goto err;
		}
	}

	timer_setup(&dev->tx_timer, ntb_netdev_tx_timer, 0);

	netif_carrier_off(ndev);
	ntb_transport_link_up(dev->qp);
	netif_start_queue(ndev);

	return 0;

err:
	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);
	return rc;
}
static int ntb_netdev_close(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len;

	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	del_timer_sync(&dev->tx_timer);

	return 0;
}
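/*
 * ndo_change_mtu: the MTU is bounded by the transport's maximum payload size.
 * If the device is running and the MTU grows, the posted rx buffers are
 * reallocated at the new size, with the link taken down around the swap.
 */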
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len, rc;

	if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
		return -EINVAL;

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	ntb_transport_link_down(dev->qp);

	if (ndev->mtu < new_mtu) {
		int i;

		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
			dev_kfree_skb(skb);

		for (; i; i--) {
			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
			if (!skb) {
				rc = -ENOMEM;
				goto err;
			}

			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
						      new_mtu + ETH_HLEN);
			if (rc) {
				dev_kfree_skb(skb);
				goto err;
			}
		}
	}

	ndev->mtu = new_mtu;

	ntb_transport_link_up(dev->qp);

	return 0;

err:
	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}
static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};

static void ntb_get_drvinfo(struct net_device *ndev,
			    struct ethtool_drvinfo *info)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}

static int ntb_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);

	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = ntb_get_link_ksettings,
};

static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};
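/*
 * Probe one ntb_transport client device: allocate the ethernet device,
 * create a transport queue for it, size the MTU to the queue's maximum
 * payload, and register it with the networking core.
 */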
static int ntb_netdev_probe(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	int rc;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;
	if (!pdev)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(*dev));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, client_dev);

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->pdev = pdev;
	ndev->features = NETIF_F_HIGHDMA;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	ndev->hw_features = ndev->features;
	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

	eth_random_addr(ndev->perm_addr);
	dev_addr_set(ndev, ndev->perm_addr);

	ndev->netdev_ops = &ntb_netdev_ops;
	ndev->ethtool_ops = &ntb_ethtool_ops;

	ndev->min_mtu = 0;
	ndev->max_mtu = ETH_MAX_MTU;

	dev->qp = ntb_transport_create_queue(ndev, client_dev,
					     &ntb_netdev_handlers);
	if (!dev->qp) {
		rc = -EIO;
		goto err;
	}

	ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;

	rc = register_netdev(ndev);
	if (rc)
		goto err1;

	dev_set_drvdata(client_dev, ndev);
	dev_info(&pdev->dev, "%s created\n", ndev->name);
	return 0;

err1:
	ntb_transport_free_queue(dev->qp);
err:
	free_netdev(ndev);
	return rc;
}
static void ntb_netdev_remove(struct device *client_dev)
{
	struct net_device *ndev = dev_get_drvdata(client_dev);
	struct ntb_netdev *dev = netdev_priv(ndev);

	unregister_netdev(ndev);
	ntb_transport_free_queue(dev->qp);
	free_netdev(ndev);
}

static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};
static int __init ntb_netdev_init_module(void)
{
	int rc;

	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = ntb_transport_register_client(&ntb_netdev_client);
	if (rc) {
		/* Don't leak the client device registration on failure */
		ntb_transport_unregister_client_dev(KBUILD_MODNAME);
		return rc;
	}

	return 0;
}
module_init(ntb_netdev_init_module);

static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);