// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/net/ethernet/ec_bhf.c
 *
 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
 */

/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
 * It can be found on Beckhoff CX50xx industrial PCs.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/init.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/stat.h>

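/* The device has no interrupt line; it is polled every TIMER_INTERVAL_NSEC
 * (20 us by default, tunable via the polling_frequency module parameter).
 */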
#define TIMER_INTERVAL_NSEC	20000

#define INFO_BLOCK_SIZE		0x10
#define INFO_BLOCK_TYPE		0x0
#define INFO_BLOCK_REV		0x2
#define INFO_BLOCK_BLK_CNT	0x4
#define INFO_BLOCK_TX_CHAN	0x4
#define INFO_BLOCK_RX_CHAN	0x5
#define INFO_BLOCK_OFFSET	0x8

#define EC_MII_OFFSET		0x4
#define EC_FIFO_OFFSET		0x8
#define EC_MAC_OFFSET		0xc

#define MAC_FRAME_ERR_CNT	0x0
#define MAC_RX_ERR_CNT		0x1
#define MAC_CRC_ERR_CNT		0x2
#define MAC_LNK_LST_ERR_CNT	0x3
#define MAC_TX_FRAME_CNT	0x10
#define MAC_RX_FRAME_CNT	0x14
#define MAC_TX_FIFO_LVL		0x20
#define MAC_DROPPED_FRMS	0x28
#define MAC_CONNECTED_CCAT_FLAG	0x78

#define MII_MAC_ADDR		0x8
#define MII_MAC_FILT_FLAG	0xe
#define MII_LINK_STATUS		0xf

#define FIFO_TX_REG		0x0
#define FIFO_TX_RESET		0x8
#define FIFO_RX_REG		0x10
#define FIFO_RX_ADDR_VALID	(1u << 31)
#define FIFO_RX_RESET		0x18

#define DMA_CHAN_OFFSET		0x1000
#define DMA_CHAN_SIZE		0x8

#define DMA_WINDOW_SIZE_MASK	0xfffffffc

#define ETHERCAT_MASTER_ID	0x14

static const struct pci_device_id ids[] = {
	{ PCI_DEVICE(0x15ec, 0x5000), },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);

struct rx_header {
#define RXHDR_NEXT_ADDR_MASK	0xffffffu
#define RXHDR_NEXT_VALID	(1u << 31)
	__le32 next;
#define RXHDR_NEXT_RECV_FLAG	0x1
	__le32 recv;
#define RXHDR_LEN_MASK		0xfffu
	__le16 len;
	__le16 port;
	__le32 reserved;
	u8 timestamp[8];
} __packed;

#define PKT_PAYLOAD_SIZE	0x7e8
struct rx_desc {
	struct rx_header header;
	u8 data[PKT_PAYLOAD_SIZE];
} __packed;

struct tx_header {
	__le16 len;
#define TX_HDR_PORT_0		0x1
#define TX_HDR_PORT_1		0x2
	u8 port;
	u8 ts_enable;
#define TX_HDR_SENT		0x1
	__le32 sent;
	u8 timestamp[8];
} __packed;

struct tx_desc {
	struct tx_header header;
	u8 data[PKT_PAYLOAD_SIZE];
} __packed;

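/* Upper bound on the number of RX/TX descriptors; the actual ring sizes are
 * additionally limited by the device's DMA window, see ec_bhf_alloc_dma_mem().
 */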
#define FIFO_SIZE		64

static long polling_frequency = TIMER_INTERVAL_NSEC;

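/* A DMA buffer as the hardware sees it (buf/buf_phys, aligned to the DMA
 * window), carved out of a twice-as-large coherent allocation
 * (alloc/alloc_phys); see ec_bhf_alloc_dma_mem().
 */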
struct bhf_dma {
	u8 *buf;
	size_t len;
	dma_addr_t buf_phys;

	u8 *alloc;
	size_t alloc_len;
	dma_addr_t alloc_phys;
};

struct ec_bhf_priv {
	struct net_device *net_dev;
	struct pci_dev *dev;

	void __iomem *io;
	void __iomem *dma_io;

	struct hrtimer hrtimer;

	int tx_dma_chan;
	int rx_dma_chan;
	void __iomem *ec_io;
	void __iomem *fifo_io;
	void __iomem *mii_io;
	void __iomem *mac_io;

	struct bhf_dma rx_buf;
	struct rx_desc *rx_descs;
	int rx_dnext;
	int rx_dcount;

	struct bhf_dma tx_buf;
	struct tx_desc *tx_descs;
	int tx_dcount;
	int tx_dnext;

	u64 stat_rx_bytes;
	u64 stat_tx_bytes;
};

#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)

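/* Clear the MAC error/statistics counters and reset both hardware FIFOs. */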
static void ec_bhf_reset(struct ec_bhf_priv *priv)
{
	iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
	iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
	iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
	iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
	iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);

	iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
	iowrite8(0, priv->fifo_io + FIFO_RX_RESET);

	iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
}

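/* Start transmission of a descriptor: the TX FIFO register takes the
 * descriptor's byte offset within the DMA buffer, with the 8-byte-aligned
 * frame length packed into the upper bits.
 */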
static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
{
	u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
	u32 addr = (u8 *)desc - priv->tx_buf.buf;

	iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
}

static int ec_bhf_desc_sent(struct tx_desc *desc)
{
	return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
}

static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
{
	if (unlikely(netif_queue_stopped(priv->net_dev))) {
		/* Make sure that we perceive changes to tx_dnext. */
		smp_rmb();

		if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
			netif_wake_queue(priv->net_dev);
	}
}

static int ec_bhf_pkt_received(struct rx_desc *desc)
{
	return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
}

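/* Hand a descriptor, identified by its byte offset within the DMA buffer,
 * back to the RX FIFO so the hardware can fill it again.
 */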
static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
{
	iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
		  priv->fifo_io + FIFO_RX_REG);
}

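/* Walk the RX ring while descriptors are flagged as received. The length
 * reported by the hardware includes the descriptor header and what appears
 * to be a trailing 4-byte frame checksum; both are stripped here.
 */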
static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
{
	struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];

	while (ec_bhf_pkt_received(desc)) {
		int pkt_size = (le16_to_cpu(desc->header.len) &
			       RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
		u8 *data = desc->data;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
		if (skb) {
			skb_put_data(skb, data, pkt_size);
			skb->protocol = eth_type_trans(skb, priv->net_dev);
			priv->stat_rx_bytes += pkt_size;

			netif_rx(skb);
		} else {
			dev_err_ratelimited(PRIV_TO_DEV(priv),
					    "Couldn't allocate an sk_buff for a packet of size %u\n",
					    pkt_size);
		}

		desc->header.recv = 0;

		ec_bhf_add_rx_desc(priv, desc);

		priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
		desc = &priv->rx_descs[priv->rx_dnext];
	}
}

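/* There is no interrupt line, so RX and TX completions are polled from this
 * periodically rearmed hrtimer.
 */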
static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
{
	struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
						hrtimer);
	ec_bhf_process_rx(priv);
	ec_bhf_process_tx(priv);

	if (!netif_running(priv->net_dev))
		return HRTIMER_NORESTART;

	hrtimer_forward_now(timer, polling_frequency);
	return HRTIMER_RESTART;
}

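/* BAR 0 starts with a table of info blocks. Scan it for the EtherCAT master
 * block, then derive the DMA channel numbers and the MII/FIFO/MAC register
 * sub-window offsets from it.
 */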
static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
	struct device *dev = PRIV_TO_DEV(priv);
	unsigned int block_count, i;
	void __iomem *ec_info;

	block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
	for (i = 0; i < block_count; i++) {
		u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
				    INFO_BLOCK_TYPE);
		if (type == ETHERCAT_MASTER_ID)
			break;
	}
	if (i == block_count) {
		dev_err(dev, "EtherCAT master with DMA block not found\n");
		return -ENODEV;
	}

	ec_info = priv->io + i * INFO_BLOCK_SIZE;

	priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
	priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);

	priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
	priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
	priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
	priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);

	return 0;
}

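/* Copy the frame into the next free TX descriptor and kick the FIFO. The
 * ring is considered full when the descriptor after the one just queued has
 * not been sent yet.
 */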
static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct tx_desc *desc;
	unsigned int len;

	desc = &priv->tx_descs[priv->tx_dnext];

	skb_copy_and_csum_dev(skb, desc->data);
	len = skb->len;

	memset(&desc->header, 0, sizeof(desc->header));
	desc->header.len = cpu_to_le16(len);
	desc->header.port = TX_HDR_PORT_0;

	ec_bhf_send_packet(priv, desc);

	priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;

	if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
		/* Make sure that updates to tx_dnext are perceived
		 * by the timer routine.
		 */
		smp_wmb();

		netif_stop_queue(net_dev);
	}

	priv->stat_tx_bytes += len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

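/* Program one DMA window. Writing all-ones to the channel's address register
 * and reading it back yields a mask describing the window's size and
 * alignment; the window base programmed below must satisfy that mask.
 */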
static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
				struct bhf_dma *buf,
				int channel,
				int size)
{
	int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
	struct device *dev = PRIV_TO_DEV(priv);
	u32 mask;

	iowrite32(0xffffffff, priv->dma_io + offset);

	mask = ioread32(priv->dma_io + offset);
	mask &= DMA_WINDOW_SIZE_MASK;

	/* We need a buffer that is aligned to the window mask read back
	 * above and no larger than the window itself. Allocating twice
	 * the needed size guarantees that a suitably aligned region of
	 * buf->len bytes can be carved out of the allocation.
	 */
	buf->len = min_t(int, ~mask + 1, size);
	buf->alloc_len = 2 * buf->len;

	buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
					GFP_KERNEL);
	if (buf->alloc == NULL) {
		dev_err(dev, "Failed to allocate buffer\n");
		return -ENOMEM;
	}

	buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
	buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);

	iowrite32(0, priv->dma_io + offset + 4);
	iowrite32(buf->buf_phys, priv->dma_io + offset);

	return 0;
}

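/* Carve the TX DMA buffer into descriptors and mark them all as already
 * sent, so the first transmissions find a free ring.
 */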
static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
{
	int i;

	priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
	priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf;
	priv->tx_dnext = 0;

	for (i = 0; i < priv->tx_dcount; i++)
		priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
}

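/* Carve the RX DMA buffer into descriptors, chain them via header.next
 * (the last descriptor wraps back to offset 0) and hand each of them to
 * the RX FIFO.
 */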
static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
{
	int i;

	priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
	priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf;
	priv->rx_dnext = 0;

	for (i = 0; i < priv->rx_dcount; i++) {
		struct rx_desc *desc = &priv->rx_descs[i];
		u32 next;

		if (i != priv->rx_dcount - 1)
			next = (u8 *)(desc + 1) - priv->rx_buf.buf;
		else
			next = 0;
		next |= RXHDR_NEXT_VALID;
		desc->header.next = cpu_to_le32(next);
		desc->header.recv = 0;
		ec_bhf_add_rx_desc(priv, desc);
	}
}

static int ec_bhf_open(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);
	int err = 0;

	ec_bhf_reset(priv);

	err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
				   FIFO_SIZE * sizeof(struct rx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate rx buffer\n");
		goto out;
	}
	ec_bhf_setup_rx_descs(priv);

	err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
				   FIFO_SIZE * sizeof(struct tx_desc));
	if (err) {
		dev_err(dev, "Failed to allocate tx buffer\n");
		goto error_rx_free;
	}
	iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
	ec_bhf_setup_tx_descs(priv);

	netif_start_queue(net_dev);

	hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->hrtimer.function = ec_bhf_timer_fun;
	hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);

	return 0;

error_rx_free:
	dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
			  priv->rx_buf.alloc_phys);
out:
	return err;
}

static int ec_bhf_stop(struct net_device *net_dev)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);
	struct device *dev = PRIV_TO_DEV(priv);

	hrtimer_cancel(&priv->hrtimer);

	ec_bhf_reset(priv);

	netif_tx_disable(net_dev);

	dma_free_coherent(dev, priv->tx_buf.alloc_len,
			  priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
	dma_free_coherent(dev, priv->rx_buf.alloc_len,
			  priv->rx_buf.alloc, priv->rx_buf.alloc_phys);

	return 0;
}

static void
ec_bhf_get_stats(struct net_device *net_dev,
		 struct rtnl_link_stats64 *stats)
{
	struct ec_bhf_priv *priv = netdev_priv(net_dev);

	stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
				ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
				ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
	stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
	stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
	stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);

	stats->tx_bytes = priv->stat_tx_bytes;
	stats->rx_bytes = priv->stat_rx_bytes;
}

static const struct net_device_ops ec_bhf_netdev_ops = {
	.ndo_start_xmit		= ec_bhf_start_xmit,
	.ndo_open		= ec_bhf_open,
	.ndo_stop		= ec_bhf_stop,
	.ndo_get_stats64	= ec_bhf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr
};

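/* BAR 0 holds the info blocks and register sub-windows; BAR 2 holds the
 * per-channel DMA window configuration. The MAC address is read from the
 * MII register window discovered in ec_bhf_setup_offsets().
 */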
static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct net_device *net_dev;
	struct ec_bhf_priv *priv;
	void __iomem *dma_io;
	u8 addr[ETH_ALEN];
	void __iomem *io;
	int err = 0;

	err = pci_enable_device(dev);
	if (err)
		return err;

	pci_set_master(dev);

	err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&dev->dev,
			"Required dma mask not supported, failed to initialize device\n");
		goto err_disable_dev;
	}

	err = pci_request_regions(dev, "ec_bhf");
	if (err) {
		dev_err(&dev->dev, "Failed to request pci memory regions\n");
		goto err_disable_dev;
	}

	io = pci_iomap(dev, 0, 0);
	if (!io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 0\n");
		err = -EIO;
		goto err_release_regions;
	}

	dma_io = pci_iomap(dev, 2, 0);
	if (!dma_io) {
		dev_err(&dev->dev, "Failed to map pci card memory bar 2\n");
		err = -EIO;
		goto err_unmap;
	}

	net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto err_unmap_dma_io;
	}

	pci_set_drvdata(dev, net_dev);
	SET_NETDEV_DEV(net_dev, &dev->dev);

	net_dev->features = 0;
	net_dev->flags |= IFF_NOARP;

	net_dev->netdev_ops = &ec_bhf_netdev_ops;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->io = io;
	priv->dma_io = dma_io;
	priv->dev = dev;

	err = ec_bhf_setup_offsets(priv);
	if (err < 0)
		goto err_free_net_dev;

	memcpy_fromio(addr, priv->mii_io + MII_MAC_ADDR, ETH_ALEN);
	eth_hw_addr_set(net_dev, addr);

	err = register_netdev(net_dev);
	if (err < 0)
		goto err_free_net_dev;

	return 0;

err_free_net_dev:
	free_netdev(net_dev);
err_unmap_dma_io:
	pci_iounmap(dev, dma_io);
err_unmap:
	pci_iounmap(dev, io);
err_release_regions:
	pci_release_regions(dev);
err_disable_dev:
	pci_clear_master(dev);
	pci_disable_device(dev);

	return err;
}

static void ec_bhf_remove(struct pci_dev *dev)
{
	struct net_device *net_dev = pci_get_drvdata(dev);
	struct ec_bhf_priv *priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);

	pci_iounmap(dev, priv->dma_io);
	pci_iounmap(dev, priv->io);

	free_netdev(net_dev);

	pci_release_regions(dev);
	pci_clear_master(dev);
	pci_disable_device(dev);
}

static struct pci_driver pci_driver = {
	.name		= "ec_bhf",
	.id_table	= ids,
	.probe		= ec_bhf_probe,
	.remove		= ec_bhf_remove,
};
module_pci_driver(pci_driver);

module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling interval in ns");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");