
0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright (c) 2020, Intel Corporation. */
0003 
0004 #include <linux/if_vlan.h>
0005 #include <net/xdp_sock_drv.h>
0006 
0007 #include "igc.h"
0008 #include "igc_xdp.h"
0009 
0010 int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
0011              struct netlink_ext_ack *extack)
0012 {
0013     struct net_device *dev = adapter->netdev;
0014     bool if_running = netif_running(dev);
0015     struct bpf_prog *old_prog;
0016 
0017     if (dev->mtu > ETH_DATA_LEN) {
0018         /* For now, the driver doesn't support XDP functionality with
0019          * jumbo frames so we return error.
0020          */
0021         NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
0022         return -EOPNOTSUPP;
0023     }
0024 
0025     if (if_running)
0026         igc_close(dev);
0027 
0028     old_prog = xchg(&adapter->xdp_prog, prog);
0029     if (old_prog)
0030         bpf_prog_put(old_prog);
0031 
0032     if (if_running)
0033         igc_open(dev);
0034 
0035     return 0;
0036 }
0037 
0038 static int igc_xdp_enable_pool(struct igc_adapter *adapter,
0039                    struct xsk_buff_pool *pool, u16 queue_id)
0040 {
0041     struct net_device *ndev = adapter->netdev;
0042     struct device *dev = &adapter->pdev->dev;
0043     struct igc_ring *rx_ring, *tx_ring;
0044     struct napi_struct *napi;
0045     bool needs_reset;
0046     u32 frame_size;
0047     int err;
0048 
0049     if (queue_id >= adapter->num_rx_queues ||
0050         queue_id >= adapter->num_tx_queues)
0051         return -EINVAL;
0052 
0053     frame_size = xsk_pool_get_rx_frame_size(pool);
0054     if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
0055         /* When XDP is enabled, the driver doesn't support frames that
0056          * span over multiple buffers. To avoid that, we check if xsk
0057          * frame size is big enough to fit the max ethernet frame size
0058          * + vlan double tagging.
0059          */
0060         return -EOPNOTSUPP;
0061     }
0062 
0063     err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
0064     if (err) {
0065         netdev_err(ndev, "Failed to map xsk pool\n");
0066         return err;
0067     }
0068 
0069     needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
0070 
0071     rx_ring = adapter->rx_ring[queue_id];
0072     tx_ring = adapter->tx_ring[queue_id];
0073     /* Rx and Tx rings share the same napi context. */
0074     napi = &rx_ring->q_vector->napi;
0075 
0076     if (needs_reset) {
0077         igc_disable_rx_ring(rx_ring);
0078         igc_disable_tx_ring(tx_ring);
0079         napi_disable(napi);
0080     }
0081 
0082     set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
0083     set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
0084 
0085     if (needs_reset) {
0086         napi_enable(napi);
0087         igc_enable_rx_ring(rx_ring);
0088         igc_enable_tx_ring(tx_ring);
0089 
0090         err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
0091         if (err) {
0092             xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
0093             return err;
0094         }
0095     }
0096 
0097     return 0;
0098 }
0099 
0100 static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
0101 {
0102     struct igc_ring *rx_ring, *tx_ring;
0103     struct xsk_buff_pool *pool;
0104     struct napi_struct *napi;
0105     bool needs_reset;
0106 
0107     if (queue_id >= adapter->num_rx_queues ||
0108         queue_id >= adapter->num_tx_queues)
0109         return -EINVAL;
0110 
0111     pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
0112     if (!pool)
0113         return -EINVAL;
0114 
0115     needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
0116 
0117     rx_ring = adapter->rx_ring[queue_id];
0118     tx_ring = adapter->tx_ring[queue_id];
0119     /* Rx and Tx rings share the same napi context. */
0120     napi = &rx_ring->q_vector->napi;
0121 
0122     if (needs_reset) {
0123         igc_disable_rx_ring(rx_ring);
0124         igc_disable_tx_ring(tx_ring);
0125         napi_disable(napi);
0126     }
0127 
0128     xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
0129     clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
0130     clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
0131 
0132     if (needs_reset) {
0133         napi_enable(napi);
0134         igc_enable_rx_ring(rx_ring);
0135         igc_enable_tx_ring(tx_ring);
0136     }
0137 
0138     return 0;
0139 }
0140 
0141 int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
0142                u16 queue_id)
0143 {
0144     return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
0145               igc_xdp_disable_pool(adapter, queue_id);
0146 }