#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/mm.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>

#include "mana.h"

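/* Transmit an skb on behalf of XDP: push the Ethernet header back in place,
 * lock the TX queue the skb is mapped to and hand it to mana_start_xmit().
 * If the transmit does not complete, the skb is freed and counted as dropped.
 */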
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
{
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct netdev_queue *ndevtxq;
	int rc;

	__skb_push(skb, ETH_HLEN);

	ndevtxq = netdev_get_tx_queue(ndev, txq_idx);
	__netif_tx_lock(ndevtxq, smp_processor_id());

	rc = mana_start_xmit(skb, ndev);

	__netif_tx_unlock(ndevtxq);

	if (dev_xmit_complete(rc))
		return;

	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
}

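/* Build an skb from one xdp_frame, map it to @q_idx and send it via
 * mana_xdp_tx(). Returns -ENOMEM if the skb cannot be built.
 */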
static int mana_xdp_xmit_fm(struct net_device *ndev, struct xdp_frame *frame,
			    u16 q_idx)
{
	struct sk_buff *skb;

	skb = xdp_build_skb_from_frame(frame, ndev);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_set_queue_mapping(skb, q_idx);

	mana_xdp_tx(skb, ndev);

	return 0;
}

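/* ndo_xdp_xmit handler: send up to @n XDP frames on a TX queue selected from
 * the current CPU. Stops at the first frame that fails to convert, updates
 * the queue's xdp_xmit counter and returns the number of frames sent.
 */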
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct mana_stats_tx *tx_stats;
	int i, count = 0;
	u16 q_idx;

	if (unlikely(!apc->port_is_up))
		return 0;

	q_idx = smp_processor_id() % ndev->real_num_tx_queues;

	for (i = 0; i < n; i++) {
		if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))
			break;

		count++;
	}

	tx_stats = &apc->tx_qp[q_idx].txq.stats;

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->xdp_xmit += count;
	u64_stats_update_end(&tx_stats->syncp);

	return count;
}

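/* Run the RX queue's XDP program, if any, on one received buffer and return
 * its verdict. XDP_REDIRECT is handled here: on success the RX stats are
 * updated and rxq->xdp_flush is set so the caller can flush redirects later;
 * on failure the packet is treated like XDP_ABORTED and traced as an
 * exception.
 */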
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len)
{
	struct mana_stats_rx *rx_stats;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	rcu_read_lock();
	prog = rcu_dereference(rxq->bpf_prog);

	if (!prog)
		goto out;

	xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
	xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);

	act = bpf_prog_run_xdp(prog, xdp);

	rx_stats = &rxq->stats;

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_DROP:
		break;

	case XDP_REDIRECT:
		rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
		if (!rxq->xdp_rc) {
			rxq->xdp_flush = true;

			u64_stats_update_begin(&rx_stats->syncp);
			rx_stats->packets++;
			rx_stats->bytes += pkt_len;
			rx_stats->xdp_redirect++;
			u64_stats_update_end(&rx_stats->syncp);

			break;
		}

		fallthrough;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
	}

out:
	rcu_read_unlock();

	return act;
}

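/* Buffer space needed for a fragment of @len bytes once the trailing
 * skb_shared_info is taken into account.
 */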
static unsigned int mana_xdp_fraglen(unsigned int len)
{
	return SKB_DATA_ALIGN(len) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

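/* Return the XDP program attached to the port; callers must hold rtnl_lock. */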
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
{
	ASSERT_RTNL();

	return apc->bpf_prog;
}

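/* Return the XDP program currently installed on the RX queues. */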
static struct bpf_prog *mana_chn_xdp_get(struct mana_port_context *apc)
{
	return rtnl_dereference(apc->rxqs[0]->bpf_prog);
}

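/* Install @prog on every RX queue of the port: take one reference per queue
 * on the new program and drop the per-queue references on the old one.
 */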
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog = mana_chn_xdp_get(apc);
	unsigned int num_queues = apc->num_queues;
	int i;

	ASSERT_RTNL();

	if (old_prog == prog)
		return;

	if (prog)
		bpf_prog_add(prog, num_queues);

	for (i = 0; i < num_queues; i++)
		rcu_assign_pointer(apc->rxqs[i]->bpf_prog, prog);

	if (old_prog)
		for (i = 0; i < num_queues; i++)
			bpf_prog_put(old_prog);
}

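/* XDP_SETUP_PROG handler: reject MTUs whose frame (headroom plus
 * skb_shared_info) would not fit in one page, then swap in the new program
 * and, if the port is up, propagate it to the RX queues.
 */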
static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	struct bpf_prog *old_prog;
	int buf_max;

	old_prog = mana_xdp_get(apc);

	if (!old_prog && !prog)
		return 0;

	buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
	if (prog && buf_max > PAGE_SIZE) {
		netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
			   ndev->mtu, buf_max);
		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

		return -EOPNOTSUPP;
	}

	/* The caller already holds a reference on @prog, which is transferred
	 * to apc->bpf_prog here, so no extra reference is taken.
	 */
	apc->bpf_prog = prog;

	if (old_prog)
		bpf_prog_put(old_prog);

	if (apc->port_is_up)
		mana_chn_setxdp(apc, prog);

	return 0;
}

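/* ndo_bpf handler; only XDP program setup is supported. */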
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct netlink_ext_ack *extack = bpf->extack;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return mana_xdp_set(ndev, bpf->prog, extack);

	default:
		return -EOPNOTSUPP;
	}
}