#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/netpoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
#include <net/xdp.h>

#include <linux/mutex.h>
#include <linux/rtnetlink.h>

#include "hyperv_net.h"

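/* Run the attached XDP program on one received packet.
 *
 * The packet described by nvchan->rsc is copied into a freshly allocated
 * page so the program sees a linear buffer with NETVSC_XDP_HDRM bytes of
 * headroom. On XDP_PASS and XDP_TX the copy is kept and @xdp describes it;
 * on XDP_REDIRECT the buffer is handed to xdp_do_redirect(); in all other
 * cases the page is freed before returning. Returns the XDP verdict.
 */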
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
		   struct xdp_buff *xdp)
{
	struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
	void *data = nvchan->rsc.data[0];
	u32 len = nvchan->rsc.len[0];
	struct page *page = NULL;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;
	bool drop = true;

	xdp->data_hard_start = NULL;

	rcu_read_lock();
	prog = rcu_dereference(nvchan->bpf_prog);

	if (!prog)
		goto out;

	/* Ensure that the memcpy() below cannot overflow the page buffer */
	if (len > ndev->mtu + ETH_HLEN) {
		act = XDP_DROP;
		goto out;
	}

	/* allocate a page buffer to hold a copy of the packet data */
	page = alloc_page(GFP_ATOMIC);
	if (!page) {
		act = XDP_DROP;
		goto out;
	}

	xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
	xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);

	memcpy(xdp->data, data, len);

	act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
		drop = false;
		break;

	case XDP_DROP:
		break;

	case XDP_REDIRECT:
		if (!xdp_do_redirect(ndev, xdp, prog)) {
			nvchan->xdp_flush = true;
			drop = false;

			u64_stats_update_begin(&rx_stats->syncp);

			rx_stats->xdp_redirect++;
			rx_stats->packets++;
			rx_stats->bytes += nvchan->rsc.pktlen;

			u64_stats_update_end(&rx_stats->syncp);

			break;
		} else {
			u64_stats_update_begin(&rx_stats->syncp);
			rx_stats->xdp_drop++;
			u64_stats_update_end(&rx_stats->syncp);
		}

		fallthrough;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
	}

out:
	rcu_read_unlock();

	if (page && drop) {
		__free_page(page);
		xdp->data_hard_start = NULL;
	}

	return act;
}

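/* Size needed in an skb data area for @len bytes of packet data plus the
 * trailing skb_shared_info, each rounded up for alignment.
 */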
unsigned int netvsc_xdp_fraglen(unsigned int len)
{
	return SKB_DATA_ALIGN(len) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

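/* Return the XDP program currently installed on the device (RTNL held) */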
struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
{
	return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
}

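/* Install @prog on every channel of @nvdev, or remove the existing program
 * when @prog is NULL. Rejects the program if a copied frame would not fit
 * in a single page at the current MTU, or if LRO is enabled. Called with
 * RTNL held.
 */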
int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack,
		   struct netvsc_device *nvdev)
{
	struct bpf_prog *old_prog;
	int buf_max, i;

	old_prog = netvsc_xdp_get(nvdev);

	if (!old_prog && !prog)
		return 0;

	buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
	if (prog && buf_max > PAGE_SIZE) {
		netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
			   dev->mtu, buf_max);
		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

		return -EOPNOTSUPP;
	}

	if (prog && (dev->features & NETIF_F_LRO)) {
		netdev_err(dev, "XDP: not supported while LRO is enabled\n");
		NL_SET_ERR_MSG_MOD(extack, "XDP: not supported while LRO is enabled");

		return -EOPNOTSUPP;
	}

	/* One reference on @prog comes with it from the caller and covers
	 * chan_table[0]; take num_chn - 1 additional references so that
	 * every channel slot holds its own.
	 */
	if (prog)
		bpf_prog_add(prog, nvdev->num_chn - 1);

	for (i = 0; i < nvdev->num_chn; i++)
		rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);

	if (old_prog)
		for (i = 0; i < nvdev->num_chn; i++)
			bpf_prog_put(old_prog);

	return 0;
}

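/* Mirror the XDP program onto the VF (accelerated networking) device, if
 * one is present and implements ndo_bpf. A reference on @prog is taken for
 * the VF and dropped again if the VF's ndo_bpf call fails.
 */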
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;
	int ret;

	ASSERT_RTNL();

	if (!vf_netdev)
		return 0;

	if (!vf_netdev->netdev_ops->ndo_bpf)
		return 0;

	memset(&xdp, 0, sizeof(xdp));

	if (prog)
		bpf_prog_inc(prog);

	xdp.command = XDP_SETUP_PROG;
	xdp.prog = prog;

	ret = vf_netdev->netdev_ops->ndo_bpf(vf_netdev, &xdp);

	if (ret && prog)
		bpf_prog_put(prog);

	return ret;
}

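/* ndo_bpf handler: attach or detach an XDP program on the synthetic device
 * and propagate it to the VF device. If the VF rejects the program, the
 * synthetic device is rolled back to having no program.
 */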
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct net_device_context *ndevctx = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netlink_ext_ack *extack = bpf->extack;
	int ret;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);

		if (ret)
			return ret;

		ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);

		if (ret) {
			netdev_err(dev, "vf_setxdp failed:%d\n", ret);
			NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");

			netvsc_xdp_set(dev, NULL, extack, nvdev);
		}

		return ret;

	default:
		return -EINVAL;
	}
}

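/* Transmit one XDP frame on queue @q_idx by converting it to an skb and
 * handing it to netvsc_xdp_xmit().
 */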
static int netvsc_ndoxdp_xmit_fm(struct net_device *ndev,
				 struct xdp_frame *frame, u16 q_idx)
{
	struct sk_buff *skb;

	skb = xdp_build_skb_from_frame(frame, ndev);
	if (unlikely(!skb))
		return -ENOMEM;

	netvsc_get_hash(skb, netdev_priv(ndev));

	skb_record_rx_queue(skb, q_idx);

	netvsc_xdp_xmit(skb, ndev);

	return 0;
}

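/* ndo_xdp_xmit handler: transmit up to @n XDP frames. If the VF device is
 * running, has carrier and is the active data path, the frames are passed
 * to its ndo_xdp_xmit; otherwise they are sent on the synthetic device on
 * a queue chosen from the current CPU. Returns the number of frames sent.
 */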
int netvsc_ndoxdp_xmit(struct net_device *ndev, int n,
		       struct xdp_frame **frames, u32 flags)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	const struct net_device_ops *vf_ops;
	struct netvsc_stats_tx *tx_stats;
	struct netvsc_device *nvsc_dev;
	struct net_device *vf_netdev;
	int i, count = 0;
	u16 q_idx;

	/* Don't transmit if the netvsc device is gone or being destroyed */
	nvsc_dev = rcu_dereference_bh(ndev_ctx->nvdev);
	if (unlikely(!nvsc_dev || nvsc_dev->destroy))
		return 0;

	/* If the VF device is up, has carrier and is currently serving as
	 * the data path, forward the frames to the VF's own ndo_xdp_xmit
	 * instead of sending them over the synthetic device.
	 */
	vf_netdev = rcu_dereference_bh(ndev_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(ndev) &&
	    vf_netdev->netdev_ops->ndo_xdp_xmit &&
	    ndev_ctx->data_path_is_vf) {
		vf_ops = vf_netdev->netdev_ops;
		return vf_ops->ndo_xdp_xmit(vf_netdev, n, frames, flags);
	}

	q_idx = smp_processor_id() % ndev->real_num_tx_queues;

	for (i = 0; i < n; i++) {
		if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))
			break;

		count++;
	}

	tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->xdp_xmit += count;
	u64_stats_update_end(&tx_stats->syncp);

	return count;
}