// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc */
/* Copyright (C) 2021 Corigine, Inc */

#include <linux/bpf_trace.h>
#include <linux/netdevice.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "../nfp_net_xsk.h"
#include "nfd3.h"

static bool
nfp_nfd3_xsk_tx_xdp(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		    struct nfp_net_rx_ring *rx_ring,
		    struct nfp_net_tx_ring *tx_ring,
		    struct nfp_net_xsk_rx_buf *xrxbuf, unsigned int pkt_len,
		    int pkt_off)
{
	struct xsk_buff_pool *pool = r_vec->xsk_pool;
	struct nfp_nfd3_tx_buf *txbuf;
	struct nfp_nfd3_tx_desc *txd;
	unsigned int wr_idx;

	if (nfp_net_tx_space(tx_ring) < 1)
		return false;

	xsk_buff_raw_dma_sync_for_device(pool, xrxbuf->dma_addr + pkt_off,
					 pkt_len);

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->xdp = xrxbuf->xdp;
	txbuf->real_len = pkt_len;
	txbuf->is_xsk_tx = true;

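	/* Build the TX descriptor, reusing the DMA mapping of the RX buffer. */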
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = NFD3_DESC_TX_EOP;
	txd->dma_len = cpu_to_le16(pkt_len);
	nfp_desc_set_dma_addr_40b(txd, xrxbuf->dma_addr + pkt_off);
	txd->data_len = cpu_to_le16(pkt_len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	tx_ring->wr_ptr_add++;
	tx_ring->wr_p++;

	return true;
}

static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
				const struct nfp_net_rx_desc *rxd,
				struct nfp_net_xsk_rx_buf *xrxbuf,
				const struct nfp_meta_parsed *meta,
				unsigned int pkt_len,
				bool meta_xdp,
				unsigned int *skbs_polled)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct net_device *netdev;
	struct sk_buff *skb;

	if (likely(!meta->portid)) {
		netdev = dp->netdev;
	} else {
		struct nfp_net *nn = netdev_priv(dp->netdev);

		netdev = nfp_app_dev_get(nn->app, meta->portid, NULL);
		if (unlikely(!netdev)) {
			nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			return;
		}
		nfp_repr_inc_rx_stats(netdev, pkt_len);
	}

	skb = napi_alloc_skb(&r_vec->napi, pkt_len);
	if (!skb) {
		nfp_net_xsk_rx_drop(r_vec, xrxbuf);
		return;
	}
	memcpy(skb_put(skb, pkt_len), xrxbuf->xdp->data, pkt_len);

	skb->mark = meta->mark;
	skb_set_hash(skb, meta->hash, meta->hash_type);

	skb_record_rx_queue(skb, rx_ring->idx);
	skb->protocol = eth_type_trans(skb, netdev);

	nfp_nfd3_rx_csum(dp, r_vec, rxd, meta, skb);

	if (unlikely(!nfp_net_vlan_strip(skb, rxd, meta))) {
		dev_kfree_skb_any(skb);
		nfp_net_xsk_rx_drop(r_vec, xrxbuf);
		return;
	}

	if (meta_xdp)
		skb_metadata_set(skb,
				 xrxbuf->xdp->data - xrxbuf->xdp->data_meta);

	napi_gro_receive(&rx_ring->r_vec->napi, skb);

	nfp_net_xsk_rx_free(xrxbuf);

	(*skbs_polled)++;
}

static unsigned int
nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
		unsigned int *skbs_polled)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	bool xdp_redir = false;
	int pkts_polled = 0;

	xdp_prog = READ_ONCE(dp->xdp_prog);
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, pkt_len, pkt_off;
		struct nfp_net_xsk_rx_buf *xrxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		int idx, act;

		idx = D_IDX(rx_ring, rx_ring->rd_p);

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
			break;

		rx_ring->rd_p++;
		pkts_polled++;

		xrxbuf = &rx_ring->xsk_rxbufs[idx];

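		/* No buffer was posted for this slot - the free list has run
		 * dry, warn and drop the frame.
		 */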
		if (rx_ring->rd_p >= rx_ring->wr_p) {
			nn_dp_warn(dp, "Starved of RX buffers\n");
			nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			break;
		}

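		/* Do not read the rest of the descriptor until after the DD
		 * bit check above.
		 */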
		dma_rmb();

		memset(&meta, 0, sizeof(meta));

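		/* The device prepends meta_len bytes of metadata to the frame;
		 * data_len covers metadata plus payload, so the payload length
		 * is data_len - meta_len.
		 */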
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		if (unlikely(meta_len > NFP_NET_MAX_PREPEND)) {
			nn_dp_warn(dp, "Oversized RX packet metadata %u\n",
				   meta_len);
			nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			continue;
		}

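		/* Account the frame in the per-vector RX stats. */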
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		xrxbuf->xdp->data += meta_len;
		xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
		xdp_set_data_meta_invalid(xrxbuf->xdp);
		xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
		net_prefetch(xrxbuf->xdp->data);

		if (meta_len) {
			if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
							 xrxbuf->xdp->data -
							 meta_len,
							 xrxbuf->xdp->data,
							 pkt_len, meta_len))) {
				nn_dp_warn(dp, "Invalid RX packet metadata\n");
				nfp_net_xsk_rx_drop(r_vec, xrxbuf);
				continue;
			}

			if (unlikely(meta.portid)) {
				struct nfp_net *nn = netdev_priv(dp->netdev);

				if (meta.portid != NFP_META_PORT_ID_CTRL) {
					nfp_nfd3_xsk_rx_skb(rx_ring, rxd,
							    xrxbuf, &meta,
							    pkt_len, false,
							    skbs_polled);
					continue;
				}

				nfp_app_ctrl_rx_raw(nn->app, xrxbuf->xdp->data,
						    pkt_len);
				nfp_net_xsk_rx_free(xrxbuf);
				continue;
			}
		}

		act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp);

		pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data;
		pkt_off = xrxbuf->xdp->data - xrxbuf->xdp->data_hard_start;

		switch (act) {
		case XDP_PASS:
			nfp_nfd3_xsk_rx_skb(rx_ring, rxd, xrxbuf, &meta, pkt_len,
					    true, skbs_polled);
			break;
		case XDP_TX:
			if (!nfp_nfd3_xsk_tx_xdp(dp, r_vec, rx_ring, tx_ring,
						 xrxbuf, pkt_len, pkt_off))
				nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			else
				nfp_net_xsk_rx_unstash(xrxbuf);
			break;
		case XDP_REDIRECT:
			if (xdp_do_redirect(dp->netdev, xrxbuf->xdp, xdp_prog)) {
				nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			} else {
				nfp_net_xsk_rx_unstash(xrxbuf);
				xdp_redir = true;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dp->netdev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			nfp_net_xsk_rx_drop(r_vec, xrxbuf);
			break;
		}
	}

	nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);

	if (xdp_redir)
		xdp_do_flush_map();

	if (tx_ring->wr_ptr_add)
		nfp_net_tx_xmit_more_flush(tx_ring);

	return pkts_polled;
}

void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf)
{
	xsk_buff_free(txbuf->xdp);

	txbuf->dma_addr = 0;
	txbuf->xdp = NULL;
}

static bool nfp_nfd3_xsk_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	u32 done_pkts = 0, done_bytes = 0, reused = 0;
	bool done_all;
	int idx, todo;
	u32 qcp_rd_p;

	if (tx_ring->wr_p == tx_ring->rd_p)
		return true;

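	/* Read the queue controller read pointer to work out how many
	 * descriptors the device has completed since the last pass.
	 */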
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return true;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
	todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);

	tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);

	done_pkts = todo;
	while (todo--) {
		struct nfp_nfd3_tx_buf *txbuf;

		idx = D_IDX(tx_ring, tx_ring->rd_p);
		tx_ring->rd_p++;

		txbuf = &tx_ring->txbufs[idx];
		if (unlikely(!txbuf->real_len))
			continue;

		done_bytes += txbuf->real_len;
		txbuf->real_len = 0;

		if (txbuf->is_xsk_tx) {
			nfp_nfd3_xsk_tx_free(txbuf);
			reused++;
		}
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

	return done_all;
}

static void nfp_nfd3_xsk_tx(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct xdp_desc desc[NFP_NET_XSK_TX_BATCH];
	struct xsk_buff_pool *xsk_pool;
	struct nfp_nfd3_tx_desc *txd;
	u32 pkts = 0, wr_idx;
	u32 i, got;

	xsk_pool = r_vec->xsk_pool;

	while (nfp_net_tx_space(tx_ring) >= NFP_NET_XSK_TX_BATCH) {
		for (i = 0; i < NFP_NET_XSK_TX_BATCH; i++)
			if (!xsk_tx_peek_desc(xsk_pool, &desc[i]))
				break;
		got = i;
		if (!got)
			break;

		wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);
		prefetchw(&tx_ring->txds[wr_idx]);

		for (i = 0; i < got; i++)
			xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr,
							 desc[i].len);

		for (i = 0; i < got; i++) {
			wr_idx = D_IDX(tx_ring, tx_ring->wr_p + i);

			tx_ring->txbufs[wr_idx].real_len = desc[i].len;
			tx_ring->txbufs[wr_idx].is_xsk_tx = false;

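			/* Build the TX descriptor for this AF_XDP TX frame. */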
			txd = &tx_ring->txds[wr_idx];
			nfp_desc_set_dma_addr_40b(txd,
						  xsk_buff_raw_get_dma(xsk_pool, desc[i].addr));
			txd->offset_eop = NFD3_DESC_TX_EOP;
			txd->dma_len = cpu_to_le16(desc[i].len);
			txd->data_len = cpu_to_le16(desc[i].len);
		}

		tx_ring->wr_p += got;
		pkts += got;
	}

	if (!pkts)
		return;

	xsk_tx_release(xsk_pool);
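	/* Make sure the TX descriptors are visible in memory before the
	 * device write pointer is advanced.
	 */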
	wmb();
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, pkts);
}

int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget)
{
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	unsigned int pkts_polled, skbs = 0;

	pkts_polled = nfp_nfd3_xsk_rx(r_vec->rx_ring, budget, &skbs);

	if (pkts_polled < budget) {
		if (r_vec->tx_ring)
			nfp_nfd3_tx_complete(r_vec->tx_ring, budget);

		if (!nfp_nfd3_xsk_complete(r_vec->xdp_ring))
			pkts_polled = budget;

		nfp_nfd3_xsk_tx(r_vec->xdp_ring);

		if (pkts_polled < budget && napi_complete_done(napi, skbs))
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
	}

	return pkts_polled;
}