// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/xdp.h>
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
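/* Preferred number of descriptors to fill at once */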
#define EFX_RX_PREFERRED_BATCH 8U
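/* Maximum RX prefix size used by any architecture */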
#define EFX_MAX_RX_PREFIX_SIZE 16
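/* Size of buffer allocated for the skb header area */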
#define EFX_SKB_HEADERS 128u
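/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */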
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)
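/* Check that the completed length fits within the RX buffer; overlength
 * packets are marked for discard.
 */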
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;
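	/* The packet overruns the buffer: mark it for discard and count it */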
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
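/* Allocate and construct an SKB around page fragments */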
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;
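	/* Allocate an SKB to store the headers */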
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}
	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);
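	/* Append the remaining page(s) onto the frag list */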
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					rx_buf->page, rx_buf->page_offset,
					rx_buf->len, efx->rx_buffer_truesize);
			rx_buf->page = NULL;

			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}
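	/* Move past the Ethernet header */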
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}
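/* Handle a received packet.  First half: does not touch the packet
 * payload; validates the fragment count and length, syncs the DMA
 * mappings, and queues the packet for the second half (__efx_rx_packet).
 */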
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;
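	/* Validate the number of fragments and completed length */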
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
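		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */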
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}
	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
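	/* Discard the packet if instructed to do so.  Process the
	 * previous receive first.
	 */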
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}
	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;
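	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * are consumed in-order per RX queue.
	 */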
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
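	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */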
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
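		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */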
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}
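	/* All fragments have been DMA-synced, so recycle pages */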
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);
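	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */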
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);
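	/* Set the SKB checksum flags */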
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;
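	/* Pass the packet up */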
	if (channel->rx_list != NULL)
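		/* Add to list, will pass up later */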
		list_add_tail(&skb->list, channel->rx_list);
	else
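		/* No list, so pass it up now */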
		netif_receive_skb(skb);
}
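/** efx_do_xdp: perform xdp processing on a received packet
 *
 * @efx: nic to perform xdp processing on
 * @channel: channel handling the packet
 * @rx_buf: receive buffer containing the packet
 * @ehp: pointer to the Ethernet header, updated if the XDP program
 *	adjusts the packet start
 *
 * Returns true if packet should still be delivered.
 */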
static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
		       struct efx_rx_buffer *rx_buf, u8 **ehp)
{
	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
	struct efx_rx_queue *rx_queue;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	struct xdp_buff xdp;
	u32 xdp_act;
	s16 offset;
	int err;

	xdp_prog = rcu_dereference_bh(efx->xdp_prog);
	if (!xdp_prog)
		return true;

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(channel->rx_pkt_n_frags > 1)) {
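		/* We can't do XDP on fragmented packets - drop. */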
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "XDP is not possible with multiple receive fragments (%d)\n",
				  channel->rx_pkt_n_frags);
		channel->n_rx_xdp_bad_drops++;
		return false;
	}

	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
				rx_buf->len, DMA_FROM_DEVICE);
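	/* Save the RX prefix so it can be restored after the XDP run */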
	EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
	       efx->rx_prefix_size);

	xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
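	/* No support yet for XDP metadata */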
	xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
			 rx_buf->len, false);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	offset = (u8 *)xdp.data - *ehp;

	switch (xdp_act) {
	case XDP_PASS:
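		/* Fix up the RX prefix if the program moved the data start */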
		if (offset) {
			*ehp += offset;
			rx_buf->page_offset += offset;
			rx_buf->len -= offset;
			memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
			       efx->rx_prefix_size);
		}
		break;

	case XDP_TX:
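		/* Buffer ownership passes to TX on success */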
		xdpf = xdp_convert_buff_to_frame(&xdp);
		err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
		if (unlikely(err != 1)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP TX failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_tx++;
		}
		break;

	case XDP_REDIRECT:
		err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP redirect failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_redirect++;
		}
		break;

	default:
		bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_bad_drops++;
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		break;

	case XDP_ABORTED:
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_drops++;
		break;
	}

	return xdp_act == XDP_PASS;
}
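/* Handle a received packet.  Second half: touches the packet payload. */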
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);
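	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */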
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));
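	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here.
	 */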
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (!efx_do_xdp(efx, channel, rx_buf, &eh))
		goto out;

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}