// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"
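/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */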
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
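/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */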
#define EFX_RXD_HEAD_ROOM	(1 + EFX_RX_MAX_FRAGS)
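/* Check the RX page recycle ring for a page that can be reused. */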
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}
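/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used
 * in the descriptor ring and appearing in the recycle ring simultaneously.
 */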
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}
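/* Recycle the pages that are used by buffers that have just been received. */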
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	if (unlikely(!rx_queue->page_ring))
		return;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
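	/* Release the page reference we hold for the buffer. */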
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index, 0);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

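	/* Release RX buffers from the current read ptr to the write ptr */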
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

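/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */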
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

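/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */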
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   EFX_XDP_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      EFX_XDP_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

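/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->@max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practise,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */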
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

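/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */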
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH &&
	    efx_rx_buf_hash_valid(efx, eh))
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	}
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

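/* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */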
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx, *new;
	u32 id = 1; /* Don't use zero, that refers to the master RSS context */

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	/* Search for first gap in the numbering */
	list_for_each_entry(ctx, head, list) {
		if (ctx->user_id != id)
			break;
		id++;
		/* Check for wrap.  If this happens, we have nearly 2^32
		 * allocated RSS contexts, which seems unlikely.
		 */
		if (WARN_ON_ONCE(!id))
			return NULL;
	}

	/* Create the new entry */
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;
	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	new->rx_hash_udp_4tuple = false;

	/* Insert the new entry into the gap */
	new->user_id = id;
	list_add_tail(&new->list, &ctx->list);
	return new;
}

struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == id)
			return ctx;
	return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
	list_del(&ctx->list);
	kfree(ctx);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

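/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */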
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
}

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date.  Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed.  Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry?  We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
		channel->rps_flow_id = NULL;
	}
#endif
	efx->type->filter_table_remove(efx);
}

#ifdef CONFIG_RFS_ACCEL

static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = efx_netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
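		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */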
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
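		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work.  In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */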
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = efx_netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
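		/* Add it to ARFS hash table */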
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
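		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters.  This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */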
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
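		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */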
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif