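// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 ****************************************************************************/
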
#include "net_driver.h"
#include <linux/module.h>
#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"

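/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */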
unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX;

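/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * This is only used in MSI-X interrupt mode.
 */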
unsigned int efx_siena_rss_cpus;

static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static const struct efx_channel_type efx_default_channel_type;

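/*************
 * INTERRUPTS
 *************/

/* Count the CPUs available for RSS, counting each group of hyperthread
 * siblings only once and optionally restricting the count to the CPUs
 * local to the NIC's PCI bus.
 */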
static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
	cpumask_var_t filter_mask;
	unsigned int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
		netif_warn(efx, probe, efx->net_dev,
			   "RSS disabled due to allocation failure\n");
		return 1;
	}

	cpumask_copy(filter_mask, cpu_online_mask);
	if (local_node)
		cpumask_and(filter_mask, filter_mask,
			    cpumask_of_pcibus(efx->pci_dev->bus));

	count = 0;
	for_each_cpu(cpu, filter_mask) {
		++count;
		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(filter_mask);

	return count;
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	unsigned int count;

	if (efx_siena_rss_cpus) {
		count = efx_siena_rss_cpus;
	} else {
		count = count_online_cores(efx, true);

		/* If no online CPUs in local node, fall back to any online CPUs */
		if (count == 0)
			count = count_online_cores(efx, false);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus,
			       warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs.
	 */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}

static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int tx_per_ev;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_siena_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels >= max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			   n_xdp_tx, n_channels, efx->max_vis);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_ev > max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);
		/* Fit as many XDP event queues as the remaining vectors allow */
		n_xdp_ev = max_channels - n_channels;
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
			   DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
	} else {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
	}

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = tx_per_ev;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_ev * tx_per_ev, n_xdp_ev);
	} else {
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = n_xdp_tx;
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_siena_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}

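/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */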
int efx_siena_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->tx_channel_offset = 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}

#if defined(CONFIG_SMP)
void efx_siena_set_interrupt_affinity(struct efx_nic *efx)
{
	const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
	struct efx_channel *channel;
	unsigned int cpu;

	/* If no online CPUs in local node, fall back to any online CPU */
	if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
		numa_mask = cpu_online_mask;

	cpu = -1;
	efx_for_each_channel(channel, efx) {
		cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first_and(cpu_online_mask, numa_mask);
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_siena_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}

void
efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}
#endif

void efx_siena_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

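/*************
 * EVENT QUEUES
 *************/

/* Create the channel's event queue.
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */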
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_siena_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_siena_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

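/*************
 * CHANNELS
 *************/
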
#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_siena_filter_rfs_expire(channel,
					min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif

/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);

	return channel;
}

int efx_siena_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_siena_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_siena_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}

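/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */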
static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_siena_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_siena_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_siena_remove_channel(channel);
	return rc;
}

static void efx_get_channel_name(struct efx_channel *channel, char *buf,
				 size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

void efx_siena_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

int efx_siena_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_siena_set_channel_names(efx);

	return 0;

fail:
	efx_siena_remove_channels(efx);
	return rc;
}

void efx_siena_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_siena_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_siena_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_siena_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_siena_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}

static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
				struct efx_tx_queue *tx_queue)
{
	if (xdp_queue_number >= efx->xdp_tx_queue_count)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev,
		  "Channel %u TXQ %u is XDP %u, HW %u\n",
		  tx_queue->channel->channel, tx_queue->label,
		  xdp_queue_number, tx_queue->queue);
	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
	return 0;
}

static void efx_set_xdp_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number = 0;
	int rc;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->tx_channel_offset)
			continue;

		if (efx_channel_is_xdp_tx(channel)) {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		} else {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				netif_dbg(efx, drv, efx->net_dev,
					  "Channel %u TXQ %u is HW %u\n",
					  channel->channel, tx_queue->label,
					  tx_queue->queue);
			}

			/* If XDP is borrowing queues from net stack, it must
			 * use the queue with no csum offload, which is the
			 * first one of the channel
			 * (note: channel->tx_queue_by_type is not initialized
			 * yet)
			 */
			if (efx->xdp_txq_queues_mode ==
			    EFX_XDP_TX_QUEUES_BORROWED) {
				tx_queue = &channel->tx_queue[0];
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		}
	}
	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number != efx->xdp_tx_queue_count);
	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number > efx->xdp_tx_queue_count);

	/* If we have more CPUs than assigned XDP TX queues, assign the already
	 * existing queues to the exceeding CPUs
	 */
	next_queue = 0;
	while (xdp_queue_number < efx->xdp_tx_queue_count) {
		tx_queue = efx->xdp_tx_queues[next_queue++];
		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
		if (rc == 0)
			xdp_queue_number++;
	}
}

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi_channel(struct efx_channel *channel);

int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
			       u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_siena_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

	efx_set_xdp_channels(efx);
out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_siena_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_siena_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
	goto out;
}

int efx_siena_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;
	}

	efx_set_xdp_channels(efx);

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}

static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}

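/*************
 * START/STOP
 *************/
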
static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_siena_start_eventq(channel);
	}

	efx_siena_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_siena_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_siena_mcdi_flush_async(efx);
}

int efx_siena_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_siena_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

void efx_siena_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel_rev(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_siena_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_siena_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_siena_stop_eventq(channel);
			efx_siena_fast_push_rx_descriptors(rx_queue, false);
			efx_siena_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_siena_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize but actually disable and
		 * re-enable NAPI.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_siena_stop_eventq(channel);
			efx_siena_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_siena_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_siena_fini_tx_queue(tx_queue);
	}
}

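/*************
 * NAPI
 *************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */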
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_siena_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

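/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides serialisation of access to the channel's rx_list.
 */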
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush_map();

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
}

void efx_siena_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_siena_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

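/*************
 * HOUSEKEEPING
 *************/
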
static int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_siena_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_siena_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};