/*
 * AMD 10Gb Ethernet driver
 */
0117 #include <linux/module.h>
0118 #include <linux/spinlock.h>
0119 #include <linux/tcp.h>
0120 #include <linux/if_vlan.h>
0121 #include <linux/interrupt.h>
0122 #include <linux/clk.h>
0123 #include <linux/if_ether.h>
0124 #include <linux/net_tstamp.h>
0125 #include <linux/phy.h>
0126 #include <net/vxlan.h>
0127
0128 #include "xgbe.h"
0129 #include "xgbe-common.h"
0130
0131 static unsigned int ecc_sec_info_threshold = 10;
0132 static unsigned int ecc_sec_warn_threshold = 10000;
0133 static unsigned int ecc_sec_period = 600;
0134 static unsigned int ecc_ded_threshold = 2;
0135 static unsigned int ecc_ded_period = 600;
0136
0137 #ifdef CONFIG_AMD_XGBE_HAVE_ECC
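/* Only expose the ECC module parameters when ECC support is configured */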
0138
0139 module_param(ecc_sec_info_threshold, uint, 0644);
0140 MODULE_PARM_DESC(ecc_sec_info_threshold,
0141 " ECC corrected error informational threshold setting");
0142
0143 module_param(ecc_sec_warn_threshold, uint, 0644);
0144 MODULE_PARM_DESC(ecc_sec_warn_threshold,
0145 " ECC corrected error warning threshold setting");
0146
0147 module_param(ecc_sec_period, uint, 0644);
0148 MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");
0149
0150 module_param(ecc_ded_threshold, uint, 0644);
0151 MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");
0152
0153 module_param(ecc_ded_period, uint, 0644);
0154 MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
0155 #endif
0156
0157 static int xgbe_one_poll(struct napi_struct *, int);
0158 static int xgbe_all_poll(struct napi_struct *, int);
0159 static void xgbe_stop(struct xgbe_prv_data *);
0160
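/* Allocate zeroed memory, preferring the given NUMA node and falling back
 * to any node if the node-local allocation fails.
 */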
0161 static void *xgbe_alloc_node(size_t size, int node)
0162 {
0163 void *mem;
0164
0165 mem = kzalloc_node(size, GFP_KERNEL, node);
0166 if (!mem)
0167 mem = kzalloc(size, GFP_KERNEL);
0168
0169 return mem;
0170 }
0171
0172 static void xgbe_free_channels(struct xgbe_prv_data *pdata)
0173 {
0174 unsigned int i;
0175
0176 for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
0177 if (!pdata->channel[i])
0178 continue;
0179
0180 kfree(pdata->channel[i]->rx_ring);
0181 kfree(pdata->channel[i]->tx_ring);
0182 kfree(pdata->channel[i]);
0183
0184 pdata->channel[i] = NULL;
0185 }
0186
0187 pdata->channel_count = 0;
0188 }
0189
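/* Allocate one channel for each Tx/Rx ring, placing each channel and its
 * rings on the NUMA node of the CPU expected to service it.
 */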
0190 static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
0191 {
0192 struct xgbe_channel *channel;
0193 struct xgbe_ring *ring;
0194 unsigned int count, i;
0195 unsigned int cpu;
0196 int node;
0197
0198 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
0199 for (i = 0; i < count; i++) {
		/* Try to run this channel on a CPU local to the device's node */
		cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));

		/* Allocate the channel structures on that CPU's node */
		node = cpu_to_node(cpu);
0205
0206 channel = xgbe_alloc_node(sizeof(*channel), node);
0207 if (!channel)
0208 goto err_mem;
0209 pdata->channel[i] = channel;
0210
0211 snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
0212 channel->pdata = pdata;
0213 channel->queue_index = i;
0214 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
0215 (DMA_CH_INC * i);
0216 channel->node = node;
0217 cpumask_set_cpu(cpu, &channel->affinity_mask);
0218
0219 if (pdata->per_channel_irq)
0220 channel->dma_irq = pdata->channel_irq[i];
0221
0222 if (i < pdata->tx_ring_count) {
0223 ring = xgbe_alloc_node(sizeof(*ring), node);
0224 if (!ring)
0225 goto err_mem;
0226
0227 spin_lock_init(&ring->lock);
0228 ring->node = node;
0229
0230 channel->tx_ring = ring;
0231 }
0232
0233 if (i < pdata->rx_ring_count) {
0234 ring = xgbe_alloc_node(sizeof(*ring), node);
0235 if (!ring)
0236 goto err_mem;
0237
0238 spin_lock_init(&ring->lock);
0239 ring->node = node;
0240
0241 channel->rx_ring = ring;
0242 }
0243
0244 netif_dbg(pdata, drv, pdata->netdev,
0245 "%s: cpu=%u, node=%d\n", channel->name, cpu, node);
0246
0247 netif_dbg(pdata, drv, pdata->netdev,
0248 "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
0249 channel->name, channel->dma_regs, channel->dma_irq,
0250 channel->tx_ring, channel->rx_ring);
0251 }
0252
0253 pdata->channel_count = count;
0254
0255 return 0;
0256
0257 err_mem:
0258 xgbe_free_channels(pdata);
0259
0260 return -ENOMEM;
0261 }
0262
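/* Number of free descriptors in a Tx ring */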
0263 static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
0264 {
0265 return (ring->rdesc_count - (ring->cur - ring->dirty));
0266 }
0267
0268 static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
0269 {
0270 return (ring->cur - ring->dirty);
0271 }
0272
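/* Stop the Tx subqueue when the ring cannot hold the next packet; the
 * queue is restarted once enough descriptors have been reclaimed.
 */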
0273 static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
0274 struct xgbe_ring *ring, unsigned int count)
0275 {
0276 struct xgbe_prv_data *pdata = channel->pdata;
0277
0278 if (count > xgbe_tx_avail_desc(ring)) {
0279 netif_info(pdata, drv, pdata->netdev,
0280 "Tx queue stopped, not enough descriptors available\n");
0281 netif_stop_subqueue(pdata->netdev, channel->queue_index);
0282 ring->tx.queue_stopped = 1;
0283
0284
0285
0286
0287 if (ring->tx.xmit_more)
0288 pdata->hw_if.tx_start_xmit(channel, ring);
0289
0290 return NETDEV_TX_BUSY;
0291 }
0292
0293 return 0;
0294 }
0295
0296 static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
0297 {
0298 unsigned int rx_buf_size;
0299
0300 rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
0301 rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
0302
0303 rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
0304 ~(XGBE_RX_BUF_ALIGN - 1);
0305
0306 return rx_buf_size;
0307 }
0308
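/* Enable the DMA channel interrupt(s) matching the rings the channel owns */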
0309 static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
0310 struct xgbe_channel *channel)
0311 {
0312 struct xgbe_hw_if *hw_if = &pdata->hw_if;
0313 enum xgbe_int int_id;
0314
0315 if (channel->tx_ring && channel->rx_ring)
0316 int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
0317 else if (channel->tx_ring)
0318 int_id = XGMAC_INT_DMA_CH_SR_TI;
0319 else if (channel->rx_ring)
0320 int_id = XGMAC_INT_DMA_CH_SR_RI;
0321 else
0322 return;
0323
0324 hw_if->enable_int(channel, int_id);
0325 }
0326
0327 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
0328 {
0329 unsigned int i;
0330
0331 for (i = 0; i < pdata->channel_count; i++)
0332 xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
0333 }
0334
0335 static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
0336 struct xgbe_channel *channel)
0337 {
0338 struct xgbe_hw_if *hw_if = &pdata->hw_if;
0339 enum xgbe_int int_id;
0340
0341 if (channel->tx_ring && channel->rx_ring)
0342 int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
0343 else if (channel->tx_ring)
0344 int_id = XGMAC_INT_DMA_CH_SR_TI;
0345 else if (channel->rx_ring)
0346 int_id = XGMAC_INT_DMA_CH_SR_RI;
0347 else
0348 return;
0349
0350 hw_if->disable_int(channel, int_id);
0351 }
0352
0353 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
0354 {
0355 unsigned int i;
0356
0357 for (i = 0; i < pdata->channel_count; i++)
0358 xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
0359 }
0360
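/* Track single-bit (corrected) ECC errors over a time window; warn when the
 * informational or warning thresholds are crossed and return true once the
 * warning threshold has been exceeded.
 */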
0361 static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
0362 unsigned int *count, const char *area)
0363 {
0364 if (time_before(jiffies, *period)) {
0365 (*count)++;
0366 } else {
0367 *period = jiffies + (ecc_sec_period * HZ);
0368 *count = 1;
0369 }
0370
0371 if (*count > ecc_sec_info_threshold)
0372 dev_warn_once(pdata->dev,
0373 "%s ECC corrected errors exceed informational threshold\n",
0374 area);
0375
0376 if (*count > ecc_sec_warn_threshold) {
0377 dev_warn_once(pdata->dev,
0378 "%s ECC corrected errors exceed warning threshold\n",
0379 area);
0380 return true;
0381 }
0382
0383 return false;
0384 }
0385
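/* Track double-bit (detected) ECC errors over a time window; return true
 * once the error threshold has been exceeded.
 */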
0386 static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
0387 unsigned int *count, const char *area)
0388 {
0389 if (time_before(jiffies, *period)) {
0390 (*count)++;
0391 } else {
0392 *period = jiffies + (ecc_ded_period * HZ);
0393 *count = 1;
0394 }
0395
0396 if (*count > ecc_ded_threshold) {
0397 netdev_alert(pdata->netdev,
0398 "%s ECC detected errors exceed threshold\n",
0399 area);
0400 return true;
0401 }
0402
0403 return false;
0404 }
0405
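/* ECC interrupt bottom half: account corrected/detected errors per area,
 * disable the offending ECC reporting, and stop the device when detected
 * errors exceed their threshold.
 */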
0406 static void xgbe_ecc_isr_task(struct tasklet_struct *t)
0407 {
0408 struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
0409 unsigned int ecc_isr;
0410 bool stop = false;
0411
	/* Mask the status with only the interrupts we care about */
0413 ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
0414 ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
0415 netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);
0416
0417 if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
0418 stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
0419 &pdata->tx_ded_count, "TX fifo");
0420 }
0421
0422 if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
0423 stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
0424 &pdata->rx_ded_count, "RX fifo");
0425 }
0426
0427 if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
0428 stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
0429 &pdata->desc_ded_count,
0430 "descriptor cache");
0431 }
0432
0433 if (stop) {
0434 pdata->hw_if.disable_ecc_ded(pdata);
0435 schedule_work(&pdata->stopdev_work);
0436 goto out;
0437 }
0438
0439 if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
0440 if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
0441 &pdata->tx_sec_count, "TX fifo"))
0442 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
0443 }
0444
0445 if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
0446 if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
0447 &pdata->rx_sec_count, "RX fifo"))
0448 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);
0449
0450 if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
0451 if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
0452 &pdata->desc_sec_count, "descriptor cache"))
0453 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);
0454
0455 out:
	/* Clear all ECC interrupts */
0457 XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
0458
	/* Reissue interrupt if status is not clear */
0460 if (pdata->vdata->irq_reissue_support)
0461 XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
0462 }
0463
0464 static irqreturn_t xgbe_ecc_isr(int irq, void *data)
0465 {
0466 struct xgbe_prv_data *pdata = data;
0467
0468 if (pdata->isr_as_tasklet)
0469 tasklet_schedule(&pdata->tasklet_ecc);
0470 else
0471 xgbe_ecc_isr_task(&pdata->tasklet_ecc);
0472
0473 return IRQ_HANDLED;
0474 }
0475
0476 static void xgbe_isr_task(struct tasklet_struct *t)
0477 {
0478 struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
0479 struct xgbe_hw_if *hw_if = &pdata->hw_if;
0480 struct xgbe_channel *channel;
0481 unsigned int dma_isr, dma_ch_isr;
0482 unsigned int mac_isr, mac_tssr, mac_mdioisr;
0483 unsigned int i;
0484
	/* The DMA interrupt status register also reports MAC interrupts
	 * (MACIS), so if it reads zero there is nothing to service
	 */
0489 dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
0490 if (!dma_isr)
0491 goto isr_done;
0492
0493 netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
0494
0495 for (i = 0; i < pdata->channel_count; i++) {
0496 if (!(dma_isr & (1 << i)))
0497 continue;
0498
0499 channel = pdata->channel[i];
0500
0501 dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
0502 netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
0503 i, dma_ch_isr);
0504
		/* Without per channel interrupts, Tx and Rx work for this
		 * channel is serviced here through the common NAPI instance;
		 * otherwise the Tx/Rx bits are left to the per channel
		 * interrupt handler
		 */
0509 if (!pdata->per_channel_irq &&
0510 (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
0511 XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
0512 if (napi_schedule_prep(&pdata->napi)) {
0513
0514 xgbe_disable_rx_tx_ints(pdata);
0515
0516
0517 __napi_schedule(&pdata->napi);
0518 }
0519 } else {
			/* Per channel DMA interrupts are in use, so drop the
			 * Tx/Rx bits from the value written back below and
			 * let the per channel handler clear them
			 */
0524 XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
0525 XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
0526 }
0527
0528 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
0529 pdata->ext_stats.rx_buffer_unavailable++;
0530
		/* Restart the device on a Fatal Bus Error */
0532 if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
0533 schedule_work(&pdata->restart_work);
0534
		/* Clear interrupt signals */
0536 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
0537 }
0538
0539 if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
0540 mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
0541
0542 netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
0543 mac_isr);
0544
0545 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
0546 hw_if->tx_mmc_int(pdata);
0547
0548 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
0549 hw_if->rx_mmc_int(pdata);
0550
0551 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
0552 mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
0553
0554 netif_dbg(pdata, intr, pdata->netdev,
0555 "MAC_TSSR=%#010x\n", mac_tssr);
0556
0557 if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read the Tx timestamp to clear the interrupt */
0559 pdata->tx_tstamp =
0560 hw_if->get_tx_tstamp(pdata);
0561 queue_work(pdata->dev_workqueue,
0562 &pdata->tx_tstamp_work);
0563 }
0564 }
0565
0566 if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
0567 mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);
0568
0569 netif_dbg(pdata, intr, pdata->netdev,
0570 "MAC_MDIOISR=%#010x\n", mac_mdioisr);
0571
0572 if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
0573 SNGLCOMPINT))
0574 complete(&pdata->mdio_complete);
0575 }
0576 }
0577
0578 isr_done:
	/* If there is not a separate AN irq, handle it here */
0580 if (pdata->dev_irq == pdata->an_irq)
0581 pdata->phy_if.an_isr(pdata);
0582
	/* If there is not a separate ECC irq, handle it here */
0584 if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
0585 xgbe_ecc_isr_task(&pdata->tasklet_ecc);
0586
	/* If there is not a separate I2C irq, handle it here */
0588 if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
0589 pdata->i2c_if.i2c_isr(pdata);
0590
	/* Reissue interrupt if status is not clear */
0592 if (pdata->vdata->irq_reissue_support) {
0593 unsigned int reissue_mask;
0594
0595 reissue_mask = 1 << 0;
0596 if (!pdata->per_channel_irq)
0597 reissue_mask |= 0xffff << 4;
0598
0599 XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
0600 }
0601 }
0602
0603 static irqreturn_t xgbe_isr(int irq, void *data)
0604 {
0605 struct xgbe_prv_data *pdata = data;
0606
0607 if (pdata->isr_as_tasklet)
0608 tasklet_schedule(&pdata->tasklet_dev);
0609 else
0610 xgbe_isr_task(&pdata->tasklet_dev);
0611
0612 return IRQ_HANDLED;
0613 }
0614
0615 static irqreturn_t xgbe_dma_isr(int irq, void *data)
0616 {
0617 struct xgbe_channel *channel = data;
0618 struct xgbe_prv_data *pdata = channel->pdata;
0619 unsigned int dma_status;
0620
	/* Per channel DMA interrupts are enabled, so use the per channel
	 * NAPI instance rather than the device-wide one
	 */
0624 if (napi_schedule_prep(&channel->napi)) {
0625
0626 if (pdata->channel_irq_mode)
0627 xgbe_disable_rx_tx_int(pdata, channel);
0628 else
0629 disable_irq_nosync(channel->dma_irq);
0630
0631
0632 __napi_schedule_irqoff(&channel->napi);
0633 }
0634
0635
0636 dma_status = 0;
0637 XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
0638 XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
0639 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);
0640
0641 return IRQ_HANDLED;
0642 }
0643
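/* Tx activity timer: kick NAPI so completed Tx descriptors get processed
 * even when Tx interrupts are being coalesced.
 */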
0644 static void xgbe_tx_timer(struct timer_list *t)
0645 {
0646 struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
0647 struct xgbe_prv_data *pdata = channel->pdata;
0648 struct napi_struct *napi;
0649
0650 DBGPR("-->xgbe_tx_timer\n");
0651
0652 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
0653
0654 if (napi_schedule_prep(napi)) {
0655
0656 if (pdata->per_channel_irq)
0657 if (pdata->channel_irq_mode)
0658 xgbe_disable_rx_tx_int(pdata, channel);
0659 else
0660 disable_irq_nosync(channel->dma_irq);
0661 else
0662 xgbe_disable_rx_tx_ints(pdata);
0663
0664
0665 __napi_schedule(napi);
0666 }
0667
0668 channel->tx_timer_active = 0;
0669
0670 DBGPR("<--xgbe_tx_timer\n");
0671 }
0672
0673 static void xgbe_service(struct work_struct *work)
0674 {
0675 struct xgbe_prv_data *pdata = container_of(work,
0676 struct xgbe_prv_data,
0677 service_work);
0678
0679 pdata->phy_if.phy_status(pdata);
0680 }
0681
0682 static void xgbe_service_timer(struct timer_list *t)
0683 {
0684 struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
0685
0686 queue_work(pdata->dev_workqueue, &pdata->service_work);
0687
0688 mod_timer(&pdata->service_timer, jiffies + HZ);
0689 }
0690
0691 static void xgbe_init_timers(struct xgbe_prv_data *pdata)
0692 {
0693 struct xgbe_channel *channel;
0694 unsigned int i;
0695
0696 timer_setup(&pdata->service_timer, xgbe_service_timer, 0);
0697
0698 for (i = 0; i < pdata->channel_count; i++) {
0699 channel = pdata->channel[i];
0700 if (!channel->tx_ring)
0701 break;
0702
0703 timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
0704 }
0705 }
0706
0707 static void xgbe_start_timers(struct xgbe_prv_data *pdata)
0708 {
0709 mod_timer(&pdata->service_timer, jiffies + HZ);
0710 }
0711
0712 static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
0713 {
0714 struct xgbe_channel *channel;
0715 unsigned int i;
0716
0717 del_timer_sync(&pdata->service_timer);
0718
0719 for (i = 0; i < pdata->channel_count; i++) {
0720 channel = pdata->channel[i];
0721 if (!channel->tx_ring)
0722 break;
0723
0724
0725 del_timer_sync(&channel->tx_timer);
0726 channel->tx_timer_active = 0;
0727 }
0728 }
0729
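/* Read the MAC hardware feature registers and translate the raw fields into
 * usable values (fifo sizes, DMA width, queue/channel/TC counts).
 */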
0730 void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
0731 {
0732 unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
0733 struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
0734
0735 mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
0736 mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
0737 mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
0738
0739 memset(hw_feat, 0, sizeof(*hw_feat));
0740
0741 hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
0742
	/* Hardware feature register 0 */
0744 hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
0745 hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
0746 hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
0747 hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
0748 hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
0749 hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
0750 hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
0751 hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
0752 hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
0753 hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
0754 hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
0755 hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
0756 ADDMACADRSEL);
0757 hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
0758 hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
0759 hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);
0760
	/* Hardware feature register 1 */
0762 hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
0763 RXFIFOSIZE);
0764 hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
0765 TXFIFOSIZE);
0766 hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
0767 hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
0768 hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
0769 hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
0770 hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
0771 hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
0772 hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
0773 hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
0774 hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
0775 HASHTBLSZ);
0776 hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
0777 L3L4FNUM);
0778
	/* Hardware feature register 2 */
0780 hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
0781 hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
0782 hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
0783 hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
0784 hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
0785 hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
0786
	/* Translate the Hash Table size setting into an actual number */
0788 switch (hw_feat->hash_table_size) {
0789 case 0:
0790 break;
0791 case 1:
0792 hw_feat->hash_table_size = 64;
0793 break;
0794 case 2:
0795 hw_feat->hash_table_size = 128;
0796 break;
0797 case 3:
0798 hw_feat->hash_table_size = 256;
0799 break;
0800 }
0801
	/* Translate the address width setting into an actual number */
0803 switch (hw_feat->dma_width) {
0804 case 0:
0805 hw_feat->dma_width = 32;
0806 break;
0807 case 1:
0808 hw_feat->dma_width = 40;
0809 break;
0810 case 2:
0811 hw_feat->dma_width = 48;
0812 break;
0813 default:
0814 hw_feat->dma_width = 32;
0815 }
0816
	/* The Queue, Channel and TC counts are zero based, so increment them
	 * to get the actual number
	 */
0820 hw_feat->rx_q_cnt++;
0821 hw_feat->tx_q_cnt++;
0822 hw_feat->rx_ch_cnt++;
0823 hw_feat->tx_ch_cnt++;
0824 hw_feat->tc_cnt++;
0825
	/* Translate the fifo size settings into actual numbers */
0827 hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
0828 hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
0829
0830 if (netif_msg_probe(pdata)) {
0831 dev_dbg(pdata->dev, "Hardware features:\n");
0832
0833
0834 dev_dbg(pdata->dev, " 1GbE support : %s\n",
0835 hw_feat->gmii ? "yes" : "no");
0836 dev_dbg(pdata->dev, " VLAN hash filter : %s\n",
0837 hw_feat->vlhash ? "yes" : "no");
0838 dev_dbg(pdata->dev, " MDIO interface : %s\n",
0839 hw_feat->sma ? "yes" : "no");
0840 dev_dbg(pdata->dev, " Wake-up packet support : %s\n",
0841 hw_feat->rwk ? "yes" : "no");
0842 dev_dbg(pdata->dev, " Magic packet support : %s\n",
0843 hw_feat->mgk ? "yes" : "no");
0844 dev_dbg(pdata->dev, " Management counters : %s\n",
0845 hw_feat->mmc ? "yes" : "no");
0846 dev_dbg(pdata->dev, " ARP offload : %s\n",
0847 hw_feat->aoe ? "yes" : "no");
0848 dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n",
0849 hw_feat->ts ? "yes" : "no");
0850 dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n",
0851 hw_feat->eee ? "yes" : "no");
0852 dev_dbg(pdata->dev, " TX checksum offload : %s\n",
0853 hw_feat->tx_coe ? "yes" : "no");
0854 dev_dbg(pdata->dev, " RX checksum offload : %s\n",
0855 hw_feat->rx_coe ? "yes" : "no");
0856 dev_dbg(pdata->dev, " Additional MAC addresses : %u\n",
0857 hw_feat->addn_mac);
0858 dev_dbg(pdata->dev, " Timestamp source : %s\n",
0859 (hw_feat->ts_src == 1) ? "internal" :
0860 (hw_feat->ts_src == 2) ? "external" :
0861 (hw_feat->ts_src == 3) ? "internal/external" : "n/a");
0862 dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n",
0863 hw_feat->sa_vlan_ins ? "yes" : "no");
0864 dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n",
0865 hw_feat->vxn ? "yes" : "no");
0866
0867
0868 dev_dbg(pdata->dev, " RX fifo size : %u\n",
0869 hw_feat->rx_fifo_size);
0870 dev_dbg(pdata->dev, " TX fifo size : %u\n",
0871 hw_feat->tx_fifo_size);
0872 dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n",
0873 hw_feat->adv_ts_hi ? "yes" : "no");
0874 dev_dbg(pdata->dev, " DMA width : %u\n",
0875 hw_feat->dma_width);
0876 dev_dbg(pdata->dev, " Data Center Bridging : %s\n",
0877 hw_feat->dcb ? "yes" : "no");
0878 dev_dbg(pdata->dev, " Split header : %s\n",
0879 hw_feat->sph ? "yes" : "no");
0880 dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n",
0881 hw_feat->tso ? "yes" : "no");
0882 dev_dbg(pdata->dev, " Debug memory interface : %s\n",
0883 hw_feat->dma_debug ? "yes" : "no");
0884 dev_dbg(pdata->dev, " Receive Side Scaling : %s\n",
0885 hw_feat->rss ? "yes" : "no");
0886 dev_dbg(pdata->dev, " Traffic Class count : %u\n",
0887 hw_feat->tc_cnt);
0888 dev_dbg(pdata->dev, " Hash table size : %u\n",
0889 hw_feat->hash_table_size);
0890 dev_dbg(pdata->dev, " L3/L4 Filters : %u\n",
0891 hw_feat->l3l4_filter_num);
0892
0893
0894 dev_dbg(pdata->dev, " RX queue count : %u\n",
0895 hw_feat->rx_q_cnt);
0896 dev_dbg(pdata->dev, " TX queue count : %u\n",
0897 hw_feat->tx_q_cnt);
0898 dev_dbg(pdata->dev, " RX DMA channel count : %u\n",
0899 hw_feat->rx_ch_cnt);
		dev_dbg(pdata->dev, " TX DMA channel count : %u\n",
			hw_feat->tx_ch_cnt);
0902 dev_dbg(pdata->dev, " PPS outputs : %u\n",
0903 hw_feat->pps_out_num);
0904 dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n",
0905 hw_feat->aux_snap_num);
0906 }
0907 }
0908
0909 static int xgbe_vxlan_set_port(struct net_device *netdev, unsigned int table,
0910 unsigned int entry, struct udp_tunnel_info *ti)
0911 {
0912 struct xgbe_prv_data *pdata = netdev_priv(netdev);
0913
0914 pdata->vxlan_port = be16_to_cpu(ti->port);
0915 pdata->hw_if.enable_vxlan(pdata);
0916
0917 return 0;
0918 }
0919
0920 static int xgbe_vxlan_unset_port(struct net_device *netdev, unsigned int table,
0921 unsigned int entry, struct udp_tunnel_info *ti)
0922 {
0923 struct xgbe_prv_data *pdata = netdev_priv(netdev);
0924
0925 pdata->hw_if.disable_vxlan(pdata);
0926 pdata->vxlan_port = 0;
0927
0928 return 0;
0929 }
0930
0931 static const struct udp_tunnel_nic_info xgbe_udp_tunnels = {
0932 .set_port = xgbe_vxlan_set_port,
0933 .unset_port = xgbe_vxlan_unset_port,
0934 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
0935 .tables = {
0936 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
0937 },
0938 };
0939
0940 const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void)
0941 {
0942 return &xgbe_udp_tunnels;
0943 }
0944
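/* Register (when 'add' is set) and enable the NAPI instance(s): one per
 * channel when per channel interrupts are used, otherwise a single shared
 * instance for the device.
 */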
0945 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
0946 {
0947 struct xgbe_channel *channel;
0948 unsigned int i;
0949
0950 if (pdata->per_channel_irq) {
0951 for (i = 0; i < pdata->channel_count; i++) {
0952 channel = pdata->channel[i];
0953 if (add)
0954 netif_napi_add(pdata->netdev, &channel->napi,
0955 xgbe_one_poll, NAPI_POLL_WEIGHT);
0956
0957 napi_enable(&channel->napi);
0958 }
0959 } else {
0960 if (add)
0961 netif_napi_add(pdata->netdev, &pdata->napi,
0962 xgbe_all_poll, NAPI_POLL_WEIGHT);
0963
0964 napi_enable(&pdata->napi);
0965 }
0966 }
0967
0968 static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
0969 {
0970 struct xgbe_channel *channel;
0971 unsigned int i;
0972
0973 if (pdata->per_channel_irq) {
0974 for (i = 0; i < pdata->channel_count; i++) {
0975 channel = pdata->channel[i];
0976 napi_disable(&channel->napi);
0977
0978 if (del)
0979 netif_napi_del(&channel->napi);
0980 }
0981 } else {
0982 napi_disable(&pdata->napi);
0983
0984 if (del)
0985 netif_napi_del(&pdata->napi);
0986 }
0987 }
0988
0989 static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
0990 {
0991 struct xgbe_channel *channel;
0992 struct net_device *netdev = pdata->netdev;
0993 unsigned int i;
0994 int ret;
0995
0996 tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
0997 tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
0998
0999 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
1000 netdev_name(netdev), pdata);
1001 if (ret) {
1002 netdev_alert(netdev, "error requesting irq %d\n",
1003 pdata->dev_irq);
1004 return ret;
1005 }
1006
1007 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
1008 ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
1009 0, pdata->ecc_name, pdata);
1010 if (ret) {
1011 netdev_alert(netdev, "error requesting ecc irq %d\n",
1012 pdata->ecc_irq);
1013 goto err_dev_irq;
1014 }
1015 }
1016
1017 if (!pdata->per_channel_irq)
1018 return 0;
1019
1020 for (i = 0; i < pdata->channel_count; i++) {
1021 channel = pdata->channel[i];
1022 snprintf(channel->dma_irq_name,
1023 sizeof(channel->dma_irq_name) - 1,
1024 "%s-TxRx-%u", netdev_name(netdev),
1025 channel->queue_index);
1026
1027 ret = devm_request_irq(pdata->dev, channel->dma_irq,
1028 xgbe_dma_isr, 0,
1029 channel->dma_irq_name, channel);
1030 if (ret) {
1031 netdev_alert(netdev, "error requesting irq %d\n",
1032 channel->dma_irq);
1033 goto err_dma_irq;
1034 }
1035
1036 irq_set_affinity_hint(channel->dma_irq,
1037 &channel->affinity_mask);
1038 }
1039
1040 return 0;
1041
1042 err_dma_irq:
	/* Using an unsigned int, 'i' will wrap to UINT_MAX and exit the loop */
1044 for (i--; i < pdata->channel_count; i--) {
1045 channel = pdata->channel[i];
1046
1047 irq_set_affinity_hint(channel->dma_irq, NULL);
1048 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1049 }
1050
1051 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
1052 devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
1053
1054 err_dev_irq:
1055 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1056
1057 return ret;
1058 }
1059
1060 static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
1061 {
1062 struct xgbe_channel *channel;
1063 unsigned int i;
1064
1065 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1066
1067 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
1068 devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
1069
1070 if (!pdata->per_channel_irq)
1071 return;
1072
1073 for (i = 0; i < pdata->channel_count; i++) {
1074 channel = pdata->channel[i];
1075
1076 irq_set_affinity_hint(channel->dma_irq, NULL);
1077 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1078 }
1079 }
1080
1081 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
1082 {
1083 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1084
1085 DBGPR("-->xgbe_init_tx_coalesce\n");
1086
1087 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
1088 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
1089
1090 hw_if->config_tx_coalesce(pdata);
1091
1092 DBGPR("<--xgbe_init_tx_coalesce\n");
1093 }
1094
1095 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
1096 {
1097 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1098
1099 DBGPR("-->xgbe_init_rx_coalesce\n");
1100
1101 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
1102 pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
1103 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
1104
1105 hw_if->config_rx_coalesce(pdata);
1106
1107 DBGPR("<--xgbe_init_rx_coalesce\n");
1108 }
1109
1110 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
1111 {
1112 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1113 struct xgbe_ring *ring;
1114 struct xgbe_ring_data *rdata;
1115 unsigned int i, j;
1116
1117 DBGPR("-->xgbe_free_tx_data\n");
1118
1119 for (i = 0; i < pdata->channel_count; i++) {
1120 ring = pdata->channel[i]->tx_ring;
1121 if (!ring)
1122 break;
1123
1124 for (j = 0; j < ring->rdesc_count; j++) {
1125 rdata = XGBE_GET_DESC_DATA(ring, j);
1126 desc_if->unmap_rdata(pdata, rdata);
1127 }
1128 }
1129
1130 DBGPR("<--xgbe_free_tx_data\n");
1131 }
1132
1133 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
1134 {
1135 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1136 struct xgbe_ring *ring;
1137 struct xgbe_ring_data *rdata;
1138 unsigned int i, j;
1139
1140 DBGPR("-->xgbe_free_rx_data\n");
1141
1142 for (i = 0; i < pdata->channel_count; i++) {
1143 ring = pdata->channel[i]->rx_ring;
1144 if (!ring)
1145 break;
1146
1147 for (j = 0; j < ring->rdesc_count; j++) {
1148 rdata = XGBE_GET_DESC_DATA(ring, j);
1149 desc_if->unmap_rdata(pdata, rdata);
1150 }
1151 }
1152
1153 DBGPR("<--xgbe_free_rx_data\n");
1154 }
1155
1156 static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
1157 {
1158 pdata->phy_link = -1;
1159 pdata->phy_speed = SPEED_UNKNOWN;
1160
1161 return pdata->phy_if.phy_reset(pdata);
1162 }
1163
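/* Power down the device: detach/stop the queues, quiesce the timers and
 * work items, power down the Tx/Rx paths and disable NAPI.
 */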
1164 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
1165 {
1166 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1167 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1168 unsigned long flags;
1169
1170 DBGPR("-->xgbe_powerdown\n");
1171
1172 if (!netif_running(netdev) ||
1173 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
1174 netdev_alert(netdev, "Device is already powered down\n");
1175 DBGPR("<--xgbe_powerdown\n");
1176 return -EINVAL;
1177 }
1178
1179 spin_lock_irqsave(&pdata->lock, flags);
1180
1181 if (caller == XGMAC_DRIVER_CONTEXT)
1182 netif_device_detach(netdev);
1183
1184 netif_tx_stop_all_queues(netdev);
1185
1186 xgbe_stop_timers(pdata);
1187 flush_workqueue(pdata->dev_workqueue);
1188
1189 hw_if->powerdown_tx(pdata);
1190 hw_if->powerdown_rx(pdata);
1191
1192 xgbe_napi_disable(pdata, 0);
1193
1194 pdata->power_down = 1;
1195
1196 spin_unlock_irqrestore(&pdata->lock, flags);
1197
1198 DBGPR("<--xgbe_powerdown\n");
1199
1200 return 0;
1201 }
1202
1203 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
1204 {
1205 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1206 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1207 unsigned long flags;
1208
1209 DBGPR("-->xgbe_powerup\n");
1210
1211 if (!netif_running(netdev) ||
1212 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
1213 netdev_alert(netdev, "Device is already powered up\n");
1214 DBGPR("<--xgbe_powerup\n");
1215 return -EINVAL;
1216 }
1217
1218 spin_lock_irqsave(&pdata->lock, flags);
1219
1220 pdata->power_down = 0;
1221
1222 xgbe_napi_enable(pdata, 0);
1223
1224 hw_if->powerup_tx(pdata);
1225 hw_if->powerup_rx(pdata);
1226
1227 if (caller == XGMAC_DRIVER_CONTEXT)
1228 netif_device_attach(netdev);
1229
1230 netif_tx_start_all_queues(netdev);
1231
1232 xgbe_start_timers(pdata);
1233
1234 spin_unlock_irqrestore(&pdata->lock, flags);
1235
1236 DBGPR("<--xgbe_powerup\n");
1237
1238 return 0;
1239 }
1240
1241 static void xgbe_free_memory(struct xgbe_prv_data *pdata)
1242 {
1243 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1244
1245
1246 desc_if->free_ring_resources(pdata);
1247
1248
1249 xgbe_free_channels(pdata);
1250 }
1251
1252 static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
1253 {
1254 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1255 struct net_device *netdev = pdata->netdev;
1256 int ret;
1257
1258 if (pdata->new_tx_ring_count) {
1259 pdata->tx_ring_count = pdata->new_tx_ring_count;
1260 pdata->tx_q_count = pdata->tx_ring_count;
1261 pdata->new_tx_ring_count = 0;
1262 }
1263
1264 if (pdata->new_rx_ring_count) {
1265 pdata->rx_ring_count = pdata->new_rx_ring_count;
1266 pdata->new_rx_ring_count = 0;
1267 }
1268
1269
1270 pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
1271
1272
1273 ret = xgbe_alloc_channels(pdata);
1274 if (ret)
1275 return ret;
1276
1277
1278 ret = desc_if->alloc_ring_resources(pdata);
1279 if (ret)
1280 goto err_channels;
1281
1282
1283 xgbe_init_timers(pdata);
1284
1285 return 0;
1286
1287 err_channels:
1288 xgbe_free_memory(pdata);
1289
1290 return ret;
1291 }
1292
1293 static int xgbe_start(struct xgbe_prv_data *pdata)
1294 {
1295 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1296 struct xgbe_phy_if *phy_if = &pdata->phy_if;
1297 struct net_device *netdev = pdata->netdev;
1298 unsigned int i;
1299 int ret;
1300
1301
1302 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
1303 if (ret) {
1304 netdev_err(netdev, "error setting real tx queue count\n");
1305 return ret;
1306 }
1307
1308 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
1309 if (ret) {
1310 netdev_err(netdev, "error setting real rx queue count\n");
1311 return ret;
1312 }
1313
1314
1315 for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
1316 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
1317 i % pdata->rx_ring_count);
1318
1319 ret = hw_if->init(pdata);
1320 if (ret)
1321 return ret;
1322
1323 xgbe_napi_enable(pdata, 1);
1324
1325 ret = xgbe_request_irqs(pdata);
1326 if (ret)
1327 goto err_napi;
1328
1329 ret = phy_if->phy_start(pdata);
1330 if (ret)
1331 goto err_irqs;
1332
1333 hw_if->enable_tx(pdata);
1334 hw_if->enable_rx(pdata);
1335
1336 udp_tunnel_nic_reset_ntf(netdev);
1337
1338 netif_tx_start_all_queues(netdev);
1339
1340 xgbe_start_timers(pdata);
1341 queue_work(pdata->dev_workqueue, &pdata->service_work);
1342
1343 clear_bit(XGBE_STOPPED, &pdata->dev_state);
1344
1345 return 0;
1346
1347 err_irqs:
1348 xgbe_free_irqs(pdata);
1349
1350 err_napi:
1351 xgbe_napi_disable(pdata, 1);
1352
1353 hw_if->exit(pdata);
1354
1355 return ret;
1356 }
1357
1358 static void xgbe_stop(struct xgbe_prv_data *pdata)
1359 {
1360 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1361 struct xgbe_phy_if *phy_if = &pdata->phy_if;
1362 struct xgbe_channel *channel;
1363 struct net_device *netdev = pdata->netdev;
1364 struct netdev_queue *txq;
1365 unsigned int i;
1366
1367 DBGPR("-->xgbe_stop\n");
1368
1369 if (test_bit(XGBE_STOPPED, &pdata->dev_state))
1370 return;
1371
1372 netif_tx_stop_all_queues(netdev);
1373 netif_carrier_off(pdata->netdev);
1374
1375 xgbe_stop_timers(pdata);
1376 flush_workqueue(pdata->dev_workqueue);
1377
1378 xgbe_vxlan_unset_port(netdev, 0, 0, NULL);
1379
1380 hw_if->disable_tx(pdata);
1381 hw_if->disable_rx(pdata);
1382
1383 phy_if->phy_stop(pdata);
1384
1385 xgbe_free_irqs(pdata);
1386
1387 xgbe_napi_disable(pdata, 1);
1388
1389 hw_if->exit(pdata);
1390
1391 for (i = 0; i < pdata->channel_count; i++) {
1392 channel = pdata->channel[i];
1393 if (!channel->tx_ring)
1394 continue;
1395
1396 txq = netdev_get_tx_queue(netdev, channel->queue_index);
1397 netdev_tx_reset_queue(txq);
1398 }
1399
1400 set_bit(XGBE_STOPPED, &pdata->dev_state);
1401
1402 DBGPR("<--xgbe_stop\n");
1403 }
1404
1405 static void xgbe_stopdev(struct work_struct *work)
1406 {
1407 struct xgbe_prv_data *pdata = container_of(work,
1408 struct xgbe_prv_data,
1409 stopdev_work);
1410
1411 rtnl_lock();
1412
1413 xgbe_stop(pdata);
1414
1415 xgbe_free_tx_data(pdata);
1416 xgbe_free_rx_data(pdata);
1417
1418 rtnl_unlock();
1419
1420 netdev_alert(pdata->netdev, "device stopped\n");
1421 }
1422
1423 void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
1424 {
	/* If not running, "restart" will happen on open */
1426 if (!netif_running(pdata->netdev))
1427 return;
1428
1429 xgbe_stop(pdata);
1430
1431 xgbe_free_memory(pdata);
1432 xgbe_alloc_memory(pdata);
1433
1434 xgbe_start(pdata);
1435 }
1436
1437 void xgbe_restart_dev(struct xgbe_prv_data *pdata)
1438 {
	/* If not running, "restart" will happen on open */
1440 if (!netif_running(pdata->netdev))
1441 return;
1442
1443 xgbe_stop(pdata);
1444
1445 xgbe_free_tx_data(pdata);
1446 xgbe_free_rx_data(pdata);
1447
1448 xgbe_start(pdata);
1449 }
1450
1451 static void xgbe_restart(struct work_struct *work)
1452 {
1453 struct xgbe_prv_data *pdata = container_of(work,
1454 struct xgbe_prv_data,
1455 restart_work);
1456
1457 rtnl_lock();
1458
1459 xgbe_restart_dev(pdata);
1460
1461 rtnl_unlock();
1462 }
1463
1464 static void xgbe_tx_tstamp(struct work_struct *work)
1465 {
1466 struct xgbe_prv_data *pdata = container_of(work,
1467 struct xgbe_prv_data,
1468 tx_tstamp_work);
1469 struct skb_shared_hwtstamps hwtstamps;
1470 u64 nsec;
1471 unsigned long flags;
1472
1473 spin_lock_irqsave(&pdata->tstamp_lock, flags);
1474 if (!pdata->tx_tstamp_skb)
1475 goto unlock;
1476
1477 if (pdata->tx_tstamp) {
1478 nsec = timecounter_cyc2time(&pdata->tstamp_tc,
1479 pdata->tx_tstamp);
1480
1481 memset(&hwtstamps, 0, sizeof(hwtstamps));
1482 hwtstamps.hwtstamp = ns_to_ktime(nsec);
1483 skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
1484 }
1485
1486 dev_kfree_skb_any(pdata->tx_tstamp_skb);
1487
1488 pdata->tx_tstamp_skb = NULL;
1489
1490 unlock:
1491 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1492 }
1493
1494 static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
1495 struct ifreq *ifreq)
1496 {
1497 if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
1498 sizeof(pdata->tstamp_config)))
1499 return -EFAULT;
1500
1501 return 0;
1502 }
1503
1504 static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
1505 struct ifreq *ifreq)
1506 {
1507 struct hwtstamp_config config;
1508 unsigned int mac_tscr;
1509
1510 if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
1511 return -EFAULT;
1512
1513 mac_tscr = 0;
1514
1515 switch (config.tx_type) {
1516 case HWTSTAMP_TX_OFF:
1517 break;
1518
1519 case HWTSTAMP_TX_ON:
1520 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1521 break;
1522
1523 default:
1524 return -ERANGE;
1525 }
1526
1527 switch (config.rx_filter) {
1528 case HWTSTAMP_FILTER_NONE:
1529 break;
1530
1531 case HWTSTAMP_FILTER_NTP_ALL:
1532 case HWTSTAMP_FILTER_ALL:
1533 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
1534 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1535 break;
1536
	/* PTP v2, UDP, any kind of event packet */
1538 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1539 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1540 fallthrough;
1541 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1542 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1543 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1544 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
1545 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1546 break;
1547
	/* PTP v2, UDP, Sync packet */
1549 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1550 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1551 fallthrough;
1552 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1553 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1554 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1555 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1556 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1557 break;
1558
	/* PTP v2, UDP, Delay_req packet */
1560 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1561 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1562 fallthrough;
1563 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1564 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1565 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1566 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1567 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
1568 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1569 break;
1570
	/* 802.AS1, Ethernet, any kind of event packet */
1572 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1573 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
1574 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
1575 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1576 break;
1577
	/* 802.AS1, Ethernet, Sync packet */
1579 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1580 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
1581 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1582 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1583 break;
1584
	/* 802.AS1, Ethernet, Delay_req packet */
1586 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1587 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
1588 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
1589 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1590 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1591 break;
1592
	/* PTP v2/802.AS1, any layer, any kind of event packet */
1594 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1595 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1596 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1597 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1598 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1599 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
1600 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1601 break;
1602
	/* PTP v2/802.AS1, any layer, Sync packet */
1604 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1605 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1606 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1607 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1608 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1609 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1610 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1611 break;
1612
	/* PTP v2/802.AS1, any layer, Delay_req packet */
1614 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1615 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1616 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1617 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
1618 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
1619 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
1620 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
1621 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1622 break;
1623
1624 default:
1625 return -ERANGE;
1626 }
1627
1628 pdata->hw_if.config_tstamp(pdata, mac_tscr);
1629
1630 memcpy(&pdata->tstamp_config, &config, sizeof(config));
1631
1632 return 0;
1633 }
1634
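/* Hold a reference to the skb for hardware Tx timestamping; only one
 * timestamp request can be outstanding at a time.
 */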
1635 static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
1636 struct sk_buff *skb,
1637 struct xgbe_packet_data *packet)
1638 {
1639 unsigned long flags;
1640
1641 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
1642 spin_lock_irqsave(&pdata->tstamp_lock, flags);
1643 if (pdata->tx_tstamp_skb) {
1644
1645 XGMAC_SET_BITS(packet->attributes,
1646 TX_PACKET_ATTRIBUTES, PTP, 0);
1647 } else {
1648 pdata->tx_tstamp_skb = skb_get(skb);
1649 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1650 }
1651 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1652 }
1653
1654 skb_tx_timestamp(skb);
1655 }
1656
1657 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
1658 {
1659 if (skb_vlan_tag_present(skb))
1660 packet->vlan_ctag = skb_vlan_tag_get(skb);
1661 }
1662
1663 static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
1664 {
1665 int ret;
1666
1667 if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1668 TSO_ENABLE))
1669 return 0;
1670
1671 ret = skb_cow_head(skb, 0);
1672 if (ret)
1673 return ret;
1674
1675 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
1676 packet->header_len = skb_inner_tcp_all_headers(skb);
1677 packet->tcp_header_len = inner_tcp_hdrlen(skb);
1678 } else {
1679 packet->header_len = skb_tcp_all_headers(skb);
1680 packet->tcp_header_len = tcp_hdrlen(skb);
1681 }
1682 packet->tcp_payload_len = skb->len - packet->header_len;
1683 packet->mss = skb_shinfo(skb)->gso_size;
1684
1685 DBGPR(" packet->header_len=%u\n", packet->header_len);
1686 DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
1687 packet->tcp_header_len, packet->tcp_payload_len);
1688 DBGPR(" packet->mss=%u\n", packet->mss);
1689
1690
1691
1692
1693 packet->tx_packets = skb_shinfo(skb)->gso_segs;
1694 packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
1695
1696 return 0;
1697 }
1698
1699 static bool xgbe_is_vxlan(struct sk_buff *skb)
1700 {
1701 if (!skb->encapsulation)
1702 return false;
1703
1704 if (skb->ip_summed != CHECKSUM_PARTIAL)
1705 return false;
1706
1707 switch (skb->protocol) {
1708 case htons(ETH_P_IP):
1709 if (ip_hdr(skb)->protocol != IPPROTO_UDP)
1710 return false;
1711 break;
1712
1713 case htons(ETH_P_IPV6):
1714 if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
1715 return false;
1716 break;
1717
1718 default:
1719 return false;
1720 }
1721
1722 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1723 skb->inner_protocol != htons(ETH_P_TEB) ||
1724 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
1725 sizeof(struct udphdr) + sizeof(struct vxlanhdr)))
1726 return false;
1727
1728 return true;
1729 }
1730
1731 static int xgbe_is_tso(struct sk_buff *skb)
1732 {
1733 if (skb->ip_summed != CHECKSUM_PARTIAL)
1734 return 0;
1735
1736 if (!skb_is_gso(skb))
1737 return 0;
1738
1739 DBGPR(" TSO packet to be processed\n");
1740
1741 return 1;
1742 }
1743
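/* Work out how many descriptors the packet needs (a context descriptor when
 * the MSS or VLAN tag changes, plus one per XGBE_TX_MAX_BUF_SIZE chunk of
 * data) and record the per-packet attributes used for hardware setup.
 */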
1744 static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1745 struct xgbe_ring *ring, struct sk_buff *skb,
1746 struct xgbe_packet_data *packet)
1747 {
1748 skb_frag_t *frag;
1749 unsigned int context_desc;
1750 unsigned int len;
1751 unsigned int i;
1752
1753 packet->skb = skb;
1754
1755 context_desc = 0;
1756 packet->rdesc_count = 0;
1757
1758 packet->tx_packets = 1;
1759 packet->tx_bytes = skb->len;
1760
1761 if (xgbe_is_tso(skb)) {
1762
1763 if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
1764 context_desc = 1;
1765 packet->rdesc_count++;
1766 }
1767
1768
1769 packet->rdesc_count++;
1770
1771 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1772 TSO_ENABLE, 1);
1773 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1774 CSUM_ENABLE, 1);
1775 } else if (skb->ip_summed == CHECKSUM_PARTIAL)
1776 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1777 CSUM_ENABLE, 1);
1778
1779 if (xgbe_is_vxlan(skb))
1780 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1781 VXLAN, 1);
1782
1783 if (skb_vlan_tag_present(skb)) {
1784
1785 if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
1786
1787 if (!context_desc) {
1788 context_desc = 1;
1789 packet->rdesc_count++;
1790 }
1791
1792 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1793 VLAN_CTAG, 1);
1794 }
1795
1796 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1797 (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
1798 XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
1799 PTP, 1);
1800
1801 for (len = skb_headlen(skb); len;) {
1802 packet->rdesc_count++;
1803 len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1804 }
1805
1806 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1807 frag = &skb_shinfo(skb)->frags[i];
1808 for (len = skb_frag_size(frag); len; ) {
1809 packet->rdesc_count++;
1810 len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
1811 }
1812 }
1813 }
1814
1815 static int xgbe_open(struct net_device *netdev)
1816 {
1817 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1818 int ret;
1819
1820
1821 snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
1822 netdev_name(netdev));
1823
1824 snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
1825 netdev_name(netdev));
1826
1827 snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
1828 netdev_name(netdev));
1829
1830
1831 pdata->dev_workqueue =
1832 create_singlethread_workqueue(netdev_name(netdev));
1833 if (!pdata->dev_workqueue) {
1834 netdev_err(netdev, "device workqueue creation failed\n");
1835 return -ENOMEM;
1836 }
1837
1838 pdata->an_workqueue =
1839 create_singlethread_workqueue(pdata->an_name);
1840 if (!pdata->an_workqueue) {
1841 netdev_err(netdev, "phy workqueue creation failed\n");
1842 ret = -ENOMEM;
1843 goto err_dev_wq;
1844 }
1845
1846
1847 ret = xgbe_phy_reset(pdata);
1848 if (ret)
1849 goto err_an_wq;
1850
1851
1852 ret = clk_prepare_enable(pdata->sysclk);
1853 if (ret) {
1854 netdev_alert(netdev, "dma clk_prepare_enable failed\n");
1855 goto err_an_wq;
1856 }
1857
1858 ret = clk_prepare_enable(pdata->ptpclk);
1859 if (ret) {
1860 netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
1861 goto err_sysclk;
1862 }
1863
1864 INIT_WORK(&pdata->service_work, xgbe_service);
1865 INIT_WORK(&pdata->restart_work, xgbe_restart);
1866 INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
1867 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1868
1869 ret = xgbe_alloc_memory(pdata);
1870 if (ret)
1871 goto err_ptpclk;
1872
1873 ret = xgbe_start(pdata);
1874 if (ret)
1875 goto err_mem;
1876
1877 clear_bit(XGBE_DOWN, &pdata->dev_state);
1878
1879 return 0;
1880
1881 err_mem:
1882 xgbe_free_memory(pdata);
1883
1884 err_ptpclk:
1885 clk_disable_unprepare(pdata->ptpclk);
1886
1887 err_sysclk:
1888 clk_disable_unprepare(pdata->sysclk);
1889
1890 err_an_wq:
1891 destroy_workqueue(pdata->an_workqueue);
1892
1893 err_dev_wq:
1894 destroy_workqueue(pdata->dev_workqueue);
1895
1896 return ret;
1897 }
1898
1899 static int xgbe_close(struct net_device *netdev)
1900 {
1901 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1902
1903
1904 xgbe_stop(pdata);
1905
1906 xgbe_free_memory(pdata);
1907
1908
1909 clk_disable_unprepare(pdata->ptpclk);
1910 clk_disable_unprepare(pdata->sysclk);
1911
1912 destroy_workqueue(pdata->an_workqueue);
1913
1914 destroy_workqueue(pdata->dev_workqueue);
1915
1916 set_bit(XGBE_DOWN, &pdata->dev_state);
1917
1918 return 0;
1919 }
1920
1921 static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1922 {
1923 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1924 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1925 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1926 struct xgbe_channel *channel;
1927 struct xgbe_ring *ring;
1928 struct xgbe_packet_data *packet;
1929 struct netdev_queue *txq;
1930 netdev_tx_t ret;
1931
1932 DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
1933
1934 channel = pdata->channel[skb->queue_mapping];
1935 txq = netdev_get_tx_queue(netdev, channel->queue_index);
1936 ring = channel->tx_ring;
1937 packet = &ring->packet_data;
1938
1939 ret = NETDEV_TX_OK;
1940
1941 if (skb->len == 0) {
1942 netif_err(pdata, tx_err, netdev,
1943 "empty skb received from stack\n");
1944 dev_kfree_skb_any(skb);
1945 goto tx_netdev_return;
1946 }
1947
1948
1949 memset(packet, 0, sizeof(*packet));
1950 xgbe_packet_info(pdata, ring, skb, packet);
1951
1952
1953 ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
1954 if (ret)
1955 goto tx_netdev_return;
1956
1957 ret = xgbe_prep_tso(skb, packet);
1958 if (ret) {
1959 netif_err(pdata, tx_err, netdev,
1960 "error processing TSO packet\n");
1961 dev_kfree_skb_any(skb);
1962 goto tx_netdev_return;
1963 }
1964 xgbe_prep_vlan(skb, packet);
1965
1966 if (!desc_if->map_tx_skb(channel, skb)) {
1967 dev_kfree_skb_any(skb);
1968 goto tx_netdev_return;
1969 }
1970
1971 xgbe_prep_tx_tstamp(pdata, skb, packet);
1972
1973
1974 netdev_tx_sent_queue(txq, packet->tx_bytes);
1975
1976
1977 hw_if->dev_xmit(channel);
1978
1979 if (netif_msg_pktdata(pdata))
1980 xgbe_print_pkt(netdev, skb, true);
1981
1982
1983 xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
1984
1985 ret = NETDEV_TX_OK;
1986
1987 tx_netdev_return:
1988 return ret;
1989 }
1990
1991 static void xgbe_set_rx_mode(struct net_device *netdev)
1992 {
1993 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1994 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1995
1996 DBGPR("-->xgbe_set_rx_mode\n");
1997
1998 hw_if->config_rx_mode(pdata);
1999
2000 DBGPR("<--xgbe_set_rx_mode\n");
2001 }
2002
2003 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
2004 {
2005 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2006 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2007 struct sockaddr *saddr = addr;
2008
2009 DBGPR("-->xgbe_set_mac_address\n");
2010
2011 if (!is_valid_ether_addr(saddr->sa_data))
2012 return -EADDRNOTAVAIL;
2013
2014 eth_hw_addr_set(netdev, saddr->sa_data);
2015
2016 hw_if->set_mac_address(pdata, netdev->dev_addr);
2017
2018 DBGPR("<--xgbe_set_mac_address\n");
2019
2020 return 0;
2021 }
2022
2023 static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
2024 {
2025 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2026 int ret;
2027
2028 switch (cmd) {
2029 case SIOCGHWTSTAMP:
2030 ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
2031 break;
2032
2033 case SIOCSHWTSTAMP:
2034 ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
2035 break;
2036
2037 default:
2038 ret = -EOPNOTSUPP;
2039 }
2040
2041 return ret;
2042 }
2043
2044 static int xgbe_change_mtu(struct net_device *netdev, int mtu)
2045 {
2046 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2047 int ret;
2048
2049 DBGPR("-->xgbe_change_mtu\n");
2050
2051 ret = xgbe_calc_rx_buf_size(netdev, mtu);
2052 if (ret < 0)
2053 return ret;
2054
2055 pdata->rx_buf_size = ret;
2056 netdev->mtu = mtu;
2057
2058 xgbe_restart_dev(pdata);
2059
2060 DBGPR("<--xgbe_change_mtu\n");
2061
2062 return 0;
2063 }
2064
2065 static void xgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2066 {
2067 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2068
2069 netdev_warn(netdev, "tx timeout, device restarting\n");
2070 schedule_work(&pdata->restart_work);
2071 }
2072
2073 static void xgbe_get_stats64(struct net_device *netdev,
2074 struct rtnl_link_stats64 *s)
2075 {
2076 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2077 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
2078
2079 DBGPR("-->%s\n", __func__);
2080
2081 pdata->hw_if.read_mmc_stats(pdata);
2082
2083 s->rx_packets = pstats->rxframecount_gb;
2084 s->rx_bytes = pstats->rxoctetcount_gb;
2085 s->rx_errors = pstats->rxframecount_gb -
2086 pstats->rxbroadcastframes_g -
2087 pstats->rxmulticastframes_g -
2088 pstats->rxunicastframes_g;
2089 s->multicast = pstats->rxmulticastframes_g;
2090 s->rx_length_errors = pstats->rxlengtherror;
2091 s->rx_crc_errors = pstats->rxcrcerror;
2092 s->rx_fifo_errors = pstats->rxfifooverflow;
2093
2094 s->tx_packets = pstats->txframecount_gb;
2095 s->tx_bytes = pstats->txoctetcount_gb;
2096 s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
2097 s->tx_dropped = netdev->stats.tx_dropped;
2098
2099 DBGPR("<--%s\n", __func__);
2100 }
2101
2102 static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
2103 u16 vid)
2104 {
2105 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2106 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2107
2108 DBGPR("-->%s\n", __func__);
2109
2110 set_bit(vid, pdata->active_vlans);
2111 hw_if->update_vlan_hash_table(pdata);
2112
2113 DBGPR("<--%s\n", __func__);
2114
2115 return 0;
2116 }
2117
2118 static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
2119 u16 vid)
2120 {
2121 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2122 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2123
2124 DBGPR("-->%s\n", __func__);
2125
2126 clear_bit(vid, pdata->active_vlans);
2127 hw_if->update_vlan_hash_table(pdata);
2128
2129 DBGPR("<--%s\n", __func__);
2130
2131 return 0;
2132 }
2133
2134 #ifdef CONFIG_NET_POLL_CONTROLLER
2135 static void xgbe_poll_controller(struct net_device *netdev)
2136 {
2137 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2138 struct xgbe_channel *channel;
2139 unsigned int i;
2140
2141 DBGPR("-->xgbe_poll_controller\n");
2142
2143 if (pdata->per_channel_irq) {
2144 for (i = 0; i < pdata->channel_count; i++) {
2145 channel = pdata->channel[i];
2146 xgbe_dma_isr(channel->dma_irq, channel);
2147 }
2148 } else {
2149 disable_irq(pdata->dev_irq);
2150 xgbe_isr(pdata->dev_irq, pdata);
2151 enable_irq(pdata->dev_irq);
2152 }
2153
2154 DBGPR("<--xgbe_poll_controller\n");
2155 }
2156 #endif
2157
2158 static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
2159 void *type_data)
2160 {
2161 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2162 struct tc_mqprio_qopt *mqprio = type_data;
2163 u8 tc;
2164
2165 if (type != TC_SETUP_QDISC_MQPRIO)
2166 return -EOPNOTSUPP;
2167
2168 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2169 tc = mqprio->num_tc;
2170
2171 if (tc > pdata->hw_feat.tc_cnt)
2172 return -EINVAL;
2173
2174 pdata->num_tcs = tc;
2175 pdata->hw_if.config_tc(pdata);
2176
2177 return 0;
2178 }
2179
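/* Keep the UDP tunnel (VXLAN) offload flags self-consistent when the
 * hardware supports VXLAN: Tx and Rx tunnel offloads are enabled together
 * and tunnel checksum offload follows the IP checksum offload settings.
 */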
2180 static netdev_features_t xgbe_fix_features(struct net_device *netdev,
2181 netdev_features_t features)
2182 {
2183 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2184 netdev_features_t vxlan_base;
2185
2186 vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
2187
2188 if (!pdata->hw_feat.vxn)
2189 return features;
2190
2191 /* VXLAN CSUM requires VXLAN base */
2192 if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
2193 !(features & NETIF_F_GSO_UDP_TUNNEL)) {
2194 netdev_notice(netdev,
2195 "forcing tx udp tunnel support\n");
2196 features |= NETIF_F_GSO_UDP_TUNNEL;
2197 }
2198
2199 /* RX and TX must match */
2200 if ((features & vxlan_base) != vxlan_base) {
2201 netdev_notice(netdev,
2202 "forcing both tx and rx udp tunnel support\n");
2203 features |= vxlan_base;
2204 }
2205
2206 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2207 if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
2208 netdev_notice(netdev,
2209 "forcing tx udp tunnel checksumming on\n");
2210 features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
2211 }
2212 } else {
2213 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
2214 netdev_notice(netdev,
2215 "forcing tx udp tunnel checksumming off\n");
2216 features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
2217 }
2218 }
2219
2220 return features;
2221 }
2222
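/* Apply changes to the RSS, Rx checksum and VLAN offload settings by
 * comparing the requested features against the currently programmed ones
 * and enabling or disabling the corresponding hardware support.
 */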
2223 static int xgbe_set_features(struct net_device *netdev,
2224 netdev_features_t features)
2225 {
2226 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2227 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2228 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
2229 int ret = 0;
2230
2231 rxhash = pdata->netdev_features & NETIF_F_RXHASH;
2232 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
2233 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
2234 rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
2235
2236 if ((features & NETIF_F_RXHASH) && !rxhash)
2237 ret = hw_if->enable_rss(pdata);
2238 else if (!(features & NETIF_F_RXHASH) && rxhash)
2239 ret = hw_if->disable_rss(pdata);
2240 if (ret)
2241 return ret;
2242
2243 if ((features & NETIF_F_RXCSUM) && !rxcsum)
2244 hw_if->enable_rx_csum(pdata);
2245 else if (!(features & NETIF_F_RXCSUM) && rxcsum)
2246 hw_if->disable_rx_csum(pdata);
2247
2248 if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
2249 hw_if->enable_rx_vlan_stripping(pdata);
2250 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
2251 hw_if->disable_rx_vlan_stripping(pdata);
2252
2253 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
2254 hw_if->enable_rx_vlan_filtering(pdata);
2255 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
2256 hw_if->disable_rx_vlan_filtering(pdata);
2257
2258 pdata->netdev_features = features;
2259
2260 DBGPR("<--xgbe_set_features\n");
2261
2262 return 0;
2263 }
2264
2265 static netdev_features_t xgbe_features_check(struct sk_buff *skb,
2266 struct net_device *netdev,
2267 netdev_features_t features)
2268 {
2269 features = vlan_features_check(skb, features);
2270 features = vxlan_features_check(skb, features);
2271
2272 return features;
2273 }
2274
2275 static const struct net_device_ops xgbe_netdev_ops = {
2276 .ndo_open = xgbe_open,
2277 .ndo_stop = xgbe_close,
2278 .ndo_start_xmit = xgbe_xmit,
2279 .ndo_set_rx_mode = xgbe_set_rx_mode,
2280 .ndo_set_mac_address = xgbe_set_mac_address,
2281 .ndo_validate_addr = eth_validate_addr,
2282 .ndo_eth_ioctl = xgbe_ioctl,
2283 .ndo_change_mtu = xgbe_change_mtu,
2284 .ndo_tx_timeout = xgbe_tx_timeout,
2285 .ndo_get_stats64 = xgbe_get_stats64,
2286 .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
2287 .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
2288 #ifdef CONFIG_NET_POLL_CONTROLLER
2289 .ndo_poll_controller = xgbe_poll_controller,
2290 #endif
2291 .ndo_setup_tc = xgbe_setup_tc,
2292 .ndo_fix_features = xgbe_fix_features,
2293 .ndo_set_features = xgbe_set_features,
2294 .ndo_features_check = xgbe_features_check,
2295 };
2296
2297 const struct net_device_ops *xgbe_get_netdev_ops(void)
2298 {
2299 return &xgbe_netdev_ops;
2300 }
2301
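/* Re-arm the Rx descriptors that have been processed: remap the Rx buffers,
 * reset the descriptors and advance the Rx Tail Pointer Register so the
 * DMA engine can use them again.
 */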
2302 static void xgbe_rx_refresh(struct xgbe_channel *channel)
2303 {
2304 struct xgbe_prv_data *pdata = channel->pdata;
2305 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2306 struct xgbe_desc_if *desc_if = &pdata->desc_if;
2307 struct xgbe_ring *ring = channel->rx_ring;
2308 struct xgbe_ring_data *rdata;
2309
2310 while (ring->dirty != ring->cur) {
2311 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
2312
2313 /* Reset rdata values */
2314 desc_if->unmap_rdata(pdata, rdata);
2315
2316 if (desc_if->map_rx_buffer(pdata, ring, rdata))
2317 break;
2318
2319 hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
2320
2321 ring->dirty++;
2322 }
2323
2324 /* Make sure everything is written before the register write */
2325 wmb();
2326
2327 /* Update the Rx Tail Pointer Register with address of
2328  * the last cleaned entry */
2329 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
2330 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
2331 lower_32_bits(rdata->rdesc_dma));
2332 }
2333
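/* Allocate an skb for the received packet and copy the header portion out
 * of the DMA header buffer into its linear data area.
 */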
2334 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
2335 struct napi_struct *napi,
2336 struct xgbe_ring_data *rdata,
2337 unsigned int len)
2338 {
2339 struct sk_buff *skb;
2340 u8 *packet;
2341
2342 skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
2343 if (!skb)
2344 return NULL;
2345
2346 /* Pull in the header buffer which may contain just the header
2347  * or the header plus data
2348  */
2349 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
2350 rdata->rx.hdr.dma_off,
2351 rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
2352
2353 packet = page_address(rdata->rx.hdr.pa.pages) +
2354 rdata->rx.hdr.pa.pages_offset;
2355 skb_copy_to_linear_data(skb, packet, len);
2356 skb_put(skb, len);
2357
2358 return skb;
2359 }
2360
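/* Determine how much of the header buffer was used by this descriptor. */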
2361 static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
2362 struct xgbe_packet_data *packet)
2363 {
2364 /* Always zero if not the first descriptor */
2365 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
2366 return 0;
2367
2368 /* First descriptor with split header, return header length */
2369 if (rdata->rx.hdr_len)
2370 return rdata->rx.hdr_len;
2371
2372 /* First descriptor but not the last descriptor and no split header,
2373  * so the full buffer was used
2374  */
2375 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2376 return rdata->rx.hdr.dma_len;
2377
2378 /* First descriptor and last descriptor and no split header, so
2379  * calculate how much of the buffer was used
2380  */
2381 return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
2382 }
2383
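/* Determine how much of the data buffer was used by this descriptor. */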
2384 static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
2385 struct xgbe_packet_data *packet,
2386 unsigned int len)
2387 {
2388 /* Always the full buffer if not the last descriptor */
2389 if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
2390 return rdata->rx.buf.dma_len;
2391
2392 /* Last descriptor so calculate how much of the buffer was used
2393  * for the last bit of data
2394  */
2395 return rdata->rx.len - len;
2396 }
2397
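/* Reclaim completed Tx descriptors, report the completed work to the
 * stack and wake the Tx queue if it was stopped for lack of descriptors.
 */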
2398 static int xgbe_tx_poll(struct xgbe_channel *channel)
2399 {
2400 struct xgbe_prv_data *pdata = channel->pdata;
2401 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2402 struct xgbe_desc_if *desc_if = &pdata->desc_if;
2403 struct xgbe_ring *ring = channel->tx_ring;
2404 struct xgbe_ring_data *rdata;
2405 struct xgbe_ring_desc *rdesc;
2406 struct net_device *netdev = pdata->netdev;
2407 struct netdev_queue *txq;
2408 int processed = 0;
2409 unsigned int tx_packets = 0, tx_bytes = 0;
2410 unsigned int cur;
2411
2412 DBGPR("-->xgbe_tx_poll\n");
2413
2414 /* Nothing to do if there isn't a Tx ring for this channel */
2415 if (!ring)
2416 return 0;
2417
2418 cur = ring->cur;
2419
2420 /* Be sure we get ring->cur before accessing descriptor data */
2421 smp_rmb();
2422
2423 txq = netdev_get_tx_queue(netdev, channel->queue_index);
2424
2425 while ((processed < XGBE_TX_DESC_MAX_PROC) &&
2426 (ring->dirty != cur)) {
2427 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
2428 rdesc = rdata->rdesc;
2429
2430 if (!hw_if->tx_complete(rdesc))
2431 break;
2432
2433 /* Make sure descriptor fields are read after reading the OWN
2434  * bit */
2435 dma_rmb();
2436
2437 if (netif_msg_tx_done(pdata))
2438 xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
2439
2440 if (hw_if->is_last_desc(rdesc)) {
2441 tx_packets += rdata->tx.packets;
2442 tx_bytes += rdata->tx.bytes;
2443 }
2444
2445 /* Free the SKB and reset the descriptor for re-use */
2446 desc_if->unmap_rdata(pdata, rdata);
2447 hw_if->tx_desc_reset(rdata);
2448
2449 processed++;
2450 ring->dirty++;
2451 }
2452
2453 if (!processed)
2454 return 0;
2455
2456 netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
2457
2458 if ((ring->tx.queue_stopped == 1) &&
2459 (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
2460 ring->tx.queue_stopped = 0;
2461 netif_tx_wake_queue(txq);
2462 }
2463
2464 DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
2465
2466 return processed;
2467 }
2468
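/* Process received packets up to the NAPI budget, building skbs from the
 * header/data buffers and handing them to the stack via GRO.
 */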
2469 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
2470 {
2471 struct xgbe_prv_data *pdata = channel->pdata;
2472 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2473 struct xgbe_ring *ring = channel->rx_ring;
2474 struct xgbe_ring_data *rdata;
2475 struct xgbe_packet_data *packet;
2476 struct net_device *netdev = pdata->netdev;
2477 struct napi_struct *napi;
2478 struct sk_buff *skb;
2479 struct skb_shared_hwtstamps *hwtstamps;
2480 unsigned int last, error, context_next, context;
2481 unsigned int len, buf1_len, buf2_len, max_len;
2482 unsigned int received = 0;
2483 int packet_count = 0;
2484
2485 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
2486
2487 /* Nothing to do if there isn't a Rx ring for this channel */
2488 if (!ring)
2489 return 0;
2490
2491 last = 0;
2492 context_next = 0;
2493
2494 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
2495
2496 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2497 packet = &ring->packet_data;
2498 while (packet_count < budget) {
2499 DBGPR(" cur = %d\n", ring->cur);
2500
2501 /* First time in loop see if we need to restore state */
2502 if (!received && rdata->state_saved) {
2503 skb = rdata->state.skb;
2504 error = rdata->state.error;
2505 len = rdata->state.len;
2506 } else {
2507 memset(packet, 0, sizeof(*packet));
2508 skb = NULL;
2509 error = 0;
2510 len = 0;
2511 }
2512
2513 read_again:
2514 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2515
2516 if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
2517 xgbe_rx_refresh(channel);
2518
2519 if (hw_if->dev_read(channel))
2520 break;
2521
2522 received++;
2523 ring->cur++;
2524
2525 last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
2526 LAST);
2527 context_next = XGMAC_GET_BITS(packet->attributes,
2528 RX_PACKET_ATTRIBUTES,
2529 CONTEXT_NEXT);
2530 context = XGMAC_GET_BITS(packet->attributes,
2531 RX_PACKET_ATTRIBUTES,
2532 CONTEXT);
2533
2534 /* Earlier error, just drain the remaining data */
2535 if ((!last || context_next) && error)
2536 goto read_again;
2537
2538 if (error || packet->errors) {
2539 if (packet->errors)
2540 netif_err(pdata, rx_err, netdev,
2541 "error in received packet\n");
2542 dev_kfree_skb(skb);
2543 goto next_packet;
2544 }
2545
2546 if (!context) {
2547 /* Get the data length in the descriptor buffers */
2548 buf1_len = xgbe_rx_buf1_len(rdata, packet);
2549 len += buf1_len;
2550 buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
2551 len += buf2_len;
2552
2553 if (buf2_len > rdata->rx.buf.dma_len) {
2554 /* Hardware inconsistency within the descriptors
2555  * that has resulted in a length underflow.
2556  */
2557 error = 1;
2558 goto skip_data;
2559 }
2560
2561 if (!skb) {
2562 skb = xgbe_create_skb(pdata, napi, rdata,
2563 buf1_len);
2564 if (!skb) {
2565 error = 1;
2566 goto skip_data;
2567 }
2568 }
2569
2570 if (buf2_len) {
2571 dma_sync_single_range_for_cpu(pdata->dev,
2572 rdata->rx.buf.dma_base,
2573 rdata->rx.buf.dma_off,
2574 rdata->rx.buf.dma_len,
2575 DMA_FROM_DEVICE);
2576
2577 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2578 rdata->rx.buf.pa.pages,
2579 rdata->rx.buf.pa.pages_offset,
2580 buf2_len,
2581 rdata->rx.buf.dma_len);
2582 rdata->rx.buf.pa.pages = NULL;
2583 }
2584 }
2585
2586 skip_data:
2587 if (!last || context_next)
2588 goto read_again;
2589
2590 if (!skb || error) {
2591 dev_kfree_skb(skb);
2592 goto next_packet;
2593 }
2594
2595 /* Be sure we don't exceed the configured MTU */
2596 max_len = netdev->mtu + ETH_HLEN;
2597 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2598 (skb->protocol == htons(ETH_P_8021Q)))
2599 max_len += VLAN_HLEN;
2600
2601 if (skb->len > max_len) {
2602 netif_err(pdata, rx_err, netdev,
2603 "packet length exceeds configured MTU\n");
2604 dev_kfree_skb(skb);
2605 goto next_packet;
2606 }
2607
2608 if (netif_msg_pktdata(pdata))
2609 xgbe_print_pkt(netdev, skb, false);
2610
2611 skb_checksum_none_assert(skb);
2612 if (XGMAC_GET_BITS(packet->attributes,
2613 RX_PACKET_ATTRIBUTES, CSUM_DONE))
2614 skb->ip_summed = CHECKSUM_UNNECESSARY;
2615
2616 if (XGMAC_GET_BITS(packet->attributes,
2617 RX_PACKET_ATTRIBUTES, TNP)) {
2618 skb->encapsulation = 1;
2619
2620 if (XGMAC_GET_BITS(packet->attributes,
2621 RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
2622 skb->csum_level = 1;
2623 }
2624
2625 if (XGMAC_GET_BITS(packet->attributes,
2626 RX_PACKET_ATTRIBUTES, VLAN_CTAG))
2627 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2628 packet->vlan_ctag);
2629
2630 if (XGMAC_GET_BITS(packet->attributes,
2631 RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
2632 u64 nsec;
2633
2634 nsec = timecounter_cyc2time(&pdata->tstamp_tc,
2635 packet->rx_tstamp);
2636 hwtstamps = skb_hwtstamps(skb);
2637 hwtstamps->hwtstamp = ns_to_ktime(nsec);
2638 }
2639
2640 if (XGMAC_GET_BITS(packet->attributes,
2641 RX_PACKET_ATTRIBUTES, RSS_HASH))
2642 skb_set_hash(skb, packet->rss_hash,
2643 packet->rss_hash_type);
2644
2645 skb->dev = netdev;
2646 skb->protocol = eth_type_trans(skb, netdev);
2647 skb_record_rx_queue(skb, channel->queue_index);
2648
2649 napi_gro_receive(napi, skb);
2650
2651 next_packet:
2652 packet_count++;
2653 }
2654
2655 /* Check if we need to save state before leaving */
2656 if (received && (!last || context_next)) {
2657 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
2658 rdata->state_saved = 1;
2659 rdata->state.skb = skb;
2660 rdata->state.len = len;
2661 rdata->state.error = error;
2662 }
2663
2664 DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
2665
2666 return packet_count;
2667 }
2668
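/* NAPI poll routine used when each channel has its own interrupt. */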
2669 static int xgbe_one_poll(struct napi_struct *napi, int budget)
2670 {
2671 struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
2672 napi);
2673 struct xgbe_prv_data *pdata = channel->pdata;
2674 int processed = 0;
2675
2676 DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
2677
2678 /* Cleanup Tx ring first */
2679 xgbe_tx_poll(channel);
2680
2681 /* Process Rx ring next */
2682 processed = xgbe_rx_poll(channel, budget);
2683
2684 /* If we processed everything, we are done */
2685 if ((processed < budget) && napi_complete_done(napi, processed)) {
2686 /* Enable Tx and Rx interrupts */
2687 if (pdata->channel_irq_mode)
2688 xgbe_enable_rx_tx_int(pdata, channel);
2689 else
2690 enable_irq(channel->dma_irq);
2691 }
2692
2693 DBGPR("<--xgbe_one_poll: received = %d\n", processed);
2694
2695 return processed;
2696 }
2697
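/* NAPI poll routine used when all channels share a single interrupt; the
 * budget is split across the Rx rings.
 */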
2698 static int xgbe_all_poll(struct napi_struct *napi, int budget)
2699 {
2700 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
2701 napi);
2702 struct xgbe_channel *channel;
2703 int ring_budget;
2704 int processed, last_processed;
2705 unsigned int i;
2706
2707 DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
2708
2709 processed = 0;
2710 ring_budget = budget / pdata->rx_ring_count;
2711 do {
2712 last_processed = processed;
2713
2714 for (i = 0; i < pdata->channel_count; i++) {
2715 channel = pdata->channel[i];
2716
2717 /* Cleanup Tx ring first */
2718 xgbe_tx_poll(channel);
2719
2720 /* Process Rx ring next */
2721 if (ring_budget > (budget - processed))
2722 ring_budget = budget - processed;
2723 processed += xgbe_rx_poll(channel, ring_budget);
2724 }
2725 } while ((processed < budget) && (processed != last_processed));
2726
2727 /* If we processed everything, we are done */
2728 if ((processed < budget) && napi_complete_done(napi, processed)) {
2729 /* Enable Tx and Rx interrupts */
2730 xgbe_enable_rx_tx_ints(pdata);
2731 }
2732
2733 DBGPR("<--xgbe_all_poll: received = %d\n", processed);
2734
2735 return processed;
2736 }
2737
2738 void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2739 unsigned int idx, unsigned int count, unsigned int flag)
2740 {
2741 struct xgbe_ring_data *rdata;
2742 struct xgbe_ring_desc *rdesc;
2743
2744 while (count--) {
2745 rdata = XGBE_GET_DESC_DATA(ring, idx);
2746 rdesc = rdata->rdesc;
2747 netdev_dbg(pdata->netdev,
2748 "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
2749 (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
2750 le32_to_cpu(rdesc->desc0),
2751 le32_to_cpu(rdesc->desc1),
2752 le32_to_cpu(rdesc->desc2),
2753 le32_to_cpu(rdesc->desc3));
2754 idx++;
2755 }
2756 }
2757
2758 void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2759 unsigned int idx)
2760 {
2761 struct xgbe_ring_data *rdata;
2762 struct xgbe_ring_desc *rdesc;
2763
2764 rdata = XGBE_GET_DESC_DATA(ring, idx);
2765 rdesc = rdata->rdesc;
2766 netdev_dbg(pdata->netdev,
2767 "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
2768 idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
2769 le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
2770 }
2771
2772 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
2773 {
2774 struct ethhdr *eth = (struct ethhdr *)skb->data;
2775 unsigned char buffer[128];
2776 unsigned int i;
2777
2778 netdev_dbg(netdev, "\n************** SKB dump ****************\n");
2779
2780 netdev_dbg(netdev, "%s packet of %d bytes\n",
2781 (tx_rx ? "TX" : "RX"), skb->len);
2782
2783 netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
2784 netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
2785 netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));
2786
2787 for (i = 0; i < skb->len; i += 32) {
2788 unsigned int len = min(skb->len - i, 32U);
2789
2790 hex_dump_to_buffer(&skb->data[i], len, 32, 1,
2791 buffer, sizeof(buffer), false);
2792 netdev_dbg(netdev, " %#06x: %s\n", i, buffer);
2793 }
2794
2795 netdev_dbg(netdev, "\n************** SKB dump ****************\n");
2796 }