0001 /* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
0002  *
0003  * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
0004  *
0005  * This program is dual-licensed; you may select either version 2 of
0006  * the GNU General Public License ("GPL") or BSD license ("BSD").
0007  *
0008  * This Synopsys DWC XLGMAC software driver and associated documentation
0009  * (hereinafter the "Software") is an unsupported proprietary work of
0010  * Synopsys, Inc. unless otherwise expressly agreed to in writing between
0011  * Synopsys and you. The Software IS NOT an item of Licensed Software or a
0012  * Licensed Product under any End User Software License Agreement or
0013  * Agreement for Licensed Products with Synopsys or any supplement thereto.
0014  * Synopsys is a registered trademark of Synopsys, Inc. Other names included
0015  * in the SOFTWARE may be the trademarks of their respective owners.
0016  */
0017 
0018 #include <linux/netdevice.h>
0019 #include <linux/tcp.h>
0020 #include <linux/interrupt.h>
0021 
0022 #include "dwc-xlgmac.h"
0023 #include "dwc-xlgmac-reg.h"
0024 
0025 static int xlgmac_one_poll(struct napi_struct *, int);
0026 static int xlgmac_all_poll(struct napi_struct *, int);
0027 
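     /* Ring indices 'cur' and 'dirty' are free-running unsigned counters,
      * so the differences computed below stay correct across wraparound.
      */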
0028 static inline unsigned int xlgmac_tx_avail_desc(struct xlgmac_ring *ring)
0029 {
0030     return (ring->dma_desc_count - (ring->cur - ring->dirty));
0031 }
0032 
0033 static inline unsigned int xlgmac_rx_dirty_desc(struct xlgmac_ring *ring)
0034 {
0035     return (ring->cur - ring->dirty);
0036 }
0037 
0038 static int xlgmac_maybe_stop_tx_queue(
0039             struct xlgmac_channel *channel,
0040             struct xlgmac_ring *ring,
0041             unsigned int count)
0042 {
0043     struct xlgmac_pdata *pdata = channel->pdata;
0044 
0045     if (count > xlgmac_tx_avail_desc(ring)) {
0046         netif_info(pdata, drv, pdata->netdev,
0047                "Tx queue stopped, not enough descriptors available\n");
0048         netif_stop_subqueue(pdata->netdev, channel->queue_index);
0049         ring->tx.queue_stopped = 1;
0050 
0051         /* If we haven't notified the hardware because of xmit_more
0052          * support, tell it now
0053          */
0054         if (ring->tx.xmit_more)
0055             pdata->hw_ops.tx_start_xmit(channel, ring);
0056 
0057         return NETDEV_TX_BUSY;
0058     }
0059 
0060     return 0;
0061 }
0062 
0063 static void xlgmac_prep_vlan(struct sk_buff *skb,
0064                  struct xlgmac_pkt_info *pkt_info)
0065 {
0066     if (skb_vlan_tag_present(skb))
0067         pkt_info->vlan_ctag = skb_vlan_tag_get(skb);
0068 }
0069 
0070 static int xlgmac_prep_tso(struct sk_buff *skb,
0071                struct xlgmac_pkt_info *pkt_info)
0072 {
0073     int ret;
0074 
0075     if (!XLGMAC_GET_REG_BITS(pkt_info->attributes,
0076                  TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
0077                  TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN))
0078         return 0;
0079 
0080     ret = skb_cow_head(skb, 0);
0081     if (ret)
0082         return ret;
0083 
0084     pkt_info->header_len = skb_tcp_all_headers(skb);
0085     pkt_info->tcp_header_len = tcp_hdrlen(skb);
0086     pkt_info->tcp_payload_len = skb->len - pkt_info->header_len;
0087     pkt_info->mss = skb_shinfo(skb)->gso_size;
0088 
0089     XLGMAC_PR("header_len=%u\n", pkt_info->header_len);
0090     XLGMAC_PR("tcp_header_len=%u, tcp_payload_len=%u\n",
0091           pkt_info->tcp_header_len, pkt_info->tcp_payload_len);
0092     XLGMAC_PR("mss=%u\n", pkt_info->mss);
0093 
0094     /* Update the number of packets that will ultimately be transmitted
0095      * along with the extra header bytes added for each extra packet
0096      */
0097     pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
0098     pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;
0099 
0100     return 0;
0101 }
0102 
0103 static int xlgmac_is_tso(struct sk_buff *skb)
0104 {
0105     if (skb->ip_summed != CHECKSUM_PARTIAL)
0106         return 0;
0107 
0108     if (!skb_is_gso(skb))
0109         return 0;
0110 
0111     return 1;
0112 }
0113 
0114 static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
0115                    struct xlgmac_ring *ring,
0116                    struct sk_buff *skb,
0117                    struct xlgmac_pkt_info *pkt_info)
0118 {
0119     skb_frag_t *frag;
0120     unsigned int context_desc;
0121     unsigned int len;
0122     unsigned int i;
0123 
0124     pkt_info->skb = skb;
0125 
0126     context_desc = 0;
0127     pkt_info->desc_count = 0;
0128 
0129     pkt_info->tx_packets = 1;
0130     pkt_info->tx_bytes = skb->len;
0131 
0132     if (xlgmac_is_tso(skb)) {
0133         /* TSO requires an extra descriptor if mss is different */
0134         if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
0135             context_desc = 1;
0136             pkt_info->desc_count++;
0137         }
0138 
0139         /* TSO requires an extra descriptor for TSO header */
0140         pkt_info->desc_count++;
0141 
0142         pkt_info->attributes = XLGMAC_SET_REG_BITS(
0143                     pkt_info->attributes,
0144                     TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
0145                     TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN,
0146                     1);
0147         pkt_info->attributes = XLGMAC_SET_REG_BITS(
0148                     pkt_info->attributes,
0149                     TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
0150                     TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
0151                     1);
0152     } else if (skb->ip_summed == CHECKSUM_PARTIAL)
0153         pkt_info->attributes = XLGMAC_SET_REG_BITS(
0154                     pkt_info->attributes,
0155                     TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS,
0156                     TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN,
0157                     1);
0158 
0159     if (skb_vlan_tag_present(skb)) {
0160         /* VLAN requires an extra descriptor if tag is different */
0161         if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
0162             /* We can share with the TSO context descriptor */
0163             if (!context_desc) {
0164                 context_desc = 1;
0165                 pkt_info->desc_count++;
0166             }
0167 
0168         pkt_info->attributes = XLGMAC_SET_REG_BITS(
0169                     pkt_info->attributes,
0170                     TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
0171                     TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN,
0172                     1);
0173     }
0174 
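         /* Count the data descriptors needed for the linear part of the
          * skb and for each fragment, splitting buffers larger than
          * XLGMAC_TX_MAX_BUF_SIZE across multiple descriptors.
          */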
0175     for (len = skb_headlen(skb); len;) {
0176         pkt_info->desc_count++;
0177         len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
0178     }
0179 
0180     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
0181         frag = &skb_shinfo(skb)->frags[i];
0182         for (len = skb_frag_size(frag); len; ) {
0183             pkt_info->desc_count++;
0184             len -= min_t(unsigned int, len, XLGMAC_TX_MAX_BUF_SIZE);
0185         }
0186     }
0187 }
0188 
0189 static int xlgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
0190 {
0191     unsigned int rx_buf_size;
0192 
0193     if (mtu > XLGMAC_JUMBO_PACKET_MTU) {
0194         netdev_alert(netdev, "MTU exceeds maximum supported value\n");
0195         return -EINVAL;
0196     }
0197 
0198     rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
0199     rx_buf_size = clamp_val(rx_buf_size, XLGMAC_RX_MIN_BUF_SIZE, PAGE_SIZE);
0200 
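         /* Round up to a multiple of XLGMAC_RX_BUF_ALIGN */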
0201     rx_buf_size = (rx_buf_size + XLGMAC_RX_BUF_ALIGN - 1) &
0202               ~(XLGMAC_RX_BUF_ALIGN - 1);
0203 
0204     return rx_buf_size;
0205 }
0206 
0207 static void xlgmac_enable_rx_tx_ints(struct xlgmac_pdata *pdata)
0208 {
0209     struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
0210     struct xlgmac_channel *channel;
0211     enum xlgmac_int int_id;
0212     unsigned int i;
0213 
0214     channel = pdata->channel_head;
0215     for (i = 0; i < pdata->channel_count; i++, channel++) {
0216         if (channel->tx_ring && channel->rx_ring)
0217             int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
0218         else if (channel->tx_ring)
0219             int_id = XLGMAC_INT_DMA_CH_SR_TI;
0220         else if (channel->rx_ring)
0221             int_id = XLGMAC_INT_DMA_CH_SR_RI;
0222         else
0223             continue;
0224 
0225         hw_ops->enable_int(channel, int_id);
0226     }
0227 }
0228 
0229 static void xlgmac_disable_rx_tx_ints(struct xlgmac_pdata *pdata)
0230 {
0231     struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
0232     struct xlgmac_channel *channel;
0233     enum xlgmac_int int_id;
0234     unsigned int i;
0235 
0236     channel = pdata->channel_head;
0237     for (i = 0; i < pdata->channel_count; i++, channel++) {
0238         if (channel->tx_ring && channel->rx_ring)
0239             int_id = XLGMAC_INT_DMA_CH_SR_TI_RI;
0240         else if (channel->tx_ring)
0241             int_id = XLGMAC_INT_DMA_CH_SR_TI;
0242         else if (channel->rx_ring)
0243             int_id = XLGMAC_INT_DMA_CH_SR_RI;
0244         else
0245             continue;
0246 
0247         hw_ops->disable_int(channel, int_id);
0248     }
0249 }
0250 
0251 static irqreturn_t xlgmac_isr(int irq, void *data)
0252 {
0253     unsigned int dma_isr, dma_ch_isr, mac_isr;
0254     struct xlgmac_pdata *pdata = data;
0255     struct xlgmac_channel *channel;
0256     struct xlgmac_hw_ops *hw_ops;
0257     unsigned int i, ti, ri;
0258 
0259     hw_ops = &pdata->hw_ops;
0260 
0261     /* The DMA interrupt status register also reports MAC and MTL
0262      * interrupts, so for polling mode we just need to check this
0263      * register for a non-zero value.
0264      */
0265     dma_isr = readl(pdata->mac_regs + DMA_ISR);
0266     if (!dma_isr)
0267         return IRQ_HANDLED;
0268 
0269     netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
0270 
0271     for (i = 0; i < pdata->channel_count; i++) {
0272         if (!(dma_isr & (1 << i)))
0273             continue;
0274 
0275         channel = pdata->channel_head + i;
0276 
0277         dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
0278         netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
0279               i, dma_ch_isr);
0280 
0281         /* The TI or RI interrupt bits may still be set even if using
0282          * per channel DMA interrupts. Check to be sure those are not
0283          * enabled before using the private data napi structure.
0284          */
0285         ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
0286                      DMA_CH_SR_TI_LEN);
0287         ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
0288                      DMA_CH_SR_RI_LEN);
0289         if (!pdata->per_channel_irq && (ti || ri)) {
0290             if (napi_schedule_prep(&pdata->napi)) {
0291                 /* Disable Tx and Rx interrupts */
0292                 xlgmac_disable_rx_tx_ints(pdata);
0293 
0294                 pdata->stats.napi_poll_isr++;
0295                 /* Turn on polling */
0296                 __napi_schedule_irqoff(&pdata->napi);
0297             }
0298         }
0299 
0300         if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
0301                     DMA_CH_SR_TPS_LEN))
0302             pdata->stats.tx_process_stopped++;
0303 
0304         if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
0305                     DMA_CH_SR_RPS_LEN))
0306             pdata->stats.rx_process_stopped++;
0307 
0308         if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
0309                     DMA_CH_SR_TBU_LEN))
0310             pdata->stats.tx_buffer_unavailable++;
0311 
0312         if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
0313                     DMA_CH_SR_RBU_LEN))
0314             pdata->stats.rx_buffer_unavailable++;
0315 
0316         /* Restart the device on a Fatal Bus Error */
0317         if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
0318                     DMA_CH_SR_FBE_LEN)) {
0319             pdata->stats.fatal_bus_error++;
0320             schedule_work(&pdata->restart_work);
0321         }
0322 
0323         /* Clear all interrupt signals */
0324         writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
0325     }
0326 
0327     if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
0328                 DMA_ISR_MACIS_LEN)) {
0329         mac_isr = readl(pdata->mac_regs + MAC_ISR);
0330 
0331         if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
0332                     MAC_ISR_MMCTXIS_LEN))
0333             hw_ops->tx_mmc_int(pdata);
0334 
0335         if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
0336                     MAC_ISR_MMCRXIS_LEN))
0337             hw_ops->rx_mmc_int(pdata);
0338     }
0339 
0340     return IRQ_HANDLED;
0341 }
0342 
0343 static irqreturn_t xlgmac_dma_isr(int irq, void *data)
0344 {
0345     struct xlgmac_channel *channel = data;
0346 
0347     /* Per channel DMA interrupts are enabled, so we use the per
0348      * channel napi structure and not the private data napi structure
0349      */
0350     if (napi_schedule_prep(&channel->napi)) {
0351         /* Disable Tx and Rx interrupts */
0352         disable_irq_nosync(channel->dma_irq);
0353 
0354         /* Turn on polling */
0355         __napi_schedule_irqoff(&channel->napi);
0356     }
0357 
0358     return IRQ_HANDLED;
0359 }
0360 
0361 static void xlgmac_tx_timer(struct timer_list *t)
0362 {
0363     struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
0364     struct xlgmac_pdata *pdata = channel->pdata;
0365     struct napi_struct *napi;
0366 
0367     napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
0368 
0369     if (napi_schedule_prep(napi)) {
0370         /* Disable Tx and Rx interrupts */
0371         if (pdata->per_channel_irq)
0372             disable_irq_nosync(channel->dma_irq);
0373         else
0374             xlgmac_disable_rx_tx_ints(pdata);
0375 
0376         pdata->stats.napi_poll_txtimer++;
0377         /* Turn on polling */
0378         __napi_schedule(napi);
0379     }
0380 
0381     channel->tx_timer_active = 0;
0382 }
0383 
0384 static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
0385 {
0386     struct xlgmac_channel *channel;
0387     unsigned int i;
0388 
0389     channel = pdata->channel_head;
0390     for (i = 0; i < pdata->channel_count; i++, channel++) {
0391         if (!channel->tx_ring)
0392             break;
0393 
0394         timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
0395     }
0396 }
0397 
0398 static void xlgmac_stop_timers(struct xlgmac_pdata *pdata)
0399 {
0400     struct xlgmac_channel *channel;
0401     unsigned int i;
0402 
0403     channel = pdata->channel_head;
0404     for (i = 0; i < pdata->channel_count; i++, channel++) {
0405         if (!channel->tx_ring)
0406             break;
0407 
0408         del_timer_sync(&channel->tx_timer);
0409     }
0410 }
0411 
0412 static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
0413 {
0414     struct xlgmac_channel *channel;
0415     unsigned int i;
0416 
0417     if (pdata->per_channel_irq) {
0418         channel = pdata->channel_head;
0419         for (i = 0; i < pdata->channel_count; i++, channel++) {
0420             if (add)
0421                 netif_napi_add(pdata->netdev, &channel->napi,
0422                            xlgmac_one_poll,
0423                            NAPI_POLL_WEIGHT);
0424 
0425             napi_enable(&channel->napi);
0426         }
0427     } else {
0428         if (add)
0429             netif_napi_add(pdata->netdev, &pdata->napi,
0430                        xlgmac_all_poll, NAPI_POLL_WEIGHT);
0431 
0432         napi_enable(&pdata->napi);
0433     }
0434 }
0435 
0436 static void xlgmac_napi_disable(struct xlgmac_pdata *pdata, unsigned int del)
0437 {
0438     struct xlgmac_channel *channel;
0439     unsigned int i;
0440 
0441     if (pdata->per_channel_irq) {
0442         channel = pdata->channel_head;
0443         for (i = 0; i < pdata->channel_count; i++, channel++) {
0444             napi_disable(&channel->napi);
0445 
0446             if (del)
0447                 netif_napi_del(&channel->napi);
0448         }
0449     } else {
0450         napi_disable(&pdata->napi);
0451 
0452         if (del)
0453             netif_napi_del(&pdata->napi);
0454     }
0455 }
0456 
0457 static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
0458 {
0459     struct net_device *netdev = pdata->netdev;
0460     struct xlgmac_channel *channel;
0461     unsigned int i;
0462     int ret;
0463 
0464     ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
0465                    IRQF_SHARED, netdev->name, pdata);
0466     if (ret) {
0467         netdev_alert(netdev, "error requesting irq %d\n",
0468                  pdata->dev_irq);
0469         return ret;
0470     }
0471 
0472     if (!pdata->per_channel_irq)
0473         return 0;
0474 
0475     channel = pdata->channel_head;
0476     for (i = 0; i < pdata->channel_count; i++, channel++) {
0477         snprintf(channel->dma_irq_name,
0478              sizeof(channel->dma_irq_name) - 1,
0479              "%s-TxRx-%u", netdev_name(netdev),
0480              channel->queue_index);
0481 
0482         ret = devm_request_irq(pdata->dev, channel->dma_irq,
0483                        xlgmac_dma_isr, 0,
0484                        channel->dma_irq_name, channel);
0485         if (ret) {
0486             netdev_alert(netdev, "error requesting irq %d\n",
0487                      channel->dma_irq);
0488             goto err_irq;
0489         }
0490     }
0491 
0492     return 0;
0493 
0494 err_irq:
0495     /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
0496     for (i--, channel--; i < pdata->channel_count; i--, channel--)
0497         devm_free_irq(pdata->dev, channel->dma_irq, channel);
0498 
0499     devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
0500 
0501     return ret;
0502 }
0503 
0504 static void xlgmac_free_irqs(struct xlgmac_pdata *pdata)
0505 {
0506     struct xlgmac_channel *channel;
0507     unsigned int i;
0508 
0509     devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
0510 
0511     if (!pdata->per_channel_irq)
0512         return;
0513 
0514     channel = pdata->channel_head;
0515     for (i = 0; i < pdata->channel_count; i++, channel++)
0516         devm_free_irq(pdata->dev, channel->dma_irq, channel);
0517 }
0518 
0519 static void xlgmac_free_tx_data(struct xlgmac_pdata *pdata)
0520 {
0521     struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
0522     struct xlgmac_desc_data *desc_data;
0523     struct xlgmac_channel *channel;
0524     struct xlgmac_ring *ring;
0525     unsigned int i, j;
0526 
0527     channel = pdata->channel_head;
0528     for (i = 0; i < pdata->channel_count; i++, channel++) {
0529         ring = channel->tx_ring;
0530         if (!ring)
0531             break;
0532 
0533         for (j = 0; j < ring->dma_desc_count; j++) {
0534             desc_data = XLGMAC_GET_DESC_DATA(ring, j);
0535             desc_ops->unmap_desc_data(pdata, desc_data);
0536         }
0537     }
0538 }
0539 
0540 static void xlgmac_free_rx_data(struct xlgmac_pdata *pdata)
0541 {
0542     struct xlgmac_desc_ops *desc_ops = &pdata->desc_ops;
0543     struct xlgmac_desc_data *desc_data;
0544     struct xlgmac_channel *channel;
0545     struct xlgmac_ring *ring;
0546     unsigned int i, j;
0547 
0548     channel = pdata->channel_head;
0549     for (i = 0; i < pdata->channel_count; i++, channel++) {
0550         ring = channel->rx_ring;
0551         if (!ring)
0552             break;
0553 
0554         for (j = 0; j < ring->dma_desc_count; j++) {
0555             desc_data = XLGMAC_GET_DESC_DATA(ring, j);
0556             desc_ops->unmap_desc_data(pdata, desc_data);
0557         }
0558     }
0559 }
0560 
0561 static int xlgmac_start(struct xlgmac_pdata *pdata)
0562 {
0563     struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
0564     struct net_device *netdev = pdata->netdev;
0565     int ret;
0566 
0567     hw_ops->init(pdata);
0568     xlgmac_napi_enable(pdata, 1);
0569 
0570     ret = xlgmac_request_irqs(pdata);
0571     if (ret)
0572         goto err_napi;
0573 
0574     hw_ops->enable_tx(pdata);
0575     hw_ops->enable_rx(pdata);
0576     netif_tx_start_all_queues(netdev);
0577 
0578     return 0;
0579 
0580 err_napi:
0581     xlgmac_napi_disable(pdata, 1);
0582     hw_ops->exit(pdata);
0583 
0584     return ret;
0585 }
0586 
0587 static void xlgmac_stop(struct xlgmac_pdata *pdata)
0588 {
0589     struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
0590     struct net_device *netdev = pdata->netdev;
0591     struct xlgmac_channel *channel;
0592     struct netdev_queue *txq;
0593     unsigned int i;
0594 
0595     netif_tx_stop_all_queues(netdev);
0596     xlgmac_stop_timers(pdata);
0597     hw_ops->disable_tx(pdata);
0598     hw_ops->disable_rx(pdata);
0599     xlgmac_free_irqs(pdata);
0600     xlgmac_napi_disable(pdata, 1);
0601     hw_ops->exit(pdata);
0602 
0603     channel = pdata->channel_head;
0604     for (i = 0; i < pdata->channel_count; i++, channel++) {
0605         if (!channel->tx_ring)
0606             continue;
0607 
0608         txq = netdev_get_tx_queue(netdev, channel->queue_index);
0609         netdev_tx_reset_queue(txq);
0610     }
0611 }
0612 
0613 static void xlgmac_restart_dev(struct xlgmac_pdata *pdata)
0614 {
0615     /* If not running, "restart" will happen on open */
0616     if (!netif_running(pdata->netdev))
0617         return;
0618 
0619     xlgmac_stop(pdata);
0620 
0621     xlgmac_free_tx_data(pdata);
0622     xlgmac_free_rx_data(pdata);
0623 
0624     xlgmac_start(pdata);
0625 }
0626 
0627 static void xlgmac_restart(struct work_struct *work)
0628 {
0629     struct xlgmac_pdata *pdata = container_of(work,
0630                            struct xlgmac_pdata,
0631                            restart_work);
0632 
0633     rtnl_lock();
0634 
0635     xlgmac_restart_dev(pdata);
0636 
0637     rtnl_unlock();
0638 }
0639 
0640 static int xlgmac_open(struct net_device *netdev)
0641 {
0642     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0643     struct xlgmac_desc_ops *desc_ops;
0644     int ret;
0645 
0646     desc_ops = &pdata->desc_ops;
0647 
0648     /* TODO: Initialize the phy */
0649 
0650     /* Calculate the Rx buffer size before allocating rings */
0651     ret = xlgmac_calc_rx_buf_size(netdev, netdev->mtu);
0652     if (ret < 0)
0653         return ret;
0654     pdata->rx_buf_size = ret;
0655 
0656     /* Allocate the channels and rings */
0657     ret = desc_ops->alloc_channels_and_rings(pdata);
0658     if (ret)
0659         return ret;
0660 
0661     INIT_WORK(&pdata->restart_work, xlgmac_restart);
0662     xlgmac_init_timers(pdata);
0663 
0664     ret = xlgmac_start(pdata);
0665     if (ret)
0666         goto err_channels_and_rings;
0667 
0668     return 0;
0669 
0670 err_channels_and_rings:
0671     desc_ops->free_channels_and_rings(pdata);
0672 
0673     return ret;
0674 }
0675 
0676 static int xlgmac_close(struct net_device *netdev)
0677 {
0678     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0679     struct xlgmac_desc_ops *desc_ops;
0680 
0681     desc_ops = &pdata->desc_ops;
0682 
0683     /* Stop the device */
0684     xlgmac_stop(pdata);
0685 
0686     /* Free the channels and rings */
0687     desc_ops->free_channels_and_rings(pdata);
0688 
0689     return 0;
0690 }
0691 
0692 static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
0693 {
0694     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0695 
0696     netdev_warn(netdev, "tx timeout, device restarting\n");
0697     schedule_work(&pdata->restart_work);
0698 }
0699 
0700 static netdev_tx_t xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
0701 {
0702     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0703     struct xlgmac_pkt_info *tx_pkt_info;
0704     struct xlgmac_desc_ops *desc_ops;
0705     struct xlgmac_channel *channel;
0706     struct xlgmac_hw_ops *hw_ops;
0707     struct netdev_queue *txq;
0708     struct xlgmac_ring *ring;
0709     int ret;
0710 
0711     desc_ops = &pdata->desc_ops;
0712     hw_ops = &pdata->hw_ops;
0713 
0714     XLGMAC_PR("skb->len = %d\n", skb->len);
0715 
0716     channel = pdata->channel_head + skb->queue_mapping;
0717     txq = netdev_get_tx_queue(netdev, channel->queue_index);
0718     ring = channel->tx_ring;
0719     tx_pkt_info = &ring->pkt_info;
0720 
0721     if (skb->len == 0) {
0722         netif_err(pdata, tx_err, netdev,
0723               "empty skb received from stack\n");
0724         dev_kfree_skb_any(skb);
0725         return NETDEV_TX_OK;
0726     }
0727 
0728     /* Prepare preliminary packet info for TX */
0729     memset(tx_pkt_info, 0, sizeof(*tx_pkt_info));
0730     xlgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info);
0731 
0732     /* Check that there are enough descriptors available */
0733     ret = xlgmac_maybe_stop_tx_queue(channel, ring,
0734                      tx_pkt_info->desc_count);
0735     if (ret)
0736         return ret;
0737 
0738     ret = xlgmac_prep_tso(skb, tx_pkt_info);
0739     if (ret) {
0740         netif_err(pdata, tx_err, netdev,
0741               "error processing TSO packet\n");
0742         dev_kfree_skb_any(skb);
0743         return ret;
0744     }
0745     xlgmac_prep_vlan(skb, tx_pkt_info);
0746 
0747     if (!desc_ops->map_tx_skb(channel, skb)) {
0748         dev_kfree_skb_any(skb);
0749         return NETDEV_TX_OK;
0750     }
0751 
0752     /* Report on the actual number of bytes (to be) sent */
0753     netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);
0754 
0755     /* Configure required descriptor fields for transmission */
0756     hw_ops->dev_xmit(channel);
0757 
0758     if (netif_msg_pktdata(pdata))
0759         xlgmac_print_pkt(netdev, skb, true);
0760 
0761     /* Stop the queue in advance if there may not be enough descriptors */
0762     xlgmac_maybe_stop_tx_queue(channel, ring, XLGMAC_TX_MAX_DESC_NR);
0763 
0764     return NETDEV_TX_OK;
0765 }
0766 
0767 static void xlgmac_get_stats64(struct net_device *netdev,
0768                    struct rtnl_link_stats64 *s)
0769 {
0770     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0771     struct xlgmac_stats *pstats = &pdata->stats;
0772 
0773     pdata->hw_ops.read_mmc_stats(pdata);
0774 
0775     s->rx_packets = pstats->rxframecount_gb;
0776     s->rx_bytes = pstats->rxoctetcount_gb;
0777     s->rx_errors = pstats->rxframecount_gb -
0778                pstats->rxbroadcastframes_g -
0779                pstats->rxmulticastframes_g -
0780                pstats->rxunicastframes_g;
0781     s->multicast = pstats->rxmulticastframes_g;
0782     s->rx_length_errors = pstats->rxlengtherror;
0783     s->rx_crc_errors = pstats->rxcrcerror;
0784     s->rx_fifo_errors = pstats->rxfifooverflow;
0785 
0786     s->tx_packets = pstats->txframecount_gb;
0787     s->tx_bytes = pstats->txoctetcount_gb;
0788     s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
0789     s->tx_dropped = netdev->stats.tx_dropped;
0790 }
0791 
0792 static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
0793 {
0794     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0795     struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
0796     struct sockaddr *saddr = addr;
0797 
0798     if (!is_valid_ether_addr(saddr->sa_data))
0799         return -EADDRNOTAVAIL;
0800 
0801     eth_hw_addr_set(netdev, saddr->sa_data);
0802 
0803     hw_ops->set_mac_address(pdata, netdev->dev_addr);
0804 
0805     return 0;
0806 }
0807 
0808 static int xlgmac_ioctl(struct net_device *netdev,
0809             struct ifreq *ifreq, int cmd)
0810 {
0811     if (!netif_running(netdev))
0812         return -ENODEV;
0813 
0814     return 0;
0815 }
0816 
0817 static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
0818 {
0819     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0820     int ret;
0821 
0822     ret = xlgmac_calc_rx_buf_size(netdev, mtu);
0823     if (ret < 0)
0824         return ret;
0825 
0826     pdata->rx_buf_size = ret;
0827     netdev->mtu = mtu;
0828 
0829     xlgmac_restart_dev(pdata);
0830 
0831     return 0;
0832 }
0833 
0834 static int xlgmac_vlan_rx_add_vid(struct net_device *netdev,
0835                   __be16 proto,
0836                   u16 vid)
0837 {
0838     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0839     struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
0840 
0841     set_bit(vid, pdata->active_vlans);
0842     hw_ops->update_vlan_hash_table(pdata);
0843 
0844     return 0;
0845 }
0846 
0847 static int xlgmac_vlan_rx_kill_vid(struct net_device *netdev,
0848                    __be16 proto,
0849                    u16 vid)
0850 {
0851     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0852     struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
0853 
0854     clear_bit(vid, pdata->active_vlans);
0855     hw_ops->update_vlan_hash_table(pdata);
0856 
0857     return 0;
0858 }
0859 
0860 #ifdef CONFIG_NET_POLL_CONTROLLER
0861 static void xlgmac_poll_controller(struct net_device *netdev)
0862 {
0863     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0864     struct xlgmac_channel *channel;
0865     unsigned int i;
0866 
0867     if (pdata->per_channel_irq) {
0868         channel = pdata->channel_head;
0869         for (i = 0; i < pdata->channel_count; i++, channel++)
0870             xlgmac_dma_isr(channel->dma_irq, channel);
0871     } else {
0872         disable_irq(pdata->dev_irq);
0873         xlgmac_isr(pdata->dev_irq, pdata);
0874         enable_irq(pdata->dev_irq);
0875     }
0876 }
0877 #endif /* CONFIG_NET_POLL_CONTROLLER */
0878 
0879 static int xlgmac_set_features(struct net_device *netdev,
0880                    netdev_features_t features)
0881 {
0882     netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
0883     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0884     struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
0885     int ret = 0;
0886 
0887     rxhash = pdata->netdev_features & NETIF_F_RXHASH;
0888     rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
0889     rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
0890     rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
0891 
0892     if ((features & NETIF_F_RXHASH) && !rxhash)
0893         ret = hw_ops->enable_rss(pdata);
0894     else if (!(features & NETIF_F_RXHASH) && rxhash)
0895         ret = hw_ops->disable_rss(pdata);
0896     if (ret)
0897         return ret;
0898 
0899     if ((features & NETIF_F_RXCSUM) && !rxcsum)
0900         hw_ops->enable_rx_csum(pdata);
0901     else if (!(features & NETIF_F_RXCSUM) && rxcsum)
0902         hw_ops->disable_rx_csum(pdata);
0903 
0904     if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
0905         hw_ops->enable_rx_vlan_stripping(pdata);
0906     else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
0907         hw_ops->disable_rx_vlan_stripping(pdata);
0908 
0909     if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
0910         hw_ops->enable_rx_vlan_filtering(pdata);
0911     else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
0912         hw_ops->disable_rx_vlan_filtering(pdata);
0913 
0914     pdata->netdev_features = features;
0915 
0916     return 0;
0917 }
0918 
0919 static void xlgmac_set_rx_mode(struct net_device *netdev)
0920 {
0921     struct xlgmac_pdata *pdata = netdev_priv(netdev);
0922     struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
0923 
0924     hw_ops->config_rx_mode(pdata);
0925 }
0926 
0927 static const struct net_device_ops xlgmac_netdev_ops = {
0928     .ndo_open       = xlgmac_open,
0929     .ndo_stop       = xlgmac_close,
0930     .ndo_start_xmit     = xlgmac_xmit,
0931     .ndo_tx_timeout     = xlgmac_tx_timeout,
0932     .ndo_get_stats64    = xlgmac_get_stats64,
0933     .ndo_change_mtu     = xlgmac_change_mtu,
0934     .ndo_set_mac_address    = xlgmac_set_mac_address,
0935     .ndo_validate_addr  = eth_validate_addr,
0936     .ndo_eth_ioctl      = xlgmac_ioctl,
0937     .ndo_vlan_rx_add_vid    = xlgmac_vlan_rx_add_vid,
0938     .ndo_vlan_rx_kill_vid   = xlgmac_vlan_rx_kill_vid,
0939 #ifdef CONFIG_NET_POLL_CONTROLLER
0940     .ndo_poll_controller    = xlgmac_poll_controller,
0941 #endif
0942     .ndo_set_features   = xlgmac_set_features,
0943     .ndo_set_rx_mode    = xlgmac_set_rx_mode,
0944 };
0945 
0946 const struct net_device_ops *xlgmac_get_netdev_ops(void)
0947 {
0948     return &xlgmac_netdev_ops;
0949 }
0950 
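     /* Re-arm descriptors the hardware has handed back: give each dirty
      * entry a fresh Rx buffer, reset it for the device, then update the
      * Rx tail pointer register.
      */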
0951 static void xlgmac_rx_refresh(struct xlgmac_channel *channel)
0952 {
0953     struct xlgmac_pdata *pdata = channel->pdata;
0954     struct xlgmac_ring *ring = channel->rx_ring;
0955     struct xlgmac_desc_data *desc_data;
0956     struct xlgmac_desc_ops *desc_ops;
0957     struct xlgmac_hw_ops *hw_ops;
0958 
0959     desc_ops = &pdata->desc_ops;
0960     hw_ops = &pdata->hw_ops;
0961 
0962     while (ring->dirty != ring->cur) {
0963         desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
0964 
0965         /* Reset desc_data values */
0966         desc_ops->unmap_desc_data(pdata, desc_data);
0967 
0968         if (desc_ops->map_rx_buffer(pdata, ring, desc_data))
0969             break;
0970 
0971         hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);
0972 
0973         ring->dirty++;
0974     }
0975 
0976     /* Make sure everything is written before the register write */
0977     wmb();
0978 
0979     /* Update the Rx Tail Pointer Register with address of
0980      * the last cleaned entry
0981      */
0982     desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty - 1);
0983     writel(lower_32_bits(desc_data->dma_desc_addr),
0984            XLGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
0985 }
0986 
0987 static struct sk_buff *xlgmac_create_skb(struct xlgmac_pdata *pdata,
0988                      struct napi_struct *napi,
0989                      struct xlgmac_desc_data *desc_data,
0990                      unsigned int len)
0991 {
0992     unsigned int copy_len;
0993     struct sk_buff *skb;
0994     u8 *packet;
0995 
0996     skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len);
0997     if (!skb)
0998         return NULL;
0999 
1000     /* Start with the header buffer which may contain just the header
1001      * or the header plus data
1002      */
1003     dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
1004                       desc_data->rx.hdr.dma_off,
1005                       desc_data->rx.hdr.dma_len,
1006                       DMA_FROM_DEVICE);
1007 
1008     packet = page_address(desc_data->rx.hdr.pa.pages) +
1009          desc_data->rx.hdr.pa.pages_offset;
1010     copy_len = (desc_data->rx.hdr_len) ? desc_data->rx.hdr_len : len;
1011     copy_len = min(desc_data->rx.hdr.dma_len, copy_len);
1012     skb_copy_to_linear_data(skb, packet, copy_len);
1013     skb_put(skb, copy_len);
1014 
1015     len -= copy_len;
1016     if (len) {
1017         /* Add the remaining data as a frag */
1018         dma_sync_single_range_for_cpu(pdata->dev,
1019                           desc_data->rx.buf.dma_base,
1020                           desc_data->rx.buf.dma_off,
1021                           desc_data->rx.buf.dma_len,
1022                           DMA_FROM_DEVICE);
1023 
1024         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1025                 desc_data->rx.buf.pa.pages,
1026                 desc_data->rx.buf.pa.pages_offset,
1027                 len, desc_data->rx.buf.dma_len);
1028         desc_data->rx.buf.pa.pages = NULL;
1029     }
1030 
1031     return skb;
1032 }
1033 
1034 static int xlgmac_tx_poll(struct xlgmac_channel *channel)
1035 {
1036     struct xlgmac_pdata *pdata = channel->pdata;
1037     struct xlgmac_ring *ring = channel->tx_ring;
1038     struct net_device *netdev = pdata->netdev;
1039     unsigned int tx_packets = 0, tx_bytes = 0;
1040     struct xlgmac_desc_data *desc_data;
1041     struct xlgmac_dma_desc *dma_desc;
1042     struct xlgmac_desc_ops *desc_ops;
1043     struct xlgmac_hw_ops *hw_ops;
1044     struct netdev_queue *txq;
1045     int processed = 0;
1046     unsigned int cur;
1047 
1048     desc_ops = &pdata->desc_ops;
1049     hw_ops = &pdata->hw_ops;
1050 
1051     /* Nothing to do if there isn't a Tx ring for this channel */
1052     if (!ring)
1053         return 0;
1054 
1055     cur = ring->cur;
1056 
1057     /* Be sure we get ring->cur before accessing descriptor data */
1058     smp_rmb();
1059 
1060     txq = netdev_get_tx_queue(netdev, channel->queue_index);
1061 
1062     while ((processed < XLGMAC_TX_DESC_MAX_PROC) &&
1063            (ring->dirty != cur)) {
1064         desc_data = XLGMAC_GET_DESC_DATA(ring, ring->dirty);
1065         dma_desc = desc_data->dma_desc;
1066 
1067         if (!hw_ops->tx_complete(dma_desc))
1068             break;
1069 
1070         /* Make sure descriptor fields are read after reading
1071          * the OWN bit
1072          */
1073         dma_rmb();
1074 
1075         if (netif_msg_tx_done(pdata))
1076             xlgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
1077 
1078         if (hw_ops->is_last_desc(dma_desc)) {
1079             tx_packets += desc_data->tx.packets;
1080             tx_bytes += desc_data->tx.bytes;
1081         }
1082 
1083         /* Free the SKB and reset the descriptor for re-use */
1084         desc_ops->unmap_desc_data(pdata, desc_data);
1085         hw_ops->tx_desc_reset(desc_data);
1086 
1087         processed++;
1088         ring->dirty++;
1089     }
1090 
1091     if (!processed)
1092         return 0;
1093 
1094     netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
1095 
1096     if ((ring->tx.queue_stopped == 1) &&
1097         (xlgmac_tx_avail_desc(ring) > XLGMAC_TX_DESC_MIN_FREE)) {
1098         ring->tx.queue_stopped = 0;
1099         netif_tx_wake_queue(txq);
1100     }
1101 
1102     XLGMAC_PR("processed=%d\n", processed);
1103 
1104     return processed;
1105 }
1106 
1107 static int xlgmac_rx_poll(struct xlgmac_channel *channel, int budget)
1108 {
1109     struct xlgmac_pdata *pdata = channel->pdata;
1110     struct xlgmac_ring *ring = channel->rx_ring;
1111     struct net_device *netdev = pdata->netdev;
1112     unsigned int len, dma_desc_len, max_len;
1113     unsigned int context_next, context;
1114     struct xlgmac_desc_data *desc_data;
1115     struct xlgmac_pkt_info *pkt_info;
1116     unsigned int incomplete, error;
1117     struct xlgmac_hw_ops *hw_ops;
1118     unsigned int received = 0;
1119     struct napi_struct *napi;
1120     struct sk_buff *skb;
1121     int packet_count = 0;
1122 
1123     hw_ops = &pdata->hw_ops;
1124 
1125     /* Nothing to do if there isn't a Rx ring for this channel */
1126     if (!ring)
1127         return 0;
1128 
1129     incomplete = 0;
1130     context_next = 0;
1131 
1132     napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
1133 
1134     desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
1135     pkt_info = &ring->pkt_info;
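         /* Each pass through this loop assembles one packet; packets that
          * span multiple descriptors loop back via the read_again label.
          */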
1136     while (packet_count < budget) {
1137         /* First time in the loop, see if we need to restore state */
1138         if (!received && desc_data->state_saved) {
1139             skb = desc_data->state.skb;
1140             error = desc_data->state.error;
1141             len = desc_data->state.len;
1142         } else {
1143             memset(pkt_info, 0, sizeof(*pkt_info));
1144             skb = NULL;
1145             error = 0;
1146             len = 0;
1147         }
1148 
1149 read_again:
1150         desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
1151 
1152         if (xlgmac_rx_dirty_desc(ring) > XLGMAC_RX_DESC_MAX_DIRTY)
1153             xlgmac_rx_refresh(channel);
1154 
1155         if (hw_ops->dev_read(channel))
1156             break;
1157 
1158         received++;
1159         ring->cur++;
1160 
1161         incomplete = XLGMAC_GET_REG_BITS(
1162                     pkt_info->attributes,
1163                     RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
1164                     RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
1165         context_next = XLGMAC_GET_REG_BITS(
1166                     pkt_info->attributes,
1167                     RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
1168                     RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
1169         context = XLGMAC_GET_REG_BITS(
1170                     pkt_info->attributes,
1171                     RX_PACKET_ATTRIBUTES_CONTEXT_POS,
1172                     RX_PACKET_ATTRIBUTES_CONTEXT_LEN);
1173 
1174         /* Earlier error, just drain the remaining data */
1175         if ((incomplete || context_next) && error)
1176             goto read_again;
1177 
1178         if (error || pkt_info->errors) {
1179             if (pkt_info->errors)
1180                 netif_err(pdata, rx_err, netdev,
1181                       "error in received packet\n");
1182             dev_kfree_skb(skb);
1183             goto next_packet;
1184         }
1185 
1186         if (!context) {
1187             /* Length is cumulative, get this descriptor's length */
1188             dma_desc_len = desc_data->rx.len - len;
1189             len += dma_desc_len;
1190 
1191             if (dma_desc_len && !skb) {
1192                 skb = xlgmac_create_skb(pdata, napi, desc_data,
1193                             dma_desc_len);
1194                 if (!skb)
1195                     error = 1;
1196             } else if (dma_desc_len) {
1197                 dma_sync_single_range_for_cpu(
1198                         pdata->dev,
1199                         desc_data->rx.buf.dma_base,
1200                         desc_data->rx.buf.dma_off,
1201                         desc_data->rx.buf.dma_len,
1202                         DMA_FROM_DEVICE);
1203 
1204                 skb_add_rx_frag(
1205                     skb, skb_shinfo(skb)->nr_frags,
1206                     desc_data->rx.buf.pa.pages,
1207                     desc_data->rx.buf.pa.pages_offset,
1208                     dma_desc_len,
1209                     desc_data->rx.buf.dma_len);
1210                 desc_data->rx.buf.pa.pages = NULL;
1211             }
1212         }
1213 
1214         if (incomplete || context_next)
1215             goto read_again;
1216 
1217         if (!skb)
1218             goto next_packet;
1219 
1220         /* Be sure we don't exceed the configured MTU */
1221         max_len = netdev->mtu + ETH_HLEN;
1222         if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1223             (skb->protocol == htons(ETH_P_8021Q)))
1224             max_len += VLAN_HLEN;
1225 
1226         if (skb->len > max_len) {
1227             netif_err(pdata, rx_err, netdev,
1228                   "packet length exceeds configured MTU\n");
1229             dev_kfree_skb(skb);
1230             goto next_packet;
1231         }
1232 
1233         if (netif_msg_pktdata(pdata))
1234             xlgmac_print_pkt(netdev, skb, false);
1235 
1236         skb_checksum_none_assert(skb);
1237         if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
1238                     RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
1239                     RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
1240             skb->ip_summed = CHECKSUM_UNNECESSARY;
1241 
1242         if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
1243                     RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
1244                     RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
1245             __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1246                            pkt_info->vlan_ctag);
1247             pdata->stats.rx_vlan_packets++;
1248         }
1249 
1250         if (XLGMAC_GET_REG_BITS(pkt_info->attributes,
1251                     RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
1252                     RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
1253             skb_set_hash(skb, pkt_info->rss_hash,
1254                      pkt_info->rss_hash_type);
1255 
1256         skb->dev = netdev;
1257         skb->protocol = eth_type_trans(skb, netdev);
1258         skb_record_rx_queue(skb, channel->queue_index);
1259 
1260         napi_gro_receive(napi, skb);
1261 
1262 next_packet:
1263         packet_count++;
1264     }
1265 
1266     /* Check if we need to save state before leaving */
1267     if (received && (incomplete || context_next)) {
1268         desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
1269         desc_data->state_saved = 1;
1270         desc_data->state.skb = skb;
1271         desc_data->state.len = len;
1272         desc_data->state.error = error;
1273     }
1274 
1275     XLGMAC_PR("packet_count = %d\n", packet_count);
1276 
1277     return packet_count;
1278 }
1279 
1280 static int xlgmac_one_poll(struct napi_struct *napi, int budget)
1281 {
1282     struct xlgmac_channel *channel = container_of(napi,
1283                         struct xlgmac_channel,
1284                         napi);
1285     int processed = 0;
1286 
1287     XLGMAC_PR("budget=%d\n", budget);
1288 
1289     /* Cleanup Tx ring first */
1290     xlgmac_tx_poll(channel);
1291 
1292     /* Process Rx ring next */
1293     processed = xlgmac_rx_poll(channel, budget);
1294 
1295     /* If we processed everything, we are done */
1296     if (processed < budget) {
1297         /* Turn off polling */
1298         napi_complete_done(napi, processed);
1299 
1300         /* Enable Tx and Rx interrupts */
1301         enable_irq(channel->dma_irq);
1302     }
1303 
1304     XLGMAC_PR("received = %d\n", processed);
1305 
1306     return processed;
1307 }
1308 
1309 static int xlgmac_all_poll(struct napi_struct *napi, int budget)
1310 {
1311     struct xlgmac_pdata *pdata = container_of(napi,
1312                            struct xlgmac_pdata,
1313                            napi);
1314     struct xlgmac_channel *channel;
1315     int processed, last_processed;
1316     int ring_budget;
1317     unsigned int i;
1318 
1319     XLGMAC_PR("budget=%d\n", budget);
1320 
1321     processed = 0;
1322     ring_budget = budget / pdata->rx_ring_count;
1323     do {
1324         last_processed = processed;
1325 
1326         channel = pdata->channel_head;
1327         for (i = 0; i < pdata->channel_count; i++, channel++) {
1328             /* Cleanup Tx ring first */
1329             xlgmac_tx_poll(channel);
1330 
1331             /* Process Rx ring next */
1332             if (ring_budget > (budget - processed))
1333                 ring_budget = budget - processed;
1334             processed += xlgmac_rx_poll(channel, ring_budget);
1335         }
1336     } while ((processed < budget) && (processed != last_processed));
1337 
1338     /* If we processed everything, we are done */
1339     if (processed < budget) {
1340         /* Turn off polling */
1341         napi_complete_done(napi, processed);
1342 
1343         /* Enable Tx and Rx interrupts */
1344         xlgmac_enable_rx_tx_ints(pdata);
1345     }
1346 
1347     XLGMAC_PR("received = %d\n", processed);
1348 
1349     return processed;
1350 }