0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /* Applied Micro X-Gene SoC Ethernet Driver
0003  *
0004  * Copyright (c) 2014, Applied Micro Circuits Corporation
0005  * Authors: Iyappan Subramanian <isubramanian@apm.com>
0006  *      Ravi Patel <rapatel@apm.com>
0007  *      Keyur Chudgar <kchudgar@apm.com>
0008  */
0009 
0010 #include <linux/gpio.h>
0011 #include "xgene_enet_main.h"
0012 #include "xgene_enet_hw.h"
0013 #include "xgene_enet_sgmac.h"
0014 #include "xgene_enet_xgmac.h"
0015 
0016 #define RES_ENET_CSR    0
0017 #define RES_RING_CSR    1
0018 #define RES_RING_CMD    2
0019 
0020 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
0021 {
0022     struct xgene_enet_raw_desc16 *raw_desc;
0023     int i;
0024 
0025     if (!buf_pool)
0026         return;
0027 
0028     for (i = 0; i < buf_pool->slots; i++) {
0029         raw_desc = &buf_pool->raw_desc16[i];
0030 
0031         /* Hardware expects the descriptor in little-endian format */
0032         raw_desc->m0 = cpu_to_le64(i |
0033                 SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
0034                 SET_VAL(STASH, 3));
0035     }
0036 }
0037 
0038 static u16 xgene_enet_get_data_len(u64 bufdatalen)
0039 {
0040     u16 hw_len, mask;
0041 
0042     hw_len = GET_VAL(BUFDATALEN, bufdatalen);
0043 
0044     if (unlikely(hw_len == 0x7800)) {
0045         return 0;
0046     } else if (!(hw_len & BIT(14))) {
0047         mask = GENMASK(13, 0);
0048         return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
0049     } else if (!(hw_len & GENMASK(13, 12))) {
0050         mask = GENMASK(11, 0);
0051         return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
0052     } else {
0053         mask = GENMASK(11, 0);
0054         return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
0055     }
0056 }
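/*
 * Decoding of the hardware BUFDATALEN field, as implemented in
 * xgene_enet_get_data_len() above (summary inferred from the checks):
 *
 *   hw_len == 0x7800            -> empty buffer, length 0
 *   bit 14 clear                -> 16K buffer, length in bits [13:0]
 *                                  (0 encodes the full SIZE_16K)
 *   bit 14 set, bits [13:12] 0  -> 4K buffer, length in bits [11:0]
 *                                  (0 encodes the full SIZE_4K)
 *   otherwise                   -> 2K buffer, length in bits [11:0]
 *                                  (0 encodes the full SIZE_2K)
 *
 * xgene_enet_set_data_len() below is the counterpart for whole pages:
 * it returns BIT(14) for SIZE_4K and 0 (the 16K code) for anything
 * else.
 */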
0057 
0058 static u16 xgene_enet_set_data_len(u32 size)
0059 {
0060     u16 hw_len;
0061 
0062     hw_len =  (size == SIZE_4K) ? BIT(14) : 0;
0063 
0064     return hw_len;
0065 }
0066 
0067 static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
0068                       u32 nbuf)
0069 {
0070     struct xgene_enet_raw_desc16 *raw_desc;
0071     struct xgene_enet_pdata *pdata;
0072     struct net_device *ndev;
0073     dma_addr_t dma_addr;
0074     struct device *dev;
0075     struct page *page;
0076     u32 slots, tail;
0077     u16 hw_len;
0078     int i;
0079 
0080     if (unlikely(!buf_pool))
0081         return 0;
0082 
0083     ndev = buf_pool->ndev;
0084     pdata = netdev_priv(ndev);
0085     dev = ndev_to_dev(ndev);
0086     slots = buf_pool->slots - 1;
0087     tail = buf_pool->tail;
0088 
0089     for (i = 0; i < nbuf; i++) {
0090         raw_desc = &buf_pool->raw_desc16[tail];
0091 
0092         page = dev_alloc_page();
0093         if (unlikely(!page))
0094             return -ENOMEM;
0095 
0096         dma_addr = dma_map_page(dev, page, 0,
0097                     PAGE_SIZE, DMA_FROM_DEVICE);
0098         if (unlikely(dma_mapping_error(dev, dma_addr))) {
0099             put_page(page);
0100             return -ENOMEM;
0101         }
0102 
0103         hw_len = xgene_enet_set_data_len(PAGE_SIZE);
0104         raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
0105                        SET_VAL(BUFDATALEN, hw_len) |
0106                        SET_BIT(COHERENT));
0107 
0108         buf_pool->frag_page[tail] = page;
0109         tail = (tail + 1) & slots;
0110     }
0111 
0112     pdata->ring_ops->wr_cmd(buf_pool, nbuf);
0113     buf_pool->tail = tail;
0114 
0115     return 0;
0116 }
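/*
 * Both refill helpers share the same ring arithmetic: slot counts are
 * powers of two, so "slots = buf_pool->slots - 1" serves as an index
 * mask and "tail = (tail + 1) & slots" wraps the tail pointer without
 * a modulo.  Once all descriptors are written, a single
 * ring_ops->wr_cmd(buf_pool, nbuf) tells the ring engine how many new
 * buffers were posted, and only then is buf_pool->tail updated.
 */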
0117 
0118 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
0119                      u32 nbuf)
0120 {
0121     struct sk_buff *skb;
0122     struct xgene_enet_raw_desc16 *raw_desc;
0123     struct xgene_enet_pdata *pdata;
0124     struct net_device *ndev;
0125     struct device *dev;
0126     dma_addr_t dma_addr;
0127     u32 tail = buf_pool->tail;
0128     u32 slots = buf_pool->slots - 1;
0129     u16 bufdatalen, len;
0130     int i;
0131 
0132     ndev = buf_pool->ndev;
0133     dev = ndev_to_dev(buf_pool->ndev);
0134     pdata = netdev_priv(ndev);
0135 
0136     bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
0137     len = XGENE_ENET_STD_MTU;
0138 
0139     for (i = 0; i < nbuf; i++) {
0140         raw_desc = &buf_pool->raw_desc16[tail];
0141 
0142         skb = netdev_alloc_skb_ip_align(ndev, len);
0143         if (unlikely(!skb))
0144             return -ENOMEM;
0145 
0146         dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
0147         if (dma_mapping_error(dev, dma_addr)) {
0148             netdev_err(ndev, "DMA mapping error\n");
0149             dev_kfree_skb_any(skb);
0150             return -EINVAL;
0151         }
0152 
0153         buf_pool->rx_skb[tail] = skb;
0154 
0155         raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
0156                        SET_VAL(BUFDATALEN, bufdatalen) |
0157                        SET_BIT(COHERENT));
0158         tail = (tail + 1) & slots;
0159     }
0160 
0161     pdata->ring_ops->wr_cmd(buf_pool, nbuf);
0162     buf_pool->tail = tail;
0163 
0164     return 0;
0165 }
0166 
0167 static u8 xgene_enet_hdr_len(const void *data)
0168 {
0169     const struct ethhdr *eth = data;
0170 
0171     return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
0172 }
0173 
0174 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
0175 {
0176     struct device *dev = ndev_to_dev(buf_pool->ndev);
0177     struct xgene_enet_raw_desc16 *raw_desc;
0178     dma_addr_t dma_addr;
0179     int i;
0180 
0181     /* Free up the buffers held by hardware */
0182     for (i = 0; i < buf_pool->slots; i++) {
0183         if (buf_pool->rx_skb[i]) {
0184             dev_kfree_skb_any(buf_pool->rx_skb[i]);
0185 
0186             raw_desc = &buf_pool->raw_desc16[i];
0187             dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
0188             dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
0189                      DMA_FROM_DEVICE);
0190         }
0191     }
0192 }
0193 
0194 static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
0195 {
0196     struct device *dev = ndev_to_dev(buf_pool->ndev);
0197     dma_addr_t dma_addr;
0198     struct page *page;
0199     int i;
0200 
0201     /* Free up the buffers held by hardware */
0202     for (i = 0; i < buf_pool->slots; i++) {
0203         page = buf_pool->frag_page[i];
0204         if (page) {
0205             dma_addr = buf_pool->frag_dma_addr[i];
0206             dma_unmap_page(dev, dma_addr, PAGE_SIZE,
0207                        DMA_FROM_DEVICE);
0208             put_page(page);
0209         }
0210     }
0211 }
0212 
0213 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
0214 {
0215     struct xgene_enet_desc_ring *rx_ring = data;
0216 
0217     if (napi_schedule_prep(&rx_ring->napi)) {
0218         disable_irq_nosync(irq);
0219         __napi_schedule(&rx_ring->napi);
0220     }
0221 
0222     return IRQ_HANDLED;
0223 }
0224 
0225 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
0226                     struct xgene_enet_raw_desc *raw_desc)
0227 {
0228     struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
0229     struct sk_buff *skb;
0230     struct device *dev;
0231     skb_frag_t *frag;
0232     dma_addr_t *frag_dma_addr;
0233     u16 skb_index;
0234     u8 mss_index;
0235     u8 status;
0236     int i;
0237 
0238     skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
0239     skb = cp_ring->cp_skb[skb_index];
0240     frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
0241 
0242     dev = ndev_to_dev(cp_ring->ndev);
0243     dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
0244              skb_headlen(skb),
0245              DMA_TO_DEVICE);
0246 
0247     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
0248         frag = &skb_shinfo(skb)->frags[i];
0249         dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
0250                    DMA_TO_DEVICE);
0251     }
0252 
0253     if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
0254         mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
0255         spin_lock(&pdata->mss_lock);
0256         pdata->mss_refcnt[mss_index]--;
0257         spin_unlock(&pdata->mss_lock);
0258     }
0259 
0260     /* Checking for error */
0261     status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
0262     if (unlikely(status > 2)) {
0263         cp_ring->tx_dropped++;
0264         cp_ring->tx_errors++;
0265     }
0266 
0267     if (likely(skb)) {
0268         dev_kfree_skb_any(skb);
0269     } else {
0270         netdev_err(cp_ring->ndev, "completion skb is NULL\n");
0271     }
0272 
0273     return 0;
0274 }
0275 
0276 static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
0277 {
0278     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
0279     int mss_index = -EBUSY;
0280     int i;
0281 
0282     spin_lock(&pdata->mss_lock);
0283 
0284     /* Reuse the slot if MSS matches */
0285     for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
0286         if (pdata->mss[i] == mss) {
0287             pdata->mss_refcnt[i]++;
0288             mss_index = i;
0289         }
0290     }
0291 
0292     /* Otherwise overwrite a slot whose reference count is zero */
0293     for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
0294         if (!pdata->mss_refcnt[i]) {
0295             pdata->mss_refcnt[i]++;
0296             pdata->mac_ops->set_mss(pdata, mss, i);
0297             pdata->mss[i] = mss;
0298             mss_index = i;
0299         }
0300     }
0301 
0302     spin_unlock(&pdata->mss_lock);
0303 
0304     return mss_index;
0305 }
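/*
 * MSS slot management: the hardware provides NUM_MSS_REG shared MSS
 * registers.  xgene_enet_setup_mss() first tries to reuse a slot
 * already programmed with the requested MSS (taking a reference), and
 * only then claims a slot whose reference count has dropped to zero,
 * reprogramming it via mac_ops->set_mss().  The reference is released
 * in xgene_enet_tx_completion() when the corresponding TSO descriptor
 * completes.  If all slots are busy with other MSS values, -EBUSY is
 * returned and the transmit path backs off with NETDEV_TX_BUSY.
 */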
0306 
0307 static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
0308 {
0309     struct net_device *ndev = skb->dev;
0310     struct iphdr *iph;
0311     u8 l3hlen = 0, l4hlen = 0;
0312     u8 ethhdr, proto = 0, csum_enable = 0;
0313     u32 hdr_len, mss = 0;
0314     u32 i, len, nr_frags;
0315     int mss_index;
0316 
0317     ethhdr = xgene_enet_hdr_len(skb->data);
0318 
0319     if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
0320         unlikely(skb->protocol != htons(ETH_P_8021Q)))
0321         goto out;
0322 
0323     if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
0324         goto out;
0325 
0326     iph = ip_hdr(skb);
0327     if (unlikely(ip_is_fragment(iph)))
0328         goto out;
0329 
0330     if (likely(iph->protocol == IPPROTO_TCP)) {
0331         l4hlen = tcp_hdrlen(skb) >> 2;
0332         csum_enable = 1;
0333         proto = TSO_IPPROTO_TCP;
0334         if (ndev->features & NETIF_F_TSO) {
0335             hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
0336             mss = skb_shinfo(skb)->gso_size;
0337 
0338             if (skb_is_nonlinear(skb)) {
0339                 len = skb_headlen(skb);
0340                 nr_frags = skb_shinfo(skb)->nr_frags;
0341 
0342                 for (i = 0; i < 2 && i < nr_frags; i++)
0343                     len += skb_frag_size(
0344                         &skb_shinfo(skb)->frags[i]);
0345 
0346                 /* HW requires the headers to fit in the first 3 buffers */
0347                 if (unlikely(hdr_len > len)) {
0348                     if (skb_linearize(skb))
0349                         return 0;
0350                 }
0351             }
0352 
0353             if (!mss || ((skb->len - hdr_len) <= mss))
0354                 goto out;
0355 
0356             mss_index = xgene_enet_setup_mss(ndev, mss);
0357             if (unlikely(mss_index < 0))
0358                 return -EBUSY;
0359 
0360             *hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
0361         }
0362     } else if (iph->protocol == IPPROTO_UDP) {
0363         l4hlen = UDP_HDR_SIZE;
0364         csum_enable = 1;
0365     }
0366 out:
0367     l3hlen = ip_hdrlen(skb) >> 2;
0368     *hopinfo |= SET_VAL(TCPHDR, l4hlen) |
0369             SET_VAL(IPHDR, l3hlen) |
0370             SET_VAL(ETHHDR, ethhdr) |
0371             SET_VAL(EC, csum_enable) |
0372             SET_VAL(IS, proto) |
0373             SET_BIT(IC) |
0374             SET_BIT(TYPE_ETH_WORK_MESSAGE);
0375 
0376     return 0;
0377 }
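/*
 * xgene_enet_work_msg() assembles the "hopinfo" word that is later
 * merged into descriptor field m3.  In terms of the SET_VAL/SET_BIT
 * macros used above:
 *
 *   TCPHDR - L4 header length (TCP header in 32-bit words, or
 *            UDP_HDR_SIZE for UDP)
 *   IPHDR  - IP header length in 32-bit words
 *   ETHHDR - Ethernet header length in bytes (VLAN aware)
 *   EC     - enable L4 checksum offload (TCP/UDP only)
 *   IS     - L4 protocol selector (TSO_IPPROTO_TCP for TCP)
 *   ET/MSS - TSO enable plus the MSS register index returned by
 *            xgene_enet_setup_mss()
 *
 * For TSO the headers have to fit within the first three buffers,
 * hence the skb_linearize() fallback when the linear area plus the
 * first two fragments are shorter than the combined header length.
 */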
0378 
0379 static u16 xgene_enet_encode_len(u16 len)
0380 {
0381     return (len == BUFLEN_16K) ? 0 : len;
0382 }
0383 
0384 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
0385 {
0386     desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
0387                     SET_VAL(BUFDATALEN, len));
0388 }
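/*
 * Expanded descriptors are written as pairs of little-endian 64-bit
 * words; the "idx ^ 1" above swaps the two words of each pair into the
 * order the ring hardware expects.  The same swap is applied when the
 * words are read back in xgene_enet_rx_frame() and
 * xgene_enet_free_pagepool().
 */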
0389 
0390 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
0391 {
0392     __le64 *exp_bufs;
0393 
0394     exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
0395     memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
0396     ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
0397 
0398     return exp_bufs;
0399 }
0400 
0401 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
0402 {
0403     return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
0404 }
0405 
0406 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
0407                     struct sk_buff *skb)
0408 {
0409     struct device *dev = ndev_to_dev(tx_ring->ndev);
0410     struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
0411     struct xgene_enet_raw_desc *raw_desc;
0412     __le64 *exp_desc = NULL, *exp_bufs = NULL;
0413     dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
0414     skb_frag_t *frag;
0415     u16 tail = tx_ring->tail;
0416     u64 hopinfo = 0;
0417     u32 len, hw_len;
0418     u8 ll = 0, nv = 0, idx = 0;
0419     bool split = false;
0420     u32 size, offset, ell_bytes = 0;
0421     u32 i, fidx, nr_frags, count = 1;
0422     int ret;
0423 
0424     raw_desc = &tx_ring->raw_desc[tail];
0425     tail = (tail + 1) & (tx_ring->slots - 1);
0426     memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
0427 
0428     ret = xgene_enet_work_msg(skb, &hopinfo);
0429     if (ret)
0430         return ret;
0431 
0432     raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
0433                    hopinfo);
0434 
0435     len = skb_headlen(skb);
0436     hw_len = xgene_enet_encode_len(len);
0437 
0438     dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
0439     if (dma_mapping_error(dev, dma_addr)) {
0440         netdev_err(tx_ring->ndev, "DMA mapping error\n");
0441         return -EINVAL;
0442     }
0443 
0444     /* Hardware expects the descriptor in little-endian format */
0445     raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
0446                    SET_VAL(BUFDATALEN, hw_len) |
0447                    SET_BIT(COHERENT));
0448 
0449     if (!skb_is_nonlinear(skb))
0450         goto out;
0451 
0452     /* scatter gather */
0453     nv = 1;
0454     exp_desc = (void *)&tx_ring->raw_desc[tail];
0455     tail = (tail + 1) & (tx_ring->slots - 1);
0456     memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
0457 
0458     nr_frags = skb_shinfo(skb)->nr_frags;
0459     for (i = nr_frags; i < 4 ; i++)
0460         exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
0461 
0462     frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
0463 
0464     for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
0465         if (!split) {
0466             frag = &skb_shinfo(skb)->frags[fidx];
0467             size = skb_frag_size(frag);
0468             offset = 0;
0469 
0470             pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
0471                              DMA_TO_DEVICE);
0472             if (dma_mapping_error(dev, pbuf_addr))
0473                 return -EINVAL;
0474 
0475             frag_dma_addr[fidx] = pbuf_addr;
0476             fidx++;
0477 
0478             if (size > BUFLEN_16K)
0479                 split = true;
0480         }
0481 
0482         if (size > BUFLEN_16K) {
0483             len = BUFLEN_16K;
0484             size -= BUFLEN_16K;
0485         } else {
0486             len = size;
0487             split = false;
0488         }
0489 
0490         dma_addr = pbuf_addr + offset;
0491         hw_len = xgene_enet_encode_len(len);
0492 
0493         switch (i) {
0494         case 0:
0495         case 1:
0496         case 2:
0497             xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
0498             break;
0499         case 3:
0500             if (split || (fidx != nr_frags)) {
0501                 exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
0502                 xgene_set_addr_len(exp_bufs, idx, dma_addr,
0503                            hw_len);
0504                 idx++;
0505                 ell_bytes += len;
0506             } else {
0507                 xgene_set_addr_len(exp_desc, i, dma_addr,
0508                            hw_len);
0509             }
0510             break;
0511         default:
0512             xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
0513             idx++;
0514             ell_bytes += len;
0515             break;
0516         }
0517 
0518         if (split)
0519             offset += BUFLEN_16K;
0520     }
0521     count++;
0522 
0523     if (idx) {
0524         ll = 1;
0525         dma_addr = dma_map_single(dev, exp_bufs,
0526                       sizeof(u64) * MAX_EXP_BUFFS,
0527                       DMA_TO_DEVICE);
0528         if (dma_mapping_error(dev, dma_addr)) {
0529             dev_kfree_skb_any(skb);
0530             return -EINVAL;
0531         }
0532         i = ell_bytes >> LL_BYTES_LSB_LEN;
0533         exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
0534                       SET_VAL(LL_BYTES_MSB, i) |
0535                       SET_VAL(LL_LEN, idx));
0536         raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
0537     }
0538 
0539 out:
0540     raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
0541                    SET_VAL(USERINFO, tx_ring->tail));
0542     tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
0543     pdata->tx_level[tx_ring->cp_ring->index] += count;
0544     tx_ring->tail = tail;
0545 
0546     return count;
0547 }
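/*
 * TX descriptor layout used by xgene_enet_setup_tx_desc(), in outline:
 *
 *  - the first descriptor (raw_desc) carries the linear skb data plus
 *    the hopinfo/HENQNUM fields in m3;
 *  - for fragmented skbs a second, expanded descriptor (exp_desc, NV
 *    bit set) holds the first three fragment buffers, and a fourth if
 *    it happens to be the last one;
 *  - any remaining buffers spill into an exp_bufs array (LL bit set)
 *    whose DMA address and entry count go into exp_desc[2], with the
 *    total byte count split across LL_BYTES_MSB (exp_desc[2]) and
 *    LL_BYTES_LSB (raw_desc->m2);
 *  - fragments larger than BUFLEN_16K are split into 16K chunks, since
 *    a length of zero encodes 16K (xgene_enet_encode_len()).
 *
 * USERINFO records the original tail index so that
 * xgene_enet_tx_completion() can look up the skb and its fragment DMA
 * addresses again.
 */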
0548 
0549 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
0550                      struct net_device *ndev)
0551 {
0552     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
0553     struct xgene_enet_desc_ring *tx_ring;
0554     int index = skb->queue_mapping;
0555     u32 tx_level = pdata->tx_level[index];
0556     int count;
0557 
0558     tx_ring = pdata->tx_ring[index];
0559     if (tx_level < pdata->txc_level[index])
0560         tx_level += ((typeof(pdata->tx_level[index]))~0U);
0561 
0562     if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
0563         netif_stop_subqueue(ndev, index);
0564         return NETDEV_TX_BUSY;
0565     }
0566 
0567     if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
0568         return NETDEV_TX_OK;
0569 
0570     count = xgene_enet_setup_tx_desc(tx_ring, skb);
0571     if (count == -EBUSY)
0572         return NETDEV_TX_BUSY;
0573 
0574     if (count <= 0) {
0575         dev_kfree_skb_any(skb);
0576         return NETDEV_TX_OK;
0577     }
0578 
0579     skb_tx_timestamp(skb);
0580 
0581     tx_ring->tx_packets++;
0582     tx_ring->tx_bytes += skb->len;
0583 
0584     pdata->ring_ops->wr_cmd(tx_ring, count);
0585     return NETDEV_TX_OK;
0586 }
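/*
 * Flow control in xgene_enet_start_xmit(): pdata->tx_level[] counts
 * descriptors queued and pdata->txc_level[] counts descriptors
 * completed, both as free-running u32 counters.  When tx_level has
 * wrapped past txc_level, adding ~0U restores the ordering so that the
 * difference approximates the number of in-flight descriptors; for
 * example, tx_level = 5 after a wrap with txc_level = 0xfffffff0 gives
 * (5 + 0xffffffff) - 0xfffffff0 = 20 in u32 arithmetic, close to the
 * true 21 outstanding.  The queue is stopped once the difference
 * exceeds tx_qcnt_hi (tx_ring[0]->slots - 128, see
 * xgene_enet_create_desc_rings()).
 */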
0587 
0588 static void xgene_enet_rx_csum(struct sk_buff *skb)
0589 {
0590     struct net_device *ndev = skb->dev;
0591     struct iphdr *iph = ip_hdr(skb);
0592 
0593     if (!(ndev->features & NETIF_F_RXCSUM))
0594         return;
0595 
0596     if (skb->protocol != htons(ETH_P_IP))
0597         return;
0598 
0599     if (ip_is_fragment(iph))
0600         return;
0601 
0602     if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
0603         return;
0604 
0605     skb->ip_summed = CHECKSUM_UNNECESSARY;
0606 }
0607 
0608 static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
0609                      struct xgene_enet_raw_desc *raw_desc,
0610                      struct xgene_enet_raw_desc *exp_desc)
0611 {
0612     __le64 *desc = (void *)exp_desc;
0613     dma_addr_t dma_addr;
0614     struct device *dev;
0615     struct page *page;
0616     u16 slots, head;
0617     u32 frag_size;
0618     int i;
0619 
0620     if (!buf_pool || !raw_desc || !exp_desc ||
0621         (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
0622         return;
0623 
0624     dev = ndev_to_dev(buf_pool->ndev);
0625     slots = buf_pool->slots - 1;
0626     head = buf_pool->head;
0627 
0628     for (i = 0; i < 4; i++) {
0629         frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
0630         if (!frag_size)
0631             break;
0632 
0633         dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
0634         dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
0635 
0636         page = buf_pool->frag_page[head];
0637         put_page(page);
0638 
0639         buf_pool->frag_page[head] = NULL;
0640         head = (head + 1) & slots;
0641     }
0642     buf_pool->head = head;
0643 }
0644 
0645 /* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
0646 static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
0647 {
0648     if (status == INGRESS_CRC &&
0649         len >= (ETHER_STD_PACKET + 1) &&
0650         len <= (ETHER_STD_PACKET + 4) &&
0651         skb->protocol == htons(ETH_P_8021Q))
0652         return true;
0653 
0654     return false;
0655 }
0656 
0657 /* Errata 10GE_8 and ENET_11 - allow packets with length <= 64B */
0658 static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
0659 {
0660     if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
0661         if (ntohs(eth_hdr(skb)->h_proto) < 46)
0662             return true;
0663     }
0664 
0665     return false;
0666 }
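/*
 * Both errata helpers only reclassify frames that the MAC already
 * flagged as bad: 10GE_8/ENET_11 accepts minimum-size frames rejected
 * for length when the EtherType/length field indicates a short payload
 * (< 46 bytes), and 10GE_10/ENET_15 accepts VLAN-tagged frames up to
 * four bytes over the standard packet size that were flagged as CRC
 * errors.  The false_rflr and vlan_rjbr counters record how often each
 * workaround fired.
 */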
0667 
0668 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
0669                    struct xgene_enet_raw_desc *raw_desc,
0670                    struct xgene_enet_raw_desc *exp_desc)
0671 {
0672     struct xgene_enet_desc_ring *buf_pool, *page_pool;
0673     u32 datalen, frag_size, skb_index;
0674     struct xgene_enet_pdata *pdata;
0675     struct net_device *ndev;
0676     dma_addr_t dma_addr;
0677     struct sk_buff *skb;
0678     struct device *dev;
0679     struct page *page;
0680     u16 slots, head;
0681     int i, ret = 0;
0682     __le64 *desc;
0683     u8 status;
0684     bool nv;
0685 
0686     ndev = rx_ring->ndev;
0687     pdata = netdev_priv(ndev);
0688     dev = ndev_to_dev(rx_ring->ndev);
0689     buf_pool = rx_ring->buf_pool;
0690     page_pool = rx_ring->page_pool;
0691 
0692     dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
0693              XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
0694     skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
0695     skb = buf_pool->rx_skb[skb_index];
0696     buf_pool->rx_skb[skb_index] = NULL;
0697 
0698     datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
0699 
0700     /* strip off CRC as HW isn't doing this */
0701     nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
0702     if (!nv)
0703         datalen -= 4;
0704 
0705     skb_put(skb, datalen);
0706     prefetch(skb->data - NET_IP_ALIGN);
0707     skb->protocol = eth_type_trans(skb, ndev);
0708 
0709     /* checking for error */
0710     status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
0711           GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
0712     if (unlikely(status)) {
0713         if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
0714             pdata->false_rflr++;
0715         } else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
0716             pdata->vlan_rjbr++;
0717         } else {
0718             dev_kfree_skb_any(skb);
0719             xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
0720             xgene_enet_parse_error(rx_ring, status);
0721             rx_ring->rx_dropped++;
0722             goto out;
0723         }
0724     }
0725 
0726     if (!nv)
0727         goto skip_jumbo;
0728 
0729     slots = page_pool->slots - 1;
0730     head = page_pool->head;
0731     desc = (void *)exp_desc;
0732 
0733     for (i = 0; i < 4; i++) {
0734         frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
0735         if (!frag_size)
0736             break;
0737 
0738         dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
0739         dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
0740 
0741         page = page_pool->frag_page[head];
0742         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
0743                 frag_size, PAGE_SIZE);
0744 
0745         datalen += frag_size;
0746 
0747         page_pool->frag_page[head] = NULL;
0748         head = (head + 1) & slots;
0749     }
0750 
0751     page_pool->head = head;
0752     rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;
0753 
0754 skip_jumbo:
0755     skb_checksum_none_assert(skb);
0756     xgene_enet_rx_csum(skb);
0757 
0758     rx_ring->rx_packets++;
0759     rx_ring->rx_bytes += datalen;
0760     napi_gro_receive(&rx_ring->napi, skb);
0761 
0762 out:
0763     if (rx_ring->npagepool <= 0) {
0764         ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
0765         rx_ring->npagepool = NUM_NXTBUFPOOL;
0766         if (ret)
0767             return ret;
0768     }
0769 
0770     if (--rx_ring->nbufpool == 0) {
0771         ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
0772         rx_ring->nbufpool = NUM_BUFPOOL;
0773     }
0774 
0775     return ret;
0776 }
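/*
 * Receive path summary (xgene_enet_rx_frame()):
 *
 *  1. unmap and detach the skb posted by xgene_enet_refill_bufpool(),
 *     using USERINFO as the index into buf_pool->rx_skb[];
 *  2. for single-buffer frames (NV clear), strip the 4-byte CRC the
 *     hardware leaves in place;
 *  3. on an error status, either tolerate the frame via the errata
 *     checks above or drop it and account the error;
 *  4. for jumbo frames (NV set), append the page-pool fragments
 *     described by the expanded descriptor;
 *  5. refill the page pool and buffer pool in batches of
 *     NUM_NXTBUFPOOL and NUM_BUFPOOL once enough buffers have been
 *     consumed.
 */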
0777 
0778 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
0779 {
0780     return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
0781 }
0782 
0783 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
0784                    int budget)
0785 {
0786     struct net_device *ndev = ring->ndev;
0787     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
0788     struct xgene_enet_raw_desc *raw_desc, *exp_desc;
0789     u16 head = ring->head;
0790     u16 slots = ring->slots - 1;
0791     int ret, desc_count, count = 0, processed = 0;
0792     bool is_completion;
0793 
0794     do {
0795         raw_desc = &ring->raw_desc[head];
0796         desc_count = 0;
0797         is_completion = false;
0798         exp_desc = NULL;
0799         if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
0800             break;
0801 
0802         /* read fpqnum field after dataaddr field */
0803         dma_rmb();
0804         if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
0805             head = (head + 1) & slots;
0806             exp_desc = &ring->raw_desc[head];
0807 
0808             if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
0809                 head = (head - 1) & slots;
0810                 break;
0811             }
0812             dma_rmb();
0813             count++;
0814             desc_count++;
0815         }
0816         if (is_rx_desc(raw_desc)) {
0817             ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
0818         } else {
0819             ret = xgene_enet_tx_completion(ring, raw_desc);
0820             is_completion = true;
0821         }
0822         xgene_enet_mark_desc_slot_empty(raw_desc);
0823         if (exp_desc)
0824             xgene_enet_mark_desc_slot_empty(exp_desc);
0825 
0826         head = (head + 1) & slots;
0827         count++;
0828         desc_count++;
0829         processed++;
0830         if (is_completion)
0831             pdata->txc_level[ring->index] += desc_count;
0832 
0833         if (ret)
0834             break;
0835     } while (--budget);
0836 
0837     if (likely(count)) {
0838         pdata->ring_ops->wr_cmd(ring, -count);
0839         ring->head = head;
0840 
0841         if (__netif_subqueue_stopped(ndev, ring->index))
0842             netif_start_subqueue(ndev, ring->index);
0843     }
0844 
0845     return processed;
0846 }
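/*
 * xgene_enet_process_ring() walks the ring until it finds an empty
 * slot or exhausts the NAPI budget.  A descriptor with the NV bit set
 * occupies two slots (the second being the expanded descriptor), which
 * is why head is advanced and then rolled back if the second slot is
 * not yet valid.  FPQNUM distinguishes receive descriptors from TX
 * completions, and the negative count passed to wr_cmd() at the end
 * tells the ring engine how many slots were consumed.
 */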
0847 
0848 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
0849 {
0850     struct xgene_enet_desc_ring *ring;
0851     int processed;
0852 
0853     ring = container_of(napi, struct xgene_enet_desc_ring, napi);
0854     processed = xgene_enet_process_ring(ring, budget);
0855 
0856     if (processed != budget) {
0857         napi_complete_done(napi, processed);
0858         enable_irq(ring->irq);
0859     }
0860 
0861     return processed;
0862 }
0863 
0864 static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
0865 {
0866     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
0867     struct netdev_queue *txq;
0868     int i;
0869 
0870     pdata->mac_ops->reset(pdata);
0871 
0872     for (i = 0; i < pdata->txq_cnt; i++) {
0873         txq = netdev_get_tx_queue(ndev, i);
0874         txq_trans_cond_update(txq);
0875         netif_tx_start_queue(txq);
0876     }
0877 }
0878 
0879 static void xgene_enet_set_irq_name(struct net_device *ndev)
0880 {
0881     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
0882     struct xgene_enet_desc_ring *ring;
0883     int i;
0884 
0885     for (i = 0; i < pdata->rxq_cnt; i++) {
0886         ring = pdata->rx_ring[i];
0887         if (!pdata->cq_cnt) {
0888             snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
0889                  ndev->name);
0890         } else {
0891             snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
0892                  ndev->name, i);
0893         }
0894     }
0895 
0896     for (i = 0; i < pdata->cq_cnt; i++) {
0897         ring = pdata->tx_ring[i]->cp_ring;
0898         snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
0899              ndev->name, i);
0900     }
0901 }
0902 
0903 static int xgene_enet_register_irq(struct net_device *ndev)
0904 {
0905     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
0906     struct device *dev = ndev_to_dev(ndev);
0907     struct xgene_enet_desc_ring *ring;
0908     int ret = 0, i;
0909 
0910     xgene_enet_set_irq_name(ndev);
0911     for (i = 0; i < pdata->rxq_cnt; i++) {
0912         ring = pdata->rx_ring[i];
0913         irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
0914         ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
0915                        0, ring->irq_name, ring);
0916         if (ret) {
0917             netdev_err(ndev, "Failed to request irq %s\n",
0918                    ring->irq_name);
0919         }
0920     }
0921 
0922     for (i = 0; i < pdata->cq_cnt; i++) {
0923         ring = pdata->tx_ring[i]->cp_ring;
0924         irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
0925         ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
0926                        0, ring->irq_name, ring);
0927         if (ret) {
0928             netdev_err(ndev, "Failed to request irq %s\n",
0929                    ring->irq_name);
0930         }
0931     }
0932 
0933     return ret;
0934 }
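/*
 * IRQ_DISABLE_UNLAZY makes disable_irq_nosync() in xgene_enet_rx_irq()
 * mask the line at the interrupt controller immediately instead of
 * lazily; the line stays masked until xgene_enet_napi() re-enables it
 * with enable_irq() after the poll completes.
 */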
0935 
0936 static void xgene_enet_free_irq(struct net_device *ndev)
0937 {
0938     struct xgene_enet_pdata *pdata;
0939     struct xgene_enet_desc_ring *ring;
0940     struct device *dev;
0941     int i;
0942 
0943     pdata = netdev_priv(ndev);
0944     dev = ndev_to_dev(ndev);
0945 
0946     for (i = 0; i < pdata->rxq_cnt; i++) {
0947         ring = pdata->rx_ring[i];
0948         irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
0949         devm_free_irq(dev, ring->irq, ring);
0950     }
0951 
0952     for (i = 0; i < pdata->cq_cnt; i++) {
0953         ring = pdata->tx_ring[i]->cp_ring;
0954         irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
0955         devm_free_irq(dev, ring->irq, ring);
0956     }
0957 }
0958 
0959 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
0960 {
0961     struct napi_struct *napi;
0962     int i;
0963 
0964     for (i = 0; i < pdata->rxq_cnt; i++) {
0965         napi = &pdata->rx_ring[i]->napi;
0966         napi_enable(napi);
0967     }
0968 
0969     for (i = 0; i < pdata->cq_cnt; i++) {
0970         napi = &pdata->tx_ring[i]->cp_ring->napi;
0971         napi_enable(napi);
0972     }
0973 }
0974 
0975 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
0976 {
0977     struct napi_struct *napi;
0978     int i;
0979 
0980     for (i = 0; i < pdata->rxq_cnt; i++) {
0981         napi = &pdata->rx_ring[i]->napi;
0982         napi_disable(napi);
0983     }
0984 
0985     for (i = 0; i < pdata->cq_cnt; i++) {
0986         napi = &pdata->tx_ring[i]->cp_ring->napi;
0987         napi_disable(napi);
0988     }
0989 }
0990 
0991 static int xgene_enet_open(struct net_device *ndev)
0992 {
0993     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
0994     const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
0995     int ret;
0996 
0997     ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
0998     if (ret)
0999         return ret;
1000 
1001     ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
1002     if (ret)
1003         return ret;
1004 
1005     xgene_enet_napi_enable(pdata);
1006     ret = xgene_enet_register_irq(ndev);
1007     if (ret)
1008         return ret;
1009 
1010     if (ndev->phydev) {
1011         phy_start(ndev->phydev);
1012     } else {
1013         schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
1014         netif_carrier_off(ndev);
1015     }
1016 
1017     mac_ops->tx_enable(pdata);
1018     mac_ops->rx_enable(pdata);
1019     netif_tx_start_all_queues(ndev);
1020 
1021     return ret;
1022 }
1023 
1024 static int xgene_enet_close(struct net_device *ndev)
1025 {
1026     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1027     const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
1028     int i;
1029 
1030     netif_tx_stop_all_queues(ndev);
1031     mac_ops->tx_disable(pdata);
1032     mac_ops->rx_disable(pdata);
1033 
1034     if (ndev->phydev)
1035         phy_stop(ndev->phydev);
1036     else
1037         cancel_delayed_work_sync(&pdata->link_work);
1038 
1039     xgene_enet_free_irq(ndev);
1040     xgene_enet_napi_disable(pdata);
1041     for (i = 0; i < pdata->rxq_cnt; i++)
1042         xgene_enet_process_ring(pdata->rx_ring[i], -1);
1043 
1044     return 0;
1045 }
1046 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
1047 {
1048     struct xgene_enet_pdata *pdata;
1049     struct device *dev;
1050 
1051     pdata = netdev_priv(ring->ndev);
1052     dev = ndev_to_dev(ring->ndev);
1053 
1054     pdata->ring_ops->clear(ring);
1055     dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
1056 }
1057 
1058 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
1059 {
1060     struct xgene_enet_desc_ring *buf_pool, *page_pool;
1061     struct xgene_enet_desc_ring *ring;
1062     int i;
1063 
1064     for (i = 0; i < pdata->txq_cnt; i++) {
1065         ring = pdata->tx_ring[i];
1066         if (ring) {
1067             xgene_enet_delete_ring(ring);
1068             pdata->port_ops->clear(pdata, ring);
1069             if (pdata->cq_cnt)
1070                 xgene_enet_delete_ring(ring->cp_ring);
1071             pdata->tx_ring[i] = NULL;
1072         }
1073 
1074     }
1075 
1076     for (i = 0; i < pdata->rxq_cnt; i++) {
1077         ring = pdata->rx_ring[i];
1078         if (ring) {
1079             page_pool = ring->page_pool;
1080             if (page_pool) {
1081                 xgene_enet_delete_pagepool(page_pool);
1082                 xgene_enet_delete_ring(page_pool);
1083                 pdata->port_ops->clear(pdata, page_pool);
1084             }
1085 
1086             buf_pool = ring->buf_pool;
1087             xgene_enet_delete_bufpool(buf_pool);
1088             xgene_enet_delete_ring(buf_pool);
1089             pdata->port_ops->clear(pdata, buf_pool);
1090 
1091             xgene_enet_delete_ring(ring);
1092             pdata->rx_ring[i] = NULL;
1093         }
1094 
1095     }
1096 }
1097 
1098 static int xgene_enet_get_ring_size(struct device *dev,
1099                     enum xgene_enet_ring_cfgsize cfgsize)
1100 {
1101     int size = -EINVAL;
1102 
1103     switch (cfgsize) {
1104     case RING_CFGSIZE_512B:
1105         size = 0x200;
1106         break;
1107     case RING_CFGSIZE_2KB:
1108         size = 0x800;
1109         break;
1110     case RING_CFGSIZE_16KB:
1111         size = 0x4000;
1112         break;
1113     case RING_CFGSIZE_64KB:
1114         size = 0x10000;
1115         break;
1116     case RING_CFGSIZE_512KB:
1117         size = 0x80000;
1118         break;
1119     default:
1120         dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
1121         break;
1122     }
1123 
1124     return size;
1125 }
1126 
1127 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
1128 {
1129     struct xgene_enet_pdata *pdata;
1130     struct device *dev;
1131 
1132     if (!ring)
1133         return;
1134 
1135     dev = ndev_to_dev(ring->ndev);
1136     pdata = netdev_priv(ring->ndev);
1137 
1138     if (ring->desc_addr) {
1139         pdata->ring_ops->clear(ring);
1140         dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
1141     }
1142     devm_kfree(dev, ring);
1143 }
1144 
1145 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
1146 {
1147     struct xgene_enet_desc_ring *page_pool;
1148     struct device *dev = &pdata->pdev->dev;
1149     struct xgene_enet_desc_ring *ring;
1150     void *p;
1151     int i;
1152 
1153     for (i = 0; i < pdata->txq_cnt; i++) {
1154         ring = pdata->tx_ring[i];
1155         if (ring) {
1156             if (ring->cp_ring && ring->cp_ring->cp_skb)
1157                 devm_kfree(dev, ring->cp_ring->cp_skb);
1158 
1159             if (ring->cp_ring && pdata->cq_cnt)
1160                 xgene_enet_free_desc_ring(ring->cp_ring);
1161 
1162             xgene_enet_free_desc_ring(ring);
1163         }
1164 
1165     }
1166 
1167     for (i = 0; i < pdata->rxq_cnt; i++) {
1168         ring = pdata->rx_ring[i];
1169         if (ring) {
1170             if (ring->buf_pool) {
1171                 if (ring->buf_pool->rx_skb)
1172                     devm_kfree(dev, ring->buf_pool->rx_skb);
1173 
1174                 xgene_enet_free_desc_ring(ring->buf_pool);
1175             }
1176 
1177             page_pool = ring->page_pool;
1178             if (page_pool) {
1179                 p = page_pool->frag_page;
1180                 if (p)
1181                     devm_kfree(dev, p);
1182 
1183                 p = page_pool->frag_dma_addr;
1184                 if (p)
1185                     devm_kfree(dev, p);
1186             }
1187 
1188             xgene_enet_free_desc_ring(ring);
1189         }
1190     }
1191 }
1192 
1193 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
1194                  struct xgene_enet_desc_ring *ring)
1195 {
1196     if ((pdata->enet_id == XGENE_ENET2) &&
1197         (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
1198         return true;
1199     }
1200 
1201     return false;
1202 }
1203 
1204 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
1205                           struct xgene_enet_desc_ring *ring)
1206 {
1207     u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
1208 
1209     return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
1210 }
1211 
1212 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
1213             struct net_device *ndev, u32 ring_num,
1214             enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
1215 {
1216     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1217     struct device *dev = ndev_to_dev(ndev);
1218     struct xgene_enet_desc_ring *ring;
1219     void *irq_mbox_addr;
1220     int size;
1221 
1222     size = xgene_enet_get_ring_size(dev, cfgsize);
1223     if (size < 0)
1224         return NULL;
1225 
1226     ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
1227                 GFP_KERNEL);
1228     if (!ring)
1229         return NULL;
1230 
1231     ring->ndev = ndev;
1232     ring->num = ring_num;
1233     ring->cfgsize = cfgsize;
1234     ring->id = ring_id;
1235 
1236     ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
1237                           GFP_KERNEL | __GFP_ZERO);
1238     if (!ring->desc_addr) {
1239         devm_kfree(dev, ring);
1240         return NULL;
1241     }
1242     ring->size = size;
1243 
1244     if (is_irq_mbox_required(pdata, ring)) {
1245         irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
1246                             &ring->irq_mbox_dma,
1247                             GFP_KERNEL | __GFP_ZERO);
1248         if (!irq_mbox_addr) {
1249             dmam_free_coherent(dev, size, ring->desc_addr,
1250                        ring->dma);
1251             devm_kfree(dev, ring);
1252             return NULL;
1253         }
1254         ring->irq_mbox_addr = irq_mbox_addr;
1255     }
1256 
1257     ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
1258     ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
1259     ring = pdata->ring_ops->setup(ring);
1260     netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
1261            ring->num, ring->size, ring->id, ring->slots);
1262 
1263     return ring;
1264 }
1265 
1266 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
1267 {
1268     return (owner << 6) | (bufnum & GENMASK(5, 0));
1269 }
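/*
 * A ring ID is the owner shifted left by six bits OR'd with a 6-bit
 * buffer number, e.g. (RING_OWNER_CPU << 6) | 2 for a CPU-owned ring
 * with bufnum 2.  xgene_enet_ring_owner() later recovers the owner
 * from the ID, for instance in is_irq_mbox_required().
 */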
1270 
1271 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
1272 {
1273     enum xgene_ring_owner owner;
1274 
1275     if (p->enet_id == XGENE_ENET1) {
1276         switch (p->phy_mode) {
1277         case PHY_INTERFACE_MODE_SGMII:
1278             owner = RING_OWNER_ETH0;
1279             break;
1280         default:
1281             owner = (!p->port_id) ? RING_OWNER_ETH0 :
1282                         RING_OWNER_ETH1;
1283             break;
1284         }
1285     } else {
1286         owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
1287     }
1288 
1289     return owner;
1290 }
1291 
1292 static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
1293 {
1294     struct device *dev = &pdata->pdev->dev;
1295     u32 cpu_bufnum;
1296     int ret;
1297 
1298     ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
1299 
1300     return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
1301 }
1302 
1303 static int xgene_enet_create_desc_rings(struct net_device *ndev)
1304 {
1305     struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
1306     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1307     struct xgene_enet_desc_ring *page_pool = NULL;
1308     struct xgene_enet_desc_ring *buf_pool = NULL;
1309     struct device *dev = ndev_to_dev(ndev);
1310     u8 eth_bufnum = pdata->eth_bufnum;
1311     u8 bp_bufnum = pdata->bp_bufnum;
1312     u16 ring_num = pdata->ring_num;
1313     enum xgene_ring_owner owner;
1314     dma_addr_t dma_exp_bufs;
1315     u16 ring_id, slots;
1316     __le64 *exp_bufs;
1317     int i, ret, size;
1318     u8 cpu_bufnum;
1319 
1320     cpu_bufnum = xgene_start_cpu_bufnum(pdata);
1321 
1322     for (i = 0; i < pdata->rxq_cnt; i++) {
1323         /* allocate rx descriptor ring */
1324         owner = xgene_derive_ring_owner(pdata);
1325         ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
1326         rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1327                               RING_CFGSIZE_16KB,
1328                               ring_id);
1329         if (!rx_ring) {
1330             ret = -ENOMEM;
1331             goto err;
1332         }
1333 
1334         /* allocate buffer pool for receiving packets */
1335         owner = xgene_derive_ring_owner(pdata);
1336         ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1337         buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1338                                RING_CFGSIZE_16KB,
1339                                ring_id);
1340         if (!buf_pool) {
1341             ret = -ENOMEM;
1342             goto err;
1343         }
1344 
1345         rx_ring->nbufpool = NUM_BUFPOOL;
1346         rx_ring->npagepool = NUM_NXTBUFPOOL;
1347         rx_ring->irq = pdata->irqs[i];
1348         buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1349                         sizeof(struct sk_buff *),
1350                         GFP_KERNEL);
1351         if (!buf_pool->rx_skb) {
1352             ret = -ENOMEM;
1353             goto err;
1354         }
1355 
1356         buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1357         rx_ring->buf_pool = buf_pool;
1358         pdata->rx_ring[i] = rx_ring;
1359 
1360         if ((pdata->enet_id == XGENE_ENET1 &&  pdata->rxq_cnt > 4) ||
1361             (pdata->enet_id == XGENE_ENET2 &&  pdata->rxq_cnt > 16)) {
1362             break;
1363         }
1364 
1365         /* allocate next buffer pool for jumbo packets */
1366         owner = xgene_derive_ring_owner(pdata);
1367         ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1368         page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1369                             RING_CFGSIZE_16KB,
1370                             ring_id);
1371         if (!page_pool) {
1372             ret = -ENOMEM;
1373             goto err;
1374         }
1375 
1376         slots = page_pool->slots;
1377         page_pool->frag_page = devm_kcalloc(dev, slots,
1378                             sizeof(struct page *),
1379                             GFP_KERNEL);
1380         if (!page_pool->frag_page) {
1381             ret = -ENOMEM;
1382             goto err;
1383         }
1384 
1385         page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
1386                             sizeof(dma_addr_t),
1387                             GFP_KERNEL);
1388         if (!page_pool->frag_dma_addr) {
1389             ret = -ENOMEM;
1390             goto err;
1391         }
1392 
1393         page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
1394         rx_ring->page_pool = page_pool;
1395     }
1396 
1397     for (i = 0; i < pdata->txq_cnt; i++) {
1398         /* allocate tx descriptor ring */
1399         owner = xgene_derive_ring_owner(pdata);
1400         ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1401         tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1402                               RING_CFGSIZE_16KB,
1403                               ring_id);
1404         if (!tx_ring) {
1405             ret = -ENOMEM;
1406             goto err;
1407         }
1408 
1409         size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1410         exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
1411                            GFP_KERNEL | __GFP_ZERO);
1412         if (!exp_bufs) {
1413             ret = -ENOMEM;
1414             goto err;
1415         }
1416         tx_ring->exp_bufs = exp_bufs;
1417 
1418         pdata->tx_ring[i] = tx_ring;
1419 
1420         if (!pdata->cq_cnt) {
1421             cp_ring = pdata->rx_ring[i];
1422         } else {
1423             /* allocate tx completion descriptor ring */
1424             ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1425                              cpu_bufnum++);
1426             cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1427                                   RING_CFGSIZE_16KB,
1428                                   ring_id);
1429             if (!cp_ring) {
1430                 ret = -ENOMEM;
1431                 goto err;
1432             }
1433 
1434             cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1435             cp_ring->index = i;
1436         }
1437 
1438         cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1439                            sizeof(struct sk_buff *),
1440                            GFP_KERNEL);
1441         if (!cp_ring->cp_skb) {
1442             ret = -ENOMEM;
1443             goto err;
1444         }
1445 
1446         size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1447         cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1448                               size, GFP_KERNEL);
1449         if (!cp_ring->frag_dma_addr) {
1450             devm_kfree(dev, cp_ring->cp_skb);
1451             ret = -ENOMEM;
1452             goto err;
1453         }
1454 
1455         tx_ring->cp_ring = cp_ring;
1456         tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1457     }
1458 
1459     if (pdata->ring_ops->coalesce)
1460         pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1461     pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1462 
1463     return 0;
1464 
1465 err:
1466     xgene_enet_free_desc_rings(pdata);
1467     return ret;
1468 }
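/*
 * Ring topology created above, per queue:
 *
 *   RX: a CPU-owned receive ring fed by an ETH-owned buffer pool of
 *       standard-MTU SKBs and, unless the queue count exceeds the
 *       per-SoC limit checked in the loop, an ETH-owned page pool for
 *       jumbo-frame fragments;
 *   TX: an ETH-owned transmit ring plus a completion ring, which is
 *       either a dedicated CPU-owned ring (cq_cnt != 0) or shared with
 *       the matching receive ring.
 *
 * The cpu_bufnum, eth_bufnum and bp_bufnum counters are incremented as
 * rings are created so that every ring gets a unique ID.
 */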
1469 
1470 static void xgene_enet_get_stats64(
1471             struct net_device *ndev,
1472             struct rtnl_link_stats64 *stats)
1473 {
1474     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1475     struct xgene_enet_desc_ring *ring;
1476     int i;
1477 
1478     for (i = 0; i < pdata->txq_cnt; i++) {
1479         ring = pdata->tx_ring[i];
1480         if (ring) {
1481             stats->tx_packets += ring->tx_packets;
1482             stats->tx_bytes += ring->tx_bytes;
1483             stats->tx_dropped += ring->tx_dropped;
1484             stats->tx_errors += ring->tx_errors;
1485         }
1486     }
1487 
1488     for (i = 0; i < pdata->rxq_cnt; i++) {
1489         ring = pdata->rx_ring[i];
1490         if (ring) {
1491             stats->rx_packets += ring->rx_packets;
1492             stats->rx_bytes += ring->rx_bytes;
1493             stats->rx_dropped += ring->rx_dropped;
1494             stats->rx_errors += ring->rx_errors +
1495                 ring->rx_length_errors +
1496                 ring->rx_crc_errors +
1497                 ring->rx_frame_errors +
1498                 ring->rx_fifo_errors;
1499             stats->rx_length_errors += ring->rx_length_errors;
1500             stats->rx_crc_errors += ring->rx_crc_errors;
1501             stats->rx_frame_errors += ring->rx_frame_errors;
1502             stats->rx_fifo_errors += ring->rx_fifo_errors;
1503         }
1504     }
1505 }
1506 
1507 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1508 {
1509     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1510     int ret;
1511 
1512     ret = eth_mac_addr(ndev, addr);
1513     if (ret)
1514         return ret;
1515     pdata->mac_ops->set_mac_addr(pdata);
1516 
1517     return ret;
1518 }
1519 
1520 static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
1521 {
1522     struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1523     int frame_size;
1524 
1525     if (!netif_running(ndev))
1526         return 0;
1527 
1528     frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
1529 
1530     xgene_enet_close(ndev);
1531     ndev->mtu = new_mtu;
1532     pdata->mac_ops->set_framesize(pdata, frame_size);
1533     xgene_enet_open(ndev);
1534 
1535     return 0;
1536 }
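/*
 * xgene_change_mtu() restarts the interface around the MTU change when
 * it is running.  The programmed frame size is new_mtu plus 18 bytes
 * of overhead for MTUs above ETH_DATA_LEN, and a fixed 0x600 (1536)
 * bytes otherwise.
 */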
1537 
1538 static const struct net_device_ops xgene_ndev_ops = {
1539     .ndo_open = xgene_enet_open,
1540     .ndo_stop = xgene_enet_close,
1541     .ndo_start_xmit = xgene_enet_start_xmit,
1542     .ndo_tx_timeout = xgene_enet_timeout,
1543     .ndo_get_stats64 = xgene_enet_get_stats64,
1544     .ndo_change_mtu = xgene_change_mtu,
1545     .ndo_set_mac_address = xgene_enet_set_mac_address,
1546 };
1547 
1548 #ifdef CONFIG_ACPI
1549 static void xgene_get_port_id_acpi(struct device *dev,
1550                   struct xgene_enet_pdata *pdata)
1551 {
1552     acpi_status status;
1553     u64 temp;
1554 
1555     status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1556     if (ACPI_FAILURE(status)) {
1557         pdata->port_id = 0;
1558     } else {
1559         pdata->port_id = temp;
1560     }
1561 
1562     return;
1563 }
1564 #endif
1565 
1566 static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
1567 {
1568     u32 id = 0;
1569 
1570     of_property_read_u32(dev->of_node, "port-id", &id);
1571 
1572     pdata->port_id = id & BIT(0);
1573 
1574     return;
1575 }
1576 
1577 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1578 {
1579     struct device *dev = &pdata->pdev->dev;
1580     int delay, ret;
1581 
1582     ret = device_property_read_u32(dev, "tx-delay", &delay);
1583     if (ret) {
1584         pdata->tx_delay = 4;
1585         return 0;
1586     }
1587 
1588     if (delay < 0 || delay > 7) {
1589         dev_err(dev, "Invalid tx-delay specified\n");
1590         return -EINVAL;
1591     }
1592 
1593     pdata->tx_delay = delay;
1594 
1595     return 0;
1596 }
1597 
1598 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1599 {
1600     struct device *dev = &pdata->pdev->dev;
1601     int delay, ret;
1602 
1603     ret = device_property_read_u32(dev, "rx-delay", &delay);
1604     if (ret) {
1605         pdata->rx_delay = 2;
1606         return 0;
1607     }
1608 
1609     if (delay < 0 || delay > 7) {
1610         dev_err(dev, "Invalid rx-delay specified\n");
1611         return -EINVAL;
1612     }
1613 
1614     pdata->rx_delay = delay;
1615 
1616     return 0;
1617 }
1618 
1619 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1620 {
1621     struct platform_device *pdev = pdata->pdev;
1622     int i, ret, max_irqs;
1623 
1624     if (phy_interface_mode_is_rgmii(pdata->phy_mode))
1625         max_irqs = 1;
1626     else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1627         max_irqs = 2;
1628     else
1629         max_irqs = XGENE_MAX_ENET_IRQ;
1630 
1631     for (i = 0; i < max_irqs; i++) {
1632         ret = platform_get_irq(pdev, i);
1633         if (ret <= 0) {
1634             if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1635                 max_irqs = i;
1636                 pdata->rxq_cnt = max_irqs / 2;
1637                 pdata->txq_cnt = max_irqs / 2;
1638                 pdata->cq_cnt = max_irqs / 2;
1639                 break;
1640             }
1641             return ret ? : -ENXIO;
1642         }
1643         pdata->irqs[i] = ret;
1644     }
1645 
1646     return 0;
1647 }
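/*
 * Interrupt requirements depend on the interface mode: RGMII ports use
 * a single combined RX/TX-completion interrupt, SGMII ports use two,
 * and XGMII ports request up to XGENE_MAX_ENET_IRQ lines.  For XGMII,
 * a shorter interrupt list simply scales rxq_cnt/txq_cnt/cq_cnt down
 * to match instead of failing the probe.
 */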
1648 
1649 static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
1650 {
1651     int ret;
1652 
1653     if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
1654         return;
1655 
1656     if (!IS_ENABLED(CONFIG_MDIO_XGENE))
1657         return;
1658 
1659     ret = xgene_enet_phy_connect(pdata->ndev);
1660     if (!ret)
1661         pdata->mdio_driver = true;
1662 }
1663 
1664 static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
1665 {
1666     struct device *dev = &pdata->pdev->dev;
1667 
1668     pdata->sfp_gpio_en = false;
1669     if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
1670         (!device_property_present(dev, "sfp-gpios") &&
1671          !device_property_present(dev, "rxlos-gpios")))
1672         return;
1673 
1674     pdata->sfp_gpio_en = true;
1675     pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
1676     if (IS_ERR(pdata->sfp_rdy))
1677         pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
1678 }
1679 
1680 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1681 {
1682     struct platform_device *pdev;
1683     struct net_device *ndev;
1684     struct device *dev;
1685     struct resource *res;
1686     void __iomem *base_addr;
1687     u32 offset;
1688     int ret = 0;
1689 
1690     pdev = pdata->pdev;
1691     dev = &pdev->dev;
1692     ndev = pdata->ndev;
1693 
1694     res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1695     if (!res) {
1696         dev_err(dev, "Resource enet_csr not defined\n");
1697         return -ENODEV;
1698     }
1699     pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1700     if (!pdata->base_addr) {
1701         dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1702         return -ENOMEM;
1703     }
1704 
1705     res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1706     if (!res) {
1707         dev_err(dev, "Resource ring_csr not defined\n");
1708         return -ENODEV;
1709     }
1710     pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1711                             resource_size(res));
1712     if (!pdata->ring_csr_addr) {
1713         dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1714         return -ENOMEM;
1715     }
1716 
1717     res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1718     if (!res) {
1719         dev_err(dev, "Resource ring_cmd not defined\n");
1720         return -ENODEV;
1721     }
1722     pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1723                             resource_size(res));
1724     if (!pdata->ring_cmd_addr) {
1725         dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1726         return -ENOMEM;
1727     }
1728 
1729     if (dev->of_node)
1730         xgene_get_port_id_dt(dev, pdata);
1731 #ifdef CONFIG_ACPI
1732     else
1733         xgene_get_port_id_acpi(dev, pdata);
1734 #endif
1735 
1736     if (device_get_ethdev_address(dev, ndev))
1737         eth_hw_addr_random(ndev);
1738 
1739     memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1740 
1741     pdata->phy_mode = device_get_phy_mode(dev);
1742     if (pdata->phy_mode < 0) {
1743         dev_err(dev, "Unable to get phy-connection-type\n");
1744         return pdata->phy_mode;
1745     }
1746     if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
1747         pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1748         pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1749         dev_err(dev, "Incorrect phy-connection-type specified\n");
1750         return -ENODEV;
1751     }
1752 
1753     ret = xgene_get_tx_delay(pdata);
1754     if (ret)
1755         return ret;
1756 
1757     ret = xgene_get_rx_delay(pdata);
1758     if (ret)
1759         return ret;
1760 
1761     ret = xgene_enet_get_irqs(pdata);
1762     if (ret)
1763         return ret;
1764 
1765     xgene_enet_gpiod_get(pdata);
1766 
1767     pdata->clk = devm_clk_get(&pdev->dev, NULL);
1768     if (IS_ERR(pdata->clk)) {
1769         if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
1770             /* Abort if the clock is defined but couldn't be
1771              * retrieved. Always abort if the clock is missing on
1772              * a DT system as the driver can't cope with this case.
1773              */
1774             if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
1775                 return PTR_ERR(pdata->clk);
1776             /* Firmware may have set up the clock already. */
1777             dev_info(dev, "clocks have been set up already\n");
1778         }
1779     }
1780 
1781     if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1782         base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
1783     else
1784         base_addr = pdata->base_addr;
1785     pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
1786     pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
1787     pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
1788     pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
1789     if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
1790         pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1791         pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
1792         pdata->mcx_stats_addr =
1793             pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
1794         offset = (pdata->enet_id == XGENE_ENET1) ?
1795               BLOCK_ETH_MAC_CSR_OFFSET :
1796               X2_BLOCK_ETH_MAC_CSR_OFFSET;
1797         pdata->mcx_mac_csr_addr = base_addr + offset;
1798     } else {
1799         pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
1800         pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
1801         pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
1802         pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
1803     }
1804     pdata->rx_buff_cnt = NUM_PKT_BUF;
1805 
1806     return 0;
1807 }
1808 
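/* Reset the port, create the descriptor rings and fill the buffer and page
 * pools, then program either the preclassifier tree (XGMII) or the CLE
 * bypass path before initializing the MAC.
 */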
1809 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
1810 {
1811     struct xgene_enet_cle *enet_cle = &pdata->cle;
1812     struct xgene_enet_desc_ring *page_pool;
1813     struct net_device *ndev = pdata->ndev;
1814     struct xgene_enet_desc_ring *buf_pool;
1815     u16 dst_ring_num, ring_id;
1816     int i, ret;
1817     u32 count;
1818 
1819     ret = pdata->port_ops->reset(pdata);
1820     if (ret)
1821         return ret;
1822 
1823     ret = xgene_enet_create_desc_rings(ndev);
1824     if (ret) {
1825         netdev_err(ndev, "Error in ring configuration\n");
1826         return ret;
1827     }
1828 
1829     /* setup buffer pool */
1830     for (i = 0; i < pdata->rxq_cnt; i++) {
1831         buf_pool = pdata->rx_ring[i]->buf_pool;
1832         xgene_enet_init_bufpool(buf_pool);
1833         page_pool = pdata->rx_ring[i]->page_pool;
1834         xgene_enet_init_bufpool(page_pool);
1835 
1836         count = pdata->rx_buff_cnt;
1837         ret = xgene_enet_refill_bufpool(buf_pool, count);
1838         if (ret)
1839             goto err;
1840 
1841         ret = xgene_enet_refill_pagepool(page_pool, count);
1842         if (ret)
1843             goto err;
1844 
1845     }
1846 
1847     dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1848     buf_pool = pdata->rx_ring[0]->buf_pool;
1849     if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1850         /* Initialize and Enable PreClassifier Tree */
1851         enet_cle->max_nodes = 512;
1852         enet_cle->max_dbptrs = 1024;
1853         enet_cle->parsers = 3;
1854         enet_cle->active_parser = PARSER_ALL;
1855         enet_cle->ptree.start_node = 0;
1856         enet_cle->ptree.start_dbptr = 0;
1857         enet_cle->jump_bytes = 8;
1858         ret = pdata->cle_ops->cle_init(pdata);
1859         if (ret) {
1860             netdev_err(ndev, "Preclass Tree init error\n");
1861             goto err;
1862         }
1863 
1864     } else {
1865         dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1866         buf_pool = pdata->rx_ring[0]->buf_pool;
1867         page_pool = pdata->rx_ring[0]->page_pool;
1868         ring_id = (page_pool) ? page_pool->id : 0;
1869         pdata->port_ops->cle_bypass(pdata, dst_ring_num,
1870                         buf_pool->id, ring_id);
1871     }
1872 
1873     ndev->max_mtu = XGENE_ENET_MAX_MTU;
1874     pdata->phy_speed = SPEED_UNKNOWN;
1875     pdata->mac_ops->init(pdata);
1876 
1877     return ret;
1878 
1879 err:
1880     xgene_enet_delete_desc_rings(pdata);
1881     return ret;
1882 }
1883 
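/* Select the MAC, port and ring operations plus default queue counts for
 * the PHY interface mode, and assign the per-port buffer and ring numbers.
 */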
1884 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
1885 {
1886     switch (pdata->phy_mode) {
1887     case PHY_INTERFACE_MODE_RGMII:
1888     case PHY_INTERFACE_MODE_RGMII_ID:
1889     case PHY_INTERFACE_MODE_RGMII_RXID:
1890     case PHY_INTERFACE_MODE_RGMII_TXID:
1891         pdata->mac_ops = &xgene_gmac_ops;
1892         pdata->port_ops = &xgene_gport_ops;
1893         pdata->rm = RM3;
1894         pdata->rxq_cnt = 1;
1895         pdata->txq_cnt = 1;
1896         pdata->cq_cnt = 0;
1897         break;
1898     case PHY_INTERFACE_MODE_SGMII:
1899         pdata->mac_ops = &xgene_sgmac_ops;
1900         pdata->port_ops = &xgene_sgport_ops;
1901         pdata->rm = RM1;
1902         pdata->rxq_cnt = 1;
1903         pdata->txq_cnt = 1;
1904         pdata->cq_cnt = 1;
1905         break;
1906     default:
1907         pdata->mac_ops = &xgene_xgmac_ops;
1908         pdata->port_ops = &xgene_xgport_ops;
1909         pdata->cle_ops = &xgene_cle3in_ops;
1910         pdata->rm = RM0;
1911         if (!pdata->rxq_cnt) {
1912             pdata->rxq_cnt = XGENE_NUM_RX_RING;
1913             pdata->txq_cnt = XGENE_NUM_TX_RING;
1914             pdata->cq_cnt = XGENE_NUM_TXC_RING;
1915         }
1916         break;
1917     }
1918 
1919     if (pdata->enet_id == XGENE_ENET1) {
1920         switch (pdata->port_id) {
1921         case 0:
1922             if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1923                 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1924                 pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1925                 pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1926                 pdata->ring_num = START_RING_NUM_0;
1927             } else {
1928                 pdata->cpu_bufnum = START_CPU_BUFNUM_0;
1929                 pdata->eth_bufnum = START_ETH_BUFNUM_0;
1930                 pdata->bp_bufnum = START_BP_BUFNUM_0;
1931                 pdata->ring_num = START_RING_NUM_0;
1932             }
1933             break;
1934         case 1:
1935             if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1936                 pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
1937                 pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
1938                 pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
1939                 pdata->ring_num = XG_START_RING_NUM_1;
1940             } else {
1941                 pdata->cpu_bufnum = START_CPU_BUFNUM_1;
1942                 pdata->eth_bufnum = START_ETH_BUFNUM_1;
1943                 pdata->bp_bufnum = START_BP_BUFNUM_1;
1944                 pdata->ring_num = START_RING_NUM_1;
1945             }
1946             break;
1947         default:
1948             break;
1949         }
1950         pdata->ring_ops = &xgene_ring1_ops;
1951     } else {
1952         switch (pdata->port_id) {
1953         case 0:
1954             pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1955             pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1956             pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1957             pdata->ring_num = X2_START_RING_NUM_0;
1958             break;
1959         case 1:
1960             pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
1961             pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
1962             pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
1963             pdata->ring_num = X2_START_RING_NUM_1;
1964             break;
1965         default:
1966             break;
1967         }
1968         pdata->rm = RM0;
1969         pdata->ring_ops = &xgene_ring2_ops;
1970     }
1971 }
1972 
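/* Register one NAPI context per Rx ring and per Tx completion ring. */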
1973 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
1974 {
1975     struct napi_struct *napi;
1976     int i;
1977 
1978     for (i = 0; i < pdata->rxq_cnt; i++) {
1979         napi = &pdata->rx_ring[i]->napi;
1980         netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1981                    NAPI_POLL_WEIGHT);
1982     }
1983 
1984     for (i = 0; i < pdata->cq_cnt; i++) {
1985         napi = &pdata->tx_ring[i]->cp_ring->napi;
1986         netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1987                    NAPI_POLL_WEIGHT);
1988     }
1989 }
1990 
1991 #ifdef CONFIG_ACPI
1992 static const struct acpi_device_id xgene_enet_acpi_match[] = {
1993     { "APMC0D05", XGENE_ENET1},
1994     { "APMC0D30", XGENE_ENET1},
1995     { "APMC0D31", XGENE_ENET1},
1996     { "APMC0D3F", XGENE_ENET1},
1997     { "APMC0D26", XGENE_ENET2},
1998     { "APMC0D25", XGENE_ENET2},
1999     { }
2000 };
2001 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
2002 #endif
2003 
2004 static const struct of_device_id xgene_enet_of_match[] = {
2005     {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
2006     {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
2007     {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
2008     {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
2009     {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
2010     {},
2011 };
2012 
2013 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
2014 
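/* Allocate the net_device, discover the port resources, bring up the
 * hardware and MDIO/link-state handling, then register with the stack.
 */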
2015 static int xgene_enet_probe(struct platform_device *pdev)
2016 {
2017     struct net_device *ndev;
2018     struct xgene_enet_pdata *pdata;
2019     struct device *dev = &pdev->dev;
2020     void (*link_state)(struct work_struct *);
2021     const struct of_device_id *of_id;
2022     int ret;
2023 
2024     ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
2025                   XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
2026     if (!ndev)
2027         return -ENOMEM;
2028 
2029     pdata = netdev_priv(ndev);
2030 
2031     pdata->pdev = pdev;
2032     pdata->ndev = ndev;
2033     SET_NETDEV_DEV(ndev, dev);
2034     platform_set_drvdata(pdev, pdata);
2035     ndev->netdev_ops = &xgene_ndev_ops;
2036     xgene_enet_set_ethtool_ops(ndev);
2037     ndev->features |= NETIF_F_IP_CSUM |
2038               NETIF_F_GSO |
2039               NETIF_F_GRO |
2040               NETIF_F_SG;
2041 
2042     of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
2043     if (of_id) {
2044         pdata->enet_id = (enum xgene_enet_id)of_id->data;
2045     }
2046 #ifdef CONFIG_ACPI
2047     else {
2048         const struct acpi_device_id *acpi_id;
2049 
2050         acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
2051         if (acpi_id)
2052             pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
2053     }
2054 #endif
2055     if (!pdata->enet_id) {
2056         ret = -ENODEV;
2057         goto err;
2058     }
2059 
2060     ret = xgene_enet_get_resources(pdata);
2061     if (ret)
2062         goto err;
2063 
2064     xgene_enet_setup_ops(pdata);
2065     spin_lock_init(&pdata->mac_lock);
2066 
2067     if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
2068         ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
2069         spin_lock_init(&pdata->mss_lock);
2070     }
2071     ndev->hw_features = ndev->features;
2072 
2073     ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
2074     if (ret) {
2075         netdev_err(ndev, "No usable DMA configuration\n");
2076         goto err;
2077     }
2078 
2079     xgene_enet_check_phy_handle(pdata);
2080 
2081     ret = xgene_enet_init_hw(pdata);
2082     if (ret)
2083         goto err2;
2084 
2085     link_state = pdata->mac_ops->link_state;
2086     if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
2087         INIT_DELAYED_WORK(&pdata->link_work, link_state);
2088     } else if (!pdata->mdio_driver) {
2089         if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2090             ret = xgene_enet_mdio_config(pdata);
2091         else
2092             INIT_DELAYED_WORK(&pdata->link_work, link_state);
2093 
2094         if (ret)
2095             goto err1;
2096     }
2097 
2098     spin_lock_init(&pdata->stats_lock);
2099     ret = xgene_extd_stats_init(pdata);
2100     if (ret)
2101         goto err1;
2102 
2103     xgene_enet_napi_add(pdata);
2104     ret = register_netdev(ndev);
2105     if (ret) {
2106         netdev_err(ndev, "Failed to register netdev\n");
2107         goto err1;
2108     }
2109 
2110     return 0;
2111 
2112 err1:
2113     /*
2114      * If necessary, free_netdev() will call netif_napi_del() and undo
2115      * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
2116      */
2117 
2118     xgene_enet_delete_desc_rings(pdata);
2119 
2120 err2:
2121     if (pdata->mdio_driver)
2122         xgene_enet_phy_disconnect(pdata);
2123     else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2124         xgene_enet_mdio_remove(pdata);
2125 err:
2126     free_netdev(ndev);
2127     return ret;
2128 }
2129 
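/* Undo probe: close the interface, detach the PHY or MDIO bus, unregister
 * the net_device and release the rings and port.
 */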
2130 static int xgene_enet_remove(struct platform_device *pdev)
2131 {
2132     struct xgene_enet_pdata *pdata;
2133     struct net_device *ndev;
2134 
2135     pdata = platform_get_drvdata(pdev);
2136     ndev = pdata->ndev;
2137 
2138     rtnl_lock();
2139     if (netif_running(ndev))
2140         dev_close(ndev);
2141     rtnl_unlock();
2142 
2143     if (pdata->mdio_driver)
2144         xgene_enet_phy_disconnect(pdata);
2145     else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
2146         xgene_enet_mdio_remove(pdata);
2147 
2148     unregister_netdev(ndev);
2149     xgene_enet_delete_desc_rings(pdata);
2150     pdata->port_ops->shutdown(pdata);
2151     free_netdev(ndev);
2152 
2153     return 0;
2154 }
2155 
2156 static void xgene_enet_shutdown(struct platform_device *pdev)
2157 {
2158     struct xgene_enet_pdata *pdata;
2159 
2160     pdata = platform_get_drvdata(pdev);
2161     if (!pdata)
2162         return;
2163 
2164     if (!pdata->ndev)
2165         return;
2166 
2167     xgene_enet_remove(pdev);
2168 }
2169 
2170 static struct platform_driver xgene_enet_driver = {
2171     .driver = {
2172            .name = "xgene-enet",
2173            .of_match_table = of_match_ptr(xgene_enet_of_match),
2174            .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
2175     },
2176     .probe = xgene_enet_probe,
2177     .remove = xgene_enet_remove,
2178     .shutdown = xgene_enet_shutdown,
2179 };
2180 
2181 module_platform_driver(xgene_enet_driver);
2182 
2183 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
2184 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
2185 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
2186 MODULE_LICENSE("GPL");