/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"
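
/* Release everything a descriptor data entry may hold: the Tx DMA
 * mapping (page or single buffer), the socket buffer itself, and any
 * Rx header/buffer page references, then reset the entry for reuse.
 */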
static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
                                   struct xlgmac_desc_data *desc_data)
{
    if (desc_data->skb_dma) {
        if (desc_data->mapped_as_page) {
            dma_unmap_page(pdata->dev, desc_data->skb_dma,
                           desc_data->skb_dma_len, DMA_TO_DEVICE);
        } else {
            dma_unmap_single(pdata->dev, desc_data->skb_dma,
                             desc_data->skb_dma_len, DMA_TO_DEVICE);
        }
        desc_data->skb_dma = 0;
        desc_data->skb_dma_len = 0;
    }

    if (desc_data->skb) {
        dev_kfree_skb_any(desc_data->skb);
        desc_data->skb = NULL;
    }

    if (desc_data->rx.hdr.pa.pages)
        put_page(desc_data->rx.hdr.pa.pages);

    if (desc_data->rx.hdr.pa_unmap.pages) {
        dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
                       desc_data->rx.hdr.pa_unmap.pages_len,
                       DMA_FROM_DEVICE);
        put_page(desc_data->rx.hdr.pa_unmap.pages);
    }

    if (desc_data->rx.buf.pa.pages)
        put_page(desc_data->rx.buf.pa.pages);

    if (desc_data->rx.buf.pa_unmap.pages) {
        dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
                       desc_data->rx.buf.pa_unmap.pages_len,
                       DMA_FROM_DEVICE);
        put_page(desc_data->rx.buf.pa_unmap.pages);
    }

    memset(&desc_data->tx, 0, sizeof(desc_data->tx));
    memset(&desc_data->rx, 0, sizeof(desc_data->rx));

    desc_data->mapped_as_page = 0;

    if (desc_data->state_saved) {
        desc_data->state_saved = 0;
        desc_data->state.skb = NULL;
        desc_data->state.len = 0;
        desc_data->state.error = 0;
    }
}
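
/* Tear down one ring: unmap every descriptor data entry, free the
 * descriptor data array, drop any Rx page allocations still attached
 * to the ring, and free the coherent DMA descriptor block.
 */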
static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
                             struct xlgmac_ring *ring)
{
    struct xlgmac_desc_data *desc_data;
    unsigned int i;

    if (!ring)
        return;

    if (ring->desc_data_head) {
        for (i = 0; i < ring->dma_desc_count; i++) {
            desc_data = XLGMAC_GET_DESC_DATA(ring, i);
            xlgmac_unmap_desc_data(pdata, desc_data);
        }

        kfree(ring->desc_data_head);
        ring->desc_data_head = NULL;
    }

    if (ring->rx_hdr_pa.pages) {
        dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
                       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
        put_page(ring->rx_hdr_pa.pages);

        ring->rx_hdr_pa.pages = NULL;
        ring->rx_hdr_pa.pages_len = 0;
        ring->rx_hdr_pa.pages_offset = 0;
        ring->rx_hdr_pa.pages_dma = 0;
    }

    if (ring->rx_buf_pa.pages) {
        dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
                       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
        put_page(ring->rx_buf_pa.pages);

        ring->rx_buf_pa.pages = NULL;
        ring->rx_buf_pa.pages_len = 0;
        ring->rx_buf_pa.pages_offset = 0;
        ring->rx_buf_pa.pages_dma = 0;
    }

    if (ring->dma_desc_head) {
        dma_free_coherent(pdata->dev,
                          (sizeof(struct xlgmac_dma_desc) *
                           ring->dma_desc_count),
                          ring->dma_desc_head,
                          ring->dma_desc_head_addr);
        ring->dma_desc_head = NULL;
    }
}
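
/* Set up one ring: allocate a coherent block of hardware DMA
 * descriptors and a matching array of descriptor data entries. A
 * partial allocation is left in place for the caller to release via
 * xlgmac_free_ring().
 */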
static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
                            struct xlgmac_ring *ring,
                            unsigned int dma_desc_count)
{
    if (!ring)
        return 0;

    /* Descriptors */
    ring->dma_desc_count = dma_desc_count;
    ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
                                             (sizeof(struct xlgmac_dma_desc) *
                                              dma_desc_count),
                                             &ring->dma_desc_head_addr,
                                             GFP_KERNEL);
    if (!ring->dma_desc_head)
        return -ENOMEM;

    /* Array of descriptor data */
    ring->desc_data_head = kcalloc(dma_desc_count,
                                   sizeof(struct xlgmac_desc_data),
                                   GFP_KERNEL);
    if (!ring->desc_data_head)
        return -ENOMEM;

    netif_dbg(pdata, drv, pdata->netdev,
              "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
              ring->dma_desc_head,
              &ring->dma_desc_head_addr,
              ring->desc_data_head);

    return 0;
}
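
/* Free the Tx and Rx rings of every channel. */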
static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
{
    struct xlgmac_channel *channel;
    unsigned int i;

    if (!pdata->channel_head)
        return;

    channel = pdata->channel_head;
    for (i = 0; i < pdata->channel_count; i++, channel++) {
        xlgmac_free_ring(pdata, channel->tx_ring);
        xlgmac_free_ring(pdata, channel->rx_ring);
    }
}
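
/* Initialize the Tx and Rx rings of every channel, releasing all
 * rings again if any single initialization fails.
 */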
static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
{
    struct xlgmac_channel *channel;
    unsigned int i;
    int ret;

    channel = pdata->channel_head;
    for (i = 0; i < pdata->channel_count; i++, channel++) {
        netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
                  channel->name);

        ret = xlgmac_init_ring(pdata, channel->tx_ring,
                               pdata->tx_desc_count);
        if (ret) {
            netdev_alert(pdata->netdev,
                         "error initializing Tx ring\n");
            goto err_init_ring;
        }

        netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
                  channel->name);

        ret = xlgmac_init_ring(pdata, channel->rx_ring,
                               pdata->rx_desc_count);
        if (ret) {
            netdev_alert(pdata->netdev,
                         "error initializing Rx ring\n");
            goto err_init_ring;
        }
    }

    return 0;

err_init_ring:
    xlgmac_free_rings(pdata);

    return ret;
}
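
/* Free the channel array and the shared ring arrays. The Tx and Rx
 * rings were allocated as single blocks, so freeing the first
 * channel's ring pointers releases them all.
 */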
static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
{
    if (!pdata->channel_head)
        return;

    kfree(pdata->channel_head->tx_ring);
    pdata->channel_head->tx_ring = NULL;

    kfree(pdata->channel_head->rx_ring);
    pdata->channel_head->rx_ring = NULL;

    kfree(pdata->channel_head);

    pdata->channel_head = NULL;
    pdata->channel_count = 0;
}
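
/* Allocate the channel array plus one block of Tx rings and one of
 * Rx rings, then wire each channel to its DMA register window, its
 * per-channel interrupt (when used) and its slice of the ring blocks.
 */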
static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
{
    struct xlgmac_channel *channel_head, *channel;
    struct xlgmac_ring *tx_ring, *rx_ring;
    int ret = -ENOMEM;
    unsigned int i;

    channel_head = kcalloc(pdata->channel_count,
                           sizeof(struct xlgmac_channel), GFP_KERNEL);
    if (!channel_head)
        return ret;

    netif_dbg(pdata, drv, pdata->netdev,
              "channel_head=%p\n", channel_head);

    tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
                      GFP_KERNEL);
    if (!tx_ring)
        goto err_tx_ring;

    rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
                      GFP_KERNEL);
    if (!rx_ring)
        goto err_rx_ring;

    for (i = 0, channel = channel_head; i < pdata->channel_count;
         i++, channel++) {
        snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
        channel->pdata = pdata;
        channel->queue_index = i;
        channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
                            (DMA_CH_INC * i);

        if (pdata->per_channel_irq) {
            /* Get the per DMA interrupt */
            ret = pdata->channel_irq[i];
            if (ret < 0) {
                netdev_err(pdata->netdev,
                           "get_irq %u failed\n",
                           i + 1);
                goto err_irq;
            }
            channel->dma_irq = ret;
        }

        /* Index into the ring blocks rather than advancing the block
         * pointers so the error paths below still kfree() the block
         * starts returned by kcalloc().
         */
        if (i < pdata->tx_ring_count)
            channel->tx_ring = tx_ring + i;

        if (i < pdata->rx_ring_count)
            channel->rx_ring = rx_ring + i;

        netif_dbg(pdata, drv, pdata->netdev,
                  "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
                  channel->name, channel->dma_regs,
                  channel->tx_ring, channel->rx_ring);
    }

    pdata->channel_head = channel_head;

    return 0;

err_irq:
    kfree(rx_ring);

err_rx_ring:
    kfree(tx_ring);

err_tx_ring:
    kfree(channel_head);

    return ret;
}
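
/* Ring contents must be released before the ring and channel memory
 * itself is freed.
 */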
static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
{
    xlgmac_free_rings(pdata);

    xlgmac_free_channels(pdata);
}
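
/* Allocate channels first (the rings hang off them), then the rings;
 * unwind both on any failure.
 */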
static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
{
    int ret;

    ret = xlgmac_alloc_channels(pdata);
    if (ret)
        goto err_alloc;

    ret = xlgmac_alloc_rings(pdata);
    if (ret)
        goto err_alloc;

    return 0;

err_alloc:
    xlgmac_free_channels_and_rings(pdata);

    return ret;
}
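
/* Allocate a block of pages for Rx buffers, falling back to smaller
 * orders under memory pressure, and map it for device-to-memory DMA.
 * On success the allocation is recorded in @pa.
 */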
static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
                              struct xlgmac_page_alloc *pa,
                              gfp_t gfp, int order)
{
    struct page *pages = NULL;
    dma_addr_t pages_dma;

    /* Try to obtain pages, decreasing order if necessary */
    gfp |= __GFP_COMP | __GFP_NOWARN;
    while (order >= 0) {
        pages = alloc_pages(gfp, order);
        if (pages)
            break;

        order--;
    }
    if (!pages)
        return -ENOMEM;

    /* Map the pages */
    pages_dma = dma_map_page(pdata->dev, pages, 0,
                             PAGE_SIZE << order, DMA_FROM_DEVICE);
    if (dma_mapping_error(pdata->dev, pages_dma)) {
        put_page(pages);
        return -ENOMEM;
    }

    pa->pages = pages;
    pa->pages_len = PAGE_SIZE << order;
    pa->pages_offset = 0;
    pa->pages_dma = pages_dma;

    return 0;
}
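
/* Carve @len bytes out of the shared page allocation for one buffer,
 * taking a page reference for it. Once no further slice of @len would
 * fit, responsibility for unmapping the pages passes to this buffer
 * and the allocation is cleared so fresh pages are obtained next time.
 */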
static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
                                   struct xlgmac_page_alloc *pa,
                                   unsigned int len)
{
    get_page(pa->pages);
    bd->pa = *pa;

    bd->dma_base = pa->pages_dma;
    bd->dma_off = pa->pages_offset;
    bd->dma_len = len;

    pa->pages_offset += len;
    if ((pa->pages_offset + len) > pa->pages_len) {
        /* This data descriptor is responsible for unmapping page(s) */
        bd->pa_unmap = *pa;

        /* Get a new allocation next time */
        pa->pages = NULL;
        pa->pages_len = 0;
        pa->pages_offset = 0;
        pa->pages_dma = 0;
    }
}
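
/* Attach header and data buffers to one Rx descriptor, replenishing
 * the ring's shared header and buffer page allocations as they are
 * used up.
 */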
static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
                                struct xlgmac_ring *ring,
                                struct xlgmac_desc_data *desc_data)
{
    int order, ret;

    if (!ring->rx_hdr_pa.pages) {
        ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
                                 GFP_ATOMIC, 0);
        if (ret)
            return ret;
    }

    if (!ring->rx_buf_pa.pages) {
        order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
        ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
                                 GFP_ATOMIC, order);
        if (ret)
            return ret;
    }

    /* Set up the header page info */
    xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
                           XLGMAC_SKB_ALLOC_SIZE);

    /* Set up the buffer page info */
    xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
                           pdata->rx_buf_size);

    return 0;
}
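
/* Point each Tx descriptor data entry at its hardware descriptor and
 * DMA address, reset the ring state, and have the hardware layer
 * program the channel's Tx descriptor registers.
 */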
static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
{
    struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
    struct xlgmac_desc_data *desc_data;
    struct xlgmac_dma_desc *dma_desc;
    struct xlgmac_channel *channel;
    struct xlgmac_ring *ring;
    dma_addr_t dma_desc_addr;
    unsigned int i, j;

    channel = pdata->channel_head;
    for (i = 0; i < pdata->channel_count; i++, channel++) {
        ring = channel->tx_ring;
        if (!ring)
            break;

        dma_desc = ring->dma_desc_head;
        dma_desc_addr = ring->dma_desc_head_addr;

        for (j = 0; j < ring->dma_desc_count; j++) {
            desc_data = XLGMAC_GET_DESC_DATA(ring, j);

            desc_data->dma_desc = dma_desc;
            desc_data->dma_desc_addr = dma_desc_addr;

            dma_desc++;
            dma_desc_addr += sizeof(struct xlgmac_dma_desc);
        }

        ring->cur = 0;
        ring->dirty = 0;
        memset(&ring->tx, 0, sizeof(ring->tx));

        hw_ops->tx_desc_init(channel);
    }
}
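
/* Point each Rx descriptor data entry at its hardware descriptor and
 * DMA address, attach Rx buffers to it, reset the ring state, and
 * have the hardware layer program the channel's Rx descriptor
 * registers.
 */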
static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
{
    struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
    struct xlgmac_desc_data *desc_data;
    struct xlgmac_dma_desc *dma_desc;
    struct xlgmac_channel *channel;
    struct xlgmac_ring *ring;
    dma_addr_t dma_desc_addr;
    unsigned int i, j;

    channel = pdata->channel_head;
    for (i = 0; i < pdata->channel_count; i++, channel++) {
        ring = channel->rx_ring;
        if (!ring)
            break;

        dma_desc = ring->dma_desc_head;
        dma_desc_addr = ring->dma_desc_head_addr;

        for (j = 0; j < ring->dma_desc_count; j++) {
            desc_data = XLGMAC_GET_DESC_DATA(ring, j);

            desc_data->dma_desc = dma_desc;
            desc_data->dma_desc_addr = dma_desc_addr;

            if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
                break;

            dma_desc++;
            dma_desc_addr += sizeof(struct xlgmac_dma_desc);
        }

        ring->cur = 0;
        ring->dirty = 0;

        hw_ops->rx_desc_init(channel);
    }
}
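
/* Map an skb for transmission across as many descriptors as needed:
 * an optional slot reserved for a context descriptor (TSO/VLAN), the
 * TSO header, the linear data in XLGMAC_TX_MAX_BUF_SIZE chunks, and
 * each page fragment. Returns the number of descriptor entries used,
 * or 0 after unwinding all mappings on a DMA mapping failure.
 */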
static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
                             struct sk_buff *skb)
{
    struct xlgmac_pdata *pdata = channel->pdata;
    struct xlgmac_ring *ring = channel->tx_ring;
    unsigned int start_index, cur_index;
    struct xlgmac_desc_data *desc_data;
    unsigned int offset, datalen, len;
    struct xlgmac_pkt_info *pkt_info;
    skb_frag_t *frag;
    unsigned int tso, vlan;
    dma_addr_t skb_dma;
    unsigned int i;

    offset = 0;
    start_index = ring->cur;
    cur_index = ring->cur;

    pkt_info = &ring->pkt_info;
    pkt_info->desc_count = 0;
    pkt_info->length = 0;

    tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
                              TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
                              TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
    vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
                               TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
                               TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);

    /* Save space for a context descriptor if needed */
    if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
        (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
        cur_index++;
    desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);

    if (tso) {
        /* Map the TSO header */
        skb_dma = dma_map_single(pdata->dev, skb->data,
                                 pkt_info->header_len, DMA_TO_DEVICE);
        if (dma_mapping_error(pdata->dev, skb_dma)) {
            netdev_alert(pdata->netdev, "dma_map_single failed\n");
            goto err_out;
        }
        desc_data->skb_dma = skb_dma;
        desc_data->skb_dma_len = pkt_info->header_len;
        netif_dbg(pdata, tx_queued, pdata->netdev,
                  "skb header: index=%u, dma=%pad, len=%u\n",
                  cur_index, &skb_dma, pkt_info->header_len);

        offset = pkt_info->header_len;

        pkt_info->length += pkt_info->header_len;

        cur_index++;
        desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
    }

    /* Map the (remainder of the) packet */
    for (datalen = skb_headlen(skb) - offset; datalen; ) {
        len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);

        skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(pdata->dev, skb_dma)) {
            netdev_alert(pdata->netdev, "dma_map_single failed\n");
            goto err_out;
        }
        desc_data->skb_dma = skb_dma;
        desc_data->skb_dma_len = len;
        netif_dbg(pdata, tx_queued, pdata->netdev,
                  "skb data: index=%u, dma=%pad, len=%u\n",
                  cur_index, &skb_dma, len);

        datalen -= len;
        offset += len;

        pkt_info->length += len;

        cur_index++;
        desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
    }

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        netif_dbg(pdata, tx_queued, pdata->netdev,
                  "mapping frag %u\n", i);

        frag = &skb_shinfo(skb)->frags[i];
        offset = 0;

        for (datalen = skb_frag_size(frag); datalen; ) {
            len = min_t(unsigned int, datalen,
                        XLGMAC_TX_MAX_BUF_SIZE);

            skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
                                       len, DMA_TO_DEVICE);
            if (dma_mapping_error(pdata->dev, skb_dma)) {
                netdev_alert(pdata->netdev,
                             "skb_frag_dma_map failed\n");
                goto err_out;
            }
            desc_data->skb_dma = skb_dma;
            desc_data->skb_dma_len = len;
            desc_data->mapped_as_page = 1;
            netif_dbg(pdata, tx_queued, pdata->netdev,
                      "skb frag: index=%u, dma=%pad, len=%u\n",
                      cur_index, &skb_dma, len);

            datalen -= len;
            offset += len;

            pkt_info->length += len;

            cur_index++;
            desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
        }
    }

    /* Save the skb address in the last entry. We always have some data
     * that has been mapped so desc_data is always advanced past the last
     * piece of mapped data - use the entry pointed to by cur_index - 1.
     */
    desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
    desc_data->skb = skb;

    /* Save the number of descriptor entries used */
    pkt_info->desc_count = cur_index - start_index;

    return pkt_info->desc_count;

err_out:
    while (start_index < cur_index) {
        desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
        xlgmac_unmap_desc_data(pdata, desc_data);
    }

    return 0;
}
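
/* Publish the descriptor operations used by the rest of the driver. */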
void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
{
    desc_ops->alloc_channels_and_rings = xlgmac_alloc_channels_and_rings;
    desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
    desc_ops->map_tx_skb = xlgmac_map_tx_skb;
    desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
    desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
    desc_ops->tx_desc_init = xlgmac_tx_desc_init;
    desc_ops->rx_desc_init = xlgmac_rx_desc_init;
}