/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

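/* Release all resources held by a single ring: unmap and free each
 * descriptor-data entry, drop any partially consumed Rx header and
 * buffer page allocations, then free the coherent descriptor memory.
 */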
static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}

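/* Free the Tx and Rx rings of every channel. */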
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}

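/* Allocate zeroed memory, preferring the given NUMA node but falling
 * back to an unconstrained allocation if the node-local attempt fails.
 */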
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

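/* Allocate coherent DMA memory, preferring the given NUMA node. The
 * device's node is temporarily overridden for the first attempt; if
 * that fails, the allocation is retried with the original node
 * restored.
 */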
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	void *mem;
	int cur_node = dev_to_node(dev);

	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}

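/* Allocate the hardware descriptors and the matching per-descriptor
 * bookkeeping (struct xgbe_ring_data) for a ring, placing both on the
 * ring's preferred NUMA node when possible.
 */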
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	size_t size;

	if (!ring)
		return 0;

	/* Descriptors */
	size = rdesc_count * sizeof(struct xgbe_ring_desc);

	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Descriptor information */
	size = rdesc_count * sizeof(struct xgbe_ring_data);

	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

	return 0;
}

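/* Allocate descriptor memory for the Tx and Rx rings of every channel,
 * unwinding all rings on the first failure.
 */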
static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}

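/* Allocate and DMA-map pages for Rx use. The allocation order is
 * stepped down toward zero under memory pressure, and a failed
 * node-local attempt is retried without a node constraint before
 * giving up.
 */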
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, int alloc_order,
			    int node)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	gfp_t gfp;
	int order;

again:
	order = alloc_order;

	/* Try to obtain pages, decreasing order if necessary */
	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;

		order--;
	}

	/* If we couldn't get local pages, try getting from anywhere */
	if (!pages && (node != NUMA_NO_NODE)) {
		node = NUMA_NO_NODE;
		goto again;
	}

	if (!pages)
		return -ENOMEM;

	/* Map the pages */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

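/* Carve a len-byte buffer out of the current page allocation and record
 * it in the buffer descriptor. If the allocation cannot supply another
 * buffer of the same size, this descriptor takes over responsibility
 * for unmapping the pages and the allocation is reset so a fresh one is
 * obtained on the next call.
 */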
static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This data descriptor is responsible for unmapping page(s) */
		bd->pa_unmap = *pa;

		/* Get a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}

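/* Point an Rx descriptor-data entry at header and buffer space, first
 * replenishing the ring's shared page allocations if they have been
 * exhausted.
 */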
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}

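/* Assign each Tx descriptor-data entry its hardware descriptor (virtual
 * and DMA addresses), reset the ring state, and invoke the
 * hardware-specific Tx descriptor initialization for each channel.
 */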
static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

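/* Assign each Rx descriptor-data entry its hardware descriptor and
 * attach receive buffers, then invoke the hardware-specific Rx
 * descriptor initialization for each channel.
 */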
static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

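/* Return a descriptor-data entry to a clean state: unmap any Tx skb
 * data (page or single mapping), free the skb, drop Rx page references,
 * unmap any pages this entry became responsible for, and clear the
 * saved Tx/Rx and partial-packet state.
 */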
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}

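/* DMA-map an skb for transmit across one or more descriptors. A slot is
 * reserved for a context descriptor when a new TSO MSS or VLAN tag must
 * be programmed, the TSO header is mapped separately, and the linear
 * data and each fragment are split into chunks of at most
 * XGBE_TX_MAX_BUF_SIZE bytes. Returns the number of descriptors used,
 * or 0 after unwinding the mappings on error.
 */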
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	skb_frag_t *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry. We always have some data
	 * that has been mapped so rdata is always advanced past the last
	 * piece of mapped data - use the entry pointed to by cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}

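/* Populate the descriptor interface with the routines defined above. */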
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}