#include "xgbe.h"
#include "xgbe-common.h"

static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);

static void xgbe_free_ring(struct xgbe_prv_data *pdata,
			   struct xgbe_ring *ring)
{
	struct xgbe_ring_data *rdata;
	unsigned int i;

	if (!ring)
		return;

	/* Unmap and free the data associated with each descriptor */
	if (ring->rdata) {
		for (i = 0; i < ring->rdesc_count; i++) {
			rdata = XGBE_GET_DESC_DATA(ring, i);
			xgbe_unmap_rdata(pdata, rdata);
		}

		kfree(ring->rdata);
		ring->rdata = NULL;
	}

	/* Release any Rx header pages still owned by the ring */
	if (ring->rx_hdr_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
			       ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_hdr_pa.pages);

		ring->rx_hdr_pa.pages = NULL;
		ring->rx_hdr_pa.pages_len = 0;
		ring->rx_hdr_pa.pages_offset = 0;
		ring->rx_hdr_pa.pages_dma = 0;
	}

	/* Release any Rx buffer pages still owned by the ring */
	if (ring->rx_buf_pa.pages) {
		dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
			       ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
		put_page(ring->rx_buf_pa.pages);

		ring->rx_buf_pa.pages = NULL;
		ring->rx_buf_pa.pages_len = 0;
		ring->rx_buf_pa.pages_offset = 0;
		ring->rx_buf_pa.pages_dma = 0;
	}

	/* Free the hardware descriptor ring itself */
	if (ring->rdesc) {
		dma_free_coherent(pdata->dev,
				  (sizeof(struct xgbe_ring_desc) *
				   ring->rdesc_count),
				  ring->rdesc, ring->rdesc_dma);
		ring->rdesc = NULL;
	}
}

static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_free_ring_resources\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		xgbe_free_ring(pdata, channel->tx_ring);
		xgbe_free_ring(pdata, channel->rx_ring);
	}

	DBGPR("<--xgbe_free_ring_resources\n");
}

static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem;

	/* Prefer the requested NUMA node, fall back to any node */
	mem = kzalloc_node(size, GFP_KERNEL, node);
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}

static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	void *mem;
	int cur_node = dev_to_node(dev);

	/* Temporarily bind the device to the requested node so the
	 * coherent allocation is attempted there first
	 */
	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, cur_node);

	/* Fall back to an allocation on any node */
	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}

static int xgbe_init_ring(struct xgbe_prv_data *pdata,
			  struct xgbe_ring *ring, unsigned int rdesc_count)
{
	size_t size;

	if (!ring)
		return 0;

	/* Allocate the hardware descriptor ring (coherent DMA memory) */
	size = rdesc_count * sizeof(struct xgbe_ring_desc);

	ring->rdesc_count = rdesc_count;
	ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
					  ring->node);
	if (!ring->rdesc)
		return -ENOMEM;

	/* Allocate the per-descriptor software state */
	size = rdesc_count * sizeof(struct xgbe_ring_data);

	ring->rdata = xgbe_alloc_node(size, ring->node);
	if (!ring->rdata)
		return -ENOMEM;

	netif_dbg(pdata, drv, pdata->netdev,
		  "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
		  ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);

	return 0;
}

static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;
	int ret;

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->tx_ring,
				     pdata->tx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Tx ring\n");
			goto err_ring;
		}

		netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
			  channel->name);

		ret = xgbe_init_ring(pdata, channel->rx_ring,
				     pdata->rx_desc_count);
		if (ret) {
			netdev_alert(pdata->netdev,
				     "error initializing Rx ring\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	xgbe_free_ring_resources(pdata);

	return ret;
}

static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
			    struct xgbe_page_alloc *pa, int alloc_order,
			    int node)
{
	struct page *pages = NULL;
	dma_addr_t pages_dma;
	gfp_t gfp;
	int order;

again:
	order = alloc_order;

	/* Try to obtain pages, decreasing the order if necessary */
	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
	while (order >= 0) {
		pages = alloc_pages_node(node, gfp, order);
		if (pages)
			break;

		order--;
	}

	/* If no local pages were available, retry without a node preference */
	if (!pages && (node != NUMA_NO_NODE)) {
		node = NUMA_NO_NODE;
		goto again;
	}

	if (!pages)
		return -ENOMEM;

	/* Map the pages for DMA */
	pages_dma = dma_map_page(pdata->dev, pages, 0,
				 PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(pdata->dev, pages_dma)) {
		put_page(pages);
		return -ENOMEM;
	}

	pa->pages = pages;
	pa->pages_len = PAGE_SIZE << order;
	pa->pages_offset = 0;
	pa->pages_dma = pages_dma;

	return 0;
}

static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
				 struct xgbe_page_alloc *pa,
				 unsigned int len)
{
	get_page(pa->pages);
	bd->pa = *pa;

	bd->dma_base = pa->pages_dma;
	bd->dma_off = pa->pages_offset;
	bd->dma_len = len;

	pa->pages_offset += len;
	if ((pa->pages_offset + len) > pa->pages_len) {
		/* This buffer becomes responsible for unmapping the page(s) */
		bd->pa_unmap = *pa;

		/* Force a new allocation next time */
		pa->pages = NULL;
		pa->pages_len = 0;
		pa->pages_offset = 0;
		pa->pages_dma = 0;
	}
}
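
/*
 * Worked example of the carving above (assumed numbers, not driver
 * defaults): with a 4 KiB page allocation and len = 2048, the first call
 * hands out offsets 0..2047, the second hands out 2048..4095 and, since a
 * further 2048-byte slice would overrun pages_len, also takes ownership of
 * the DMA mapping through pa_unmap and clears *pa so that the next
 * xgbe_map_rx_buffer() call obtains a fresh page set.
 */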

static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
			      struct xgbe_ring *ring,
			      struct xgbe_ring_data *rdata)
{
	int ret;

	if (!ring->rx_hdr_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
		if (ret)
			return ret;
	}

	if (!ring->rx_buf_pa.pages) {
		ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
				       PAGE_ALLOC_COSTLY_ORDER, ring->node);
		if (ret)
			return ret;
	}

	/* Set up the header page info */
	xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
			     XGBE_SKB_ALLOC_SIZE);

	/* Set up the buffer page info */
	xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
			     pdata->rx_buf_size);

	return 0;
}

static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->tx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;
		memset(&ring->tx, 0, sizeof(ring->tx));

		hw_if->tx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
}

static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_ring_data *rdata;
	dma_addr_t rdesc_dma;
	unsigned int i, j;

	DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];
		ring = channel->rx_ring;
		if (!ring)
			break;

		rdesc = ring->rdesc;
		rdesc_dma = ring->rdesc_dma;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);

			rdata->rdesc = rdesc;
			rdata->rdesc_dma = rdesc_dma;

			if (xgbe_map_rx_buffer(pdata, ring, rdata))
				break;

			rdesc++;
			rdesc_dma += sizeof(struct xgbe_ring_desc);
		}

		ring->cur = 0;
		ring->dirty = 0;

		hw_if->rx_desc_init(channel);
	}

	DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}

static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	/* Drop the Rx page references; only the descriptor that owns the
	 * mapping (pa_unmap set) also unmaps the pages
	 */
	if (rdata->rx.hdr.pa.pages)
		put_page(rdata->rx.hdr.pa.pages);

	if (rdata->rx.hdr.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
			       rdata->rx.hdr.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.hdr.pa_unmap.pages);
	}

	if (rdata->rx.buf.pa.pages)
		put_page(rdata->rx.buf.pa.pages);

	if (rdata->rx.buf.pa_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
			       rdata->rx.buf.pa_unmap.pages_len,
			       DMA_FROM_DEVICE);
		put_page(rdata->rx.buf.pa_unmap.pages);
	}

	memset(&rdata->tx, 0, sizeof(rdata->tx));
	memset(&rdata->rx, 0, sizeof(rdata->rx));

	rdata->mapped_as_page = 0;

	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}

static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	skb_frag_t *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb header: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, packet->header_len);

		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) linear packet data */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "skb data: index=%u, dma=%pad, len=%u\n",
			  cur_index, &skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map each skb fragment, splitting it if it exceeds the maximum
	 * buffer size for a single descriptor
	 */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		netif_dbg(pdata, tx_queued, pdata->netdev,
			  "mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;
			netif_dbg(pdata, tx_queued, pdata->netdev,
				  "skb frag: index=%u, dma=%pad, len=%u\n",
				  cur_index, &skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb in the last entry actually used.  Some data has
	 * always been mapped, so cur_index has advanced past the last
	 * mapped descriptor - use the entry at cur_index - 1.
	 */
	rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	/* Undo any mappings performed so far */
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}
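
/*
 * Descriptor accounting sketch (illustrative, assumed sizes): for a TSO
 * skb with a 66-byte header, ~3 KiB of remaining linear data and one page
 * fragment, the mapping above consumes one reserved slot for a context
 * descriptor (only when the MSS or VLAN tag changed), one descriptor for
 * the header, one for the remaining linear data and
 * DIV_ROUND_UP(frag_size, XGBE_TX_MAX_BUF_SIZE) descriptors for the
 * fragment; the skb pointer is stored only in the last entry used.
 */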

void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->map_rx_buffer = xgbe_map_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}
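
/*
 * Usage sketch (not part of this file; the caller and the desc_if field
 * are assumptions based on how the rest of the xgbe driver is structured):
 * the core driver obtains these callbacks once and then drives ring setup
 * and teardown through them, roughly as follows.
 *
 *	struct xgbe_desc_if *desc_if = &pdata->desc_if;
 *
 *	xgbe_init_function_ptrs_desc(desc_if);
 *	if (desc_if->alloc_ring_resources(pdata))
 *		return -ENOMEM;
 *	desc_if->wrapper_tx_desc_init(pdata);
 *	desc_if->wrapper_rx_desc_init(pdata);
 *	...
 *	desc_if->free_ring_resources(pdata);
 */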