// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */

#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "prestera_dsa.h"
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_rxtx.h"
#include "prestera_devlink.h"

#define PRESTERA_SDMA_WAIT_MUL		10

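/* SDMA descriptor as seen by the hardware: word1 carries the ownership
 * bit (bit 31) and control flags, word2 carries the buffer size in its
 * low half and the packet byte count in its high half, while buff and
 * next hold device-view addresses of the data buffer and of the next
 * descriptor in the ring.
 */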
struct prestera_sdma_desc {
	__le32 word1;
	__le32 word2;
	__le32 buff;
	__le32 next;
} __packed __aligned(16);

#define PRESTERA_SDMA_BUFF_SIZE_MAX	1544

#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
	((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))

#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
	(PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)

#define PRESTERA_SDMA_RX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_RX_DESC_DMA_OWN	1

#define PRESTERA_SDMA_RX_QUEUE_NUM	8

#define PRESTERA_SDMA_RX_DESC_PER_Q	1000

#define PRESTERA_SDMA_TX_DESC_PER_Q	1000
#define PRESTERA_SDMA_TX_MAX_BURST	64

#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_TX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_TX_DESC_DMA_OWN	1U

#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
	(PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)

#define PRESTERA_SDMA_TX_DESC_LAST	BIT(20)
#define PRESTERA_SDMA_TX_DESC_FIRST	BIT(21)
#define PRESTERA_SDMA_TX_DESC_CALC_CRC	BIT(12)

#define PRESTERA_SDMA_TX_DESC_SINGLE	\
	(PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)

#define PRESTERA_SDMA_TX_DESC_INIT	\
	(PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)

#define PRESTERA_SDMA_RX_INTR_MASK_REG		0x2814
#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG	0x2680
#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n)	(0x260C + (n) * 16)

#define PRESTERA_SDMA_TX_QUEUE_DESC_REG		0x26C0
#define PRESTERA_SDMA_TX_QUEUE_START_REG	0x2868

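/* One ring slot: the descriptor, the skb currently attached to it, and
 * the DMA addresses of both.
 */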
struct prestera_sdma_buf {
	struct prestera_sdma_desc *desc;
	dma_addr_t desc_dma;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	bool is_used;
};

struct prestera_rx_ring {
	struct prestera_sdma_buf *bufs;
	int next_rx;
};

struct prestera_tx_ring {
	struct prestera_sdma_buf *bufs;
	int next_tx;
	int max_burst;
	int burst;
};

struct prestera_sdma {
	struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
	struct prestera_tx_ring tx_ring;
	struct prestera_switch *sw;
	struct dma_pool *desc_pool;
	struct work_struct tx_work;
	struct napi_struct rx_napi;
	struct net_device napi_dev;
	u32 map_addr;
	u64 dma_mask;
	/* protects state of the tx ring */
	spinlock_t tx_lock;
};

struct prestera_rxtx {
	struct prestera_sdma sdma;
};

static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
				  struct prestera_sdma_buf *buf)
{
	struct prestera_sdma_desc *desc;
	dma_addr_t dma;

	desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
	if (!desc)
		return -ENOMEM;

	buf->buf_dma = DMA_MAPPING_ERROR;
	buf->desc_dma = dma;
	buf->desc = desc;
	buf->skb = NULL;

	return 0;
}

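/* Translate a host DMA address into the device's address space using
 * the base offset obtained from prestera_hw_rxtx_init().
 */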
static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
{
	return sdma->map_addr + pa;
}

static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc,
				       dma_addr_t buf)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
	desc->word2 = cpu_to_le32(word);

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));

	/* make sure buffer is set before reset the descriptor */
	wmb();

	desc->word1 = cpu_to_le32(0xA0000000);
}

static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
				      struct prestera_sdma_buf *buf)
{
	struct device *dev = sdma->sw->dev->dev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto err_dma_map;

	if (buf->skb)
		dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
				 DMA_FROM_DEVICE);

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;

err_dma_map:
	kfree_skb(skb);

	return -ENOMEM;
}

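/* Hand the filled skb up and refill the ring slot with a fresh buffer;
 * if the refill fails, fall back to returning a copy of the packet so
 * the slot keeps its original buffer.
 */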
static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
						struct prestera_sdma_buf *buf)
{
	dma_addr_t buf_dma = buf->buf_dma;
	struct sk_buff *skb = buf->skb;
	u32 len = skb->len;
	int err;

	err = prestera_sdma_rx_skb_alloc(sdma, buf);
	if (err) {
		buf->buf_dma = buf_dma;
		buf->skb = skb;

		skb = alloc_skb(skb->len, GFP_ATOMIC);
		if (skb) {
			skb_put(skb, len);
			skb_copy_from_linear_data(buf->skb, skb->data, len);
		}
	}

	prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);

	return skb;
}

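/* Strip the DSA tag from a received frame, resolve the ingress port
 * from the tag and pass the skb on to devlink trap reporting.
 */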
static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
				     struct sk_buff *skb)
{
	struct prestera_port *port;
	struct prestera_dsa dsa;
	u32 hw_port, dev_id;
	u8 cpu_code;
	int err;

	skb_pull(skb, ETH_HLEN);

	/* ethertype field is part of the dsa header */
	err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
	if (err)
		return err;

	dev_id = dsa.hw_dev_num;
	hw_port = dsa.port_num;

	port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
	if (unlikely(!port)) {
		dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
				     dev_id, hw_port);
		return -ENOENT;
	}

	if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
		return -EINVAL;

	/* remove DSA tag and update checksum */
	skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);

	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
		ETH_ALEN * 2);

	skb_push(skb, ETH_HLEN);

	skb->protocol = eth_type_trans(skb, port->dev);

	if (dsa.vlan.is_tagged) {
		u16 tci = dsa.vlan.vid & VLAN_VID_MASK;

		tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
		if (dsa.vlan.cfi_bit)
			tci |= VLAN_CFI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}

	cpu_code = dsa.cpu_code;
	prestera_devlink_trap_report(port, skb, cpu_code);

	return 0;
}

static int prestera_sdma_next_rx_buf_idx(int buf_idx)
{
	return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
}

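/* NAPI poll: service the RX queues round-robin until either the budget
 * is spent or every queue reports that no more packets are pending.
 */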
static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	unsigned int rxq_done_map = 0;
	struct prestera_sdma *sdma;
	struct list_head rx_list;
	unsigned int qmask;
	int pkts_done = 0;
	int q;

	qmask = GENMASK(qnum - 1, 0);

	INIT_LIST_HEAD(&rx_list);

	sdma = container_of(napi, struct prestera_sdma, rx_napi);

	while (pkts_done < budget && rxq_done_map != qmask) {
		for (q = 0; q < qnum && pkts_done < budget; q++) {
			struct prestera_rx_ring *ring = &sdma->rx_ring[q];
			struct prestera_sdma_desc *desc;
			struct prestera_sdma_buf *buf;
			int buf_idx = ring->next_rx;
			struct sk_buff *skb;

			buf = &ring->bufs[buf_idx];
			desc = buf->desc;

			if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
				rxq_done_map &= ~BIT(q);
			} else {
				rxq_done_map |= BIT(q);
				continue;
			}

			pkts_done++;

			__skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));

			skb = prestera_sdma_rx_skb_get(sdma, buf);
			if (!skb)
				goto rx_next_buf;

			if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
				goto rx_next_buf;

			list_add_tail(&skb->list, &rx_list);
rx_next_buf:
			ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
		}
	}

	/* budget not exhausted: complete NAPI and re-enable RX interrupts */
	if (pkts_done < budget && napi_complete_done(napi, pkts_done))
		prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
			       GENMASK(9, 2));

	netif_receive_skb_list(&rx_list);

	return pkts_done;
}

static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int q, b;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		if (!ring->bufs)
			break;

		for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
			struct prestera_sdma_buf *buf = &ring->bufs[b];

			if (buf->desc_dma)
				dma_pool_free(sdma->desc_pool, buf->desc,
					      buf->desc_dma);

			if (!buf->skb)
				continue;

			if (buf->buf_dma != DMA_MAPPING_ERROR)
				dma_unmap_single(sdma->sw->dev->dev,
						 buf->buf_dma, buf->skb->len,
						 DMA_FROM_DEVICE);
			kfree_skb(buf->skb);
		}
	}
}

static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
{
	int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int err;
	int q;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_sdma_buf *head, *tail, *next, *prev;
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
		if (!ring->bufs)
			return -ENOMEM;

		ring->next_rx = 0;

		tail = &ring->bufs[bnum - 1];
		head = &ring->bufs[0];
		next = head;
		prev = next;

		do {
			err = prestera_sdma_buf_init(sdma, next);
			if (err)
				return err;

			err = prestera_sdma_rx_skb_alloc(sdma, next);
			if (err)
				return err;

			prestera_sdma_rx_desc_init(sdma, next->desc,
						   next->buf_dma);

			prestera_sdma_rx_desc_set_next(sdma, prev->desc,
						       next->desc_dma);

			prev = next;
			next++;
		} while (prev != tail);

		/* join tail with head to make a circular list */
		prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);

		prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
			       prestera_sdma_map(sdma, head->desc_dma));
	}

	/* make sure all rx descs are filled before enabling all rx queues */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(7, 0));

	return 0;
}

static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc)
{
	desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
	desc->word2 = 0;
}

static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
					  struct prestera_sdma_desc *desc,
					  dma_addr_t buf, size_t len)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
	desc->word2 = cpu_to_le32(word);
}

static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
{
	u32 word = le32_to_cpu(desc->word1);

	word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;

	/* make sure everything is written before enable xmit */
	wmb();

	desc->word1 = cpu_to_le32(word);
}

static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
				    struct prestera_sdma_buf *buf,
				    struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	dma_addr_t dma;

	dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma))
		return -ENOMEM;

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;
}

static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
				       struct prestera_sdma_buf *buf)
{
	struct device *dma_dev = sdma->sw->dev->dev;

	dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
}

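/* Deferred TX completion: walk the ring and release every buffer whose
 * descriptor the hardware has handed back to the CPU.
 */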
static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
{
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma *sdma;
	int b;

	sdma = container_of(work, struct prestera_sdma, tx_work);

	tx_ring = &sdma->tx_ring;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &tx_ring->bufs[b];

		if (!buf->is_used)
			continue;

		if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
			continue;

		prestera_sdma_tx_buf_unmap(sdma, buf);
		dev_consume_skb_any(buf->skb);
		buf->skb = NULL;

		/* make sure everything is cleaned up */
		wmb();

		buf->is_used = false;
	}
}

static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
{
	struct prestera_sdma_buf *head, *tail, *next, *prev;
	struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int err;

	INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
	spin_lock_init(&sdma->tx_lock);

	tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
	if (!tx_ring->bufs)
		return -ENOMEM;

	tail = &tx_ring->bufs[bnum - 1];
	head = &tx_ring->bufs[0];
	next = head;
	prev = next;

	tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
	tx_ring->burst = tx_ring->max_burst;
	tx_ring->next_tx = 0;

	do {
		err = prestera_sdma_buf_init(sdma, next);
		if (err)
			return err;

		next->is_used = false;

		prestera_sdma_tx_desc_init(sdma, next->desc);

		prestera_sdma_tx_desc_set_next(sdma, prev->desc,
					       next->desc_dma);

		prev = next;
		next++;
	} while (prev != tail);

	/* join tail with head to make a circular list */
	prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);

	/* make sure descriptors are written */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
		       prestera_sdma_map(sdma, head->desc_dma));

	return 0;
}

static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
{
	struct prestera_tx_ring *ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int b;

	cancel_work_sync(&sdma->tx_work);

	if (!ring->bufs)
		return;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &ring->bufs[b];

		if (buf->desc)
			dma_pool_free(sdma->desc_pool, buf->desc,
				      buf->desc_dma);

		if (!buf->skb)
			continue;

		dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
				 buf->skb->len, DMA_TO_DEVICE);

		dev_consume_skb_any(buf->skb);
	}
}

static void prestera_rxtx_handle_event(struct prestera_switch *sw,
				       struct prestera_event *evt,
				       void *arg)
{
	struct prestera_sdma *sdma = arg;

	if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
		return;

	/* mask RX interrupts until the NAPI poll has drained the queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
	napi_schedule(&sdma->rx_napi);
}

static int prestera_sdma_switch_init(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;
	struct device *dev = sw->dev->dev;
	struct prestera_rxtx_params p;
	int err;

	p.use_sdma = true;

	err = prestera_hw_rxtx_init(sw, &p);
	if (err) {
		dev_err(dev, "failed to init rxtx by hw\n");
		return err;
	}

	sdma->dma_mask = dma_get_mask(dev);
	sdma->map_addr = p.map_addr;
	sdma->sw = sw;

	sdma->desc_pool = dma_pool_create("desc_pool", dev,
					  sizeof(struct prestera_sdma_desc),
					  16, 0);
	if (!sdma->desc_pool)
		return -ENOMEM;

	err = prestera_sdma_rx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init rx ring\n");
		goto err_rx_init;
	}

	err = prestera_sdma_tx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init tx ring\n");
		goto err_tx_init;
	}

	err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
						 prestera_rxtx_handle_event,
						 sdma);
	if (err)
		goto err_evt_register;

	init_dummy_netdev(&sdma->napi_dev);

	netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
	napi_enable(&sdma->rx_napi);

	return 0;

err_evt_register:
err_tx_init:
	prestera_sdma_tx_fini(sdma);
err_rx_init:
	prestera_sdma_rx_fini(sdma);

	dma_pool_destroy(sdma->desc_pool);
	return err;
}

static void prestera_sdma_switch_fini(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;

	napi_disable(&sdma->rx_napi);
	netif_napi_del(&sdma->rx_napi);
	prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
					     prestera_rxtx_handle_event);
	prestera_sdma_tx_fini(sdma);
	prestera_sdma_rx_fini(sdma);
	dma_pool_destroy(sdma->desc_pool);
}

static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
{
	return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
}

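/* Busy-wait (1 us per iteration, bounded by PRESTERA_SDMA_WAIT_MUL *
 * max_burst iterations) for the TX engine to finish the current burst.
 */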
static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
				 struct prestera_tx_ring *tx_ring)
{
	int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;

	do {
		if (prestera_sdma_is_ready(sdma))
			return 0;

		udelay(1);
	} while (--tx_wait_num);

	return -EBUSY;
}

static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
{
	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
	schedule_work(&sdma->tx_work);
}

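/* Queue one skb on the single TX ring under tx_lock.  Up to max_burst
 * frames are queued back to back before the driver waits for the
 * engine to drain; completed buffers are reclaimed from tx_work.
 */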
static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
				      struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	struct net_device *dev = skb->dev;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma_buf *buf;
	int err;

	spin_lock(&sdma->tx_lock);

	tx_ring = &sdma->tx_ring;

	buf = &tx_ring->bufs[tx_ring->next_tx];
	if (buf->is_used) {
		schedule_work(&sdma->tx_work);
		goto drop_skb;
	}

	if (unlikely(eth_skb_pad(skb)))
		goto drop_skb_nofree;

	err = prestera_sdma_tx_buf_map(sdma, buf, skb);
	if (err)
		goto drop_skb;

	prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);

	dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
				   DMA_TO_DEVICE);

	if (tx_ring->burst) {
		tx_ring->burst--;
	} else {
		tx_ring->burst = tx_ring->max_burst;

		err = prestera_sdma_tx_wait(sdma, tx_ring);
		if (err)
			goto drop_skb_unmap;
	}

	tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
	prestera_sdma_tx_desc_xmit(buf->desc);
	buf->is_used = true;

	prestera_sdma_tx_start(sdma);

	goto tx_done;

drop_skb_unmap:
	prestera_sdma_tx_buf_unmap(sdma, buf);
drop_skb:
	dev_consume_skb_any(skb);
drop_skb_nofree:
	dev->stats.tx_dropped++;
tx_done:
	spin_unlock(&sdma->tx_lock);
	return NETDEV_TX_OK;
}

int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
	struct prestera_rxtx *rxtx;

	rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
	if (!rxtx)
		return -ENOMEM;

	sw->rxtx = rxtx;

	return prestera_sdma_switch_init(sw);
}

void prestera_rxtx_switch_fini(struct prestera_switch *sw)
{
	prestera_sdma_switch_fini(sw);
	kfree(sw->rxtx);
}

int prestera_rxtx_port_init(struct prestera_port *port)
{
	port->dev->needed_headroom = PRESTERA_DSA_HLEN;
	return 0;
}

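/* Build the outgoing DSA tag: make headroom, shift the MAC addresses
 * forward and write the tag between them and the original EtherType.
 */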
netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
{
	struct prestera_dsa dsa;

	dsa.hw_dev_num = port->dev_id;
	dsa.port_num = port->hw_id;

	if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
		return NET_XMIT_DROP;

	skb_push(skb, PRESTERA_DSA_HLEN);
	memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);

	if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
		return NET_XMIT_DROP;

	return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
}