0005 #include <linux/init.h>
0006 #include <linux/module.h>
0007 #include <linux/platform_device.h>
0008 #include <linux/etherdevice.h>
0009 #include <linux/of_net.h>
0010 #include <linux/interrupt.h>
0011 #include <linux/msi.h>
0012 #include <linux/kthread.h>
0013 #include <linux/iommu.h>
0014 #include <linux/fsl/mc.h>
0015 #include <linux/bpf.h>
0016 #include <linux/bpf_trace.h>
0017 #include <linux/fsl/ptp_qoriq.h>
0018 #include <linux/ptp_classify.h>
0019 #include <net/pkt_cls.h>
0020 #include <net/sock.h>
0021 #include <net/tso.h>
0022
0023 #include "dpaa2-eth.h"
0024
0025
0026
0027
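/* CREATE_TRACE_POINTS may only be defined in a single compilation unit;
 * the other dpaa2-eth*.c files include the trace header without it.
 */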
0028 #define CREATE_TRACE_POINTS
0029 #include "dpaa2-eth-trace.h"
0030
0031 MODULE_LICENSE("Dual BSD/GPL");
0032 MODULE_AUTHOR("Freescale Semiconductor, Inc");
0033 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
0034
0035 struct ptp_qoriq *dpaa2_ptp;
0036 EXPORT_SYMBOL(dpaa2_ptp);
0037
0038 static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
0039 {
0040 priv->features = 0;
0041
0042 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
0043 DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
0044 priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
0045 }
0046
0047 static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
0048 u32 offset, u8 udp)
0049 {
0050 struct dpni_single_step_cfg cfg;
0051
0052 cfg.en = 1;
0053 cfg.ch_update = udp;
0054 cfg.offset = offset;
0055 cfg.peer_delay = 0;
0056
0057 if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
0058 WARN_ONCE(1, "Failed to set single step register");
0059 }
0060
0061 static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
0062 u32 offset, u8 udp)
0063 {
0064 u32 val = 0;
0065
0066 val = DPAA2_PTP_SINGLE_STEP_ENABLE |
0067 DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
0068
0069 if (udp)
0070 val |= DPAA2_PTP_SINGLE_STEP_CH;
0071
0072 if (priv->onestep_reg_base)
0073 writel(val, priv->onestep_reg_base);
0074 }
0075
0076 static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
0077 {
0078 struct device *dev = priv->net_dev->dev.parent;
0079 struct dpni_single_step_cfg ptp_cfg;
0080
0081 priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
0082
0083 if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
0084 return;
0085
0086 if (dpni_get_single_step_cfg(priv->mc_io, 0,
0087 priv->mc_token, &ptp_cfg)) {
0088 dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
0089 return;
0090 }
0091
0092 if (!ptp_cfg.ptp_onestep_reg_base) {
0093 dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
0094 return;
0095 }
0096
0097 priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
0098 sizeof(u32));
0099 if (!priv->onestep_reg_base) {
0100 dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
0101 return;
0102 }
0103
0104 priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
0105 }
0106
0107 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
0108 dma_addr_t iova_addr)
0109 {
0110 phys_addr_t phys_addr;
0111
0112 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
0113
0114 return phys_to_virt(phys_addr);
0115 }
0116
0117 static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
0118 u32 fd_status,
0119 struct sk_buff *skb)
0120 {
0121 skb_checksum_none_assert(skb);
0122
0123
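/* Mark the checksum as verified only if RXCSUM is enabled on the interface
 * and hardware validated both the L3 and L4 checksums (FAS L3CV/L4CV bits).
 */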
0124 if (!(priv->net_dev->features & NETIF_F_RXCSUM))
0125 return;
0126
0127
0128 if (!((fd_status & DPAA2_FAS_L3CV) &&
0129 (fd_status & DPAA2_FAS_L4CV)))
0130 return;
0131
0132
0133 skb->ip_summed = CHECKSUM_UNNECESSARY;
0134 }
0135
0136
0137
0138
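/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */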
0139 static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
0140 const struct dpaa2_fd *fd,
0141 void *vaddr)
0142 {
0143 struct device *dev = priv->net_dev->dev.parent;
0144 dma_addr_t addr = dpaa2_fd_get_addr(fd);
0145 u8 fd_format = dpaa2_fd_get_format(fd);
0146 struct dpaa2_sg_entry *sgt;
0147 void *sg_vaddr;
0148 int i;
0149
0150
0151 if (fd_format == dpaa2_fd_single)
0152 goto free_buf;
0153 else if (fd_format != dpaa2_fd_sg)
0154
0155 return;
0156
0157
0158
0159
0160 sgt = vaddr + dpaa2_fd_get_offset(fd);
0161 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
0162 addr = dpaa2_sg_get_addr(&sgt[i]);
0163 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
0164 dma_unmap_page(dev, addr, priv->rx_buf_size,
0165 DMA_BIDIRECTIONAL);
0166
0167 free_pages((unsigned long)sg_vaddr, 0);
0168 if (dpaa2_sg_is_final(&sgt[i]))
0169 break;
0170 }
0171
0172 free_buf:
0173 free_pages((unsigned long)vaddr, 0);
0174 }
0175
0176
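/* Build a linear skb based on a single-buffer frame descriptor */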
0177 static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
0178 const struct dpaa2_fd *fd,
0179 void *fd_vaddr)
0180 {
0181 struct sk_buff *skb = NULL;
0182 u16 fd_offset = dpaa2_fd_get_offset(fd);
0183 u32 fd_length = dpaa2_fd_get_len(fd);
0184
0185 ch->buf_count--;
0186
0187 skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
0188 if (unlikely(!skb))
0189 return NULL;
0190
0191 skb_reserve(skb, fd_offset);
0192 skb_put(skb, fd_length);
0193
0194 return skb;
0195 }
0196
0197
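/* Build a non linear (fragmented) skb based on a S/G table */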
0198 static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
0199 struct dpaa2_eth_channel *ch,
0200 struct dpaa2_sg_entry *sgt)
0201 {
0202 struct sk_buff *skb = NULL;
0203 struct device *dev = priv->net_dev->dev.parent;
0204 void *sg_vaddr;
0205 dma_addr_t sg_addr;
0206 u16 sg_offset;
0207 u32 sg_length;
0208 struct page *page, *head_page;
0209 int page_offset;
0210 int i;
0211
0212 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
0213 struct dpaa2_sg_entry *sge = &sgt[i];
0214
0215
0216
0217
0218
0219
0220 sg_addr = dpaa2_sg_get_addr(sge);
0221 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
0222 dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
0223 DMA_BIDIRECTIONAL);
0224
0225 sg_length = dpaa2_sg_get_len(sge);
0226
0227 if (i == 0) {
0228
0229 skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
0230 if (unlikely(!skb)) {
0231
0232
0233
0234 free_pages((unsigned long)sg_vaddr, 0);
0235
0236
0237
0238
0239 while (!dpaa2_sg_is_final(&sgt[i]) &&
0240 i < DPAA2_ETH_MAX_SG_ENTRIES)
0241 i++;
0242 break;
0243 }
0244
0245 sg_offset = dpaa2_sg_get_offset(sge);
0246 skb_reserve(skb, sg_offset);
0247 skb_put(skb, sg_length);
0248 } else {
0249
0250 page = virt_to_page(sg_vaddr);
0251 head_page = virt_to_head_page(sg_vaddr);
0252
0253
0254
0255
0256
0257
0258 page_offset = ((unsigned long)sg_vaddr &
0259 (PAGE_SIZE - 1)) +
0260 (page_address(page) - page_address(head_page));
0261
0262 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
0263 sg_length, priv->rx_buf_size);
0264 }
0265
0266 if (dpaa2_sg_is_final(sge))
0267 break;
0268 }
0269
0270 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
0271
0272
0273 ch->buf_count -= i + 2;
0274
0275 return skb;
0276 }
0277
0278
0279
0280
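/* Free buffers acquired from the buffer pool or which were meant to be
 * released back into the pool
 */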
0281 static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
0282 int count)
0283 {
0284 struct device *dev = priv->net_dev->dev.parent;
0285 void *vaddr;
0286 int i;
0287
0288 for (i = 0; i < count; i++) {
0289 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
0290 dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
0291 DMA_BIDIRECTIONAL);
0292 free_pages((unsigned long)vaddr, 0);
0293 }
0294 }
0295
0296 static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
0297 struct dpaa2_eth_channel *ch,
0298 dma_addr_t addr)
0299 {
0300 int retries = 0;
0301 int err;
0302
0303 ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
0304 if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
0305 return;
0306
0307 while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
0308 ch->recycled_bufs,
0309 ch->recycled_bufs_cnt)) == -EBUSY) {
0310 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
0311 break;
0312 cpu_relax();
0313 }
0314
0315 if (err) {
0316 dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
0317 ch->buf_count -= ch->recycled_bufs_cnt;
0318 }
0319
0320 ch->recycled_bufs_cnt = 0;
0321 }
0322
0323 static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
0324 struct dpaa2_eth_fq *fq,
0325 struct dpaa2_eth_xdp_fds *xdp_fds)
0326 {
0327 int total_enqueued = 0, retries = 0, enqueued;
0328 struct dpaa2_eth_drv_stats *percpu_extras;
0329 int num_fds, err, max_retries;
0330 struct dpaa2_fd *fds;
0331
0332 percpu_extras = this_cpu_ptr(priv->percpu_extras);
0333
0334
0335 fds = xdp_fds->fds;
0336 num_fds = xdp_fds->num;
0337 max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
0338 while (total_enqueued < num_fds && retries < max_retries) {
0339 err = priv->enqueue(priv, fq, &fds[total_enqueued],
0340 0, num_fds - total_enqueued, &enqueued);
0341 if (err == -EBUSY) {
0342 percpu_extras->tx_portal_busy += ++retries;
0343 continue;
0344 }
0345 total_enqueued += enqueued;
0346 }
0347 xdp_fds->num = 0;
0348
0349 return total_enqueued;
0350 }
0351
0352 static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
0353 struct dpaa2_eth_channel *ch,
0354 struct dpaa2_eth_fq *fq)
0355 {
0356 struct rtnl_link_stats64 *percpu_stats;
0357 struct dpaa2_fd *fds;
0358 int enqueued, i;
0359
0360 percpu_stats = this_cpu_ptr(priv->percpu_stats);
0361
0362
0363 enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
0364
0365
0366 percpu_stats->tx_packets += enqueued;
0367 fds = fq->xdp_tx_fds.fds;
0368 for (i = 0; i < enqueued; i++) {
0369 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
0370 ch->stats.xdp_tx++;
0371 }
0372 for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
0373 dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
0374 percpu_stats->tx_errors++;
0375 ch->stats.xdp_tx_err++;
0376 }
0377 fq->xdp_tx_fds.num = 0;
0378 }
0379
0380 static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
0381 struct dpaa2_eth_channel *ch,
0382 struct dpaa2_fd *fd,
0383 void *buf_start, u16 queue_id)
0384 {
0385 struct dpaa2_faead *faead;
0386 struct dpaa2_fd *dest_fd;
0387 struct dpaa2_eth_fq *fq;
0388 u32 ctrl, frc;
0389
0390
0391 frc = dpaa2_fd_get_frc(fd);
0392 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
0393 dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
0394
0395
0396
0397
0398
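/* Instruct hardware to release the FD buffer directly into the buffer pool
 * once transmission is completed, instead of sending a Tx confirmation
 * frame to us (conf_fqid is left 0)
 */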
0399 ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
0400 faead = dpaa2_get_faead(buf_start, false);
0401 faead->ctrl = cpu_to_le32(ctrl);
0402 faead->conf_fqid = 0;
0403
0404 fq = &priv->fq[queue_id];
0405 dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
0406 memcpy(dest_fd, fd, sizeof(*dest_fd));
0407
0408 if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
0409 return;
0410
0411 dpaa2_eth_xdp_tx_flush(priv, ch, fq);
0412 }
0413
0414 static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
0415 struct dpaa2_eth_channel *ch,
0416 struct dpaa2_eth_fq *rx_fq,
0417 struct dpaa2_fd *fd, void *vaddr)
0418 {
0419 dma_addr_t addr = dpaa2_fd_get_addr(fd);
0420 struct bpf_prog *xdp_prog;
0421 struct xdp_buff xdp;
0422 u32 xdp_act = XDP_PASS;
0423 int err, offset;
0424
0425 xdp_prog = READ_ONCE(ch->xdp.prog);
0426 if (!xdp_prog)
0427 goto out;
0428
0429 offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
0430 xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
0431 xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
0432 dpaa2_fd_get_len(fd), false);
0433
0434 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
0435
0436
0437 dpaa2_fd_set_offset(fd, xdp.data - vaddr);
0438 dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
0439
0440 switch (xdp_act) {
0441 case XDP_PASS:
0442 break;
0443 case XDP_TX:
0444 dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
0445 break;
0446 default:
0447 bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
0448 fallthrough;
0449 case XDP_ABORTED:
0450 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
0451 fallthrough;
0452 case XDP_DROP:
0453 dpaa2_eth_recycle_buf(priv, ch, addr);
0454 ch->stats.xdp_drop++;
0455 break;
0456 case XDP_REDIRECT:
0457 dma_unmap_page(priv->net_dev->dev.parent, addr,
0458 priv->rx_buf_size, DMA_BIDIRECTIONAL);
0459 ch->buf_count--;
0460
0461
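/* Allow redirect use of the full headroom */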
0462 xdp.data_hard_start = vaddr;
0463 xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
0464
0465 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
0466 if (unlikely(err)) {
0467 addr = dma_map_page(priv->net_dev->dev.parent,
0468 virt_to_page(vaddr), 0,
0469 priv->rx_buf_size, DMA_BIDIRECTIONAL);
0470 if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
0471 free_pages((unsigned long)vaddr, 0);
0472 } else {
0473 ch->buf_count++;
0474 dpaa2_eth_recycle_buf(priv, ch, addr);
0475 }
0476 ch->stats.xdp_drop++;
0477 } else {
0478 ch->stats.xdp_redirect++;
0479 }
0480 break;
0481 }
0482
0483 ch->xdp.res |= xdp_act;
0484 out:
0485 return xdp_act;
0486 }
0487
0488 static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
0489 const struct dpaa2_fd *fd,
0490 void *fd_vaddr)
0491 {
0492 u16 fd_offset = dpaa2_fd_get_offset(fd);
0493 struct dpaa2_eth_priv *priv = ch->priv;
0494 u32 fd_length = dpaa2_fd_get_len(fd);
0495 struct sk_buff *skb = NULL;
0496 unsigned int skb_len;
0497
0498 if (fd_length > priv->rx_copybreak)
0499 return NULL;
0500
0501 skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
0502
0503 skb = napi_alloc_skb(&ch->napi, skb_len);
0504 if (!skb)
0505 return NULL;
0506
0507 skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
0508 skb_put(skb, fd_length);
0509
0510 memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
0511
0512 dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
0513
0514 return skb;
0515 }
0516
0517
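/* Main Rx frame processing routine */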
0518 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
0519 struct dpaa2_eth_channel *ch,
0520 const struct dpaa2_fd *fd,
0521 struct dpaa2_eth_fq *fq)
0522 {
0523 dma_addr_t addr = dpaa2_fd_get_addr(fd);
0524 u8 fd_format = dpaa2_fd_get_format(fd);
0525 void *vaddr;
0526 struct sk_buff *skb;
0527 struct rtnl_link_stats64 *percpu_stats;
0528 struct dpaa2_eth_drv_stats *percpu_extras;
0529 struct device *dev = priv->net_dev->dev.parent;
0530 struct dpaa2_fas *fas;
0531 void *buf_data;
0532 u32 status = 0;
0533 u32 xdp_act;
0534
0535
0536 trace_dpaa2_rx_fd(priv->net_dev, fd);
0537
0538 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
0539 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
0540 DMA_BIDIRECTIONAL);
0541
0542 fas = dpaa2_get_fas(vaddr, false);
0543 prefetch(fas);
0544 buf_data = vaddr + dpaa2_fd_get_offset(fd);
0545 prefetch(buf_data);
0546
0547 percpu_stats = this_cpu_ptr(priv->percpu_stats);
0548 percpu_extras = this_cpu_ptr(priv->percpu_extras);
0549
0550 if (fd_format == dpaa2_fd_single) {
0551 xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
0552 if (xdp_act != XDP_PASS) {
0553 percpu_stats->rx_packets++;
0554 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
0555 return;
0556 }
0557
0558 skb = dpaa2_eth_copybreak(ch, fd, vaddr);
0559 if (!skb) {
0560 dma_unmap_page(dev, addr, priv->rx_buf_size,
0561 DMA_BIDIRECTIONAL);
0562 skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
0563 }
0564 } else if (fd_format == dpaa2_fd_sg) {
0565 WARN_ON(priv->xdp_prog);
0566
0567 dma_unmap_page(dev, addr, priv->rx_buf_size,
0568 DMA_BIDIRECTIONAL);
0569 skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
0570 free_pages((unsigned long)vaddr, 0);
0571 percpu_extras->rx_sg_frames++;
0572 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
0573 } else {
0574
0575 goto err_frame_format;
0576 }
0577
0578 if (unlikely(!skb))
0579 goto err_build_skb;
0580
0581 prefetch(skb->data);
0582
0583
0584 if (priv->rx_tstamp) {
0585 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
0586 __le64 *ts = dpaa2_get_ts(vaddr, false);
0587 u64 ns;
0588
0589 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
0590
0591 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
0592 shhwtstamps->hwtstamp = ns_to_ktime(ns);
0593 }
0594
0595
0596 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
0597 status = le32_to_cpu(fas->status);
0598 dpaa2_eth_validate_rx_csum(priv, status, skb);
0599 }
0600
0601 skb->protocol = eth_type_trans(skb, priv->net_dev);
0602 skb_record_rx_queue(skb, fq->flowid);
0603
0604 percpu_stats->rx_packets++;
0605 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
0606 ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
0607
0608 list_add_tail(&skb->list, ch->rx_list);
0609
0610 return;
0611
0612 err_build_skb:
0613 dpaa2_eth_free_rx_fd(priv, fd, vaddr);
0614 err_frame_format:
0615 percpu_stats->rx_dropped++;
0616 }
0617
0618
0619
0620
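/* Processing of Rx frames received on the error FQ: report the frame to the
 * devlink trap infrastructure (if a matching trap is configured), then free
 * it and account the error.
 */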
0621 static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
0622 struct dpaa2_eth_channel *ch,
0623 const struct dpaa2_fd *fd,
0624 struct dpaa2_eth_fq *fq __always_unused)
0625 {
0626 struct device *dev = priv->net_dev->dev.parent;
0627 dma_addr_t addr = dpaa2_fd_get_addr(fd);
0628 u8 fd_format = dpaa2_fd_get_format(fd);
0629 struct rtnl_link_stats64 *percpu_stats;
0630 struct dpaa2_eth_trap_item *trap_item;
0631 struct dpaa2_fapr *fapr;
0632 struct sk_buff *skb;
0633 void *buf_data;
0634 void *vaddr;
0635
0636 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
0637 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
0638 DMA_BIDIRECTIONAL);
0639
0640 buf_data = vaddr + dpaa2_fd_get_offset(fd);
0641
0642 if (fd_format == dpaa2_fd_single) {
0643 dma_unmap_page(dev, addr, priv->rx_buf_size,
0644 DMA_BIDIRECTIONAL);
0645 skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
0646 } else if (fd_format == dpaa2_fd_sg) {
0647 dma_unmap_page(dev, addr, priv->rx_buf_size,
0648 DMA_BIDIRECTIONAL);
0649 skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
0650 free_pages((unsigned long)vaddr, 0);
0651 } else {
0652
0653 dpaa2_eth_free_rx_fd(priv, fd, vaddr);
0654 goto err_frame_format;
0655 }
0656
0657 fapr = dpaa2_get_fapr(vaddr, false);
0658 trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
0659 if (trap_item)
0660 devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
0661 &priv->devlink_port, NULL);
0662 consume_skb(skb);
0663
0664 err_frame_format:
0665 percpu_stats = this_cpu_ptr(priv->percpu_stats);
0666 percpu_stats->rx_errors++;
0667 ch->buf_count--;
0668 }
0675
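/* Consume all frames pull-dequeued into the store. This is the simplest way
 * to make sure we don't accidentally issue another volatile dequeue which
 * would overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */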
0676 static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
0677 struct dpaa2_eth_fq **src)
0678 {
0679 struct dpaa2_eth_priv *priv = ch->priv;
0680 struct dpaa2_eth_fq *fq = NULL;
0681 struct dpaa2_dq *dq;
0682 const struct dpaa2_fd *fd;
0683 int cleaned = 0, retries = 0;
0684 int is_last;
0685
0686 do {
0687 dq = dpaa2_io_store_next(ch->store, &is_last);
0688 if (unlikely(!dq)) {
0689
0690
0691
0692
0693
0694 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
0695 netdev_err_once(priv->net_dev,
0696 "Unable to read a valid dequeue response\n");
0697 return -ETIMEDOUT;
0698 }
0699 continue;
0700 }
0701
0702 fd = dpaa2_dq_fd(dq);
0703 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
0704
0705 fq->consume(priv, ch, fd, fq);
0706 cleaned++;
0707 retries = 0;
0708 } while (!is_last);
0709
0710 if (!cleaned)
0711 return 0;
0712
0713 fq->stats.frames += cleaned;
0714 ch->stats.frames += cleaned;
0715 ch->stats.frames_per_cdan += cleaned;
0716
0717
0718
0719
0720 if (src)
0721 *src = fq;
0722
0723 return cleaned;
0724 }
0725
0726 static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
0727 u8 *msgtype, u8 *twostep, u8 *udp,
0728 u16 *correction_offset,
0729 u16 *origintimestamp_offset)
0730 {
0731 unsigned int ptp_class;
0732 struct ptp_header *hdr;
0733 unsigned int type;
0734 u8 *base;
0735
0736 ptp_class = ptp_classify_raw(skb);
0737 if (ptp_class == PTP_CLASS_NONE)
0738 return -EINVAL;
0739
0740 hdr = ptp_parse_header(skb, ptp_class);
0741 if (!hdr)
0742 return -EINVAL;
0743
0744 *msgtype = ptp_get_msgtype(hdr, ptp_class);
0745 *twostep = hdr->flag_field[0] & 0x2;
0746
0747 type = ptp_class & PTP_CLASS_PMASK;
0748 if (type == PTP_CLASS_IPV4 ||
0749 type == PTP_CLASS_IPV6)
0750 *udp = 1;
0751 else
0752 *udp = 0;
0753
0754 base = skb_mac_header(skb);
0755 *correction_offset = (u8 *)&hdr->correction - base;
0756 *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
0757
0758 return 0;
0759 }
0760
0761
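/* Configure the egress frame annotation for timestamp update */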
0762 static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
0763 struct dpaa2_fd *fd,
0764 void *buf_start,
0765 struct sk_buff *skb)
0766 {
0767 struct ptp_tstamp origin_timestamp;
0768 u8 msgtype, twostep, udp;
0769 struct dpaa2_faead *faead;
0770 struct dpaa2_fas *fas;
0771 struct timespec64 ts;
0772 u16 offset1, offset2;
0773 u32 ctrl, frc;
0774 __le64 *ns;
0775 u8 *data;
0776
0777
0778 frc = dpaa2_fd_get_frc(fd);
0779 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
0780
0781
0782 ctrl = dpaa2_fd_get_ctrl(fd);
0783 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
0784
0785
0786
0787
0788 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
0789 faead = dpaa2_get_faead(buf_start, true);
0790 faead->ctrl = cpu_to_le32(ctrl);
0791
0792 if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
0793 if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
0794 &offset1, &offset2) ||
0795 msgtype != PTP_MSGTYPE_SYNC || twostep) {
0796 WARN_ONCE(1, "Bad packet for one-step timestamping\n");
0797 return;
0798 }
0799
0800
0801 frc = dpaa2_fd_get_frc(fd);
0802 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
0803
0804
0805 fas = dpaa2_get_fas(buf_start, true);
0806 fas->status = cpu_to_le32(DPAA2_FAS_PTP);
0807
0808 dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
0809 ns = dpaa2_get_ts(buf_start, true);
0810 *ns = cpu_to_le64(timespec64_to_ns(&ts) /
0811 DPAA2_PTP_CLK_PERIOD_NS);
0812
0813
0814 ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
0815 data = skb_mac_header(skb);
0816 *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
0817 *(__be32 *)(data + offset2 + 2) =
0818 htonl(origin_timestamp.sec_lsb);
0819 *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
0820
0821 if (priv->ptp_correction_off == offset1)
0822 return;
0823
0824 priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
0825 priv->ptp_correction_off = offset1;
0826
0827 }
0828 }
0829
0830 static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
0831 {
0832 struct dpaa2_eth_sgt_cache *sgt_cache;
0833 void *sgt_buf = NULL;
0834 int sgt_buf_size;
0835
0836 sgt_cache = this_cpu_ptr(priv->sgt_cache);
0837 sgt_buf_size = priv->tx_data_offset +
0838 DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
0839
0840 if (sgt_cache->count == 0)
0841 sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
0842 else
0843 sgt_buf = sgt_cache->buf[--sgt_cache->count];
0844 if (!sgt_buf)
0845 return NULL;
0846
0847 memset(sgt_buf, 0, sgt_buf_size);
0848
0849 return sgt_buf;
0850 }
0851
0852 static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
0853 {
0854 struct dpaa2_eth_sgt_cache *sgt_cache;
0855
0856 sgt_cache = this_cpu_ptr(priv->sgt_cache);
0857 if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
0858 skb_free_frag(sgt_buf);
0859 else
0860 sgt_cache->buf[sgt_cache->count++] = sgt_buf;
0861 }
0862
0863
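/* Create a frame descriptor based on a fragmented skb */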
0864 static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
0865 struct sk_buff *skb,
0866 struct dpaa2_fd *fd,
0867 void **swa_addr)
0868 {
0869 struct device *dev = priv->net_dev->dev.parent;
0870 void *sgt_buf = NULL;
0871 dma_addr_t addr;
0872 int nr_frags = skb_shinfo(skb)->nr_frags;
0873 struct dpaa2_sg_entry *sgt;
0874 int i, err;
0875 int sgt_buf_size;
0876 struct scatterlist *scl, *crt_scl;
0877 int num_sg;
0878 int num_dma_bufs;
0879 struct dpaa2_eth_swa *swa;
0880
0881
0882
0883
0884
0885
0886 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
0887 return -EINVAL;
0888
0889 scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
0890 if (unlikely(!scl))
0891 return -ENOMEM;
0892
0893 sg_init_table(scl, nr_frags + 1);
0894 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
0895 if (unlikely(num_sg < 0)) {
0896 err = -ENOMEM;
0897 goto dma_map_sg_failed;
0898 }
0899 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
0900 if (unlikely(!num_dma_bufs)) {
0901 err = -ENOMEM;
0902 goto dma_map_sg_failed;
0903 }
0904
0905
0906 sgt_buf_size = priv->tx_data_offset +
0907 sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
0908 sgt_buf = dpaa2_eth_sgt_get(priv);
0909 if (unlikely(!sgt_buf)) {
0910 err = -ENOMEM;
0911 goto sgt_buf_alloc_failed;
0912 }
0913
0914 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
0915
0916
0917
0918
0919
0920
0921
0922
0923 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
0924 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
0925 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
0926 }
0927 dpaa2_sg_set_final(&sgt[i - 1], true);
0928
0929
0930
0931
0932
0933
0934 *swa_addr = (void *)sgt_buf;
0935 swa = (struct dpaa2_eth_swa *)sgt_buf;
0936 swa->type = DPAA2_ETH_SWA_SG;
0937 swa->sg.skb = skb;
0938 swa->sg.scl = scl;
0939 swa->sg.num_sg = num_sg;
0940 swa->sg.sgt_size = sgt_buf_size;
0941
0942
0943 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
0944 if (unlikely(dma_mapping_error(dev, addr))) {
0945 err = -ENOMEM;
0946 goto dma_map_single_failed;
0947 }
0948 memset(fd, 0, sizeof(struct dpaa2_fd));
0949 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
0950 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
0951 dpaa2_fd_set_addr(fd, addr);
0952 dpaa2_fd_set_len(fd, skb->len);
0953 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
0954
0955 return 0;
0956
0957 dma_map_single_failed:
0958 dpaa2_eth_sgt_recycle(priv, sgt_buf);
0959 sgt_buf_alloc_failed:
0960 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
0961 dma_map_sg_failed:
0962 kfree(scl);
0963 return err;
0964 }
0965
0971
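/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */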
0972 static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
0973 struct sk_buff *skb,
0974 struct dpaa2_fd *fd,
0975 void **swa_addr)
0976 {
0977 struct device *dev = priv->net_dev->dev.parent;
0978 struct dpaa2_sg_entry *sgt;
0979 struct dpaa2_eth_swa *swa;
0980 dma_addr_t addr, sgt_addr;
0981 void *sgt_buf = NULL;
0982 int sgt_buf_size;
0983 int err;
0984
0985
0986 sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
0987 sgt_buf = dpaa2_eth_sgt_get(priv);
0988 if (unlikely(!sgt_buf))
0989 return -ENOMEM;
0990 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
0991
0992 addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
0993 if (unlikely(dma_mapping_error(dev, addr))) {
0994 err = -ENOMEM;
0995 goto data_map_failed;
0996 }
0997
0998
0999 dpaa2_sg_set_addr(sgt, addr);
1000 dpaa2_sg_set_len(sgt, skb->len);
1001 dpaa2_sg_set_final(sgt, true);
1002
1003
1004 *swa_addr = (void *)sgt_buf;
1005 swa = (struct dpaa2_eth_swa *)sgt_buf;
1006 swa->type = DPAA2_ETH_SWA_SINGLE;
1007 swa->single.skb = skb;
1008 swa->single.sgt_size = sgt_buf_size;
1009
1010
1011 sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1012 if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1013 err = -ENOMEM;
1014 goto sgt_map_failed;
1015 }
1016
1017 memset(fd, 0, sizeof(struct dpaa2_fd));
1018 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1019 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1020 dpaa2_fd_set_addr(fd, sgt_addr);
1021 dpaa2_fd_set_len(fd, skb->len);
1022 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1023
1024 return 0;
1025
1026 sgt_map_failed:
1027 dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
1028 data_map_failed:
1029 dpaa2_eth_sgt_recycle(priv, sgt_buf);
1030
1031 return err;
1032 }
1033
1034
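/* Create a frame descriptor based on a linear skb */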
1035 static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
1036 struct sk_buff *skb,
1037 struct dpaa2_fd *fd,
1038 void **swa_addr)
1039 {
1040 struct device *dev = priv->net_dev->dev.parent;
1041 u8 *buffer_start, *aligned_start;
1042 struct dpaa2_eth_swa *swa;
1043 dma_addr_t addr;
1044
1045 buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
1046
1047
1048
1049
1050 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1051 DPAA2_ETH_TX_BUF_ALIGN);
1052 if (aligned_start >= skb->head)
1053 buffer_start = aligned_start;
1054
1055
1056
1057
1058
1059 *swa_addr = (void *)buffer_start;
1060 swa = (struct dpaa2_eth_swa *)buffer_start;
1061 swa->type = DPAA2_ETH_SWA_SINGLE;
1062 swa->single.skb = skb;
1063
1064 addr = dma_map_single(dev, buffer_start,
1065 skb_tail_pointer(skb) - buffer_start,
1066 DMA_BIDIRECTIONAL);
1067 if (unlikely(dma_mapping_error(dev, addr)))
1068 return -ENOMEM;
1069
1070 memset(fd, 0, sizeof(struct dpaa2_fd));
1071 dpaa2_fd_set_addr(fd, addr);
1072 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
1073 dpaa2_fd_set_len(fd, skb->len);
1074 dpaa2_fd_set_format(fd, dpaa2_fd_single);
1075 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1076
1077 return 0;
1078 }
1079
1086
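/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */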
1087 static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
1088 struct dpaa2_eth_fq *fq,
1089 const struct dpaa2_fd *fd, bool in_napi)
1090 {
1091 struct device *dev = priv->net_dev->dev.parent;
1092 dma_addr_t fd_addr, sg_addr;
1093 struct sk_buff *skb = NULL;
1094 unsigned char *buffer_start;
1095 struct dpaa2_eth_swa *swa;
1096 u8 fd_format = dpaa2_fd_get_format(fd);
1097 u32 fd_len = dpaa2_fd_get_len(fd);
1098 struct dpaa2_sg_entry *sgt;
1099 int should_free_skb = 1;
1100 void *tso_hdr;
1101 int i;
1102
1103 fd_addr = dpaa2_fd_get_addr(fd);
1104 buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
1105 swa = (struct dpaa2_eth_swa *)buffer_start;
1106
1107 if (fd_format == dpaa2_fd_single) {
1108 if (swa->type == DPAA2_ETH_SWA_SINGLE) {
1109 skb = swa->single.skb;
1110
1111
1112
1113 dma_unmap_single(dev, fd_addr,
1114 skb_tail_pointer(skb) - buffer_start,
1115 DMA_BIDIRECTIONAL);
1116 } else {
1117 WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
1118 dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
1119 DMA_BIDIRECTIONAL);
1120 }
1121 } else if (fd_format == dpaa2_fd_sg) {
1122 if (swa->type == DPAA2_ETH_SWA_SG) {
1123 skb = swa->sg.skb;
1124
1125
1126 dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
1127 DMA_BIDIRECTIONAL);
1128 kfree(swa->sg.scl);
1129
1130
1131 dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
1132 DMA_BIDIRECTIONAL);
1133 } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
1134 skb = swa->tso.skb;
1135
1136 sgt = (struct dpaa2_sg_entry *)(buffer_start +
1137 priv->tx_data_offset);
1138
1139
1140 dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
1141 DMA_BIDIRECTIONAL);
1142
1143
1144 tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
1145 dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
1146 DMA_TO_DEVICE);
1147 kfree(tso_hdr);
1148
1149
1150 for (i = 1; i < swa->tso.num_sg; i++)
1151 dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1152 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1153
1154 if (!swa->tso.is_last_fd)
1155 should_free_skb = 0;
1156 } else {
1157 skb = swa->single.skb;
1158
1159
1160 dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
1161 DMA_BIDIRECTIONAL);
1162
1163 sgt = (struct dpaa2_sg_entry *)(buffer_start +
1164 priv->tx_data_offset);
1165 sg_addr = dpaa2_sg_get_addr(sgt);
1166 dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
1167 }
1168 } else {
1169 netdev_dbg(priv->net_dev, "Invalid FD format\n");
1170 return;
1171 }
1172
1173 if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
1174 fq->dq_frames++;
1175 fq->dq_bytes += fd_len;
1176 }
1177
1178 if (swa->type == DPAA2_ETH_SWA_XDP) {
1179 xdp_return_frame(swa->xdp.xdpf);
1180 return;
1181 }
1182
1183
1184 if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
1185 if (skb->cb[0] == TX_TSTAMP) {
1186 struct skb_shared_hwtstamps shhwtstamps;
1187 __le64 *ts = dpaa2_get_ts(buffer_start, true);
1188 u64 ns;
1189
1190 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1191
1192 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
1193 shhwtstamps.hwtstamp = ns_to_ktime(ns);
1194 skb_tstamp_tx(skb, &shhwtstamps);
1195 } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1196 mutex_unlock(&priv->onestep_tstamp_lock);
1197 }
1198 }
1199
1200
1201 if (fd_format != dpaa2_fd_single)
1202 dpaa2_eth_sgt_recycle(priv, buffer_start);
1203
1204
1205
1206
1207
1208 if (should_free_skb)
1209 napi_consume_skb(skb, in_napi);
1210 }
1211
1212 static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
1213 struct sk_buff *skb, struct dpaa2_fd *fd,
1214 int *num_fds, u32 *total_fds_len)
1215 {
1216 struct device *dev = priv->net_dev->dev.parent;
1217 int hdr_len, total_len, data_left, fd_len;
1218 int num_sge, err, i, sgt_buf_size;
1219 struct dpaa2_fd *fd_start = fd;
1220 struct dpaa2_sg_entry *sgt;
1221 struct dpaa2_eth_swa *swa;
1222 dma_addr_t sgt_addr, addr;
1223 dma_addr_t tso_hdr_dma;
1224 unsigned int index = 0;
1225 struct tso_t tso;
1226 char *tso_hdr;
1227 void *sgt_buf;
1228
1229
1230 hdr_len = tso_start(skb, &tso);
1231 *total_fds_len = 0;
1232
1233 total_len = skb->len - hdr_len;
1234 while (total_len > 0) {
1235
1236 sgt_buf = dpaa2_eth_sgt_get(priv);
1237 if (unlikely(!sgt_buf)) {
1238 netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
1239 err = -ENOMEM;
1240 goto err_sgt_get;
1241 }
1242 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1243
1244
1245 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1246 total_len -= data_left;
1247 fd_len = data_left + hdr_len;
1248
1249
1250 tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
1251 if (!tso_hdr) {
1252 err = -ENOMEM;
1253 goto err_alloc_tso_hdr;
1254 }
1255
1256 tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
1257 tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1258 if (dma_mapping_error(dev, tso_hdr_dma)) {
1259 netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
1260 err = -ENOMEM;
1261 goto err_map_tso_hdr;
1262 }
1263
1264
1265 dpaa2_sg_set_addr(sgt, tso_hdr_dma);
1266 dpaa2_sg_set_len(sgt, hdr_len);
1267 dpaa2_sg_set_final(sgt, data_left <= 0);
1268
1269
1270 num_sge = 1;
1271 while (data_left > 0) {
1272 int size;
1273
1274
1275 sgt++;
1276 size = min_t(int, tso.size, data_left);
1277
1278 addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
1279 if (dma_mapping_error(dev, addr)) {
1280 netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
1281 err = -ENOMEM;
1282 goto err_map_data;
1283 }
1284 dpaa2_sg_set_addr(sgt, addr);
1285 dpaa2_sg_set_len(sgt, size);
1286 dpaa2_sg_set_final(sgt, size == data_left);
1287
1288 num_sge++;
1289
1290
1291 data_left -= size;
1292 tso_build_data(skb, &tso, size);
1293 }
1294
1295
1296 sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
1297 swa = (struct dpaa2_eth_swa *)sgt_buf;
1298 swa->type = DPAA2_ETH_SWA_SW_TSO;
1299 swa->tso.skb = skb;
1300 swa->tso.num_sg = num_sge;
1301 swa->tso.sgt_size = sgt_buf_size;
1302 swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
1303
1304
1305 sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1306 if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1307 netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
1308 err = -ENOMEM;
1309 goto err_map_sgt;
1310 }
1311
1312
1313 memset(fd, 0, sizeof(struct dpaa2_fd));
1314 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1315 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1316 dpaa2_fd_set_addr(fd, sgt_addr);
1317 dpaa2_fd_set_len(fd, fd_len);
1318 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1319
1320 *total_fds_len += fd_len;
1321
1322 fd++;
1323 index++;
1324 }
1325
1326 *num_fds = index;
1327
1328 return 0;
1329
1330 err_map_sgt:
1331 err_map_data:
1332
1333 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1334 for (i = 1; i < num_sge; i++)
1335 dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1336 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1337
1338
1339 dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1340 err_map_tso_hdr:
1341 kfree(tso_hdr);
1342 err_alloc_tso_hdr:
1343 dpaa2_eth_sgt_recycle(priv, sgt_buf);
1344 err_sgt_get:
1345
1346 for (i = 0; i < index; i++)
1347 dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
1348
1349 return err;
1350 }
1351
1352 static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
1353 struct net_device *net_dev)
1354 {
1355 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1356 int total_enqueued = 0, retries = 0, enqueued;
1357 struct dpaa2_eth_drv_stats *percpu_extras;
1358 struct rtnl_link_stats64 *percpu_stats;
1359 unsigned int needed_headroom;
1360 int num_fds = 1, max_retries;
1361 struct dpaa2_eth_fq *fq;
1362 struct netdev_queue *nq;
1363 struct dpaa2_fd *fd;
1364 u16 queue_mapping;
1365 void *swa = NULL;
1366 u8 prio = 0;
1367 int err, i;
1368 u32 fd_len;
1369
1370 percpu_stats = this_cpu_ptr(priv->percpu_stats);
1371 percpu_extras = this_cpu_ptr(priv->percpu_extras);
1372 fd = (this_cpu_ptr(priv->fd))->array;
1373
1374 needed_headroom = dpaa2_eth_needed_headroom(skb);
1375
1376
1377
1378
1379 skb = skb_unshare(skb, GFP_ATOMIC);
1380 if (unlikely(!skb)) {
1381
1382 percpu_stats->tx_dropped++;
1383 return NETDEV_TX_OK;
1384 }
1385
1386
1387
1388 if (skb_is_gso(skb)) {
1389 err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
1390 percpu_extras->tx_sg_frames += num_fds;
1391 percpu_extras->tx_sg_bytes += fd_len;
1392 percpu_extras->tx_tso_frames += num_fds;
1393 percpu_extras->tx_tso_bytes += fd_len;
1394 } else if (skb_is_nonlinear(skb)) {
1395 err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
1396 percpu_extras->tx_sg_frames++;
1397 percpu_extras->tx_sg_bytes += skb->len;
1398 fd_len = dpaa2_fd_get_len(fd);
1399 } else if (skb_headroom(skb) < needed_headroom) {
1400 err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
1401 percpu_extras->tx_sg_frames++;
1402 percpu_extras->tx_sg_bytes += skb->len;
1403 percpu_extras->tx_converted_sg_frames++;
1404 percpu_extras->tx_converted_sg_bytes += skb->len;
1405 fd_len = dpaa2_fd_get_len(fd);
1406 } else {
1407 err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
1408 fd_len = dpaa2_fd_get_len(fd);
1409 }
1410
1411 if (unlikely(err)) {
1412 percpu_stats->tx_dropped++;
1413 goto err_build_fd;
1414 }
1415
1416 if (swa && skb->cb[0])
1417 dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
1418
1419
1420 for (i = 0; i < num_fds; i++)
1421 trace_dpaa2_tx_fd(net_dev, &fd[i]);
1422
1423
1424
1425
1426
1427 queue_mapping = skb_get_queue_mapping(skb);
1428
1429 if (net_dev->num_tc) {
1430 prio = netdev_txq_to_tc(net_dev, queue_mapping);
1431
1432
1433
1434 prio = net_dev->num_tc - prio - 1;
1435
1436
1437
1438 queue_mapping %= dpaa2_eth_queue_count(priv);
1439 }
1440 fq = &priv->fq[queue_mapping];
1441 nq = netdev_get_tx_queue(net_dev, queue_mapping);
1442 netdev_tx_sent_queue(nq, fd_len);
1443
1444
1445
1446
1447 max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
1448 while (total_enqueued < num_fds && retries < max_retries) {
1449 err = priv->enqueue(priv, fq, &fd[total_enqueued],
1450 prio, num_fds - total_enqueued, &enqueued);
1451 if (err == -EBUSY) {
1452 retries++;
1453 continue;
1454 }
1455
1456 total_enqueued += enqueued;
1457 }
1458 percpu_extras->tx_portal_busy += retries;
1459
1460 if (unlikely(err < 0)) {
1461 percpu_stats->tx_errors++;
1462
1463 dpaa2_eth_free_tx_fd(priv, fq, fd, false);
1464 netdev_tx_completed_queue(nq, 1, fd_len);
1465 } else {
1466 percpu_stats->tx_packets += total_enqueued;
1467 percpu_stats->tx_bytes += fd_len;
1468 }
1469
1470 return NETDEV_TX_OK;
1471
1472 err_build_fd:
1473 dev_kfree_skb(skb);
1474
1475 return NETDEV_TX_OK;
1476 }
1477
1478 static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
1479 {
1480 struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
1481 tx_onestep_tstamp);
1482 struct sk_buff *skb;
1483
1484 while (true) {
1485 skb = skb_dequeue(&priv->tx_skbs);
1486 if (!skb)
1487 return;
1488
1489
1490
1491
1492
1493
1494 mutex_lock(&priv->onestep_tstamp_lock);
1495 __dpaa2_eth_tx(skb, priv->net_dev);
1496 }
1497 }
1498
1499 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1500 {
1501 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1502 u8 msgtype, twostep, udp;
1503 u16 offset1, offset2;
1504
1505
1506 skb->cb[0] = 0;
1507
1508 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
1509 if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
1510 skb->cb[0] = TX_TSTAMP;
1511 else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
1512 skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
1513 }
1514
1515
1516 if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1517 if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
1518 &offset1, &offset2))
1519 if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
1520 skb_queue_tail(&priv->tx_skbs, skb);
1521 queue_work(priv->dpaa2_ptp_wq,
1522 &priv->tx_onestep_tstamp);
1523 return NETDEV_TX_OK;
1524 }
1525
1526
1527
1528 skb->cb[0] = TX_TSTAMP;
1529 }
1530
1531
1532 return __dpaa2_eth_tx(skb, net_dev);
1533 }
1534
1535
1536 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
1537 struct dpaa2_eth_channel *ch,
1538 const struct dpaa2_fd *fd,
1539 struct dpaa2_eth_fq *fq)
1540 {
1541 struct rtnl_link_stats64 *percpu_stats;
1542 struct dpaa2_eth_drv_stats *percpu_extras;
1543 u32 fd_len = dpaa2_fd_get_len(fd);
1544 u32 fd_errors;
1545
1546
1547 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
1548
1549 percpu_extras = this_cpu_ptr(priv->percpu_extras);
1550 percpu_extras->tx_conf_frames++;
1551 percpu_extras->tx_conf_bytes += fd_len;
1552 ch->stats.bytes_per_cdan += fd_len;
1553
1554
1555 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
1556 dpaa2_eth_free_tx_fd(priv, fq, fd, true);
1557
1558 if (likely(!fd_errors))
1559 return;
1560
1561 if (net_ratelimit())
1562 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
1563 fd_errors);
1564
1565 percpu_stats = this_cpu_ptr(priv->percpu_stats);
1566
1567 percpu_stats->tx_errors++;
1568 }
1569
1570 static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
1571 bool enable)
1572 {
1573 int err;
1574
1575 err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
1576
1577 if (err) {
1578 netdev_err(priv->net_dev,
1579 "dpni_enable_vlan_filter failed\n");
1580 return err;
1581 }
1582
1583 return 0;
1584 }
1585
1586 static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
1587 {
1588 int err;
1589
1590 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1591 DPNI_OFF_RX_L3_CSUM, enable);
1592 if (err) {
1593 netdev_err(priv->net_dev,
1594 "dpni_set_offload(RX_L3_CSUM) failed\n");
1595 return err;
1596 }
1597
1598 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1599 DPNI_OFF_RX_L4_CSUM, enable);
1600 if (err) {
1601 netdev_err(priv->net_dev,
1602 "dpni_set_offload(RX_L4_CSUM) failed\n");
1603 return err;
1604 }
1605
1606 return 0;
1607 }
1608
1609 static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
1610 {
1611 int err;
1612
1613 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1614 DPNI_OFF_TX_L3_CSUM, enable);
1615 if (err) {
1616 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
1617 return err;
1618 }
1619
1620 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1621 DPNI_OFF_TX_L4_CSUM, enable);
1622 if (err) {
1623 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
1624 return err;
1625 }
1626
1627 return 0;
1628 }
1629
1630
1631
1632
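/* Perform a single buffer release command to add buffers
 * to the specified buffer pool
 */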
1633 static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
1634 struct dpaa2_eth_channel *ch, u16 bpid)
1635 {
1636 struct device *dev = priv->net_dev->dev.parent;
1637 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1638 struct page *page;
1639 dma_addr_t addr;
1640 int retries = 0;
1641 int i, err;
1642
1643 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
1644
1645
1646
1647
1648
1649
1650
1651 page = dev_alloc_pages(0);
1652 if (!page)
1653 goto err_alloc;
1654
1655 addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
1656 DMA_BIDIRECTIONAL);
1657 if (unlikely(dma_mapping_error(dev, addr)))
1658 goto err_map;
1659
1660 buf_array[i] = addr;
1661
1662
1663 trace_dpaa2_eth_buf_seed(priv->net_dev, page_address(page),
1664 DPAA2_ETH_RX_BUF_RAW_SIZE,
1665 addr, priv->rx_buf_size,
1666 bpid);
1667 }
1668
1669 release_bufs:
1670
1671 while ((err = dpaa2_io_service_release(ch->dpio, bpid,
1672 buf_array, i)) == -EBUSY) {
1673 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
1674 break;
1675 cpu_relax();
1676 }
1677
1678
1679
1680
1681 if (err) {
1682 dpaa2_eth_free_bufs(priv, buf_array, i);
1683 return 0;
1684 }
1685
1686 return i;
1687
1688 err_map:
1689 __free_pages(page, 0);
1690 err_alloc:
1691
1692
1693
1694 if (i)
1695 goto release_bufs;
1696
1697 return 0;
1698 }
1699
1700 static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
1701 {
1702 int i, j;
1703 int new_count;
1704
1705 for (j = 0; j < priv->num_channels; j++) {
1706 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
1707 i += DPAA2_ETH_BUFS_PER_CMD) {
1708 new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
1709 priv->channel[j]->buf_count += new_count;
1710
1711 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
1712 return -ENOMEM;
1713 }
1714 }
1715 }
1716
1717 return 0;
1718 }
1719
1720
1721
1722
1723
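/* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */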
1724 static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
1725 {
1726 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1727 int retries = 0;
1728 int ret;
1729
1730 do {
1731 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
1732 buf_array, count);
1733 if (ret < 0) {
1734 if (ret == -EBUSY &&
1735 retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
1736 continue;
1737 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1738 return;
1739 }
1740 dpaa2_eth_free_bufs(priv, buf_array, ret);
1741 retries = 0;
1742 } while (ret);
1743 }
1744
1745 static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
1746 {
1747 int i;
1748
1749 dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
1750 dpaa2_eth_drain_bufs(priv, 1);
1751
1752 for (i = 0; i < priv->num_channels; i++)
1753 priv->channel[i]->buf_count = 0;
1754 }
1755
1756
1757
1758
1759 static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
1760 struct dpaa2_eth_channel *ch,
1761 u16 bpid)
1762 {
1763 int new_count;
1764
1765 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1766 return 0;
1767
1768 do {
1769 new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
1770 if (unlikely(!new_count)) {
1771
1772 break;
1773 }
1774 ch->buf_count += new_count;
1775 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1776
1777 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1778 return -ENOMEM;
1779
1780 return 0;
1781 }
1782
1783 static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
1784 {
1785 struct dpaa2_eth_sgt_cache *sgt_cache;
1786 u16 count;
1787 int k, i;
1788
1789 for_each_possible_cpu(k) {
1790 sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
1791 count = sgt_cache->count;
1792
1793 for (i = 0; i < count; i++)
1794 skb_free_frag(sgt_cache->buf[i]);
1795 sgt_cache->count = 0;
1796 }
1797 }
1798
1799 static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
1800 {
1801 int err;
1802 int dequeues = -1;
1803
1804
1805 do {
1806 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1807 ch->store);
1808 dequeues++;
1809 cpu_relax();
1810 } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
1811
1812 ch->stats.dequeue_portal_busy += dequeues;
1813 if (unlikely(err))
1814 ch->stats.pull_err++;
1815
1816 return err;
1817 }
1818
1824
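/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */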
1825 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1826 {
1827 struct dpaa2_eth_channel *ch;
1828 struct dpaa2_eth_priv *priv;
1829 int rx_cleaned = 0, txconf_cleaned = 0;
1830 struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1831 struct netdev_queue *nq;
1832 int store_cleaned, work_done;
1833 struct list_head rx_list;
1834 int retries = 0;
1835 u16 flowid;
1836 int err;
1837
1838 ch = container_of(napi, struct dpaa2_eth_channel, napi);
1839 ch->xdp.res = 0;
1840 priv = ch->priv;
1841
1842 INIT_LIST_HEAD(&rx_list);
1843 ch->rx_list = &rx_list;
1844
1845 do {
1846 err = dpaa2_eth_pull_channel(ch);
1847 if (unlikely(err))
1848 break;
1849
1850
1851 dpaa2_eth_refill_pool(priv, ch, priv->bpid);
1852
1853 store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
1854 if (store_cleaned <= 0)
1855 break;
1856 if (fq->type == DPAA2_RX_FQ) {
1857 rx_cleaned += store_cleaned;
1858 flowid = fq->flowid;
1859 } else {
1860 txconf_cleaned += store_cleaned;
1861
1862 txc_fq = fq;
1863 }
1864
1865
1866
1867
1868 if (rx_cleaned >= budget ||
1869 txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1870 work_done = budget;
1871 goto out;
1872 }
1873 } while (store_cleaned);
1874
1875
1876 dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
1877 ch->stats.bytes_per_cdan);
1878 ch->stats.frames_per_cdan = 0;
1879 ch->stats.bytes_per_cdan = 0;
1880
1881
1882
1883
1884 napi_complete_done(napi, rx_cleaned);
1885 do {
1886 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
1887 cpu_relax();
1888 } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
1889 WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
1890 ch->nctx.desired_cpu);
1891
1892 work_done = max(rx_cleaned, 1);
1893
1894 out:
1895 netif_receive_skb_list(ch->rx_list);
1896
1897 if (txc_fq && txc_fq->dq_frames) {
1898 nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
1899 netdev_tx_completed_queue(nq, txc_fq->dq_frames,
1900 txc_fq->dq_bytes);
1901 txc_fq->dq_frames = 0;
1902 txc_fq->dq_bytes = 0;
1903 }
1904
1905 if (ch->xdp.res & XDP_REDIRECT)
1906 xdp_do_flush_map();
1907 else if (rx_cleaned && ch->xdp.res & XDP_TX)
1908 dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
1909
1910 return work_done;
1911 }
1912
1913 static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
1914 {
1915 struct dpaa2_eth_channel *ch;
1916 int i;
1917
1918 for (i = 0; i < priv->num_channels; i++) {
1919 ch = priv->channel[i];
1920 napi_enable(&ch->napi);
1921 }
1922 }
1923
1924 static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
1925 {
1926 struct dpaa2_eth_channel *ch;
1927 int i;
1928
1929 for (i = 0; i < priv->num_channels; i++) {
1930 ch = priv->channel[i];
1931 napi_disable(&ch->napi);
1932 }
1933 }
1934
1935 void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
1936 bool tx_pause, bool pfc)
1937 {
1938 struct dpni_taildrop td = {0};
1939 struct dpaa2_eth_fq *fq;
1940 int i, err;
1941
1942
1943
1944
1945
1946
1947 td.enable = !tx_pause;
1948 if (priv->rx_fqtd_enabled == td.enable)
1949 goto set_cgtd;
1950
1951 td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
1952 td.units = DPNI_CONGESTION_UNIT_BYTES;
1953
1954 for (i = 0; i < priv->num_fqs; i++) {
1955 fq = &priv->fq[i];
1956 if (fq->type != DPAA2_RX_FQ)
1957 continue;
1958 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1959 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
1960 fq->tc, fq->flowid, &td);
1961 if (err) {
1962 netdev_err(priv->net_dev,
1963 "dpni_set_taildrop(FQ) failed\n");
1964 return;
1965 }
1966 }
1967
1968 priv->rx_fqtd_enabled = td.enable;
1969
1970 set_cgtd:
1971
1972
1973
1974
1975
1976
1977
1978 td.enable = !tx_pause || pfc;
1979 if (priv->rx_cgtd_enabled == td.enable)
1980 return;
1981
1982 td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
1983 td.units = DPNI_CONGESTION_UNIT_FRAMES;
1984 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
1985 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1986 DPNI_CP_GROUP, DPNI_QUEUE_RX,
1987 i, 0, &td);
1988 if (err) {
1989 netdev_err(priv->net_dev,
1990 "dpni_set_taildrop(CG) failed\n");
1991 return;
1992 }
1993 }
1994
1995 priv->rx_cgtd_enabled = td.enable;
1996 }
1997
1998 static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
1999 {
2000 struct dpni_link_state state = {0};
2001 bool tx_pause;
2002 int err;
2003
2004 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
2005 if (unlikely(err)) {
2006 netdev_err(priv->net_dev,
2007 "dpni_get_link_state() failed\n");
2008 return err;
2009 }
2010
2011
2012
2013
2014
2015 tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
2016 dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
2017
2018
2019
2020
2021 if (dpaa2_eth_is_type_phy(priv))
2022 goto out;
2023
2024
2025 if (priv->link_state.up == state.up)
2026 goto out;
2027
2028 if (state.up) {
2029 netif_carrier_on(priv->net_dev);
2030 netif_tx_start_all_queues(priv->net_dev);
2031 } else {
2032 netif_tx_stop_all_queues(priv->net_dev);
2033 netif_carrier_off(priv->net_dev);
2034 }
2035
2036 netdev_info(priv->net_dev, "Link Event: state %s\n",
2037 state.up ? "up" : "down");
2038
2039 out:
2040 priv->link_state = state;
2041
2042 return 0;
2043 }
2044
2045 static int dpaa2_eth_open(struct net_device *net_dev)
2046 {
2047 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2048 int err;
2049
2050 err = dpaa2_eth_seed_pool(priv, priv->bpid);
2051 if (err) {
2052
2053
2054
2055
2056 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
2057 priv->dpbp_dev->obj_desc.id, priv->bpid);
2058 }
2059
2060 if (!dpaa2_eth_is_type_phy(priv)) {
2061
2062
2063
2064
2065 netif_tx_stop_all_queues(net_dev);
2066
2067
2068
2069
2070
2071
2072 netif_carrier_off(net_dev);
2073 }
2074 dpaa2_eth_enable_ch_napi(priv);
2075
2076 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
2077 if (err < 0) {
2078 netdev_err(net_dev, "dpni_enable() failed\n");
2079 goto enable_err;
2080 }
2081
2082 if (dpaa2_eth_is_type_phy(priv)) {
2083 dpaa2_mac_start(priv->mac);
2084 phylink_start(priv->mac->phylink);
2085 }
2086
2087 return 0;
2088
2089 enable_err:
2090 dpaa2_eth_disable_ch_napi(priv);
2091 dpaa2_eth_drain_pool(priv);
2092 return err;
2093 }
2094
2095
2096 static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
2097 {
2098 struct dpaa2_eth_fq *fq;
2099 u32 fcnt = 0, bcnt = 0, total = 0;
2100 int i, err;
2101
2102 for (i = 0; i < priv->num_fqs; i++) {
2103 fq = &priv->fq[i];
2104 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
2105 if (err) {
2106 netdev_warn(priv->net_dev, "query_fq_count failed");
2107 break;
2108 }
2109 total += fcnt;
2110 }
2111
2112 return total;
2113 }
2114
2115 static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
2116 {
2117 int retries = 10;
2118 u32 pending;
2119
2120 do {
2121 pending = dpaa2_eth_ingress_fq_count(priv);
2122 if (pending)
2123 msleep(100);
2124 } while (pending && --retries);
2125 }
2126
2127 #define DPNI_TX_PENDING_VER_MAJOR 7
2128 #define DPNI_TX_PENDING_VER_MINOR 13
2129 static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
2130 {
2131 union dpni_statistics stats;
2132 int retries = 10;
2133 int err;
2134
2135 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
2136 DPNI_TX_PENDING_VER_MINOR) < 0)
2137 goto out;
2138
2139 do {
2140 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
2141 &stats);
2142 if (err)
2143 goto out;
2144 if (stats.page_6.tx_pending_frames == 0)
2145 return;
2146 } while (--retries);
2147
2148 out:
2149 msleep(500);
2150 }
2151
2152 static int dpaa2_eth_stop(struct net_device *net_dev)
2153 {
2154 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2155 int dpni_enabled = 0;
2156 int retries = 10;
2157
2158 if (dpaa2_eth_is_type_phy(priv)) {
2159 phylink_stop(priv->mac->phylink);
2160 dpaa2_mac_stop(priv->mac);
2161 } else {
2162 netif_tx_stop_all_queues(net_dev);
2163 netif_carrier_off(net_dev);
2164 }
2165
2175
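/* Before disabling the DPNI, wait for all in-flight Tx frames to reach the
 * hardware (egress FQs empty); dpni_disable() then stops Rx, after which we
 * drain the remaining ingress frames and the buffer pool.
 */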
2176 dpaa2_eth_wait_for_egress_fq_empty(priv);
2177
2178 do {
2179 dpni_disable(priv->mc_io, 0, priv->mc_token);
2180 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
2181 if (dpni_enabled)
2182
2183 msleep(100);
2184 } while (dpni_enabled && --retries);
2185 if (!retries) {
2186 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
2187
2188
2189
2190 }
2191
2192 dpaa2_eth_wait_for_ingress_fq_empty(priv);
2193 dpaa2_eth_disable_ch_napi(priv);
2194
2195
2196 dpaa2_eth_drain_pool(priv);
2197
2198
2199 dpaa2_eth_sgt_cache_drain(priv);
2200
2201 return 0;
2202 }
2203
2204 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2205 {
2206 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2207 struct device *dev = net_dev->dev.parent;
2208 int err;
2209
2210 err = eth_mac_addr(net_dev, addr);
2211 if (err < 0) {
2212 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2213 return err;
2214 }
2215
2216 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2217 net_dev->dev_addr);
2218 if (err) {
2219 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2220 return err;
2221 }
2222
2223 return 0;
2224 }
2225
2226
2227
2228
2229 static void dpaa2_eth_get_stats(struct net_device *net_dev,
2230 struct rtnl_link_stats64 *stats)
2231 {
2232 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2233 struct rtnl_link_stats64 *percpu_stats;
2234 u64 *cpustats;
2235 u64 *netstats = (u64 *)stats;
2236 int i, j;
2237 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2238
2239 for_each_possible_cpu(i) {
2240 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2241 cpustats = (u64 *)percpu_stats;
2242 for (j = 0; j < num; j++)
2243 netstats[j] += cpustats[j];
2244 }
2245 }
2246
2247
2248
2249
2250 static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
2251 struct dpaa2_eth_priv *priv)
2252 {
2253 struct netdev_hw_addr *ha;
2254 int err;
2255
2256 netdev_for_each_uc_addr(ha, net_dev) {
2257 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2258 ha->addr);
2259 if (err)
2260 netdev_warn(priv->net_dev,
2261 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2262 ha->addr, err);
2263 }
2264 }
2265
2266
2267
2268
2269 static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
2270 struct dpaa2_eth_priv *priv)
2271 {
2272 struct netdev_hw_addr *ha;
2273 int err;
2274
2275 netdev_for_each_mc_addr(ha, net_dev) {
2276 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2277 ha->addr);
2278 if (err)
2279 netdev_warn(priv->net_dev,
2280 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2281 ha->addr, err);
2282 }
2283 }
2284
2285 static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
2286 __be16 vlan_proto, u16 vid)
2287 {
2288 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2289 int err;
2290
2291 err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
2292 vid, 0, 0, 0);
2293
2294 if (err) {
2295 netdev_warn(priv->net_dev,
2296 "Could not add the vlan id %u\n",
2297 vid);
2298 return err;
2299 }
2300
2301 return 0;
2302 }
2303
2304 static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
2305 __be16 vlan_proto, u16 vid)
2306 {
2307 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2308 int err;
2309
2310 err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
2311
2312 if (err) {
2313 netdev_warn(priv->net_dev,
2314 "Could not remove the vlan id %u\n",
2315 vid);
2316 return err;
2317 }
2318
2319 return 0;
2320 }
2321
2322 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2323 {
2324 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2325 int uc_count = netdev_uc_count(net_dev);
2326 int mc_count = netdev_mc_count(net_dev);
2327 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2328 u32 options = priv->dpni_attrs.options;
2329 u16 mc_token = priv->mc_token;
2330 struct fsl_mc_io *mc_io = priv->mc_io;
2331 int err;
2332
2333
2334 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2335 netdev_info(net_dev,
2336 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2337 max_mac);
2338
2339
2340 if (uc_count > max_mac) {
2341 netdev_info(net_dev,
2342 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2343 uc_count, max_mac);
2344 goto force_promisc;
2345 }
2346 if (mc_count + uc_count > max_mac) {
2347 netdev_info(net_dev,
2348 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2349 uc_count + mc_count, max_mac);
2350 goto force_mc_promisc;
2351 }
2352
2353
2354 if (net_dev->flags & IFF_PROMISC)
2355 goto force_promisc;
2356 if (net_dev->flags & IFF_ALLMULTI) {
2357 /* Rebuild the unicast filtering table with unicast promiscuous
2358  * mode temporarily enabled, so no unicast frames are lost while
2359  * the entries are re-added. Multicast promiscuous mode stays on
2360  * for ALLMULTI.
2361  */
2364 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2365 if (err)
2366 netdev_warn(net_dev, "Can't set uc promisc\n");
2367
2368
2369 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2370 if (err)
2371 netdev_warn(net_dev, "Can't clear uc filters\n");
2372 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2373
2374
2375 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2376 if (err)
2377 netdev_warn(net_dev, "Can't clear uc promisc\n");
2378 goto force_mc_promisc;
2379 }
2380 /* Neither promiscuous nor allmulti: rebuild both MAC filtering
2381  * tables. Enable promiscuous modes first so no frames are dropped
2382  * while the tables are repopulated.
2383  */
2384 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2385 if (err)
2386 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2387 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2388 if (err)
2389 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2390
2391
2392 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2393 if (err)
2394 netdev_warn(net_dev, "Can't clear mac filters\n");
2395 dpaa2_eth_add_mc_hw_addr(net_dev, priv);
2396 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2397 /* With the filtering tables in place, it is now safe to turn
2398  * both promiscuous modes back off.
2399  */
2401 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2402 if (err)
2403 netdev_warn(net_dev, "Can't clear ucast promisc\n");
2404 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2405 if (err)
2406 netdev_warn(net_dev, "Can't clear mcast promisc\n");
2407
2408 return;
2409
2410 force_promisc:
2411 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2412 if (err)
2413 netdev_warn(net_dev, "Can't set ucast promisc\n");
2414 force_mc_promisc:
2415 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2416 if (err)
2417 netdev_warn(net_dev, "Can't set mcast promisc\n");
2418 }
2419
2420 static int dpaa2_eth_set_features(struct net_device *net_dev,
2421 netdev_features_t features)
2422 {
2423 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2424 netdev_features_t changed = features ^ net_dev->features;
2425 bool enable;
2426 int err;
2427
2428 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
2429 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2430 err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
2431 if (err)
2432 return err;
2433 }
2434
2435 if (changed & NETIF_F_RXCSUM) {
2436 enable = !!(features & NETIF_F_RXCSUM);
2437 err = dpaa2_eth_set_rx_csum(priv, enable);
2438 if (err)
2439 return err;
2440 }
2441
2442 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2443 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2444 err = dpaa2_eth_set_tx_csum(priv, enable);
2445 if (err)
2446 return err;
2447 }
2448
2449 return 0;
2450 }
2451
2452 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2453 {
2454 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2455 struct hwtstamp_config config;
2456
2457 if (!dpaa2_ptp)
2458 return -EINVAL;
2459
2460 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2461 return -EFAULT;
2462
2463 switch (config.tx_type) {
2464 case HWTSTAMP_TX_OFF:
2465 case HWTSTAMP_TX_ON:
2466 case HWTSTAMP_TX_ONESTEP_SYNC:
2467 priv->tx_tstamp_type = config.tx_type;
2468 break;
2469 default:
2470 return -ERANGE;
2471 }
2472
2473 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2474 priv->rx_tstamp = false;
2475 } else {
2476 priv->rx_tstamp = true;
2477
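/* Timestamps are extracted for all received frames, so report
 * HWTSTAMP_FILTER_ALL back regardless of the requested filter.
 */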
2478 config.rx_filter = HWTSTAMP_FILTER_ALL;
2479 }
2480
2481 if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
2482 dpaa2_ptp_onestep_reg_update_method(priv);
2483
2484 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2485 -EFAULT : 0;
2486 }
2487
2488 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2489 {
2490 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2491
2492 if (cmd == SIOCSHWTSTAMP)
2493 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2494
2495 if (dpaa2_eth_is_type_phy(priv))
2496 return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
2497
2498 return -EOPNOTSUPP;
2499 }
2500
2501 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
2502 {
2503 int mfl, linear_mfl;
2504
2505 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2506 linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
2507 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
2508
2509 if (mfl > linear_mfl) {
2510 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
2511 linear_mfl - VLAN_ETH_HLEN);
2512 return false;
2513 }
2514
2515 return true;
2516 }
2517
2518 static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
2519 {
2520 int mfl, err;
2521
2522 /* With an XDP program attached, limit the Rx frame length to the
2523  * MTU so received frames always fit in a single buffer (XDP does
2524  * not handle scatter-gather frames). Otherwise accept anything up
2525  * to the maximum frame length supported by the hardware.
2526  */
2527 if (has_xdp)
2528 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2529 else
2530 mfl = DPAA2_ETH_MFL;
2531
2532 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
2533 if (err) {
2534 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
2535 return err;
2536 }
2537
2538 return 0;
2539 }
2540
2541 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2542 {
2543 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2544 int err;
2545
2546 if (!priv->xdp_prog)
2547 goto out;
2548
2549 if (!xdp_mtu_valid(priv, new_mtu))
2550 return -EINVAL;
2551
2552 err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
2553 if (err)
2554 return err;
2555
2556 out:
2557 dev->mtu = new_mtu;
2558 return 0;
2559 }
2560
2561 static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
2562 {
2563 struct dpni_buffer_layout buf_layout = {0};
2564 int err;
2565
2566 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
2567 DPNI_QUEUE_RX, &buf_layout);
2568 if (err) {
2569 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2570 return err;
2571 }
2572
2573
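/* Reserve extra Rx buffer headroom when an XDP program is attached */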
2574 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2575 (has_xdp ? XDP_PACKET_HEADROOM : 0);
2576 buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2577 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2578 DPNI_QUEUE_RX, &buf_layout);
2579 if (err) {
2580 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
2581 return err;
2582 }
2583
2584 return 0;
2585 }
2586
2587 static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
2588 {
2589 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2590 struct dpaa2_eth_channel *ch;
2591 struct bpf_prog *old;
2592 bool up, need_update;
2593 int i, err;
2594
2595 if (prog && !xdp_mtu_valid(priv, dev->mtu))
2596 return -EINVAL;
2597
2598 if (prog)
2599 bpf_prog_add(prog, priv->num_channels);
2600
2601 up = netif_running(dev);
2602 need_update = (!!priv->xdp_prog != !!prog);
2603
2604 if (up)
2605 dpaa2_eth_stop(dev);
2606
2607 /* Attaching or detaching an XDP program changes both the maximum
2608  * Rx frame length and the Rx buffer headroom we reserve, so
2609  * reconfigure them while the interface is down.
2610  */
2612 if (need_update) {
2613 err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
2614 if (err)
2615 goto out_err;
2616 err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
2617 if (err)
2618 goto out_err;
2619 }
2620
2621 old = xchg(&priv->xdp_prog, prog);
2622 if (old)
2623 bpf_prog_put(old);
2624
2625 for (i = 0; i < priv->num_channels; i++) {
2626 ch = priv->channel[i];
2627 old = xchg(&ch->xdp.prog, prog);
2628 if (old)
2629 bpf_prog_put(old);
2630 }
2631
2632 if (up) {
2633 err = dpaa2_eth_open(dev);
2634 if (err)
2635 return err;
2636 }
2637
2638 return 0;
2639
2640 out_err:
2641 if (prog)
2642 bpf_prog_sub(prog, priv->num_channels);
2643 if (up)
2644 dpaa2_eth_open(dev);
2645
2646 return err;
2647 }
2648
2649 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2650 {
2651 switch (xdp->command) {
2652 case XDP_SETUP_PROG:
2653 return dpaa2_eth_setup_xdp(dev, xdp->prog);
2654 default:
2655 return -EINVAL;
2656 }
2657
2658 return 0;
2659 }
2660
2661 static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2662 struct xdp_frame *xdpf,
2663 struct dpaa2_fd *fd)
2664 {
2665 struct device *dev = net_dev->dev.parent;
2666 unsigned int needed_headroom;
2667 struct dpaa2_eth_swa *swa;
2668 void *buffer_start, *aligned_start;
2669 dma_addr_t addr;
2670
2671 /* The frame must provide enough headroom to store the Tx software
2672  * annotation; otherwise reject it.
2673  */
2674 needed_headroom = dpaa2_eth_needed_headroom(NULL);
2675 if (xdpf->headroom < needed_headroom)
2676 return -EINVAL;
2677
2678
2679 memset(fd, 0, sizeof(*fd));
2680
2681
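/* Align the buffer start address, but only if the alignment still fits
 * within the frame's available headroom.
 */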
2682 buffer_start = xdpf->data - needed_headroom;
2683 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2684 DPAA2_ETH_TX_BUF_ALIGN);
2685 if (aligned_start >= xdpf->data - xdpf->headroom)
2686 buffer_start = aligned_start;
2687
2688 swa = (struct dpaa2_eth_swa *)buffer_start;
2689
2690 swa->type = DPAA2_ETH_SWA_XDP;
2691 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2692 swa->xdp.xdpf = xdpf;
2693
2694 addr = dma_map_single(dev, buffer_start,
2695 swa->xdp.dma_size,
2696 DMA_BIDIRECTIONAL);
2697 if (unlikely(dma_mapping_error(dev, addr)))
2698 return -ENOMEM;
2699
2700 dpaa2_fd_set_addr(fd, addr);
2701 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2702 dpaa2_fd_set_len(fd, xdpf->len);
2703 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2704 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2705
2706 return 0;
2707 }
2708
2709 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2710 struct xdp_frame **frames, u32 flags)
2711 {
2712 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2713 struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
2714 struct rtnl_link_stats64 *percpu_stats;
2715 struct dpaa2_eth_fq *fq;
2716 struct dpaa2_fd *fds;
2717 int enqueued, i, err;
2718
2719 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2720 return -EINVAL;
2721
2722 if (!netif_running(net_dev))
2723 return -ENETDOWN;
2724
2725 fq = &priv->fq[smp_processor_id()];
2726 xdp_redirect_fds = &fq->xdp_redirect_fds;
2727 fds = xdp_redirect_fds->fds;
2728
2729 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2730
2731
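/* Build a frame descriptor for each xdp_frame; stop at the first failure */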
2732 for (i = 0; i < n; i++) {
2733 err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2734 if (err)
2735 break;
2736 }
2737 xdp_redirect_fds->num = i;
2738
2739
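/* Enqueue the prepared frame descriptors */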
2740 enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
2741
2742
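/* Only account for the frames that were actually enqueued */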
2743 percpu_stats->tx_packets += enqueued;
2744 for (i = 0; i < enqueued; i++)
2745 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
2746
2747 return enqueued;
2748 }
2749
2750 static int update_xps(struct dpaa2_eth_priv *priv)
2751 {
2752 struct net_device *net_dev = priv->net_dev;
2753 struct cpumask xps_mask;
2754 struct dpaa2_eth_fq *fq;
2755 int i, num_queues, netdev_queues;
2756 int err = 0;
2757
2758 num_queues = dpaa2_eth_queue_count(priv);
2759 netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
2760
2761 /* The first dpaa2_eth_queue_count() fq entries are the Tx confirmation
2762  * queues; use their affine CPU as the XPS hint for each netdev Tx queue.
2763  */
2764 for (i = 0; i < netdev_queues; i++) {
2765 fq = &priv->fq[i % num_queues];
2766
2767 cpumask_clear(&xps_mask);
2768 cpumask_set_cpu(fq->target_cpu, &xps_mask);
2769
2770 err = netif_set_xps_queue(net_dev, &xps_mask, i);
2771 if (err) {
2772 netdev_warn_once(net_dev, "Error setting XPS queue\n");
2773 break;
2774 }
2775 }
2776
2777 return err;
2778 }
2779
2780 static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2781 struct tc_mqprio_qopt *mqprio)
2782 {
2783 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2784 u8 num_tc, num_queues;
2785 int i;
2786
2787 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2788 num_queues = dpaa2_eth_queue_count(priv);
2789 num_tc = mqprio->num_tc;
2790
2791 if (num_tc == net_dev->num_tc)
2792 return 0;
2793
2794 if (num_tc > dpaa2_eth_tc_count(priv)) {
2795 netdev_err(net_dev, "Max %d traffic classes supported\n",
2796 dpaa2_eth_tc_count(priv));
2797 return -EOPNOTSUPP;
2798 }
2799
2800 if (!num_tc) {
2801 netdev_reset_tc(net_dev);
2802 netif_set_real_num_tx_queues(net_dev, num_queues);
2803 goto out;
2804 }
2805
2806 netdev_set_num_tc(net_dev, num_tc);
2807 netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2808
2809 for (i = 0; i < num_tc; i++)
2810 netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2811
2812 out:
2813 update_xps(priv);
2814
2815 return 0;
2816 }
2817
2818 #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
2819
2820 static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2821 {
2822 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2823 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2824 struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2825 struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2826 int err;
2827
2828 if (p->command == TC_TBF_STATS)
2829 return -EOPNOTSUPP;
2830
2831
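/* Shaping is only supported on the root qdisc, i.e. per port */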
2832 if (p->parent != TC_H_ROOT)
2833 return -EOPNOTSUPP;
2834
2835 if (p->command == TC_TBF_REPLACE) {
2836 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2837 netdev_err(net_dev, "burst size cannot be greater than %d\n",
2838 DPAA2_ETH_MAX_BURST_SIZE);
2839 return -EINVAL;
2840 }
2841
2842 tx_cr_shaper.max_burst_size = cfg->max_size;
2843
2844 /* The rate from tc is given in bytes/s, while the DPNI expects the
2845  * limit in Mbit/s */
2846 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2847 }
2848
2849 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2850 &tx_er_shaper, 0);
2851 if (err) {
2852 netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
2853 return err;
2854 }
2855
2856 return 0;
2857 }
2858
2859 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2860 enum tc_setup_type type, void *type_data)
2861 {
2862 switch (type) {
2863 case TC_SETUP_QDISC_MQPRIO:
2864 return dpaa2_eth_setup_mqprio(net_dev, type_data);
2865 case TC_SETUP_QDISC_TBF:
2866 return dpaa2_eth_setup_tbf(net_dev, type_data);
2867 default:
2868 return -EOPNOTSUPP;
2869 }
2870 }
2871
2872 static const struct net_device_ops dpaa2_eth_ops = {
2873 .ndo_open = dpaa2_eth_open,
2874 .ndo_start_xmit = dpaa2_eth_tx,
2875 .ndo_stop = dpaa2_eth_stop,
2876 .ndo_set_mac_address = dpaa2_eth_set_addr,
2877 .ndo_get_stats64 = dpaa2_eth_get_stats,
2878 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2879 .ndo_set_features = dpaa2_eth_set_features,
2880 .ndo_eth_ioctl = dpaa2_eth_ioctl,
2881 .ndo_change_mtu = dpaa2_eth_change_mtu,
2882 .ndo_bpf = dpaa2_eth_xdp,
2883 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
2884 .ndo_setup_tc = dpaa2_eth_setup_tc,
2885 .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
2886 .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
2887 };
2888
2889 static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2890 {
2891 struct dpaa2_eth_channel *ch;
2892
2893 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2894
2895
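/* Count the data availability notifications received on this channel */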
2896 ch->stats.cdan++;
2897
2898 napi_schedule(&ch->napi);
2899 }
2900
2901
2902 static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
2903 {
2904 struct fsl_mc_device *dpcon;
2905 struct device *dev = priv->net_dev->dev.parent;
2906 int err;
2907
2908 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2909 FSL_MC_POOL_DPCON, &dpcon);
2910 if (err) {
2911 if (err == -ENXIO)
2912 err = -EPROBE_DEFER;
2913 else
2914 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2915 return ERR_PTR(err);
2916 }
2917
2918 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2919 if (err) {
2920 dev_err(dev, "dpcon_open() failed\n");
2921 goto free;
2922 }
2923
2924 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2925 if (err) {
2926 dev_err(dev, "dpcon_reset() failed\n");
2927 goto close;
2928 }
2929
2930 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2931 if (err) {
2932 dev_err(dev, "dpcon_enable() failed\n");
2933 goto close;
2934 }
2935
2936 return dpcon;
2937
2938 close:
2939 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2940 free:
2941 fsl_mc_object_free(dpcon);
2942
2943 return ERR_PTR(err);
2944 }
2945
2946 static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
2947 struct fsl_mc_device *dpcon)
2948 {
2949 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2950 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2951 fsl_mc_object_free(dpcon);
2952 }
2953
2954 static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
2955 {
2956 struct dpaa2_eth_channel *channel;
2957 struct dpcon_attr attr;
2958 struct device *dev = priv->net_dev->dev.parent;
2959 int err;
2960
2961 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2962 if (!channel)
2963 return NULL;
2964
2965 channel->dpcon = dpaa2_eth_setup_dpcon(priv);
2966 if (IS_ERR(channel->dpcon)) {
2967 err = PTR_ERR(channel->dpcon);
2968 goto err_setup;
2969 }
2970
2971 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2972 &attr);
2973 if (err) {
2974 dev_err(dev, "dpcon_get_attributes() failed\n");
2975 goto err_get_attr;
2976 }
2977
2978 channel->dpcon_id = attr.id;
2979 channel->ch_id = attr.qbman_ch_id;
2980 channel->priv = priv;
2981
2982 return channel;
2983
2984 err_get_attr:
2985 dpaa2_eth_free_dpcon(priv, channel->dpcon);
2986 err_setup:
2987 kfree(channel);
2988 return ERR_PTR(err);
2989 }
2990
2991 static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
2992 struct dpaa2_eth_channel *channel)
2993 {
2994 dpaa2_eth_free_dpcon(priv, channel->dpcon);
2995 kfree(channel);
2996 }
2997
2998
2999
3000
3001 static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
3002 {
3003 struct dpaa2_io_notification_ctx *nctx;
3004 struct dpaa2_eth_channel *channel;
3005 struct dpcon_notification_cfg dpcon_notif_cfg;
3006 struct device *dev = priv->net_dev->dev.parent;
3007 int i, err;
3008
3009 /* Spread ingress processing over as many CPUs as possible: allocate
3010  * one DPCON-backed channel for each online CPU (but no more channels
3011  * than the DPNI has queues) and bind each channel to the DPIO
3012  * service affine to that CPU.
3013  */
3018 cpumask_clear(&priv->dpio_cpumask);
3019 for_each_online_cpu(i) {
3020
3021 channel = dpaa2_eth_alloc_channel(priv);
3022 if (IS_ERR_OR_NULL(channel)) {
3023 err = PTR_ERR_OR_ZERO(channel);
3024 if (err != -EPROBE_DEFER)
3025 dev_info(dev,
3026 "No affine channel for cpu %d and above\n", i);
3027 goto err_alloc_ch;
3028 }
3029
3030 priv->channel[priv->num_channels] = channel;
3031
3032 nctx = &channel->nctx;
3033 nctx->is_cdan = 1;
3034 nctx->cb = dpaa2_eth_cdan_cb;
3035 nctx->id = channel->ch_id;
3036 nctx->desired_cpu = i;
3037
3038
3039 channel->dpio = dpaa2_io_service_select(i);
3040 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
3041 if (err) {
3042 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
3043 /* If this CPU has no affine DPIO, the remaining CPUs most
3044  * likely don't have one either; the DPIO objects may simply
3045  * not have been probed yet, so request a probe retry.
3046  */
3048 err = -EPROBE_DEFER;
3049 goto err_service_reg;
3050 }
3051
3052
3053 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
3054 dpcon_notif_cfg.priority = 0;
3055 dpcon_notif_cfg.user_ctx = nctx->qman64;
3056 err = dpcon_set_notification(priv->mc_io, 0,
3057 channel->dpcon->mc_handle,
3058 &dpcon_notif_cfg);
3059 if (err) {
3060 dev_err(dev, "dpcon_set_notification() failed\n");
3061 goto err_set_cdan;
3062 }
3063
3064
3065
3066
3067 cpumask_set_cpu(i, &priv->dpio_cpumask);
3068 priv->num_channels++;
3069
3070
3071
3072
3073 if (priv->num_channels == priv->dpni_attrs.num_queues)
3074 break;
3075 }
3076
3077 return 0;
3078
3079 err_set_cdan:
3080 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3081 err_service_reg:
3082 dpaa2_eth_free_channel(priv, channel);
3083 err_alloc_ch:
3084 if (err == -EPROBE_DEFER) {
3085 for (i = 0; i < priv->num_channels; i++) {
3086 channel = priv->channel[i];
3087 nctx = &channel->nctx;
3088 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3089 dpaa2_eth_free_channel(priv, channel);
3090 }
3091 priv->num_channels = 0;
3092 return err;
3093 }
3094
3095 if (cpumask_empty(&priv->dpio_cpumask)) {
3096 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
3097 return -ENODEV;
3098 }
3099
3100 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
3101 cpumask_pr_args(&priv->dpio_cpumask));
3102
3103 return 0;
3104 }
3105
3106 static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
3107 {
3108 struct device *dev = priv->net_dev->dev.parent;
3109 struct dpaa2_eth_channel *ch;
3110 int i;
3111
3112
3113 for (i = 0; i < priv->num_channels; i++) {
3114 ch = priv->channel[i];
3115 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3116 dpaa2_eth_free_channel(priv, ch);
3117 }
3118 }
3119
3120 static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
3121 int cpu)
3122 {
3123 struct device *dev = priv->net_dev->dev.parent;
3124 int i;
3125
3126 for (i = 0; i < priv->num_channels; i++)
3127 if (priv->channel[i]->nctx.desired_cpu == cpu)
3128 return priv->channel[i];
3129
3130 /* We should never get here: warn and fall back to the first channel,
3131  * which is always valid at this point.
3132  */
3133 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3134
3135 return priv->channel[0];
3136 }
3137
3138 static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
3139 {
3140 struct device *dev = priv->net_dev->dev.parent;
3141 struct dpaa2_eth_fq *fq;
3142 int rx_cpu, txc_cpu;
3143 int i;
3144
3145 /* Pick a CPU (and hence a channel) for each frame queue, spreading
3146  * Rx and Tx confirmation queues round-robin over the CPUs that own
3147  * a channel.
3148  */
3149 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
3150
3151 for (i = 0; i < priv->num_fqs; i++) {
3152 fq = &priv->fq[i];
3153 switch (fq->type) {
3154 case DPAA2_RX_FQ:
3155 case DPAA2_RX_ERR_FQ:
3156 fq->target_cpu = rx_cpu;
3157 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3158 if (rx_cpu >= nr_cpu_ids)
3159 rx_cpu = cpumask_first(&priv->dpio_cpumask);
3160 break;
3161 case DPAA2_TX_CONF_FQ:
3162 fq->target_cpu = txc_cpu;
3163 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
3164 if (txc_cpu >= nr_cpu_ids)
3165 txc_cpu = cpumask_first(&priv->dpio_cpumask);
3166 break;
3167 default:
3168 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3169 }
3170 fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
3171 }
3172
3173 update_xps(priv);
3174 }
3175
3176 static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
3177 {
3178 int i, j;
3179
3180 /* One Tx confirmation FQ per Tx flow, then one Rx FQ per
3181  * {traffic class, flow} pair, and finally a single Rx error FQ.
3182  */
3184 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3185 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3186 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3187 priv->fq[priv->num_fqs++].flowid = (u16)i;
3188 }
3189
3190 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3191 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3192 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3193 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3194 priv->fq[priv->num_fqs].tc = (u8)j;
3195 priv->fq[priv->num_fqs++].flowid = (u16)i;
3196 }
3197 }
3198
3199
3200 priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3201 priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3202
3203
3204 dpaa2_eth_set_fq_affinity(priv);
3205 }
3206
3207
3208 static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
3209 {
3210 int err;
3211 struct fsl_mc_device *dpbp_dev;
3212 struct device *dev = priv->net_dev->dev.parent;
3213 struct dpbp_attr dpbp_attrs;
3214
3215 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3216 &dpbp_dev);
3217 if (err) {
3218 if (err == -ENXIO)
3219 err = -EPROBE_DEFER;
3220 else
3221 dev_err(dev, "DPBP device allocation failed\n");
3222 return err;
3223 }
3224
3225 priv->dpbp_dev = dpbp_dev;
3226
3227 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
3228 &dpbp_dev->mc_handle);
3229 if (err) {
3230 dev_err(dev, "dpbp_open() failed\n");
3231 goto err_open;
3232 }
3233
3234 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3235 if (err) {
3236 dev_err(dev, "dpbp_reset() failed\n");
3237 goto err_reset;
3238 }
3239
3240 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3241 if (err) {
3242 dev_err(dev, "dpbp_enable() failed\n");
3243 goto err_enable;
3244 }
3245
3246 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3247 &dpbp_attrs);
3248 if (err) {
3249 dev_err(dev, "dpbp_get_attributes() failed\n");
3250 goto err_get_attr;
3251 }
3252 priv->bpid = dpbp_attrs.bpid;
3253
3254 return 0;
3255
3256 err_get_attr:
3257 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3258 err_enable:
3259 err_reset:
3260 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3261 err_open:
3262 fsl_mc_object_free(dpbp_dev);
3263
3264 return err;
3265 }
3266
3267 static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
3268 {
3269 dpaa2_eth_drain_pool(priv);
3270 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3271 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
3272 fsl_mc_object_free(priv->dpbp_dev);
3273 }
3274
3275 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
3276 {
3277 struct device *dev = priv->net_dev->dev.parent;
3278 struct dpni_buffer_layout buf_layout = {0};
3279 u16 rx_buf_align;
3280 int err;
3281
3282 /* Revision 1 WRIOP hardware needs a larger Rx buffer alignment;
3283  * depending on the MC version it may report itself as either
3284  * 0.0.0 or 1.0.0, so check for both.
3285  */
3286 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3287 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3288 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3289 else
3290 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3291
3292 /* Round down the Rx buffer size so that it is a multiple of the
3293  * required alignment.
3294  */
3295 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
3296
3297
3298 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3299 buf_layout.pass_timestamp = true;
3300 buf_layout.pass_frame_status = true;
3301 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3302 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3303 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3304 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3305 DPNI_QUEUE_TX, &buf_layout);
3306 if (err) {
3307 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3308 return err;
3309 }
3310
3311
3312 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3313 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3314 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3315 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3316 if (err) {
3317 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3318 return err;
3319 }
3320
3321
3322
3323
3324 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3325 &priv->tx_data_offset);
3326 if (err) {
3327 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3328 return err;
3329 }
3330
3331 if ((priv->tx_data_offset % 64) != 0)
3332 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3333 priv->tx_data_offset);
3334
3335
3336 buf_layout.pass_frame_status = true;
3337 buf_layout.pass_parser_result = true;
3338 buf_layout.data_align = rx_buf_align;
3339 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
3340 buf_layout.private_data_size = 0;
3341 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3342 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3343 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3344 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3345 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3346 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3347 DPNI_QUEUE_RX, &buf_layout);
3348 if (err) {
3349 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3350 return err;
3351 }
3352
3353 return 0;
3354 }
3355
3356 #define DPNI_ENQUEUE_FQID_VER_MAJOR 7
3357 #define DPNI_ENQUEUE_FQID_VER_MINOR 9
3358
3359 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3360 struct dpaa2_eth_fq *fq,
3361 struct dpaa2_fd *fd, u8 prio,
3362 u32 num_frames __always_unused,
3363 int *frames_enqueued)
3364 {
3365 int err;
3366
3367 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3368 priv->tx_qdid, prio,
3369 fq->tx_qdbin, fd);
3370 if (!err && frames_enqueued)
3371 *frames_enqueued = 1;
3372 return err;
3373 }
3374
3375 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
3376 struct dpaa2_eth_fq *fq,
3377 struct dpaa2_fd *fd,
3378 u8 prio, u32 num_frames,
3379 int *frames_enqueued)
3380 {
3381 int err;
3382
3383 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3384 fq->tx_fqid[prio],
3385 fd, num_frames);
3386
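/* The QBMan service call returns the number of frames it managed to
 * enqueue; zero means none could be enqueued right now, so ask the
 * caller to retry.
 */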
3387 if (err == 0)
3388 return -EBUSY;
3389
3390 if (frames_enqueued)
3391 *frames_enqueued = err;
3392 return 0;
3393 }
3394
3395 static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
3396 {
3397 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3398 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3399 priv->enqueue = dpaa2_eth_enqueue_qd;
3400 else
3401 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3402 }
3403
3404 static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
3405 {
3406 struct device *dev = priv->net_dev->dev.parent;
3407 struct dpni_link_cfg link_cfg = {0};
3408 int err;
3409
3410
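/* Read back the current link options so only the pause bits are
 * changed below.
 */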
3411 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3412 if (err) {
3413 dev_err(dev, "dpni_get_link_cfg() failed\n");
3414 return err;
3415 }
3416
3417
3418 link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3419 link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3420 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3421 if (err) {
3422 dev_err(dev, "dpni_set_link_cfg() failed\n");
3423 return err;
3424 }
3425
3426 priv->link_state.options = link_cfg.options;
3427
3428 return 0;
3429 }
3430
3431 static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
3432 {
3433 struct dpni_queue_id qid = {0};
3434 struct dpaa2_eth_fq *fq;
3435 struct dpni_queue queue;
3436 int i, j, err;
3437
3438 /* FQID-based enqueue is only used on DPNI API 7.9 or newer; older
3439  * firmware keeps using QDID-based enqueue, so there is nothing to
3440  * update there. */
3441 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3442 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3443 return;
3444
3445 for (i = 0; i < priv->num_fqs; i++) {
3446 fq = &priv->fq[i];
3447 if (fq->type != DPAA2_TX_CONF_FQ)
3448 continue;
3449 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3450 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3451 DPNI_QUEUE_TX, j, fq->flowid,
3452 &queue, &qid);
3453 if (err)
3454 goto out_err;
3455
3456 fq->tx_fqid[j] = qid.fqid;
3457 if (fq->tx_fqid[j] == 0)
3458 goto out_err;
3459 }
3460 }
3461
3462 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3463
3464 return;
3465
3466 out_err:
3467 netdev_info(priv->net_dev,
3468 "Error reading Tx FQID, fallback to QDID-based enqueue\n");
3469 priv->enqueue = dpaa2_eth_enqueue_qd;
3470 }
3471
3472
3473 static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
3474 {
3475 struct device *dev = priv->net_dev->dev.parent;
3476 struct dpkg_profile_cfg kg_cfg = {0};
3477 struct dpni_qos_tbl_cfg qos_cfg = {0};
3478 struct dpni_rule_cfg key_params;
3479 void *dma_mem, *key, *mask;
3480 u8 key_size = 2;
3481 int i, pcp, err;
3482
3483 /* VLAN PCP based classification only makes sense when the DPNI has
3484  * more than one traffic class, and it needs flow steering key
3485  * masking so we can match on the PCP bits alone.
3486  */
3488 if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3489 dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3490 return -EOPNOTSUPP;
3491 }
3492
3493 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3494 if (!dma_mem)
3495 return -ENOMEM;
3496
3497 kg_cfg.num_extracts = 1;
3498 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3499 kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3500 kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3501 kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3502
3503 err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3504 if (err) {
3505 dev_err(dev, "dpni_prepare_key_cfg failed\n");
3506 goto out_free_tbl;
3507 }
3508
3509
3510 qos_cfg.default_tc = 0;
3511 qos_cfg.discard_on_miss = 0;
3512 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3513 DPAA2_CLASSIFIER_DMA_SIZE,
3514 DMA_TO_DEVICE);
3515 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3516 dev_err(dev, "QoS table DMA mapping failed\n");
3517 err = -ENOMEM;
3518 goto out_free_tbl;
3519 }
3520
3521 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3522 if (err) {
3523 dev_err(dev, "dpni_set_qos_table failed\n");
3524 goto out_unmap_tbl;
3525 }
3526
3527
3528 key = kzalloc(key_size * 2, GFP_KERNEL);
3529 if (!key) {
3530 err = -ENOMEM;
3531 goto out_unmap_tbl;
3532 }
3533 mask = key + key_size;
3534 *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
3535
3536 key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3537 DMA_TO_DEVICE);
3538 if (dma_mapping_error(dev, key_params.key_iova)) {
3539 dev_err(dev, "QoS table entry DMA mapping failed\n");
3540 err = -ENOMEM;
3541 goto out_free_key;
3542 }
3543
3544 key_params.mask_iova = key_params.key_iova + key_size;
3545 key_params.key_size = key_size;
3546
3547 /* Map the highest VLAN PCP values to the available traffic classes,
3548  * highest PCP to highest class; lower PCP values (and untagged
3549  * frames) fall into the default traffic class.
3550  */
3552 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3553 *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3554 dma_sync_single_for_device(dev, key_params.key_iova,
3555 key_size * 2, DMA_TO_DEVICE);
3556
3557 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3558 &key_params, i, i);
3559 if (err) {
3560 dev_err(dev, "dpni_add_qos_entry failed\n");
3561 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3562 goto out_unmap_key;
3563 }
3564 }
3565
3566 priv->vlan_cls_enabled = true;
3567
3568 /* The key and table memory are only needed while programming the QoS
3569  * table, so fall through to the cleanup labels on success as well.
3570  */
3571 out_unmap_key:
3572 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3573 out_free_key:
3574 kfree(key);
3575 out_unmap_tbl:
3576 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3577 DMA_TO_DEVICE);
3578 out_free_tbl:
3579 kfree(dma_mem);
3580
3581 return err;
3582 }
3583
3584
3585 static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
3586 {
3587 struct device *dev = &ls_dev->dev;
3588 struct dpaa2_eth_priv *priv;
3589 struct net_device *net_dev;
3590 int err;
3591
3592 net_dev = dev_get_drvdata(dev);
3593 priv = netdev_priv(net_dev);
3594
3595
3596 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3597 if (err) {
3598 dev_err(dev, "dpni_open() failed\n");
3599 return err;
3600 }
3601
3602
3603 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3604 &priv->dpni_ver_minor);
3605 if (err) {
3606 dev_err(dev, "dpni_get_api_version() failed\n");
3607 goto close;
3608 }
3609 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3610 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3611 priv->dpni_ver_major, priv->dpni_ver_minor,
3612 DPNI_VER_MAJOR, DPNI_VER_MINOR);
3613 err = -ENOTSUPP;
3614 goto close;
3615 }
3616
3617 ls_dev->mc_io = priv->mc_io;
3618 ls_dev->mc_handle = priv->mc_token;
3619
3620 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3621 if (err) {
3622 dev_err(dev, "dpni_reset() failed\n");
3623 goto close;
3624 }
3625
3626 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3627 &priv->dpni_attrs);
3628 if (err) {
3629 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3630 goto close;
3631 }
3632
3633 err = dpaa2_eth_set_buffer_layout(priv);
3634 if (err)
3635 goto close;
3636
3637 dpaa2_eth_set_enqueue_mode(priv);
3638
3639
3640 if (dpaa2_eth_has_pause_support(priv)) {
3641 err = dpaa2_eth_set_pause(priv);
3642 if (err)
3643 goto close;
3644 }
3645
3646 err = dpaa2_eth_set_vlan_qos(priv);
3647 if (err && err != -EOPNOTSUPP)
3648 goto close;
3649
3650 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3651 sizeof(struct dpaa2_eth_cls_rule),
3652 GFP_KERNEL);
3653 if (!priv->cls_rules) {
3654 err = -ENOMEM;
3655 goto close;
3656 }
3657
3658 return 0;
3659
3660 close:
3661 dpni_close(priv->mc_io, 0, priv->mc_token);
3662
3663 return err;
3664 }
3665
3666 static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
3667 {
3668 int err;
3669
3670 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3671 if (err)
3672 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3673 err);
3674
3675 dpni_close(priv->mc_io, 0, priv->mc_token);
3676 }
3677
3678 static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3679 struct dpaa2_eth_fq *fq)
3680 {
3681 struct device *dev = priv->net_dev->dev.parent;
3682 struct dpni_queue queue;
3683 struct dpni_queue_id qid;
3684 int err;
3685
3686 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3687 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
3688 if (err) {
3689 dev_err(dev, "dpni_get_queue(RX) failed\n");
3690 return err;
3691 }
3692
3693 fq->fqid = qid.fqid;
3694
3695 queue.destination.id = fq->channel->dpcon_id;
3696 queue.destination.type = DPNI_DEST_DPCON;
3697 queue.destination.priority = 1;
3698 queue.user_context = (u64)(uintptr_t)fq;
3699 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3700 DPNI_QUEUE_RX, fq->tc, fq->flowid,
3701 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3702 &queue);
3703 if (err) {
3704 dev_err(dev, "dpni_set_queue(RX) failed\n");
3705 return err;
3706 }
3707
3708 /* xdp_rxq info is shared between all traffic classes of a channel,
3709  * so only register it once, for traffic class 0 */
3710 if (fq->tc > 0)
3711 return 0;
3712
3713 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
3714 fq->flowid, 0);
3715 if (err) {
3716 dev_err(dev, "xdp_rxq_info_reg failed\n");
3717 return err;
3718 }
3719
3720 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3721 MEM_TYPE_PAGE_ORDER0, NULL);
3722 if (err) {
3723 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3724 return err;
3725 }
3726
3727 return 0;
3728 }
3729
3730 static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3731 struct dpaa2_eth_fq *fq)
3732 {
3733 struct device *dev = priv->net_dev->dev.parent;
3734 struct dpni_queue queue;
3735 struct dpni_queue_id qid;
3736 int i, err;
3737
3738 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3739 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3740 DPNI_QUEUE_TX, i, fq->flowid,
3741 &queue, &qid);
3742 if (err) {
3743 dev_err(dev, "dpni_get_queue(TX) failed\n");
3744 return err;
3745 }
3746 fq->tx_fqid[i] = qid.fqid;
3747 }
3748
3749
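/* The queuing destination bin is the same for all Tx queues of a flow,
 * so the qid from the last traffic class queried above is fine.
 */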
3750 fq->tx_qdbin = qid.qdbin;
3751
3752 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3753 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3754 &queue, &qid);
3755 if (err) {
3756 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3757 return err;
3758 }
3759
3760 fq->fqid = qid.fqid;
3761
3762 queue.destination.id = fq->channel->dpcon_id;
3763 queue.destination.type = DPNI_DEST_DPCON;
3764 queue.destination.priority = 0;
3765 queue.user_context = (u64)(uintptr_t)fq;
3766 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3767 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3768 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3769 &queue);
3770 if (err) {
3771 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3772 return err;
3773 }
3774
3775 return 0;
3776 }
3777
3778 static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3779 struct dpaa2_eth_fq *fq)
3780 {
3781 struct device *dev = priv->net_dev->dev.parent;
3782 struct dpni_queue q = { { 0 } };
3783 struct dpni_queue_id qid;
3784 u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3785 int err;
3786
3787 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3788 DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3789 if (err) {
3790 dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3791 return err;
3792 }
3793
3794 fq->fqid = qid.fqid;
3795
3796 q.destination.id = fq->channel->dpcon_id;
3797 q.destination.type = DPNI_DEST_DPCON;
3798 q.destination.priority = 1;
3799 q.user_context = (u64)(uintptr_t)fq;
3800 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3801 DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3802 if (err) {
3803 dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3804 return err;
3805 }
3806
3807 return 0;
3808 }
3809
3810
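/* Header fields supported in the Rx hash and flow classification keys */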
3811 static const struct dpaa2_eth_dist_fields dist_fields[] = {
3812 {
3813
3814 .rxnfc_field = RXH_L2DA,
3815 .cls_prot = NET_PROT_ETH,
3816 .cls_field = NH_FLD_ETH_DA,
3817 .id = DPAA2_ETH_DIST_ETHDST,
3818 .size = 6,
3819 }, {
3820 .cls_prot = NET_PROT_ETH,
3821 .cls_field = NH_FLD_ETH_SA,
3822 .id = DPAA2_ETH_DIST_ETHSRC,
3823 .size = 6,
3824 }, {
3825
3826
3827
3828
3829 .cls_prot = NET_PROT_ETH,
3830 .cls_field = NH_FLD_ETH_TYPE,
3831 .id = DPAA2_ETH_DIST_ETHTYPE,
3832 .size = 2,
3833 }, {
3834
3835 .rxnfc_field = RXH_VLAN,
3836 .cls_prot = NET_PROT_VLAN,
3837 .cls_field = NH_FLD_VLAN_TCI,
3838 .id = DPAA2_ETH_DIST_VLAN,
3839 .size = 2,
3840 }, {
3841
3842 .rxnfc_field = RXH_IP_SRC,
3843 .cls_prot = NET_PROT_IP,
3844 .cls_field = NH_FLD_IP_SRC,
3845 .id = DPAA2_ETH_DIST_IPSRC,
3846 .size = 4,
3847 }, {
3848 .rxnfc_field = RXH_IP_DST,
3849 .cls_prot = NET_PROT_IP,
3850 .cls_field = NH_FLD_IP_DST,
3851 .id = DPAA2_ETH_DIST_IPDST,
3852 .size = 4,
3853 }, {
3854 .rxnfc_field = RXH_L3_PROTO,
3855 .cls_prot = NET_PROT_IP,
3856 .cls_field = NH_FLD_IP_PROTO,
3857 .id = DPAA2_ETH_DIST_IPPROTO,
3858 .size = 1,
3859 }, {
3860
3861
3862
3863 .rxnfc_field = RXH_L4_B_0_1,
3864 .cls_prot = NET_PROT_UDP,
3865 .cls_field = NH_FLD_UDP_PORT_SRC,
3866 .id = DPAA2_ETH_DIST_L4SRC,
3867 .size = 2,
3868 }, {
3869 .rxnfc_field = RXH_L4_B_2_3,
3870 .cls_prot = NET_PROT_UDP,
3871 .cls_field = NH_FLD_UDP_PORT_DST,
3872 .id = DPAA2_ETH_DIST_L4DST,
3873 .size = 2,
3874 },
3875 };
3876
3877
3878 static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3879 {
3880 struct device *dev = priv->net_dev->dev.parent;
3881 struct dpni_rx_tc_dist_cfg dist_cfg;
3882 int i, err = 0;
3883
3884 memset(&dist_cfg, 0, sizeof(dist_cfg));
3885
3886 dist_cfg.key_cfg_iova = key;
3887 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3888 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3889
3890 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3891 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
3892 i, &dist_cfg);
3893 if (err) {
3894 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
3895 break;
3896 }
3897 }
3898
3899 return err;
3900 }
3901
3902
3903 static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3904 {
3905 struct device *dev = priv->net_dev->dev.parent;
3906 struct dpni_rx_dist_cfg dist_cfg;
3907 int i, err = 0;
3908
3909 memset(&dist_cfg, 0, sizeof(dist_cfg));
3910
3911 dist_cfg.key_cfg_iova = key;
3912 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3913 dist_cfg.enable = 1;
3914
3915 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3916 dist_cfg.tc = i;
3917 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
3918 &dist_cfg);
3919 if (err) {
3920 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
3921 break;
3922 }
3923
3924 /* If the distribution key is shared between all traffic classes,
3925  * it only needs to be configured once.
3926  */
3927 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3928 break;
3929 }
3930
3931 return err;
3932 }
3933
3934
3935 static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3936 {
3937 struct device *dev = priv->net_dev->dev.parent;
3938 struct dpni_rx_dist_cfg dist_cfg;
3939 int i, err = 0;
3940
3941 memset(&dist_cfg, 0, sizeof(dist_cfg));
3942
3943 dist_cfg.key_cfg_iova = key;
3944 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3945 dist_cfg.enable = 1;
3946
3947 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3948 dist_cfg.tc = i;
3949 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
3950 &dist_cfg);
3951 if (err) {
3952 dev_err(dev, "dpni_set_rx_fs_dist failed\n");
3953 break;
3954 }
3955
3956
3957
3958
3959 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3960 break;
3961 }
3962
3963 return err;
3964 }
3965
3966
3967 int dpaa2_eth_cls_key_size(u64 fields)
3968 {
3969 int i, size = 0;
3970
3971 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3972 if (!(fields & dist_fields[i].id))
3973 continue;
3974 size += dist_fields[i].size;
3975 }
3976
3977 return size;
3978 }
3979
3980
3981 int dpaa2_eth_cls_fld_off(int prot, int field)
3982 {
3983 int i, off = 0;
3984
3985 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3986 if (dist_fields[i].cls_prot == prot &&
3987 dist_fields[i].cls_field == field)
3988 return off;
3989 off += dist_fields[i].size;
3990 }
3991
3992 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
3993 return 0;
3994 }
3995
3996 /* Compact a classification key buffer by dropping the fields that are
3997  * not part of @fields, keeping the remaining ones contiguous.
3998  */
3999 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
4000 {
4001 int off = 0, new_off = 0;
4002 int i, size;
4003
4004 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4005 size = dist_fields[i].size;
4006 if (dist_fields[i].id & fields) {
4007 memcpy(key_mem + new_off, key_mem + off, size);
4008 new_off += size;
4009 }
4010 off += size;
4011 }
4012 }
4013
4014
4015
4016
4017 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
4018 enum dpaa2_eth_rx_dist type, u64 flags)
4019 {
4020 struct device *dev = net_dev->dev.parent;
4021 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4022 struct dpkg_profile_cfg cls_cfg;
4023 u32 rx_hash_fields = 0;
4024 dma_addr_t key_iova;
4025 u8 *dma_mem;
4026 int i;
4027 int err = 0;
4028
4029 memset(&cls_cfg, 0, sizeof(cls_cfg));
4030
4031 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4032 struct dpkg_extract *key =
4033 &cls_cfg.extracts[cls_cfg.num_extracts];
4034
4035
4036
4037
4038 if (!(flags & dist_fields[i].id))
4039 continue;
4040 if (type == DPAA2_ETH_RX_DIST_HASH)
4041 rx_hash_fields |= dist_fields[i].rxnfc_field;
4042
4043 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
4044 dev_err(dev, "error adding key extraction rule, too many rules?\n");
4045 return -E2BIG;
4046 }
4047
4048 key->type = DPKG_EXTRACT_FROM_HDR;
4049 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
4050 key->extract.from_hdr.type = DPKG_FULL_FIELD;
4051 key->extract.from_hdr.field = dist_fields[i].cls_field;
4052 cls_cfg.num_extracts++;
4053 }
4054
4055 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4056 if (!dma_mem)
4057 return -ENOMEM;
4058
4059 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
4060 if (err) {
4061 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4062 goto free_key;
4063 }
4064
4065
4066 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
4067 DMA_TO_DEVICE);
4068 if (dma_mapping_error(dev, key_iova)) {
4069 dev_err(dev, "DMA mapping failed\n");
4070 err = -ENOMEM;
4071 goto free_key;
4072 }
4073
4074 if (type == DPAA2_ETH_RX_DIST_HASH) {
4075 if (dpaa2_eth_has_legacy_dist(priv))
4076 err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
4077 else
4078 err = dpaa2_eth_config_hash_key(priv, key_iova);
4079 } else {
4080 err = dpaa2_eth_config_cls_key(priv, key_iova);
4081 }
4082
4083 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
4084 DMA_TO_DEVICE);
4085 if (!err && type == DPAA2_ETH_RX_DIST_HASH)
4086 priv->rx_hash_fields = rx_hash_fields;
4087
4088 free_key:
4089 kfree(dma_mem);
4090 return err;
4091 }
4092
4093 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4094 {
4095 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4096 u64 key = 0;
4097 int i;
4098
4099 if (!dpaa2_eth_hash_enabled(priv))
4100 return -EOPNOTSUPP;
4101
4102 for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
4103 if (dist_fields[i].rxnfc_field & flags)
4104 key |= dist_fields[i].id;
4105
4106 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
4107 }
4108
4109 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
4110 {
4111 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
4112 }
4113
4114 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
4115 {
4116 struct device *dev = priv->net_dev->dev.parent;
4117 int err;
4118
4119
4120 if (dpaa2_eth_has_legacy_dist(priv)) {
4121 dev_dbg(dev, "Rx cls not supported by current MC version\n");
4122 return -EOPNOTSUPP;
4123 }
4124
4125 if (!dpaa2_eth_fs_enabled(priv)) {
4126 dev_dbg(dev, "Rx cls disabled in DPNI options\n");
4127 return -EOPNOTSUPP;
4128 }
4129
4130 if (!dpaa2_eth_hash_enabled(priv)) {
4131 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
4132 return -EOPNOTSUPP;
4133 }
4134
4135 /* If key masking is not supported, don't program a default all-fields
4136  * classification key here; the key will be configured based on the
4137  * rules added later by the user.
4138  */
4139 if (!dpaa2_eth_fs_mask_enabled(priv))
4140 goto out;
4141
4142 err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
4143 if (err)
4144 return err;
4145
4146 out:
4147 priv->rx_cls_enabled = 1;
4148
4149 return 0;
4150 }
4151
4152 /* Bind the DPNI to its needed objects and resources: buffer pool,
4153  * DPIO channels, frame queues.
4154  */
4155 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
4156 {
4157 struct net_device *net_dev = priv->net_dev;
4158 struct device *dev = net_dev->dev.parent;
4159 struct dpni_pools_cfg pools_params;
4160 struct dpni_error_cfg err_cfg;
4161 int err = 0;
4162 int i;
4163
4164 pools_params.num_dpbp = 1;
4165 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
4166 pools_params.pools[0].backup_pool = 0;
4167 pools_params.pools[0].buffer_size = priv->rx_buf_size;
4168 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
4169 if (err) {
4170 dev_err(dev, "dpni_set_pools() failed\n");
4171 return err;
4172 }
4173 /* Have the hardware distribute Rx traffic over the queues using the
4174  * default hash key.
4175  */
4177 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
4178 if (err && err != -EOPNOTSUPP)
4179 dev_err(dev, "Failed to configure hashing\n");
4180
4181 /* Configure the default Rx flow classification key, covering all
4182  * supported header fields.
4183  */
4184 err = dpaa2_eth_set_default_cls(priv);
4185 if (err && err != -EOPNOTSUPP)
4186 dev_err(dev, "Failed to configure Rx classification key\n");
4187
4188
4189 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
4190 err_cfg.set_frame_annotation = 1;
4191 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
4192 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
4193 &err_cfg);
4194 if (err) {
4195 dev_err(dev, "dpni_set_errors_behavior failed\n");
4196 return err;
4197 }
4198
4199
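/* Set up each frame queue: Rx, Tx confirmation and Rx error */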
4200 for (i = 0; i < priv->num_fqs; i++) {
4201 switch (priv->fq[i].type) {
4202 case DPAA2_RX_FQ:
4203 err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
4204 break;
4205 case DPAA2_TX_CONF_FQ:
4206 err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
4207 break;
4208 case DPAA2_RX_ERR_FQ:
4209 err = setup_rx_err_flow(priv, &priv->fq[i]);
4210 break;
4211 default:
4212 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
4213 return -EINVAL;
4214 }
4215 if (err)
4216 return err;
4217 }
4218
4219 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
4220 DPNI_QUEUE_TX, &priv->tx_qdid);
4221 if (err) {
4222 dev_err(dev, "dpni_get_qdid() failed\n");
4223 return err;
4224 }
4225
4226 return 0;
4227 }
4228
4229
4230 static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
4231 {
4232 struct net_device *net_dev = priv->net_dev;
4233 struct device *dev = net_dev->dev.parent;
4234 int i;
4235
4236 for (i = 0; i < priv->num_channels; i++) {
4237 priv->channel[i]->store =
4238 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
4239 if (!priv->channel[i]->store) {
4240 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
4241 goto err_ring;
4242 }
4243 }
4244
4245 return 0;
4246
4247 err_ring:
4248 for (i = 0; i < priv->num_channels; i++) {
4249 if (!priv->channel[i]->store)
4250 break;
4251 dpaa2_io_store_destroy(priv->channel[i]->store);
4252 }
4253
4254 return -ENOMEM;
4255 }
4256
4257 static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
4258 {
4259 int i;
4260
4261 for (i = 0; i < priv->num_channels; i++)
4262 dpaa2_io_store_destroy(priv->channel[i]->store);
4263 }
4264
4265 static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
4266 {
4267 struct net_device *net_dev = priv->net_dev;
4268 struct device *dev = net_dev->dev.parent;
4269 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
4270 int err;
4271
4272
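/* Get the hardware (port) MAC address known by the firmware, if any */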
4273 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
4274 if (err) {
4275 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
4276 return err;
4277 }
4278
4279
4280 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4281 dpni_mac_addr);
4282 if (err) {
4283 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
4284 return err;
4285 }
4286
4287
4288 if (!is_zero_ether_addr(mac_addr)) {
4289
4290 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
4291 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
4292 priv->mc_token,
4293 mac_addr);
4294 if (err) {
4295 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4296 return err;
4297 }
4298 }
4299 eth_hw_addr_set(net_dev, mac_addr);
4300 } else if (is_zero_ether_addr(dpni_mac_addr)) {
4301
4302
4303
4304 eth_hw_addr_random(net_dev);
4305 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
4306
4307 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4308 net_dev->dev_addr);
4309 if (err) {
4310 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4311 return err;
4312 }
4313
4314 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(): for all
4315  * practical purposes this is the device's permanent address (until
4316  * the next reboot), which also lets register_netdevice() fill in
4317  * perm_addr properly.
4318  */
4319 net_dev->addr_assign_type = NET_ADDR_PERM;
4320 } else {
4321
4322
4323
4324 eth_hw_addr_set(net_dev, dpni_mac_addr);
4325 }
4326
4327 return 0;
4328 }
4329
4330 static int dpaa2_eth_netdev_init(struct net_device *net_dev)
4331 {
4332 struct device *dev = net_dev->dev.parent;
4333 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4334 u32 options = priv->dpni_attrs.options;
4335 u64 supported = 0, not_supported = 0;
4336 u8 bcast_addr[ETH_ALEN];
4337 u8 num_queues;
4338 int err;
4339
4340 net_dev->netdev_ops = &dpaa2_eth_ops;
4341 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4342
4343 err = dpaa2_eth_set_mac_addr(priv);
4344 if (err)
4345 return err;
4346
4347
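/* Explicitly add the broadcast address to the MAC filtering table */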
4348 eth_broadcast_addr(bcast_addr);
4349 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
4350 if (err) {
4351 dev_err(dev, "dpni_add_mac_addr() failed\n");
4352 return err;
4353 }
4354
4355
4356 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4357 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4358 DPAA2_ETH_MFL);
4359 if (err) {
4360 dev_err(dev, "dpni_set_max_frame_length() failed\n");
4361 return err;
4362 }
4363
4364
4365 num_queues = dpaa2_eth_queue_count(priv);
4366 err = netif_set_real_num_tx_queues(net_dev, num_queues);
4367 if (err) {
4368 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4369 return err;
4370 }
4371 err = netif_set_real_num_rx_queues(net_dev, num_queues);
4372 if (err) {
4373 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4374 return err;
4375 }
4376
4377 dpaa2_eth_detect_features(priv);
4378
4379
4380 supported |= IFF_LIVE_ADDR_CHANGE;
4381
4382 if (options & DPNI_OPT_NO_MAC_FILTER)
4383 not_supported |= IFF_UNICAST_FLT;
4384 else
4385 supported |= IFF_UNICAST_FLT;
4386
4387 net_dev->priv_flags |= supported;
4388 net_dev->priv_flags &= ~not_supported;
4389
4390
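/* Offloads and other features supported by the hardware */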
4391 net_dev->features = NETIF_F_RXCSUM |
4392 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4393 NETIF_F_SG | NETIF_F_HIGHDMA |
4394 NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
4395 net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
4396 net_dev->hw_features = net_dev->features;
4397
4398 if (priv->dpni_attrs.vlan_filter_entries)
4399 net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4400
4401 return 0;
4402 }
4403
4404 static int dpaa2_eth_poll_link_state(void *arg)
4405 {
4406 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
4407 int err;
4408
4409 while (!kthread_should_stop()) {
4410 err = dpaa2_eth_link_state_update(priv);
4411 if (unlikely(err))
4412 return err;
4413
4414 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
4415 }
4416
4417 return 0;
4418 }
4419
4420 static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
4421 {
4422 struct fsl_mc_device *dpni_dev, *dpmac_dev;
4423 struct dpaa2_mac *mac;
4424 int err;
4425
4426 dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
4427 dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
4428
4429 if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
4430 return PTR_ERR(dpmac_dev);
4431
4432 if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
4433 return 0;
4434
4435 mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
4436 if (!mac)
4437 return -ENOMEM;
4438
4439 mac->mc_dev = dpmac_dev;
4440 mac->mc_io = priv->mc_io;
4441 mac->net_dev = priv->net_dev;
4442
4443 err = dpaa2_mac_open(mac);
4444 if (err)
4445 goto err_free_mac;
4446 priv->mac = mac;
4447
4448 if (dpaa2_eth_is_type_phy(priv)) {
4449 err = dpaa2_mac_connect(mac);
4450 if (err && err != -EPROBE_DEFER)
4451 netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
4452 ERR_PTR(err));
4453 if (err)
4454 goto err_close_mac;
4455 }
4456
4457 return 0;
4458
4459 err_close_mac:
4460 dpaa2_mac_close(mac);
4461 priv->mac = NULL;
4462 err_free_mac:
4463 kfree(mac);
4464 return err;
4465 }
4466
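/* Undo dpaa2_eth_connect_mac(): disconnect from and close the MAC endpoint */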
4467 static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
4468 {
4469 if (dpaa2_eth_is_type_phy(priv))
4470 dpaa2_mac_disconnect(priv->mac);
4471
4472 if (!dpaa2_eth_has_mac(priv))
4473 return;
4474
4475 dpaa2_mac_close(priv->mac);
4476 kfree(priv->mac);
4477 priv->mac = NULL;
4478 }
4479
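/* Threaded handler for the DPNI interrupt: processes link state changes
 * and endpoint connect/disconnect notifications from the MC firmware
 */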
4480 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4481 {
4482 u32 status = ~0;
4483 struct device *dev = (struct device *)arg;
4484 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4485 struct net_device *net_dev = dev_get_drvdata(dev);
4486 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4487 int err;
4488
4489 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4490 DPNI_IRQ_INDEX, &status);
4491 if (unlikely(err)) {
4492 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
4493 return IRQ_HANDLED;
4494 }
4495
4496 if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
4497 dpaa2_eth_link_state_update(netdev_priv(net_dev));
4498
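/* The DPNI endpoint changed: refresh the MAC address and Tx queue ids,
 * then connect to or disconnect from the DPMAC as appropriate
 */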
4499 if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
4500 dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
4501 dpaa2_eth_update_tx_fqids(priv);
4502
4503 rtnl_lock();
4504 if (dpaa2_eth_has_mac(priv))
4505 dpaa2_eth_disconnect_mac(priv);
4506 else
4507 dpaa2_eth_connect_mac(priv);
4508 rtnl_unlock();
4509 }
4510
4511 return IRQ_HANDLED;
4512 }
4513
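/* Allocate the MC interrupt for this DPNI and enable link state and
 * endpoint change notifications on it
 */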
4514 static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
4515 {
4516 int err = 0;
4517 struct fsl_mc_device_irq *irq;
4518
4519 err = fsl_mc_allocate_irqs(ls_dev);
4520 if (err) {
4521 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4522 return err;
4523 }
4524
4525 irq = ls_dev->irqs[0];
4526 err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
4527 NULL, dpni_irq0_handler_thread,
4528 IRQF_NO_SUSPEND | IRQF_ONESHOT,
4529 dev_name(&ls_dev->dev), &ls_dev->dev);
4530 if (err < 0) {
4531 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
4532 goto free_mc_irq;
4533 }
4534
4535 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
4536 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
4537 DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
4538 if (err < 0) {
4539 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
4540 goto free_irq;
4541 }
4542
4543 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
4544 DPNI_IRQ_INDEX, 1);
4545 if (err < 0) {
4546 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
4547 goto free_irq;
4548 }
4549
4550 return 0;
4551
4552 free_irq:
4553 devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
4554 free_mc_irq:
4555 fsl_mc_free_irqs(ls_dev);
4556
4557 return err;
4558 }
4559
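/* Register a NAPI context for each channel */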
4560 static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
4561 {
4562 int i;
4563 struct dpaa2_eth_channel *ch;
4564
4565 for (i = 0; i < priv->num_channels; i++) {
4566 ch = priv->channel[i];
4567
4568 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
4569 NAPI_POLL_WEIGHT);
4570 }
4571 }
4572
4573 static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
4574 {
4575 int i;
4576 struct dpaa2_eth_channel *ch;
4577
4578 for (i = 0; i < priv->num_channels; i++) {
4579 ch = priv->channel[i];
4580 netif_napi_del(&ch->napi);
4581 }
4582 }
4583
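/* Probe a DPNI object: allocate the net device, set up the backing MC
 * objects, interrupts and devlink, then register with the network stack
 */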
4584 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4585 {
4586 struct device *dev;
4587 struct net_device *net_dev = NULL;
4588 struct dpaa2_eth_priv *priv = NULL;
4589 int err = 0;
4590
4591 dev = &dpni_dev->dev;
4592
4593 /* Allocate and initialize the multiqueue net device */
4594 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
4595 if (!net_dev) {
4596 dev_err(dev, "alloc_etherdev_mq() failed\n");
4597 return -ENOMEM;
4598 }
4599
4600 SET_NETDEV_DEV(net_dev, dev);
4601 dev_set_drvdata(dev, net_dev);
4602
4603 priv = netdev_priv(net_dev);
4604 priv->net_dev = net_dev;
4605
4606 priv->iommu_domain = iommu_get_domain_for_dev(dev);
4607
4608 priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
4609 priv->rx_tstamp = false;
4610
4611 priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
4612 if (!priv->dpaa2_ptp_wq) {
4613 err = -ENOMEM;
4614 goto err_wq_alloc;
4615 }
4616
4617 INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
4618 mutex_init(&priv->onestep_tstamp_lock);
4619 skb_queue_head_init(&priv->tx_skbs);
4620
4621 priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
4622
4623 /* Obtain an MC portal for issuing commands to the MC firmware */
4624 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4625 &priv->mc_io);
4626 if (err) {
4627 if (err == -ENXIO)
4628 err = -EPROBE_DEFER;
4629 else
4630 dev_err(dev, "MC portal allocation failed\n");
4631 goto err_portal_alloc;
4632 }
4633
4634 /* Initialize and configure the MC objects backing this interface */
4635 err = dpaa2_eth_setup_dpni(dpni_dev);
4636 if (err)
4637 goto err_dpni_setup;
4638
4639 err = dpaa2_eth_setup_dpio(priv);
4640 if (err)
4641 goto err_dpio_setup;
4642
4643 dpaa2_eth_setup_fqs(priv);
4644
4645 err = dpaa2_eth_setup_dpbp(priv);
4646 if (err)
4647 goto err_dpbp_setup;
4648
4649 err = dpaa2_eth_bind_dpni(priv);
4650 if (err)
4651 goto err_bind;
4652
4653 /* Add a NAPI instance for each channel */
4654 dpaa2_eth_add_ch_napi(priv);
4655
4656 /* Allocate per-CPU statistics and driver bookkeeping storage */
4657 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4658 if (!priv->percpu_stats) {
4659 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4660 err = -ENOMEM;
4661 goto err_alloc_percpu_stats;
4662 }
4663 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4664 if (!priv->percpu_extras) {
4665 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4666 err = -ENOMEM;
4667 goto err_alloc_percpu_extras;
4668 }
4669
4670 priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
4671 if (!priv->sgt_cache) {
4672 dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4673 err = -ENOMEM;
4674 goto err_alloc_sgt_cache;
4675 }
4676
4677 priv->fd = alloc_percpu(*priv->fd);
4678 if (!priv->fd) {
4679 dev_err(dev, "alloc_percpu(fds) failed\n");
4680 err = -ENOMEM;
4681 goto err_alloc_fds;
4682 }
4683
4684 err = dpaa2_eth_netdev_init(net_dev);
4685 if (err)
4686 goto err_netdev_init;
4687
4688 /* Configure checksum offload to match the initial netdev features */
4689 err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4690 if (err)
4691 goto err_csum;
4692
4693 err = dpaa2_eth_set_tx_csum(priv,
4694 !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4695 if (err)
4696 goto err_csum;
4697
4698 err = dpaa2_eth_alloc_rings(priv);
4699 if (err)
4700 goto err_alloc_rings;
4701
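/* DCB ops are only useful when priority flow control can be supported,
 * which requires pause frame support and VLAN-based traffic classes
 */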
4702 #ifdef CONFIG_FSL_DPAA2_ETH_DCB
4703 if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
4704 priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4705 net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4706 } else {
4707 dev_dbg(dev, "PFC not supported\n");
4708 }
4709 #endif
4710
4711 err = dpaa2_eth_setup_irqs(dpni_dev);
4712 if (err) {
4713 netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
4714 priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
4715 "%s_poll_link", net_dev->name);
4716 if (IS_ERR(priv->poll_thread)) {
4717 dev_err(dev, "Error starting polling thread\n");
4718 goto err_poll_thread;
4719 }
4720 priv->do_link_poll = true;
4721 }
4722
4723 err = dpaa2_eth_connect_mac(priv);
4724 if (err)
4725 goto err_connect_mac;
4726
4727 err = dpaa2_eth_dl_alloc(priv);
4728 if (err)
4729 goto err_dl_register;
4730
4731 err = dpaa2_eth_dl_traps_register(priv);
4732 if (err)
4733 goto err_dl_trap_register;
4734
4735 err = dpaa2_eth_dl_port_add(priv);
4736 if (err)
4737 goto err_dl_port_add;
4738
4739 err = register_netdev(net_dev);
4740 if (err < 0) {
4741 dev_err(dev, "register_netdev() failed\n");
4742 goto err_netdev_reg;
4743 }
4744
4745 #ifdef CONFIG_DEBUG_FS
4746 dpaa2_dbg_add(priv);
4747 #endif
4748
4749 dpaa2_eth_dl_register(priv);
4750 dev_info(dev, "Probed interface %s\n", net_dev->name);
4751 return 0;
4752
4753 err_netdev_reg:
4754 dpaa2_eth_dl_port_del(priv);
4755 err_dl_port_add:
4756 dpaa2_eth_dl_traps_unregister(priv);
4757 err_dl_trap_register:
4758 dpaa2_eth_dl_free(priv);
4759 err_dl_register:
4760 dpaa2_eth_disconnect_mac(priv);
4761 err_connect_mac:
4762 if (priv->do_link_poll)
4763 kthread_stop(priv->poll_thread);
4764 else
4765 fsl_mc_free_irqs(dpni_dev);
4766 err_poll_thread:
4767 dpaa2_eth_free_rings(priv);
4768 err_alloc_rings:
4769 err_csum:
4770 err_netdev_init:
4771 free_percpu(priv->fd);
4772 err_alloc_fds:
4773 free_percpu(priv->sgt_cache);
4774 err_alloc_sgt_cache:
4775 free_percpu(priv->percpu_extras);
4776 err_alloc_percpu_extras:
4777 free_percpu(priv->percpu_stats);
4778 err_alloc_percpu_stats:
4779 dpaa2_eth_del_ch_napi(priv);
4780 err_bind:
4781 dpaa2_eth_free_dpbp(priv);
4782 err_dpbp_setup:
4783 dpaa2_eth_free_dpio(priv);
4784 err_dpio_setup:
4785 dpaa2_eth_free_dpni(priv);
4786 err_dpni_setup:
4787 fsl_mc_portal_free(priv->mc_io);
4788 err_portal_alloc:
4789 destroy_workqueue(priv->dpaa2_ptp_wq);
4790 err_wq_alloc:
4791 dev_set_drvdata(dev, NULL);
4792 free_netdev(net_dev);
4793
4794 return err;
4795 }
4796
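/* Tear down everything set up by dpaa2_eth_probe() */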
4797 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4798 {
4799 struct device *dev;
4800 struct net_device *net_dev;
4801 struct dpaa2_eth_priv *priv;
4802
4803 dev = &ls_dev->dev;
4804 net_dev = dev_get_drvdata(dev);
4805 priv = netdev_priv(net_dev);
4806
4807 dpaa2_eth_dl_unregister(priv);
4808
4809 #ifdef CONFIG_DEBUG_FS
4810 dpaa2_dbg_remove(priv);
4811 #endif
4812
4813 unregister_netdev(net_dev);
4814 rtnl_lock();
4815 dpaa2_eth_disconnect_mac(priv);
4816 rtnl_unlock();
4817
4818 dpaa2_eth_dl_port_del(priv);
4819 dpaa2_eth_dl_traps_unregister(priv);
4820 dpaa2_eth_dl_free(priv);
4821
4822 if (priv->do_link_poll)
4823 kthread_stop(priv->poll_thread);
4824 else
4825 fsl_mc_free_irqs(ls_dev);
4826
4827 dpaa2_eth_free_rings(priv);
4828 free_percpu(priv->fd);
4829 free_percpu(priv->sgt_cache);
4830 free_percpu(priv->percpu_stats);
4831 free_percpu(priv->percpu_extras);
4832
4833 dpaa2_eth_del_ch_napi(priv);
4834 dpaa2_eth_free_dpbp(priv);
4835 dpaa2_eth_free_dpio(priv);
4836 dpaa2_eth_free_dpni(priv);
4837 if (priv->onestep_reg_base)
4838 iounmap(priv->onestep_reg_base);
4839
4840 fsl_mc_portal_free(priv->mc_io);
4841
4842 destroy_workqueue(priv->dpaa2_ptp_wq);
4843
4844 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
4845
4846 free_netdev(net_dev);
4847
4848 return 0;
4849 }
4850
4851 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4852 {
4853 .vendor = FSL_MC_VENDOR_FREESCALE,
4854 .obj_type = "dpni",
4855 },
4856 { .vendor = 0x0 }
4857 };
4858 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4859
4860 static struct fsl_mc_driver dpaa2_eth_driver = {
4861 .driver = {
4862 .name = KBUILD_MODNAME,
4863 .owner = THIS_MODULE,
4864 },
4865 .probe = dpaa2_eth_probe,
4866 .remove = dpaa2_eth_remove,
4867 .match_id_table = dpaa2_eth_match_id_table
4868 };
4869
4870 static int __init dpaa2_eth_driver_init(void)
4871 {
4872 int err;
4873
4874 dpaa2_eth_dbg_init();
4875 err = fsl_mc_driver_register(&dpaa2_eth_driver);
4876 if (err) {
4877 dpaa2_eth_dbg_exit();
4878 return err;
4879 }
4880
4881 return 0;
4882 }
4883
4884 static void __exit dpaa2_eth_driver_exit(void)
4885 {
4886 dpaa2_eth_dbg_exit();
4887 fsl_mc_driver_unregister(&dpaa2_eth_driver);
4888 }
4889
4890 module_init(dpaa2_eth_driver_init);
4891 module_exit(dpaa2_eth_driver_exit);