// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))

static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
                                     struct bpf_prog *prog,
                                     struct nix_cqe_rx_s *cqe,
                                     struct otx2_cq_queue *cq);

static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
                                 struct otx2_cq_queue *cq)
{
        u64 incr = (u64)(cq->cq_idx) << 32;
        u64 status;

        status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);

        if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
                     status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
                dev_err(pfvf->dev, "CQ stopped due to error");
                return -EINVAL;
        }

        cq->cq_tail = status & 0xFFFFF;
        cq->cq_head = (status >> 20) & 0xFFFFF;
        if (cq->cq_tail < cq->cq_head)
                cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
                                cq->cq_tail;
        else
                cq->pend_cqe = cq->cq_tail - cq->cq_head;

        return 0;
}
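
/* The atomic read above returns the CQ's current head and tail packed
 * into one 64-bit status word (tail in bits 19:0, head in bits 39:20,
 * per the masks used in otx2_nix_cq_op_status()).  Worked example of
 * the wrap-around branch, assuming cqe_cnt = 1024: with cq_head = 1020
 * and cq_tail = 4 the ring has wrapped, so
 * pend_cqe = (1024 - 1020) + 4 = 8 descriptors are pending.
 */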

static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
        struct nix_cqe_hdr_s *cqe_hdr;

        cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
        if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
                return NULL;

        cq->cq_head++;
        cq->cq_head &= (cq->cqe_cnt - 1);

        return cqe_hdr;
}

static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
        return (i & ~3) + 3 - (i & 3);
#else
        return i;
#endif
}
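
/* frag_num() compensates for endianness when 16-bit segment sizes are
 * written into the 64-bit SG sub-descriptor word: on big-endian the
 * u16 lanes within each 64-bit word appear in reverse order, so
 * indices are mirrored within each group of four.  For example,
 * frag_num(5) = (5 & ~3) + 3 - (5 & 3) = 4 + 3 - 1 = 6.
 */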

static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
                                        struct sk_buff *skb, int seg, int *len)
{
        const skb_frag_t *frag;
        struct page *page;
        int offset;

        /* First segment is always skb->data */
        if (!seg) {
                page = virt_to_page(skb->data);
                offset = offset_in_page(skb->data);
                *len = skb_headlen(skb);
        } else {
                frag = &skb_shinfo(skb)->frags[seg - 1];
                page = skb_frag_page(frag);
                offset = skb_frag_off(frag);
                *len = skb_frag_size(frag);
        }
        return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
        int seg;

        for (seg = 0; seg < sg->num_segs; seg++) {
                otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
                                    sg->size[seg], DMA_TO_DEVICE);
        }
        sg->num_segs = 0;
}

static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
                                     struct otx2_snd_queue *sq,
                                     struct nix_cqe_tx_s *cqe)
{
        struct nix_send_comp_s *snd_comp = &cqe->comp;
        struct sg_list *sg;
        struct page *page;
        u64 pa;

        sg = &sq->sg[snd_comp->sqe_id];

        pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
        otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
                            sg->size[0], DMA_TO_DEVICE);
        page = virt_to_page(phys_to_virt(pa));
        put_page(page);
}
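
/* For XDP_TX the transmitted frame is the original RX buffer looped
 * back, so completion handling only needs to unmap that single segment
 * and drop the page reference that kept the buffer alive while it was
 * queued; this pairs with the buffer being left mapped in the XDP_TX
 * arm of otx2_xdp_rcv_pkt_handler() below.
 */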

static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
                                 struct otx2_cq_queue *cq,
                                 struct otx2_snd_queue *sq,
                                 struct nix_cqe_tx_s *cqe,
                                 int budget, int *tx_pkts, int *tx_bytes)
{
        struct nix_send_comp_s *snd_comp = &cqe->comp;
        struct skb_shared_hwtstamps ts;
        struct sk_buff *skb = NULL;
        u64 timestamp, tsns;
        struct sg_list *sg;
        int err;

        if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
                net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
                                    pfvf->netdev->name, cq->cint_idx,
                                    snd_comp->status);

        sg = &sq->sg[snd_comp->sqe_id];
        skb = (struct sk_buff *)sg->skb;
        if (unlikely(!skb))
                return;

        if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
                timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
                if (timestamp != 1) {
                        timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
                        err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
                        if (!err) {
                                memset(&ts, 0, sizeof(ts));
                                ts.hwtstamp = ns_to_ktime(tsns);
                                skb_tstamp_tx(skb, &ts);
                        }
                }
        }

        *tx_bytes += skb->len;
        (*tx_pkts)++;
        otx2_dma_unmap_skb_frags(pfvf, sg);
        napi_consume_skb(skb, budget);
        sg->skb = (u64)NULL;
}

static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
                              struct sk_buff *skb, void *data)
{
        u64 timestamp, tsns;
        int err;

        if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
                return;

        timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);

        err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
        if (err)
                return;

        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}

static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
                              u64 iova, int len, struct nix_rx_parse_s *parse,
                              int qidx)
{
        struct page *page;
        int off = 0;
        void *va;

        va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));

        if (likely(!skb_shinfo(skb)->nr_frags)) {
                /* Check if data starts at some nonzero offset
                 * from the start of the buffer.  For now the
                 * only possible offset is 8 bytes of timestamp.
                 */
                if (parse->laptr) {
                        otx2_set_rxtstamp(pfvf, skb, va);
                        off = OTX2_HW_TIMESTAMP_LEN;
                }
        }

        page = virt_to_page(va);
        if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                                va - page_address(page) + off,
                                len - off, pfvf->rbsize);

                otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
                                    pfvf->rbsize, DMA_FROM_DEVICE);
                return true;
        }

        /* If more than MAX_SKB_FRAGS fragments are received then
         * give back those buffer pointers to hardware for reuse.
         */
        pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);

        return false;
}
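
/* Buffer addressing convention used above: the pointer given to the
 * hardware aura points OTX2_HEAD_ROOM bytes past the start of the DMA
 * mapping (see otx2_refill_pool_ptrs() below), so the unmap must use
 * iova - OTX2_HEAD_ROOM as its base.  Aura pointers are 8-byte
 * aligned, hence the ~0x07ULL mask when a pointer is handed back.
 */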

static void otx2_set_rxhash(struct otx2_nic *pfvf,
                            struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
        enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
        struct otx2_rss_info *rss;
        u32 hash = 0;

        if (!(pfvf->netdev->features & NETIF_F_RXHASH))
                return;

        rss = &pfvf->hw.rss_info;
        if (rss->flowkey_cfg) {
                if (rss->flowkey_cfg &
                    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
                        hash_type = PKT_HASH_TYPE_L4;
                else
                        hash_type = PKT_HASH_TYPE_L3;
                hash = cqe->hdr.flow_tag;
        }
        skb_set_hash(skb, hash, hash_type);
}

static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
                              int qidx)
{
        struct nix_rx_sg_s *sg = &cqe->sg;
        void *end, *start;
        u64 *seg_addr;
        int seg;

        start = (void *)sg;
        end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
        while (start < end) {
                sg = (struct nix_rx_sg_s *)start;
                seg_addr = &sg->seg_addr;
                for (seg = 0; seg < sg->segs; seg++, seg_addr++)
                        pfvf->hw_ops->aura_freeptr(pfvf, qidx,
                                                   *seg_addr & ~0x07ULL);
                start += sizeof(*sg);
        }
}
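
/* A CQE's scatter/gather area is (desc_sizem1 + 1) * 16 bytes long and
 * holds one or more NIX_SUBDC_SG sub-descriptors, each describing up
 * to three receive segments.  The walk above visits every segment
 * address and returns it to the pool so the hardware can reuse the
 * buffer, e.g. for error packets that will never reach the stack.
 */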

static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
                                  struct nix_cqe_rx_s *cqe, int qidx)
{
        struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
        struct nix_rx_parse_s *parse = &cqe->parse;

        if (netif_msg_rx_err(pfvf))
                netdev_err(pfvf->netdev,
                           "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
                           qidx, parse->errlev, parse->errcode);

        if (parse->errlev == NPC_ERRLVL_RE) {
                switch (parse->errcode) {
                case ERRCODE_FCS:
                case ERRCODE_FCS_RCV:
                        atomic_inc(&stats->rx_fcs_errs);
                        break;
                case ERRCODE_UNDERSIZE:
                        atomic_inc(&stats->rx_undersize_errs);
                        break;
                case ERRCODE_OVERSIZE:
                        atomic_inc(&stats->rx_oversize_errs);
                        break;
                case ERRCODE_OL2_LEN_MISMATCH:
                        atomic_inc(&stats->rx_len_errs);
                        break;
                default:
                        atomic_inc(&stats->rx_other_errs);
                        break;
                }
        } else if (parse->errlev == NPC_ERRLVL_NIX) {
                switch (parse->errcode) {
                case ERRCODE_OL3_LEN:
                case ERRCODE_OL4_LEN:
                case ERRCODE_IL3_LEN:
                case ERRCODE_IL4_LEN:
                        atomic_inc(&stats->rx_len_errs);
                        break;
                case ERRCODE_OL4_CSUM:
                case ERRCODE_IL4_CSUM:
                        atomic_inc(&stats->rx_csum_errs);
                        break;
                default:
                        atomic_inc(&stats->rx_other_errs);
                        break;
                }
        } else {
                atomic_inc(&stats->rx_other_errs);
                /* For now ignore all the NPC parser errors and
                 * pass the packets to stack.
                 */
                return false;
        }

        /* If RXALL is enabled pass on packets to stack. */
        if (pfvf->netdev->features & NETIF_F_RXALL)
                return false;

        /* Free buffer back to pool */
        if (cqe->sg.segs)
                otx2_free_rcv_seg(pfvf, cqe, qidx);
        return true;
}

static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
                                 struct napi_struct *napi,
                                 struct otx2_cq_queue *cq,
                                 struct nix_cqe_rx_s *cqe)
{
        struct nix_rx_parse_s *parse = &cqe->parse;
        struct nix_rx_sg_s *sg = &cqe->sg;
        struct sk_buff *skb = NULL;
        void *end, *start;
        u64 *seg_addr;
        u16 *seg_size;
        int seg;

        if (unlikely(parse->errlev || parse->errcode)) {
                if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
                        return;
        }

        if (pfvf->xdp_prog)
                if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
                        return;

        skb = napi_get_frags(napi);
        if (unlikely(!skb))
                return;

        start = (void *)sg;
        end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
        while (start < end) {
                sg = (struct nix_rx_sg_s *)start;
                seg_addr = &sg->seg_addr;
                seg_size = (void *)sg;
                for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
                        if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
                                              seg_size[seg], parse, cq->cq_idx))
                                cq->pool_ptrs++;
                }
                start += sizeof(*sg);
        }
        otx2_set_rxhash(pfvf, cqe, skb);

        skb_record_rx_queue(skb, cq->cq_idx);
        if (pfvf->netdev->features & NETIF_F_RXCSUM)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_frags(napi);
}

static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
                                struct napi_struct *napi,
                                struct otx2_cq_queue *cq, int budget)
{
        struct nix_cqe_rx_s *cqe;
        int processed_cqe = 0;

        if (cq->pend_cqe >= budget)
                goto process_cqe;

        if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
                return 0;

process_cqe:
        while (likely(processed_cqe < budget) && cq->pend_cqe) {
                cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
                if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
                    !cqe->sg.seg_addr) {
                        if (!processed_cqe)
                                return 0;
                        break;
                }
                cq->cq_head++;
                cq->cq_head &= (cq->cqe_cnt - 1);

                otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);

                cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
                cqe->sg.seg_addr = 0x00;
                processed_cqe++;
                cq->pend_cqe--;
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);

        return processed_cqe;
}
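
/* NIX_LF_CQ_OP_DOOR write format, as used above: the CQ index goes in
 * the upper 32 bits and the number of CQEs being released back to
 * hardware in the lower 32 bits.  For example, freeing 16 processed
 * CQEs on CQ 3 writes (3ULL << 32) | 16.
 */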

void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
        struct otx2_nic *pfvf = dev;
        dma_addr_t bufptr;

        while (cq->pool_ptrs) {
                if (otx2_alloc_buffer(pfvf, cq, &bufptr))
                        break;
                otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
                cq->pool_ptrs--;
        }
}
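
/* cq->pool_ptrs counts RX buffers consumed during the poll.  If an
 * allocation fails the loop exits early and the remaining count is
 * carried over, so the deficit is retried on a later NAPI run rather
 * than lost.
 */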

static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
                                struct otx2_cq_queue *cq, int budget)
{
        int tx_pkts = 0, tx_bytes = 0, qidx;
        struct nix_cqe_tx_s *cqe;
        int processed_cqe = 0;

        if (cq->pend_cqe >= budget)
                goto process_cqe;

        if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
                return 0;

process_cqe:
        while (likely(processed_cqe < budget) && cq->pend_cqe) {
                cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
                if (unlikely(!cqe)) {
                        if (!processed_cqe)
                                return 0;
                        break;
                }
                if (cq->cq_type == CQ_XDP) {
                        qidx = cq->cq_idx - pfvf->hw.rx_queues;
                        otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
                                                 cqe);
                } else {
                        otx2_snd_pkt_handler(pfvf, cq,
                                             &pfvf->qset.sq[cq->cint_idx],
                                             cqe, budget, &tx_pkts, &tx_bytes);
                }
                cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
                processed_cqe++;
                cq->pend_cqe--;
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);

        if (likely(tx_pkts)) {
                struct netdev_queue *txq;

                txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
                netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

                /* Check if queue was stopped earlier due to ring full */
                smp_mb();
                if (netif_tx_queue_stopped(txq) &&
                    netif_carrier_ok(pfvf->netdev))
                        netif_tx_wake_queue(txq);
        }
        return 0;
}

static void otx2_adjust_adaptive_coalesce(struct otx2_nic *pfvf,
                                          struct otx2_cq_poll *cq_poll)
{
        struct dim_sample dim_sample;
        u64 rx_frames, rx_bytes;

        rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
                OTX2_GET_RX_STATS(RX_UCAST);
        rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
        dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
        net_dim(&cq_poll->dim, dim_sample);
}
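
/* dim_update_sample() snapshots cumulative event/frame/byte counters;
 * net_dim() diffs successive snapshots and, when the measured rates
 * suggest a better moderation profile, schedules the DIM work item
 * registered for this poll context elsewhere in the driver to
 * reprogram the interrupt coalescing parameters.
 */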

int otx2_napi_handler(struct napi_struct *napi, int budget)
{
        struct otx2_cq_queue *rx_cq = NULL;
        struct otx2_cq_poll *cq_poll;
        int workdone = 0, cq_idx, i;
        struct otx2_cq_queue *cq;
        struct otx2_qset *qset;
        struct otx2_nic *pfvf;

        cq_poll = container_of(napi, struct otx2_cq_poll, napi);
        pfvf = (struct otx2_nic *)cq_poll->dev;
        qset = &pfvf->qset;

        for (i = 0; i < CQS_PER_CINT; i++) {
                cq_idx = cq_poll->cq_ids[i];
                if (unlikely(cq_idx == CINT_INVALID_CQ))
                        continue;
                cq = &qset->cq[cq_idx];
                if (cq->cq_type == CQ_RX) {
                        rx_cq = cq;
                        workdone += otx2_rx_napi_handler(pfvf, napi,
                                                         cq, budget);
                } else {
                        workdone += otx2_tx_napi_handler(pfvf, cq, budget);
                }
        }

        if (rx_cq && rx_cq->pool_ptrs)
                pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);

        /* Clear the IRQ */
        otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

        if (workdone < budget && napi_complete_done(napi, workdone)) {
                /* If interface is going down, don't re-enable interrupts */
                if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
                        return workdone;

                /* Adjust coalescing only if adaptive coalescing is enabled */
                if (workdone != 0 &&
                    ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
                     OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
                        /* Sample stats and feed the DIM algorithm */
                        otx2_adjust_adaptive_coalesce(pfvf, cq_poll);

                        /* Update irq coalescing */
                        for (i = 0; i < pfvf->hw.cint_cnt; i++)
                                otx2_config_irq_coalescing(pfvf, i);
                }

                /* Re-enable interrupts */
                otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
                             BIT_ULL(0));
        }
        return workdone;
}

void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
                    int size, int qidx)
{
        u64 status;

        /* Packet data stores should finish before SQE is flushed to HW */
        dma_wmb();

        do {
                memcpy(sq->lmt_addr, sq->sqe_base, size);
                status = otx2_lmt_flush(sq->io_addr);
        } while (status == 0);

        sq->head++;
        sq->head &= (sq->sqe_cnt - 1);
}
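
/* The SQE is submitted with an LMTST sequence: the descriptor is
 * copied into the LMT line and otx2_lmt_flush() issues the I/O store
 * that pushes it to the NIX send queue.  A return status of zero
 * means the LMTST was aborted (for instance when the store sequence
 * was interrupted), so the copy and flush are simply retried until
 * the store succeeds.
 */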

#define MAX_SEGS_PER_SG 3

static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                            struct sk_buff *skb, int num_segs, int *offset)
{
        struct nix_sqe_sg_s *sg = NULL;
        u64 dma_addr, *iova = NULL;
        u16 *sg_lens = NULL;
        int seg, len;

        sq->sg[sq->head].num_segs = 0;

        for (seg = 0; seg < num_segs; seg++) {
                if ((seg % MAX_SEGS_PER_SG) == 0) {
                        sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
                        sg->ld_type = NIX_SEND_LDTYPE_LDD;
                        sg->subdc = NIX_SUBDC_SG;
                        sg->segs = 0;
                        sg_lens = (void *)sg;
                        iova = (void *)sg + sizeof(*sg);

                        /* The next subdescriptor must start at a 16-byte
                         * boundary: advance by the SG word plus three IOVA
                         * slots when this SG will hold 2 or 3 segments,
                         * else by the SG word plus a single IOVA slot.
                         */
                        if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
                                *offset += sizeof(*sg) + (3 * sizeof(u64));
                        else
                                *offset += sizeof(*sg) + sizeof(u64);
                }
                dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
                if (dma_mapping_error(pfvf->dev, dma_addr))
                        return false;

                sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
                sg->segs++;
                *iova++ = dma_addr;

                /* Save DMA mapping info for later unmapping */
                sq->sg[sq->head].dma_addr[seg] = dma_addr;
                sq->sg[sq->head].size[seg] = len;
                sq->sg[sq->head].num_segs++;
        }

        sq->sg[sq->head].skb = (u64)skb;
        return true;
}
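
/* SQE scatter/gather layout example: a 5-segment skb is encoded as two
 * NIX_SUBDC_SG subdescriptors.  The first holds segments 0-2 and spans
 * 32 bytes (the 8-byte SG word plus three 8-byte IOVAs); the second
 * holds segments 3-4 and also spans 32 bytes, its unused fourth word
 * acting as padding so the following subdescriptor stays 16-byte
 * aligned.
 */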

/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                             struct sk_buff *skb, int *offset)
{
        struct nix_sqe_ext_s *ext;

        ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
        ext->subdc = NIX_SUBDC_EXT;
        if (skb_shinfo(skb)->gso_size) {
                ext->lso = 1;
                ext->lso_sb = skb_tcp_all_headers(skb);
                ext->lso_mps = skb_shinfo(skb)->gso_size;

                /* Select the LSO format based on the GSO type */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
                        ext->lso_format = pfvf->hw.lso_tsov4_idx;

                        /* HW adds payload size to 'ip_hdr->tot_len' while
                         * sending TSO segment, hence set payload length
                         * in IP header of the packet to just header length.
                         */
                        ip_hdr(skb)->tot_len =
                                htons(ext->lso_sb - skb_network_offset(skb));
                } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        ext->lso_format = pfvf->hw.lso_tsov6_idx;

                        ipv6_hdr(skb)->payload_len =
                                htons(ext->lso_sb - skb_network_offset(skb));
                } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
                        __be16 l3_proto = vlan_get_protocol(skb);
                        struct udphdr *udph = udp_hdr(skb);
                        u16 iplen;

                        ext->lso_sb = skb_transport_offset(skb) +
                                      sizeof(struct udphdr);

                        /* HW adds payload size to length fields in IP and
                         * UDP headers while segmentation, hence adjust the
                         * lengths to just header sizes.
                         */
                        iplen = htons(ext->lso_sb - skb_network_offset(skb));
                        if (l3_proto == htons(ETH_P_IP)) {
                                ip_hdr(skb)->tot_len = iplen;
                                ext->lso_format = pfvf->hw.lso_udpv4_idx;
                        } else {
                                ipv6_hdr(skb)->payload_len = iplen;
                                ext->lso_format = pfvf->hw.lso_udpv6_idx;
                        }

                        udph->len = htons(sizeof(struct udphdr));
                }
        } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                ext->tstmp = 1;
        }

#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
        if (skb_vlan_tag_present(skb)) {
                if (skb->vlan_proto == htons(ETH_P_8021Q)) {
                        ext->vlan1_ins_ena = 1;
                        ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
                        ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
                } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
                        ext->vlan0_ins_ena = 1;
                        ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
                        ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
                }
        }

        *offset += sizeof(*ext);
}

static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
                             int alg, u64 iova)
{
        struct nix_sqe_mem_s *mem;

        mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
        mem->subdc = NIX_SUBDC_MEM;
        mem->alg = alg;
        mem->wmem = 1; /* wait for the memory operation */
        mem->addr = iova;

        *offset += sizeof(*mem);
}
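
/* A NIX_SUBDC_MEM subdescriptor asks the hardware to write to the
 * given IOVA once the packet has been sent.  With
 * NIX_SENDMEMALG_E_SETTSTMP (see otx2_set_txtstamp() below) the value
 * written is the transmit timestamp, which the completion handler
 * later picks up from sq->timestamps to fill in the skb's hardware
 * timestamp.
 */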

static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                             struct nix_sqe_hdr_s *sqe_hdr,
                             struct sk_buff *skb, u16 qidx)
{
        int proto = 0;

        /* Check if SQE was framed before, if yes then no need to
         * set these constants, hence can be skipped.
         */
        if (!sqe_hdr->total) {
                /* Don't free Tx buffers to Aura */
                sqe_hdr->df = 1;
                sqe_hdr->aura = sq->aura_id;
                /* Post a CQE Tx after pkt transmission */
                sqe_hdr->pnc = 1;
                sqe_hdr->sq = qidx;
        }
        sqe_hdr->total = skb->len;
        /* Set SQE identifier which will be used later for freeing SKB */
        sqe_hdr->sqe_id = sq->head;

        /* Offload TCP/UDP checksum to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                sqe_hdr->ol3ptr = skb_network_offset(skb);
                sqe_hdr->ol4ptr = skb_transport_offset(skb);

                /* Get actual L3 protocol for VLAN tagged packets */
                if (eth_type_vlan(skb->protocol))
                        skb->protocol = vlan_get_protocol(skb);

                if (skb->protocol == htons(ETH_P_IP)) {
                        proto = ip_hdr(skb)->protocol;
                        /* In case of TSO, HW needs this to be explicitly set.
                         * So set this always, instead of adding a check.
                         */
                        sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        proto = ipv6_hdr(skb)->nexthdr;
                        sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
                }

                if (proto == IPPROTO_TCP)
                        sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
                else if (proto == IPPROTO_UDP)
                        sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
        }
}

static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
                                struct otx2_snd_queue *sq,
                                struct sk_buff *skb, int sqe, int hdr_len)
{
        int num_segs = skb_shinfo(skb)->nr_frags + 1;
        struct sg_list *sg = &sq->sg[sqe];
        u64 dma_addr;
        int seg, len;

        sg->num_segs = 0;

        /* Get payload length at skb->data */
        len = skb_headlen(skb) - hdr_len;

        for (seg = 0; seg < num_segs; seg++) {
                /* Skip skb->data, if first segment length is 0 */
                if (!seg && !len)
                        continue;
                dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
                if (dma_mapping_error(pfvf->dev, dma_addr))
                        goto unmap;

                /* Save DMA mapping info for later unmapping */
                sg->dma_addr[sg->num_segs] = dma_addr;
                sg->size[sg->num_segs] = len;
                sg->num_segs++;
        }
        return 0;
unmap:
        otx2_dma_unmap_skb_frags(pfvf, sg);
        return -EINVAL;
}

static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
                                  struct sk_buff *skb, int seg,
                                  u64 seg_addr, int hdr_len, int sqe)
{
        struct sg_list *sg = &sq->sg[sqe];
        const skb_frag_t *frag;
        int offset;

        if (seg < 0)
                return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

        frag = &skb_shinfo(skb)->frags[seg];
        offset = seg_addr - (u64)skb_frag_address(frag);
        if (skb_headlen(skb) - hdr_len)
                seg++;
        return sg->dma_addr[seg] + offset;
}
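
/* The caller passes tso.next_frag_idx - 1, so seg < 0 means the TSO
 * cursor is still inside the skb linear area and the address is
 * resolved against the head mapping in dma_addr[0].  Once payload
 * fragments are reached, seg is biased by one when the linear area
 * contributed payload, because that payload occupies dma_addr[0] and
 * shifts every fragment's mapping up by one slot.
 */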

static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
                                struct sg_list *list, int *offset)
{
        struct nix_sqe_sg_s *sg = NULL;
        u16 *sg_lens = NULL;
        u64 *iova = NULL;
        int seg;

        /* Add SG descriptors with buffer addresses */
        for (seg = 0; seg < list->num_segs; seg++) {
                if ((seg % MAX_SEGS_PER_SG) == 0) {
                        sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
                        sg->ld_type = NIX_SEND_LDTYPE_LDD;
                        sg->subdc = NIX_SUBDC_SG;
                        sg->segs = 0;
                        sg_lens = (void *)sg;
                        iova = (void *)sg + sizeof(*sg);

                        /* Next subdescriptor must start at a 16-byte
                         * boundary, see otx2_sqe_add_sg() above.
                         */
                        if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
                                *offset += sizeof(*sg) + (3 * sizeof(u64));
                        else
                                *offset += sizeof(*sg) + sizeof(u64);
                }
                sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
                *iova++ = list->dma_addr[seg];
                sg->segs++;
        }
}

static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                               struct sk_buff *skb, u16 qidx)
{
        struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
        int hdr_len, tcp_data, seg_len, pkt_len, offset;
        struct nix_sqe_hdr_s *sqe_hdr;
        int first_sqe = sq->head;
        struct sg_list list;
        struct tso_t tso;

        hdr_len = tso_start(skb, &tso);

        /* Map SKB's fragments to DMA.
         * It's done here to avoid mapping for every TSO segment's packet.
         */
        if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
                dev_kfree_skb_any(skb);
                return;
        }

        netdev_tx_sent_queue(txq, skb->len);

        tcp_data = skb->len - hdr_len;
        while (tcp_data > 0) {
                char *hdr;

                seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
                tcp_data -= seg_len;

                /* Set SQE's SEND_HDR */
                memset(sq->sqe_base, 0, sq->sqe_size);
                sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
                otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
                offset = sizeof(*sqe_hdr);

                /* Add TSO segment's pkt header */
                hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
                tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
                list.dma_addr[0] =
                        sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
                list.size[0] = hdr_len;
                list.num_segs = 1;

                /* Add TSO segment's payload data fragments */
                pkt_len = hdr_len;
                while (seg_len > 0) {
                        int size;

                        size = min_t(int, tso.size, seg_len);

                        list.size[list.num_segs] = size;
                        list.dma_addr[list.num_segs] =
                                otx2_tso_frag_dma_addr(sq, skb,
                                                       tso.next_frag_idx - 1,
                                                       (u64)tso.data, hdr_len,
                                                       first_sqe);
                        list.num_segs++;
                        pkt_len += size;
                        seg_len -= size;
                        tso_build_data(skb, &tso, size);
                }
                sqe_hdr->total = pkt_len;
                otx2_sqe_tso_add_sg(sq, &list, &offset);

                /* DMA mappings and skb needs to be freed only after last
                 * TSO segment is transmitted out. So set 'PNC' only for
                 * last segment. Also point last segment's sqe_id to first
                 * segment's SQE index where skb address and DMA mappings
                 * are saved.
                 */
                if (!tcp_data) {
                        sqe_hdr->pnc = 1;
                        sqe_hdr->sqe_id = first_sqe;
                        sq->sg[first_sqe].skb = (u64)skb;
                } else {
                        sqe_hdr->pnc = 0;
                }

                sqe_hdr->sizem1 = (offset / 16) - 1;

                /* Flush SQE to HW */
                pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
        }
}

static bool is_hw_tso_supported(struct otx2_nic *pfvf,
                                struct sk_buff *skb)
{
        int payload_len, last_seg_size;

        if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
                return true;

        /* On 96xx A0, HW TSO not supported */
        if (!is_96xx_B0(pfvf->pdev))
                return false;

        /* HW has an issue due to which when the payload of the last LSO
         * segment is shorter than 16 bytes, some header fields may not
         * be correctly modified, hence don't offload such TSO segments.
         */

        payload_len = skb->len - skb_tcp_all_headers(skb);
        last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
        if (last_seg_size && last_seg_size < 16)
                return false;

        return true;
}
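
/* Example of the last-segment check above: with gso_size = 1448 and a
 * 2900-byte TCP payload, the segments are 1448 + 1448 + 4 bytes.  The
 * trailing 4-byte segment is shorter than 16 bytes, so the skb is
 * rejected here and falls back to the software TSO path in
 * otx2_sq_append_tso().
 */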

static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{
        if (!skb_shinfo(skb)->gso_size)
                return 1;

        /* HW TSO */
        if (is_hw_tso_supported(pfvf, skb))
                return 1;

        /* SW TSO */
        return skb_shinfo(skb)->gso_segs;
}

static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
                              struct otx2_snd_queue *sq, int *offset)
{
        u64 iova;

        if (!skb_shinfo(skb)->gso_size &&
            skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                iova = sq->timestamps->iova + (sq->head * sizeof(u64));
                otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
        } else {
                skb_tx_timestamp(skb);
        }
}

bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
                        struct sk_buff *skb, u16 qidx)
{
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
        struct otx2_nic *pfvf = netdev_priv(netdev);
        int offset, num_segs, free_sqe;
        struct nix_sqe_hdr_s *sqe_hdr;

        /* Check if there is room for new SQE.
         * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
         * will give free SQE count.
         */
        free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;

        if (free_sqe < sq->sqe_thresh ||
            free_sqe < otx2_get_sqe_count(pfvf, skb))
                return false;

        num_segs = skb_shinfo(skb)->nr_frags + 1;

        /* If SKB doesn't fit in a single SQE, linearize it.
         * TODO: Consider adding JUMP descriptor instead.
         */
        if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
                if (__skb_linearize(skb)) {
                        dev_kfree_skb_any(skb);
                        return true;
                }
                num_segs = skb_shinfo(skb)->nr_frags + 1;
        }

        if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
                /* Insert vlan tag before giving pkt to tso */
                if (skb_vlan_tag_present(skb))
                        skb = __vlan_hwaccel_push_inside(skb);
                otx2_sq_append_tso(pfvf, sq, skb, qidx);
                return true;
        }

        /* Set SQE's SEND_HDR.
         * Do not clear the first 64bit as it contains constant info.
         */
        memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
        sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
        otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
        offset = sizeof(*sqe_hdr);

        /* Add extended header if needed */
        otx2_sqe_add_ext(pfvf, sq, skb, &offset);

        /* Add SG subdesc with data frags */
        if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
                otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
                return false;
        }

        otx2_set_txtstamp(pfvf, skb, sq, &offset);

        sqe_hdr->sizem1 = (offset / 16) - 1;

        netdev_tx_sent_queue(txq, skb->len);

        /* Flush SQE to HW */
        pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

        return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);

void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
        struct nix_cqe_rx_s *cqe;
        int processed_cqe = 0;
        u64 iova, pa;

        if (pfvf->xdp_prog)
                xdp_rxq_info_unreg(&cq->xdp_rxq);

        if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
                return;

        while (cq->pend_cqe) {
                cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
                processed_cqe++;
                cq->pend_cqe--;

                if (!cqe)
                        continue;
                if (cqe->sg.segs > 1) {
                        otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
                        continue;
                }
                iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
                pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
                otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
                put_page(virt_to_page(phys_to_virt(pa)));
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);
}

void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
        struct sk_buff *skb = NULL;
        struct otx2_snd_queue *sq;
        struct nix_cqe_tx_s *cqe;
        int processed_cqe = 0;
        struct sg_list *sg;

        sq = &pfvf->qset.sq[cq->cint_idx];

        if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
                return;

        while (cq->pend_cqe) {
                cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
                processed_cqe++;
                cq->pend_cqe--;

                if (!cqe)
                        continue;
                sg = &sq->sg[cqe->comp.sqe_id];
                skb = (struct sk_buff *)sg->skb;
                if (skb) {
                        otx2_dma_unmap_skb_frags(pfvf, sg);
                        dev_kfree_skb_any(skb);
                        sg->skb = (u64)NULL;
                }
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);
}

int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
        struct msg_req *msg;
        int err;

        mutex_lock(&pfvf->mbox.lock);
        if (enable)
                msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
        else
                msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);

        if (!msg) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        err = otx2_sync_mbox_msg(&pfvf->mbox);
        mutex_unlock(&pfvf->mbox.lock);
        return err;
}

static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
                                int len, int *offset)
{
        struct nix_sqe_sg_s *sg = NULL;
        u64 *iova = NULL;

        sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
        sg->ld_type = NIX_SEND_LDTYPE_LDD;
        sg->subdc = NIX_SUBDC_SG;
        sg->segs = 1;
        sg->seg1_size = len;
        iova = (void *)sg + sizeof(*sg);
        *iova = dma_addr;
        *offset += sizeof(*sg) + sizeof(u64);

        sq->sg[sq->head].dma_addr[0] = dma_addr;
        sq->sg[sq->head].size[0] = len;
        sq->sg[sq->head].num_segs = 1;
}
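
/* An XDP frame always fits in one receive buffer, so a single SG
 * subdescriptor with one segment (the 8-byte SG word plus one IOVA,
 * 16 bytes total) is sufficient; compare with the multi-segment
 * packing in otx2_sqe_add_sg() above.
 */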

bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
{
        struct nix_sqe_hdr_s *sqe_hdr;
        struct otx2_snd_queue *sq;
        int offset, free_sqe;

        sq = &pfvf->qset.sq[qidx];
        free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
        if (free_sqe < sq->sqe_thresh)
                return false;

        memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);

        sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);

        if (!sqe_hdr->total) {
                sqe_hdr->aura = sq->aura_id;
                sqe_hdr->df = 1;
                sqe_hdr->sq = qidx;
                sqe_hdr->pnc = 1;
        }
        sqe_hdr->total = len;
        sqe_hdr->sqe_id = sq->head;

        offset = sizeof(*sqe_hdr);

        otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
        sqe_hdr->sizem1 = (offset / 16) - 1;
        pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

        return true;
}

static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
                                     struct bpf_prog *prog,
                                     struct nix_cqe_rx_s *cqe,
                                     struct otx2_cq_queue *cq)
{
        unsigned char *hard_start, *data;
        int qidx = cq->cq_idx;
        struct xdp_buff xdp;
        struct page *page;
        u64 iova, pa;
        u32 act;
        int err;

        iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
        pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
        page = virt_to_page(phys_to_virt(pa));

        xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);

        data = (unsigned char *)phys_to_virt(pa);
        hard_start = page_address(page);
        xdp_prepare_buff(&xdp, hard_start, data - hard_start,
                         cqe->sg.seg_size, false);

        act = bpf_prog_run_xdp(prog, &xdp);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                qidx += pfvf->hw.tx_queues;
                cq->pool_ptrs++;
                return otx2_xdp_sq_append_pkt(pfvf, iova,
                                              cqe->sg.seg_size, qidx);
        case XDP_REDIRECT:
                cq->pool_ptrs++;
                err = xdp_do_redirect(pfvf->netdev, &xdp, prog);

                otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
                                    DMA_FROM_DEVICE);
                if (!err)
                        return true;
                put_page(page);
                break;
        case XDP_ABORTED:
                trace_xdp_exception(pfvf->netdev, prog, act);
                break;
        case XDP_DROP:
                otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
                                    DMA_FROM_DEVICE);
                put_page(page);
                cq->pool_ptrs++;
                return true;
        default:
                bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
                break;
        }
        return false;
}
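
/* Buffer ownership per XDP verdict, as implemented above: XDP_PASS
 * returns false so the frame continues down the normal skb path;
 * XDP_TX leaves the buffer mapped and loops it onto a dedicated XDP
 * send queue (qidx is offset by hw.tx_queues), to be released in
 * otx2_xdp_snd_pkt_handler(); XDP_REDIRECT unmaps the buffer and, on
 * success, hands the page to the stack; XDP_DROP unmaps and frees the
 * page immediately.  In every consuming case cq->pool_ptrs is bumped
 * so the RX pool is refilled with a fresh buffer.
 */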