#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"

int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
	int hr = mlx5e_get_linear_rq_headroom(params, xsk);

	/* A linearly built SKB must fit into a single page: the headroom hr,
	 * the frame itself and the skb_shared_info tail room all share
	 * PAGE_SIZE. SKB_MAX_HEAD(hr) is the largest payload that still fits
	 * after reserving the headroom and the shared info, and
	 * MLX5E_HW2SW_MTU() converts that hardware frame size back into an
	 * MTU value.
	 */
	return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
}

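/* Queue an XDP_TX packet on the channel's XDP SQ. The buffer is either copied
 * out of an XSK pool (and DMA-mapped as a standalone frame) or transmitted
 * directly from the RQ page that already holds it. Returns true when the
 * frame was queued and its ownership passed to the SQ.
 */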
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
		    struct page *page, struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo = NULL;
	struct mlx5e_xmit_data xdptxd;
	struct mlx5e_xdp_info xdpi;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	int i;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return false;

	xdptxd.data = xdpf->data;
	xdptxd.len = xdpf->len;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		/* The xdp_buff was in the XSK UMEM and was copied into a newly
		 * allocated page by xdp_convert_buff_to_frame(), so it has to
		 * be DMA-mapped here and unmapped and returned via
		 * xdp_return_frame() on completion.
		 */

		/* Prevent double recycling of the original XSK buffer: even if
		 * the transmit below fails, the xdp_buff must not be recycled
		 * again, as conversion to an xdp_frame already released it.
		 */
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);

		xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;

		dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(sq->pdev, dma_addr)) {
			xdp_return_frame(xdpf);
			return false;
		}

		xdptxd.dma_addr = dma_addr;
		xdpi.frame.xdpf = xdpf;
		xdpi.frame.dma_addr = dma_addr;

		if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
					      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
			return false;

		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
		return true;
	}

	/* The xdp_frame returned by xdp_convert_buff_to_frame() points into
	 * the same RQ page as the original xdp_buff, so the DMA address
	 * already set up for RX can be reused; only a sync for device is
	 * needed before transmitting.
	 */

	xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
	xdpi.page.rq = rq;

	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		sinfo = xdp_get_shared_info_from_frame(xdpf);

		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];
			dma_addr_t addr;
			u32 len;

			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
				skb_frag_off(frag);
			len = skb_frag_size(frag);
			dma_sync_single_for_device(sq->pdev, addr, len,
						   DMA_TO_DEVICE);
		}
	}

	xdptxd.dma_addr = dma_addr;

	if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
				      mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
		return false;

	xdpi.page.page = page;
	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			xdpi.page.page = skb_frag_page(frag);
			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
		}
	}

	return true;
}

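/* Run the XDP program on a received packet. Returns true if the packet was
 * consumed by XDP (transmitted, redirected or dropped) and false if it should
 * continue up the regular RX path.
 */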
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
		      struct bpf_prog *prog, struct xdp_buff *xdp)
{
	u32 act;
	int err;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_TX:
		if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
		return true;
	case XDP_REDIRECT:
		/* On success, ownership of the buffer passes to the redirect
		 * target, so non-XSK pages must be DMA-unmapped below.
		 */
		err = xdp_do_redirect(rq->netdev, xdp, prog);
		if (unlikely(err))
			goto xdp_abort;
		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
		if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
			mlx5e_page_dma_unmap(rq, page);
		rq->stats->xdp_redirect++;
		return true;
	default:
		bpf_warn_invalid_xdp_action(rq->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
xdp_abort:
		trace_xdp_exception(rq->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		rq->stats->xdp_drop++;
		return true;
	}
}

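/* Return the producer index for a WQE spanning @size WQEBBs. If the WQE would
 * wrap around the end of the work queue, pad the remaining slots with NOPs so
 * the WQE always occupies contiguous WQEBBs.
 */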
static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi, contig_wqebbs;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs < size)) {
		struct mlx5e_xdp_wqe_info *wi, *edge_wi;

		wi = &sq->db.wqe_info[pi];
		edge_wi = wi + contig_wqebbs;
		/* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
		for (; wi < edge_wi; wi++) {
			*wi = (struct mlx5e_xdp_wqe_info) {
				.num_wqebbs = 1,
				.num_pkts = 0,
			};
			mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		}
		sq->stats->nops += contig_wqebbs;

		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	return pi;
}

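/* Open a new enhanced multi-packet WQE (MPWQE) session: reserve room for the
 * largest MPWQE the SQ supports and initialize the session state to which
 * subsequent packets append their data segments.
 */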
static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
	};

	stats->mpwqe++;
}

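/* Close the current MPWQE session: fill in the control segment, record the
 * WQE size and packet count for completion handling, and mark the control
 * segment as the next doorbell target.
 */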
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
	u16 ds_count = session->ds_count;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];

	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
	wi->num_pkts = session->pkt_count;

	sq->pc += wi->num_wqebbs;

	sq->doorbell_cseg = cseg;

	session->wqe = NULL;
}

enum {
	MLX5E_XDP_CHECK_OK = 1,
	MLX5E_XDP_CHECK_START_MPWQE = 2,
};

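/* Check whether the SQ can accept another packet in MPWQE mode. Returns
 * MLX5E_XDP_CHECK_OK if a session is already open, MLX5E_XDP_CHECK_START_MPWQE
 * if a new session must be started first, or -EBUSY if the SQ is full.
 */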
INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
	if (unlikely(!sq->mpwqe.wqe)) {
		if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
						     sq->stop_room))) {
			/* SQ is full, ring the doorbell for queued WQEs. */
			mlx5e_xmit_xdp_doorbell(sq);
			sq->stats->full++;
			return -EBUSY;
		}

		return MLX5E_XDP_CHECK_START_MPWQE;
	}

	return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
		     struct skb_shared_info *sinfo, int check_result);

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
			   struct skb_shared_info *sinfo, int check_result)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;

	if (unlikely(sinfo)) {
		/* MPWQE is enabled, but a multi-buffer packet is queued for
		 * transmission. MPWQE can't send fragmented packets, so close
		 * the current session and fall back to a regular WQE.
		 */
		if (unlikely(sq->mpwqe.wqe))
			mlx5e_xdp_mpwqe_complete(sq);
		return mlx5e_xmit_xdp_frame(sq, xdptxd, sinfo, 0);
	}

	if (unlikely(xdptxd->len > sq->hw_mtu)) {
		stats->err++;
		return false;
	}

	if (!check_result)
		check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq);
	if (unlikely(check_result < 0))
		return false;

	if (check_result == MLX5E_XDP_CHECK_START_MPWQE) {
		/* Start the session when nothing can fail, so it's guaranteed
		 * that if there is an active session, it has at least one dseg,
		 * and it's safe to complete it at any time.
		 */
		mlx5e_xdp_mpwqe_session_start(sq);
	}

	mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);

	if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
		mlx5e_xdp_mpwqe_complete(sq);

	stats->xmit++;
	return true;
}

static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) {
		/* SQ is full, ring the doorbell for queued WQEs. */
		mlx5e_xmit_xdp_doorbell(sq);
		sq->stats->full++;
		return -EBUSY;
	}

	return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
{
	return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1);
}

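/* Post a single XDP frame as a regular SEND WQE. Depending on the SQ inline
 * mode, the first MLX5E_XDP_MIN_INLINE bytes are copied inline into the WQE;
 * the remainder, and any fragments in multi-buffer mode, are attached as data
 * segments.
 */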
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
		     struct skb_shared_info *sinfo, int check_result)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5e_tx_wqe *wqe;

	dma_addr_t dma_addr = xdptxd->dma_addr;
	u32 dma_len = xdptxd->len;
	u16 ds_cnt, inline_hdr_sz;
	u8 num_wqebbs = 1;
	int num_frags = 0;
	u16 pi;

	struct mlx5e_xdpsq_stats *stats = sq->stats;

	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
		stats->err++;
		return false;
	}

	ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
		ds_cnt++;

	/* check_result must be 0 if sinfo is passed. */
	if (!check_result) {
		int stop_room = 1;

		if (unlikely(sinfo)) {
			ds_cnt += sinfo->nr_frags;
			num_frags = sinfo->nr_frags;
			num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
			/* A multi-buffer packet may need more than one WQEBB,
			 * so reserve stop room for the whole WQE.
			 */
			stop_room = MLX5E_STOP_ROOM(num_wqebbs);
		}

		check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room);
	}
	if (unlikely(check_result < 0))
		return false;

	pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs);
	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	net_prefetchw(wqe);

	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

	inline_hdr_sz = 0;

	/* Copy the inline part if required by the SQ inline mode. */
	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
		memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
		       MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
		dma_len -= MLX5E_XDP_MIN_INLINE;
		dma_addr += MLX5E_XDP_MIN_INLINE;
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		dseg++;
	}

	/* Write the DMA part. */
	dseg->addr = cpu_to_be64(dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

	if (unlikely(test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state))) {
		u8 num_pkts = 1 + num_frags;
		int i;

		memset(&cseg->trailer, 0, sizeof(cseg->trailer));
		memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));

		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
		dseg->lkey = sq->mkey_be;

		for (i = 0; i < num_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];
			dma_addr_t addr;

			addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
				skb_frag_off(frag);

			dseg++;
			dseg->addr = cpu_to_be64(addr);
			dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
			dseg->lkey = sq->mkey_be;
		}

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

		sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
			.num_wqebbs = num_wqebbs,
			.num_pkts = num_pkts,
		};

		sq->pc += num_wqebbs;
	} else {
		cseg->fm_ce_se = 0;

		sq->pc++;
	}

	sq->doorbell_cseg = cseg;

	stats->xmit++;
	return true;
}

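/* Release the resources of one completed XDP WQE: unmap and free standalone
 * xdp_frames, recycle RQ pages used for XDP_TX, and count AF_XDP frames so the
 * caller can report the completions to the XSK pool.
 */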
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
				  struct mlx5e_xdp_wqe_info *wi,
				  u32 *xsk_frames,
				  bool recycle,
				  struct xdp_frame_bulk *bq)
{
	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
	u16 i;

	for (i = 0; i < wi->num_pkts; i++) {
		struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);

		switch (xdpi.mode) {
		case MLX5E_XDP_XMIT_MODE_FRAME:
			/* XDP_TX from the XSK RQ and XDP_REDIRECT */
			dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
					 xdpi.frame.xdpf->len, DMA_TO_DEVICE);
			xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
			break;
		case MLX5E_XDP_XMIT_MODE_PAGE:
			/* XDP_TX from the regular RQ */
			mlx5e_page_release_dynamic(xdpi.page.rq, xdpi.page.page, recycle);
			break;
		case MLX5E_XDP_XMIT_MODE_XSK:
			/* AF_XDP send */
			(*xsk_frames)++;
			break;
		default:
			WARN_ON_ONCE(true);
		}
	}
}

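/* Poll the XDP SQ completion queue: free the descriptors of completed WQEs,
 * flush bulk-freed frames and report AF_XDP completions to the XSK pool.
 * Returns true if the full polling budget was used and more work may remain.
 */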
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
	struct xdp_frame_bulk bq;
	struct mlx5e_xdpsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 xsk_frames = 0;
	u16 sqcc;
	int i;

	xdp_frame_bulk_init(&bq);

	sq = container_of(cq, struct mlx5e_xdpsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	i = 0;
	do {
		struct mlx5e_xdp_wqe_info *wi;
		u16 wqe_counter, ci;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);
			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq);
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
			netdev_WARN_ONCE(sq->channel->netdev,
					 "Bad OP in XDPSQ CQE: 0x%x\n",
					 get_cqe_opcode(cqe));
			mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
					     (struct mlx5_err_cqe *)cqe);
			mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
		}
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	xdp_flush_frame_bulk(&bq);

	if (xsk_frames)
		xsk_tx_completed(sq->xsk_pool, xsk_frames);

	sq->stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->cc = sqcc;
	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

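/* Free all descriptors still outstanding on an XDP SQ, used when the SQ is
 * being deactivated and no more completions will arrive.
 */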
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
	struct xdp_frame_bulk bq;
	u32 xsk_frames = 0;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock();

	while (sq->cc != sq->pc) {
		struct mlx5e_xdp_wqe_info *wi;
		u16 ci;

		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
		wi = &sq->db.wqe_info[ci];

		sq->cc += wi->num_wqebbs;

		mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq);
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

	if (xsk_frames)
		xsk_tx_completed(sq->xsk_pool, xsk_frames);
}

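/* ndo_xdp_xmit callback: transmit a batch of XDP frames redirected to this
 * device on the XDP SQ of the channel matching the current CPU. Returns the
 * number of frames successfully queued.
 */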
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		   u32 flags)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_xdpsq *sq;
	int nxmit = 0;
	int sq_num;
	int i;

	/* This flag is sufficient; no need to test the internal SQ state. */
	if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	sq_num = smp_processor_id();

	if (unlikely(sq_num >= priv->channels.num))
		return -ENXIO;

	sq = &priv->channels.c[sq_num]->xdpsq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct mlx5e_xmit_data xdptxd;
		struct mlx5e_xdp_info xdpi;
		bool ret;

		xdptxd.data = xdpf->data;
		xdptxd.len = xdpf->len;
		xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
						 xdptxd.len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
			break;

		xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
		xdpi.frame.xdpf = xdpf;
		xdpi.frame.dma_addr = xdptxd.dma_addr;

		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
				      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0);
		if (unlikely(!ret)) {
			dma_unmap_single(sq->pdev, xdptxd.dma_addr,
					 xdptxd.len, DMA_TO_DEVICE);
			break;
		}
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		if (sq->mpwqe.wqe)
			mlx5e_xdp_mpwqe_complete(sq);
		mlx5e_xmit_xdp_doorbell(sq);
	}

	return nxmit;
}

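/* Called at the end of RX polling: close any open MPWQE session, ring the XDP
 * SQ doorbell for XDP_TX frames, and flush pending XDP redirects.
 */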
void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
{
	struct mlx5e_xdpsq *xdpsq = rq->xdpsq;

	if (xdpsq->mpwqe.wqe)
		mlx5e_xdp_mpwqe_complete(xdpsq);

	mlx5e_xmit_xdp_doorbell(xdpsq);

	if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
		xdp_do_flush_map();
		__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
	}
}

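/* Select the transmit and check function pointers for the XDP SQ, depending on
 * whether enhanced multi-packet WQEs (MPWQE) are in use.
 */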
void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
{
	sq->xmit_xdp_frame_check = is_mpw ?
		mlx5e_xmit_xdp_frame_check_mpwqe : mlx5e_xmit_xdp_frame_check;
	sq->xmit_xdp_frame = is_mpw ?
		mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}