/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef _MLX5_IB_WR_H
#define _MLX5_IB_WR_H

#include "mlx5_ib.h"

enum {
    MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

struct mlx5_wqe_eth_pad {
    u8 rsvd0[16];
};

/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first address following the end of the
 * fragment or the SQ. Accordingly, WQE construction, which repeatedly
 * advances the write pointer, only needs to check whether that pointer
 * has reached an edge.
 *
 * @sq: SQ buffer.
 * @idx: Stride index in the SQ buffer.
 *
 * Return:
 *  The new edge.
 */
static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
    void *fragment_end;

    fragment_end = mlx5_frag_buf_get_wqe
        (&sq->fbc,
         mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

    return fragment_end + MLX5_SEND_WQE_BB;
}
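
/*
 * Example (illustrative, not part of the driver): assuming 4K fragments
 * and MLX5_SEND_WQE_BB == 64 bytes, each fragment holds 64 strides, so
 * for any idx in 0..63 the last contiguous stride is 63 and
 *
 *     get_sq_edge(sq, 10)
 *
 * returns the address of stride 63 plus MLX5_SEND_WQE_BB, i.e. the first
 * byte after fragment 0. A WQE writer compares its running pointer with
 * this edge to know when it must re-resolve the buffer address.
 */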

/* handle_post_send_edge - Check whether we have reached the SQ edge. If so,
 * advance to the next nearby edge and get a new address translation for the
 * current WQE position.
 * @sq: SQ buffer.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @cur_edge: Updated current edge.
 */
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
                                         u32 wqe_sz, void **cur_edge)
{
    u32 idx;

    if (likely(*seg != *cur_edge))
        return;

    /* wqe_sz is in 16B units; >> 2 converts it to 64B basic-block strides. */
    idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
    *cur_edge = get_sq_edge(sq, idx);

    *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}

/* mlx5r_memcpy_send_wqe - copy data from src to WQE and update the relevant
 * WQ's pointers. At the end @seg is aligned to 16B regardless of the copied
 * size.
 * @sq: SQ buffer.
 * @cur_edge: Updated current edge.
 * @seg: Current WQE position (16B aligned).
 * @wqe_sz: Total current WQE size [16B].
 * @src: Pointer to copy from.
 * @n: Number of bytes to copy.
 */
static inline void mlx5r_memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
                                         void **seg, u32 *wqe_sz,
                                         const void *src, size_t n)
{
    while (likely(n)) {
        size_t leftlen = *cur_edge - *seg;
        size_t copysz = min_t(size_t, leftlen, n);
        size_t stride;

        memcpy(*seg, src, copysz);

        n -= copysz;
        src += copysz;
        /* On the final chunk, round the stride up so *seg stays 16B aligned. */
        stride = !n ? ALIGN(copysz, 16) : copysz;
        *seg += stride;
        *wqe_sz += stride >> 4;
        handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
    }
}
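
/*
 * Example (illustrative sketch, not taken from the driver): copying a
 * caller-provided buffer into the WQE under construction. 'seg', 'size'
 * and 'cur_edge' are assumed to track the current WQE as set up by
 * mlx5r_begin_wqe() below; 'payload' and 'payload_len' are hypothetical.
 *
 *     mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size,
 *                           payload, payload_len);
 *
 * Afterwards 'seg' is 16B aligned again, 'size' has grown by the copied
 * length rounded up to 16B units, and 'cur_edge' was refreshed whenever
 * the copy crossed a fragment boundary.
 */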

int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                    struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx,
                    int *size, void **cur_edge, int nreq, __be32 general_id,
                    bool send_signaled, bool solicited);
void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
                      void *seg, u8 size, void *cur_edge, unsigned int idx,
                      u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
                   struct mlx5_wqe_ctrl_seg *ctrl);
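
/*
 * Typical flow for posting a single WQE with the helpers above
 * (illustrative sketch, loosely modeled on the driver's UMR path;
 * locking, error handling and the general_id/fence/opcode/wr_id values
 * are simplified and assumed to be provided by the caller):
 *
 *     struct mlx5_wqe_ctrl_seg *ctrl;
 *     void *seg, *cur_edge;
 *     unsigned int idx;
 *     int size, err;
 *
 *     err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge, 0,
 *                           general_id, true, false);
 *     if (err)
 *             return err;
 *
 *     ... build the WQE segments at 'seg', using handle_post_send_edge()
 *         and mlx5r_memcpy_send_wqe() so fragment edges are respected ...
 *
 *     mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr_id, 0,
 *                      fence, opcode);
 *     mlx5r_ring_db(qp, 1, ctrl);
 */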
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                      const struct ib_recv_wr **bad_wr, bool drain);

static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
                                            const struct ib_send_wr *wr,
                                            const struct ib_send_wr **bad_wr)
{
    return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
                                          const struct ib_send_wr *wr,
                                          const struct ib_send_wr **bad_wr)
{
    return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}

static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
                                            const struct ib_recv_wr *wr,
                                            const struct ib_recv_wr **bad_wr)
{
    return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
                                          const struct ib_recv_wr *wr,
                                          const struct ib_recv_wr **bad_wr)
{
    return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}
#endif /* _MLX5_IB_WR_H */