0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 #ifndef MLX5_CORE_CQ_H
0034 #define MLX5_CORE_CQ_H
0035
0036 #include <linux/mlx5/driver.h>
0037 #include <linux/refcount.h>
0038
/*
 * Core completion queue (CQ) context shared between the mlx5 core driver
 * and its users. Lifetime is reference-counted: the final mlx5_cq_put()
 * completes @free, on which the teardown path can wait.
 */
struct mlx5_core_cq {
	u32 cqn;		/* CQ number; written into the arm doorbell */
	int cqe_sz;		/* CQE size in bytes — see cqe_sz_to_mlx_sz() */
	__be32 *set_ci_db;	/* doorbell record for the consumer index */
	__be32 *arm_db;		/* doorbell record written by mlx5_cq_arm() */
	struct mlx5_uars_page *uar;
	refcount_t refcount;	/* see mlx5_cq_hold()/mlx5_cq_put() */
	struct completion free;	/* completed when refcount drops to zero */
	unsigned vector;	/* completion vector index — TODO confirm */
	unsigned int irqn;
	void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
	void (*event) (struct mlx5_core_cq *, enum mlx5_event);
	u32 cons_index;		/* consumer index; low 24 bits are published */
	unsigned arm_sn;	/* arm sequence number; low 2 bits used by arm */
	struct mlx5_rsc_debug *dbg;
	int pid;
	/* Deferred-completion context, presumably run from a tasklet —
	 * NOTE(review): list/priv ownership not visible here; confirm. */
	struct {
		struct list_head list;
		void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
		void *priv;
	} tasklet_ctx;
	int reset_notify_added;
	struct list_head reset_notify;
	struct mlx5_eq_comp *eq;
	u16 uid;		/* user index — TODO confirm semantics */
};
0065
0066
/* Syndrome codes carried by completion-with-error CQEs. */
enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
};
0082
/* CQE ownership-bit mask and CQE opcode values. */
enum {
	MLX5_CQE_OWNER_MASK	= 1,
	MLX5_CQE_REQ		= 0,
	MLX5_CQE_RESP_WR_IMM	= 1,
	MLX5_CQE_RESP_SEND	= 2,
	MLX5_CQE_RESP_SEND_IMM	= 3,
	MLX5_CQE_RESP_SEND_INV	= 4,
	MLX5_CQE_RESIZE_CQ	= 5,
	MLX5_CQE_SIG_ERR	= 12,
	MLX5_CQE_REQ_ERR	= 13,
	MLX5_CQE_RESP_ERR	= 14,
	MLX5_CQE_INVALID	= 15,
};
0096
/* Field-select bits for CQ moderation modify — TODO confirm against PRM. */
enum {
	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,
	MLX5_CQ_MODIFY_COUNT	= 1 << 1,
	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,
};
0102
/* MODIFY_CQ opmod value for resize, and its field-select mask bits. */
enum {
	MLX5_CQ_OPMOD_RESIZE		= 1,
	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
};
0109
/*
 * Parameters for a CQ modify operation; @type selects which union member
 * is valid. The moder/mapping members are currently empty placeholders.
 */
struct mlx5_cq_modify_params {
	int type;
	union {
		struct {
			u32 page_offset;
			u8 log_cq_size;	/* log2 of the new CQ size */
		} resize;

		struct {
		} moder;

		struct {
		} mapping;
	} params;
};
0125
/* CQE stride encodings: 64B, 128B, and 128B with padding enabled. */
enum {
	CQE_STRIDE_64		= 0,
	CQE_STRIDE_128		= 1,
	CQE_STRIDE_128_PAD	= 2,
};
0131
/* Largest encodable values: all-ones of the cqc field's bit width. */
#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
#define MLX5_MAX_CQ_COUNT  (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1)
0134
0135 static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
0136 {
0137 return padding_128_en ? CQE_STRIDE_128_PAD :
0138 size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
0139 }
0140
0141 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
0142 {
0143 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
0144 }
0145
/*
 * Arm commands for mlx5_cq_arm(); OR'ed into the doorbell value above the
 * 24-bit consumer index (bit 24 selects solicited-only notification).
 */
enum {
	MLX5_CQ_DB_REQ_NOT_SOL	= 1 << 24,
	MLX5_CQ_DB_REQ_NOT	= 0 << 24
};
0150
/*
 * Arm @cq for the next completion event: record the arm state (sequence
 * number, command and consumer index) in the doorbell record in host
 * memory, then ring the hardware doorbell through the UAR page.
 */
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
			       void __iomem *uar_page,
			       u32 cons_index)
{
	__be32 doorbell[2];
	u32 sn;
	u32 ci;

	sn = cq->arm_sn & 3;		/* 2-bit arm sequence number */
	ci = cons_index & 0xffffff;	/* 24-bit consumer index */

	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
	doorbell[1] = cpu_to_be32(cq->cqn);

	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL);
}
0174
/* Take a reference on @cq; paired with mlx5_cq_put(). */
static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
{
	refcount_inc(&cq->refcount);
}
0179
0180 static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
0181 {
0182 if (refcount_dec_and_test(&cq->refcount))
0183 complete(&cq->free);
0184 }
0185
/*
 * CQ command interface, implemented in the mlx5 core. @in/@out are
 * firmware command mailboxes — NOTE(review): exact create/core_create
 * split not visible here; confirm against the core implementation.
 */
int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		   u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen);
/* Update event moderation (period/max count) for @cq. */
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq, u16 cq_period,
				   u16 cq_max_count);
/*
 * Hex-dump the raw error CQE to the kernel log at warning level,
 * 16 bytes per line with offsets. @dev is currently unused.
 */
static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
				     struct mlx5_err_cqe *err_cqe)
{
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
		       sizeof(*err_cqe), false);
}
/* Register/unregister @cq with the driver's debug facility —
 * NOTE(review): presumed debugfs from naming; confirm in core. */
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
0206
0207 #endif