0001
0002
0003 #include <linux/etherdevice.h>
0004
0005 #include "osdep.h"
0006 #include "hmc.h"
0007 #include "defs.h"
0008 #include "type.h"
0009 #include "protos.h"
0010 #include "uda.h"
0011 #include "uda_d.h"
0012
0013
0014
0015
0016
0017
0018
0019
/**
 * irdma_sc_access_ah() - Create, modify or delete an address handle
 * @cqp: control QP struct
 * @info: address handle (AH) information
 * @op: CQP operation code selecting create/modify/destroy
 * @scratch: u64 saved to be used during cqp completion
 *
 * Builds a MANAGE_AH CQP SQ WQE from @info and posts it to the CQP.
 *
 * Return: 0 on success, -ENOMEM if no SQ WQE is available.
 */
int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
		       u32 op, u64 scratch)
{
	__le64 *wqe;
	u64 qw1, qw2;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	/* QW0: destination MAC, shifted above the low 16 bits of the word */
	set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
	qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
	      FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);

	/* PD index is split: low bits in QW1 (PDINDEXLO), upper bits in QW2 */
	qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
	      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);

	if (!info->ipv4_valid) {
		/* IPv6: all four 32-bit words of dest/src addresses are used */
		set_64bit_val(wqe, 40,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));

		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
	} else {
		/* IPv4: only word [0] of each address, placed in ADDR3 */
		set_64bit_val(wqe, 32,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));

		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
	}

	set_64bit_val(wqe, 8, qw1);
	set_64bit_val(wqe, 16, qw2);

	/* make sure WQE contents are written before the valid bit below */
	dma_wmb();

	/* QW3 carries WQEVALID (cqp->polarity); written last so hardware
	 * never observes a partially built WQE
	 */
	set_64bit_val(
		wqe, 24,
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
		FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));

	print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}
0082
0083
0084
0085
0086
0087 static void irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
0088 {
0089 struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
0090 u8 idx = 0;
0091 u8 ctx_idx = 0;
0092
0093 memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));
0094
0095 for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
0096 entry_info = &info->mg_ctx_info[idx];
0097 if (entry_info->valid_entry) {
0098 set_64bit_val((__le64 *)info->dma_mem_mc.va,
0099 ctx_idx * sizeof(u64),
0100 FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
0101 FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
0102 FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
0103 ctx_idx++;
0104 }
0105 }
0106 }
0107
0108
0109
0110
0111
0112
0113
0114
/**
 * irdma_access_mcast_grp() - create/modify/destroy a multicast group
 * @cqp: control QP struct
 * @info: multicast group context info
 * @op: CQP operation code to perform on the group
 * @scratch: u64 saved to be used during cqp completion
 *
 * Packs the group context into its DMA buffer, builds a MANAGE_MCG
 * CQP SQ WQE referencing that buffer and posts it to the CQP.
 *
 * Return: 0 on success, -EINVAL if @info->mg_id is out of range,
 * -ENOMEM if no SQ WQE is available.
 */
int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
			   struct irdma_mcast_grp_info *info, u32 op,
			   u64 scratch)
{
	__le64 *wqe;

	if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
		return -EINVAL;
	}

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe) {
		ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
		return -ENOMEM;
	}

	/* serialize valid entries into the DMA buffer referenced by QW4 */
	irdma_create_mg_ctx(info);

	set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
	set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));

	if (!info->ipv4_valid) {
		/* IPv6: all four 32-bit words of the destination address */
		set_64bit_val(wqe, 56,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
	} else {
		/* IPv4: only word [0], placed in ADDR3 */
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
	}

	/* make sure WQE contents are written before the valid bit below */
	dma_wmb();

	/* QW3 carries WQEVALID (cqp->polarity); written last so hardware
	 * never observes a partially built WQE
	 */
	set_64bit_val(wqe, 24,
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
		      FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));

	print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
			     8, info->dma_mem_mc.va,
			     IRDMA_MAX_MGS_PER_CTX * 8, false);
	irdma_sc_cqp_post_sq(cqp);

	return 0;
}
0172
0173
0174
0175
0176
0177
0178 static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
0179 struct irdma_mcast_grp_ctx_entry_info *entry2)
0180 {
0181 if (entry1->dest_port == entry2->dest_port &&
0182 entry1->qp_id == entry2->qp_id)
0183 return true;
0184
0185 return false;
0186 }
0187
0188
0189
0190
0191
0192
0193 int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
0194 struct irdma_mcast_grp_ctx_entry_info *mg)
0195 {
0196 u32 idx;
0197 bool free_entry_found = false;
0198 u32 free_entry_idx = 0;
0199
0200
0201 for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
0202 if (ctx->mg_ctx_info[idx].valid_entry) {
0203 if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
0204 ctx->mg_ctx_info[idx].use_cnt++;
0205 return 0;
0206 }
0207 continue;
0208 }
0209 if (!free_entry_found) {
0210 free_entry_found = true;
0211 free_entry_idx = idx;
0212 }
0213 }
0214
0215 if (free_entry_found) {
0216 ctx->mg_ctx_info[free_entry_idx] = *mg;
0217 ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
0218 ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
0219 ctx->no_of_mgs++;
0220 return 0;
0221 }
0222
0223 return -ENOMEM;
0224 }
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234 int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
0235 struct irdma_mcast_grp_ctx_entry_info *mg)
0236 {
0237 u32 idx;
0238
0239
0240 for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
0241 if (!ctx->mg_ctx_info[idx].valid_entry)
0242 continue;
0243
0244 if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
0245 ctx->mg_ctx_info[idx].use_cnt--;
0246
0247 if (!ctx->mg_ctx_info[idx].use_cnt) {
0248 ctx->mg_ctx_info[idx].valid_entry = false;
0249 ctx->no_of_mgs--;
0250
0251 if (idx != ctx->no_of_mgs &&
0252 ctx->no_of_mgs > 0) {
0253 memcpy(&ctx->mg_ctx_info[idx],
0254 &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
0255 sizeof(ctx->mg_ctx_info[idx]));
0256 ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
0257 }
0258 }
0259
0260 return 0;
0261 }
0262 }
0263
0264 return -EINVAL;
0265 }