#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ehea_phyp.h"

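/*
 * Compute the queue-size "order" used when requesting queues from the
 * hypervisor: the returned value r is the smallest value for which
 * (2^(r + 1) - 1) >= queue_entries.
 */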
static inline u16 get_order_of_qentries(u16 queue_entries)
{
        u8 ld = 1;
        while (((1U << ld) - 1) < queue_entries)
                ld++;
        return ld - 1;
}

/* Resource type encodings for H_ALLOC_HEA_RESOURCE */
#define H_ALL_RES_TYPE_QP       1
#define H_ALL_RES_TYPE_CQ       2
#define H_ALL_RES_TYPE_EQ       3
#define H_ALL_RES_TYPE_MR       5
#define H_ALL_RES_TYPE_MW       6

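/*
 * Wrapper around plpar_hcall_norets() that retries up to five times while
 * the hypervisor reports a "long busy" condition, sleeping for the hinted
 * interval between attempts, and logs any other failing return code.
 */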
static long ehea_plpar_hcall_norets(unsigned long opcode,
                                    unsigned long arg1,
                                    unsigned long arg2,
                                    unsigned long arg3,
                                    unsigned long arg4,
                                    unsigned long arg5,
                                    unsigned long arg6,
                                    unsigned long arg7)
{
        long ret;
        int i, sleep_msecs;

        for (i = 0; i < 5; i++) {
                ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
                                         arg5, arg6, arg7);

                if (H_IS_LONG_BUSY(ret)) {
                        sleep_msecs = get_longbusy_msecs(ret);
                        msleep_interruptible(sleep_msecs);
                        continue;
                }

                if (ret < H_SUCCESS)
                        pr_err("opcode=%lx ret=%lx"
                               " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
                               " arg5=%lx arg6=%lx arg7=%lx\n",
                               opcode, ret,
                               arg1, arg2, arg3, arg4, arg5, arg6, arg7);

                return ret;
        }

        return H_BUSY;
}

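/*
 * Nine-argument/nine-output variant of the retry wrapper above.  Failures
 * are logged together with all input and output registers, except for
 * H_AUTHORITY returned by H_MODIFY_HEA_PORT on attributes the partition may
 * legitimately not be authorized to change (CB4 jumbo/speed, CB7 DUCQPN).
 */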
static long ehea_plpar_hcall9(unsigned long opcode,
                              unsigned long *outs,
                              unsigned long arg1,
                              unsigned long arg2,
                              unsigned long arg3,
                              unsigned long arg4,
                              unsigned long arg5,
                              unsigned long arg6,
                              unsigned long arg7,
                              unsigned long arg8,
                              unsigned long arg9)
{
        long ret;
        int i, sleep_msecs;
        u8 cb_cat;

        for (i = 0; i < 5; i++) {
                ret = plpar_hcall9(opcode, outs,
                                   arg1, arg2, arg3, arg4, arg5,
                                   arg6, arg7, arg8, arg9);

                if (H_IS_LONG_BUSY(ret)) {
                        sleep_msecs = get_longbusy_msecs(ret);
                        msleep_interruptible(sleep_msecs);
                        continue;
                }

                cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2);

                if ((ret < H_SUCCESS) && !(((ret == H_AUTHORITY)
                    && (opcode == H_MODIFY_HEA_PORT))
                    && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
                    || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
                    && (arg3 == H_PORT_CB7_DUCQPN)))))
                        pr_err("opcode=%lx ret=%lx"
                               " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
                               " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
                               " arg9=%lx"
                               " out1=%lx out2=%lx out3=%lx out4=%lx"
                               " out5=%lx out6=%lx out7=%lx out8=%lx"
                               " out9=%lx\n",
                               opcode, ret,
                               arg1, arg2, arg3, arg4, arg5,
                               arg6, arg7, arg8, arg9,
                               outs[0], outs[1], outs[2], outs[3], outs[4],
                               outs[5], outs[6], outs[7], outs[8]);

                return ret;
        }

        return H_BUSY;
}

u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
                         const u64 qp_handle, const u64 sel_mask, void *cb_addr)
{
        return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
                                       adapter_handle,
                                       qp_category,
                                       qp_handle,
                                       sel_mask,
                                       __pa(cb_addr),
                                       0, 0);
}

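/*
 * Bit-field layouts for the H_ALLOC_HEA_RESOURCE QP call used by
 * ehea_h_alloc_resource_qp() below: the first groups describe the input
 * words (allocate controls, token/protection domain, maximum queue and SGE
 * sizes, immediate data length/port number, RQ2/RQ3 thresholds); the
 * remaining groups describe the output words (actual queue and SGE counts,
 * queue sizes in pages, LIOBNs) returned by the hcall.
 */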
#define H_ALL_RES_QP_EQPO         EHEA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_QPP          EHEA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_RQR          EHEA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_EQEG         EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_QP_LL_QP        EHEA_BMASK_IBM(17, 17)
#define H_ALL_RES_QP_DMA128       EHEA_BMASK_IBM(19, 19)
#define H_ALL_RES_QP_HSM          EHEA_BMASK_IBM(20, 21)
#define H_ALL_RES_QP_SIGT         EHEA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_TENURE       EHEA_BMASK_IBM(48, 55)
#define H_ALL_RES_QP_RES_TYP      EHEA_BMASK_IBM(56, 63)

#define H_ALL_RES_QP_TOKEN        EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_PD           EHEA_BMASK_IBM(32, 63)

#define H_ALL_RES_QP_MAX_SWQE     EHEA_BMASK_IBM(4, 7)
#define H_ALL_RES_QP_MAX_R1WQE    EHEA_BMASK_IBM(12, 15)
#define H_ALL_RES_QP_MAX_R2WQE    EHEA_BMASK_IBM(20, 23)
#define H_ALL_RES_QP_MAX_R3WQE    EHEA_BMASK_IBM(28, 31)
#define H_ALL_RES_QP_MAX_SSGE     EHEA_BMASK_IBM(37, 39)
#define H_ALL_RES_QP_MAX_R1SGE    EHEA_BMASK_IBM(45, 47)
#define H_ALL_RES_QP_MAX_R2SGE    EHEA_BMASK_IBM(53, 55)
#define H_ALL_RES_QP_MAX_R3SGE    EHEA_BMASK_IBM(61, 63)

#define H_ALL_RES_QP_SWQE_IDL     EHEA_BMASK_IBM(0, 7)
#define H_ALL_RES_QP_PORT_NUM     EHEA_BMASK_IBM(48, 63)

#define H_ALL_RES_QP_TH_RQ2       EHEA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_TH_RQ3       EHEA_BMASK_IBM(16, 31)

#define H_ALL_RES_QP_ACT_SWQE     EHEA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_ACT_R1WQE    EHEA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_R2WQE    EHEA_BMASK_IBM(32, 47)
#define H_ALL_RES_QP_ACT_R3WQE    EHEA_BMASK_IBM(48, 63)

#define H_ALL_RES_QP_ACT_SSGE     EHEA_BMASK_IBM(0, 7)
#define H_ALL_RES_QP_ACT_R1SGE    EHEA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_R2SGE    EHEA_BMASK_IBM(16, 23)
#define H_ALL_RES_QP_ACT_R3SGE    EHEA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)

#define H_ALL_RES_QP_SIZE_SQ      EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ1     EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SIZE_RQ2     EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SIZE_RQ3     EHEA_BMASK_IBM(32, 63)

#define H_ALL_RES_QP_LIOBN_SQ     EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ1    EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_LIOBN_RQ2    EHEA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_LIOBN_RQ3    EHEA_BMASK_IBM(32, 63)

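/*
 * Allocate a queue pair via H_ALLOC_HEA_RESOURCE.  On return, init_attr is
 * updated with the values actually granted by the hypervisor (QP number,
 * actual queue lengths, number of pages per queue, LIOBNs), and on success
 * the h_epas mapping is constructed from the address returned in outs[6].
 */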
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
                             struct ehea_qp_init_attr *init_attr, const u32 pd,
                             u64 *qp_handle, struct h_epas *h_epas)
{
        u64 hret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        u64 allocate_controls =
            EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
            | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
            | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6)
            | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0)
            | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
            | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
            | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
            | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
            | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);

        u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
            | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);

        u64 max_r10_reg =
            EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
                           get_order_of_qentries(init_attr->max_nr_send_wqes))
            | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
                             get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
            | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
                             get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
            | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
                             get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
            | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
            | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
                             init_attr->wqe_size_enc_rq1)
            | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
                             init_attr->wqe_size_enc_rq2)
            | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
                             init_attr->wqe_size_enc_rq3);

        u64 r11_in =
            EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
            | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
        u64 threshold =
            EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
            | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);

        hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
                                 outs,
                                 adapter_handle,
                                 allocate_controls,
                                 init_attr->send_cq_handle,
                                 init_attr->recv_cq_handle,
                                 init_attr->aff_eq_handle,
                                 r9_reg,
                                 max_r10_reg,
                                 r11_in,
                                 threshold);

        *qp_handle = outs[0];
        init_attr->qp_nr = (u32)outs[1];

        init_attr->act_nr_send_wqes =
            (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
        init_attr->act_nr_rwqes_rq1 =
            (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
        init_attr->act_nr_rwqes_rq2 =
            (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
        init_attr->act_nr_rwqes_rq3 =
            (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);

        init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
        init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
        init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
        init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;

        init_attr->nr_sq_pages =
            (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
        init_attr->nr_rq1_pages =
            (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
        init_attr->nr_rq2_pages =
            (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
        init_attr->nr_rq3_pages =
            (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);

        init_attr->liobn_sq =
            (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
        init_attr->liobn_rq1 =
            (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
        init_attr->liobn_rq2 =
            (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
        init_attr->liobn_rq3 =
            (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);

        if (!hret)
                hcp_epas_ctor(h_epas, outs[6], outs[6]);

        return hret;
}

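/*
 * Allocate a completion queue via H_ALLOC_HEA_RESOURCE and report back the
 * CQ handle, the actual number of CQEs, the number of pages and, on
 * success, the EPAs constructed from outs[5]/outs[6].
 */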
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
                             struct ehea_cq_attr *cq_attr,
                             u64 *cq_handle, struct h_epas *epas)
{
        u64 hret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
                                 outs,
                                 adapter_handle,
                                 H_ALL_RES_TYPE_CQ,
                                 cq_attr->eq_handle,
                                 cq_attr->cq_token,
                                 cq_attr->max_nr_of_cqes,
                                 0, 0, 0, 0);

        *cq_handle = outs[0];
        cq_attr->act_nr_of_cqes = outs[3];
        cq_attr->nr_pages = outs[4];

        if (!hret)
                hcp_epas_ctor(epas, outs[5], outs[6]);

        return hret;
}

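/*
 * Bit-field layouts for the H_ALLOC_HEA_RESOURCE EQ call: the first group
 * forms the allocate controls (NEQ flag, ISN, EQE-generation inhibit,
 * resource type) and the maximum number of EQEs on input; the remaining
 * masks describe the output words returned by the hcall (LIOBN, actual EQE
 * count, interrupt source tokens).
 */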
#define H_ALL_RES_EQ_NEQ            EHEA_BMASK_IBM(0, 0)
#define H_ALL_RES_EQ_NON_NEQ_ISN    EHEA_BMASK_IBM(6, 7)
#define H_ALL_RES_EQ_INH_EQE_GEN    EHEA_BMASK_IBM(16, 16)
#define H_ALL_RES_EQ_RES_TYPE       EHEA_BMASK_IBM(56, 63)
#define H_ALL_RES_EQ_MAX_EQE        EHEA_BMASK_IBM(32, 63)

#define H_ALL_RES_EQ_LIOBN          EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_EQ_ACT_EQE        EHEA_BMASK_IBM(32, 63)
#define H_ALL_RES_EQ_ACT_PS         EHEA_BMASK_IBM(32, 63)

#define H_ALL_RES_EQ_ACT_EQ_IST_C   EHEA_BMASK_IBM(30, 31)
#define H_ALL_RES_EQ_ACT_EQ_IST_1   EHEA_BMASK_IBM(40, 63)
#define H_ALL_RES_EQ_ACT_EQ_IST_2   EHEA_BMASK_IBM(40, 63)
#define H_ALL_RES_EQ_ACT_EQ_IST_3   EHEA_BMASK_IBM(40, 63)
#define H_ALL_RES_EQ_ACT_EQ_IST_4   EHEA_BMASK_IBM(40, 63)

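/*
 * Allocate an event queue via H_ALLOC_HEA_RESOURCE and fill eq_attr with
 * the actual number of EQEs, the number of pages, and the four interrupt
 * source tokens returned by the hypervisor.
 */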
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
                             struct ehea_eq_attr *eq_attr, u64 *eq_handle)
{
        u64 hret, allocate_controls;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        allocate_controls =
            EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
            | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
            | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
            | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);

        hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
                                 outs,
                                 adapter_handle,
                                 allocate_controls,
                                 eq_attr->max_nr_of_eqes,
                                 0, 0, 0, 0, 0, 0);

        *eq_handle = outs[0];
        eq_attr->act_nr_of_eqes = outs[3];
        eq_attr->nr_pages = outs[4];
        eq_attr->ist1 = outs[5];
        eq_attr->ist2 = outs[6];
        eq_attr->ist3 = outs[7];
        eq_attr->ist4 = outs[8];

        return hret;
}

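/*
 * Modify the QP attributes selected by sel_mask via H_MODIFY_HEA_QP.  The
 * hypervisor reports back the invalid attribute identifier, the processing
 * mask, and the two 16-bit outputs stored in out_swr and out_rwr.
 */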
u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
                          const u64 qp_handle, const u64 sel_mask,
                          void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
                          u16 *out_swr, u16 *out_rwr)
{
        u64 hret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
                                 outs,
                                 adapter_handle,
                                 (u64) cat,
                                 qp_handle,
                                 sel_mask,
                                 __pa(cb_addr),
                                 0, 0, 0, 0);

        *inv_attr_id = outs[0];
        *out_swr = outs[3];
        *out_rwr = outs[4];
        *proc_mask = outs[5];

        return hret;
}

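/*
 * Register resource pages with the hypervisor via H_REGISTER_HEA_RPAGES.
 * reg_control encodes the page size and queue type for the given resource
 * handle.
 */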
u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
                          const u8 queue_type, const u64 resource_handle,
                          const u64 log_pageaddr, u64 count)
{
        u64 reg_control;

        reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
                    | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);

        return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
                                       adapter_handle,
                                       reg_control,
                                       resource_handle,
                                       log_pageaddr,
                                       count,
                                       0, 0);
}

u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
                        const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
                        struct ehea_mr *mr)
{
        u64 hret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        hret = ehea_plpar_hcall9(H_REGISTER_SMR,
                                 outs,
                                 adapter_handle,
                                 orig_mr_handle,
                                 vaddr_in,
                                 (((u64)access_ctrl) << 32ULL),
                                 pd,
                                 0, 0, 0, 0);

        mr->handle = outs[0];
        mr->lkey = (u32)outs[2];

        return hret;
}

u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
{
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
                                 outs,
                                 adapter_handle,
                                 H_DISABLE_GET_EHEA_WQE_P,
                                 qp_handle,
                                 0, 0, 0, 0, 0, 0);
}

u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
                         u64 force_bit)
{
        return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
                                       adapter_handle,
                                       res_handle,
                                       force_bit,
                                       0, 0, 0, 0);
}

u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
                             const u64 length, const u32 access_ctrl,
                             const u32 pd, u64 *mr_handle, u32 *lkey)
{
        u64 hret;
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];

        hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
                                 outs,
                                 adapter_handle,
                                 H_ALL_RES_TYPE_MR,
                                 vaddr,
                                 length,
                                 (((u64) access_ctrl) << 32ULL),
                                 pd,
                                 0, 0, 0);

        *mr_handle = outs[0];
        *lkey = (u32)outs[2];
        return hret;
}

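/*
 * Register MR pages.  Multi-page registrations must pass a page-aligned
 * address; otherwise the request is rejected with H_PARAMETER before any
 * hcall is made.
 */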
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
                             const u8 pagesize, const u8 queue_type,
                             const u64 log_pageaddr, const u64 count)
{
        if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
                pr_err("not on page boundary\n");
                return H_PARAMETER;
        }

        return ehea_h_register_rpage(adapter_handle, pagesize,
                                     queue_type, mr_handle,
                                     log_pageaddr, count);
}

u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
{
        u64 hret, cb_logaddr;

        cb_logaddr = __pa(cb_addr);

        hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
                                       adapter_handle,
                                       cb_logaddr,
                                       0, 0, 0, 0, 0);
#ifdef DEBUG
        ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
#endif
        return hret;
}

u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
                           const u8 cb_cat, const u64 select_mask,
                           void *cb_addr)
{
        u64 port_info;
        u64 cb_logaddr = __pa(cb_addr);
        u64 arr_index = 0;

        port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
                  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);

        return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
                                       adapter_handle,
                                       port_info,
                                       select_mask,
                                       arr_index,
                                       cb_logaddr,
                                       0, 0);
}

u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
                            const u8 cb_cat, const u64 select_mask,
                            void *cb_addr)
{
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];
        u64 port_info;
        u64 arr_index = 0;
        u64 cb_logaddr = __pa(cb_addr);

        port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
                  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
#ifdef DEBUG
        ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
#endif
        return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
                                 outs,
                                 adapter_handle,
                                 port_info,
                                 select_mask,
                                 arr_index,
                                 cb_logaddr,
                                 0, 0, 0, 0);
}

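/*
 * Register or deregister a broadcast/multicast MAC address for a port;
 * hcall_id selects which operation is performed.  mc_mac_addr is shifted
 * down by 16 bits before being packed into the H_REGBCMC_MACADDR field.
 */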
u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
                          const u8 reg_type, const u64 mc_mac_addr,
                          const u16 vlan_id, const u32 hcall_id)
{
        u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
        u64 mac_addr = mc_mac_addr >> 16;

        r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
        r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
        r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
        r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);

        return ehea_plpar_hcall_norets(hcall_id,
                                       adapter_handle,
                                       r5_port_num,
                                       r6_reg_type,
                                       r7_mc_mac_addr,
                                       r8_vlan_id,
                                       0, 0);
}

u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
                        const u64 event_mask)
{
        return ehea_plpar_hcall_norets(H_RESET_EVENTS,
                                       adapter_handle,
                                       neq_handle,
                                       event_mask,
                                       0, 0, 0, 0);
}

u64 ehea_h_error_data(const u64 adapter_handle, const u64 resource_handle,
                      void *rblock)
{
        return ehea_plpar_hcall_norets(H_ERROR_DATA,
                                       adapter_handle,
                                       resource_handle,
                                       __pa(rblock),
                                       0, 0, 0, 0);
}