#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int reply_failure(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
}

static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;

	agent = ibp->rvp.send_agent;
	if (!agent)
		return;

	/* o14-3.2.1: traps are only sent while the link is active */
	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	/* o14-2: rate-limit trap sends */
	if (ibp->rvp.trap_timeout &&
	    time_before(jiffies, ibp->rvp.trap_timeout))
		return;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
				      IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->rvp.tid++;
	smp->tid = cpu_to_be64(ibp->rvp.tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;

	memcpy(smp->data, data, len);

	spin_lock_irqsave(&ibp->rvp.lock, flags);
	if (!ibp->rvp.sm_ah) {
		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;

			ah = qib_create_qp0_ah(ibp, (u16)ibp->rvp.sm_lid);
			if (IS_ERR(ah))
				ret = PTR_ERR(ah);
			else {
				send_buf->ah = ah;
				ibp->rvp.sm_ah = ibah_to_rvtah(ah);
				ret = 0;
			}
		} else
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->rvp.sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. * 2^subnet_timeout, converted to usec */
		timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
		ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->rvp.trap_timeout = 0;
	}
}
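
/*
 * Worked example of the trap rate limit above (illustrative only, not
 * behavior the driver depends on): with subnet_timeout == 18, the
 * back-off is 4096 * 2^18 / 1000 ~= 1073741 usec, i.e. roughly 1.07
 * seconds between traps; with subnet_timeout == 0 it degenerates to
 * 4096 / 1000 = 4 usec, which usecs_to_jiffies() rounds up to one
 * jiffy.
 */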

/*
 * Send a bad P_Key trap (ch. 14.3.8).
 */
void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
		  u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
	struct ib_mad_notice_attr data;

	ibp->rvp.n_pkt_drops++;
	ibp->rvp.pkey_violations++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_PKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_257_258.lid1 = lid1;
	data.details.ntc_257_258.lid2 = lid2;
	data.details.ntc_257_258.key = cpu_to_be32(key);
	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a bad M_Key trap (ch. 14.3.9).
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
	struct ib_mad_notice_attr data;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_256.lid = data.issuer_lid;
	data.details.ntc_256.method = smp->method;
	data.details.ntc_256.attr_id = smp->attr_id;
	data.details.ntc_256.attr_mod = smp->attr_mod;
	data.details.ntc_256.mkey = smp->mkey;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		u8 hop_cnt;

		data.details.ntc_256.dr_slid = smp->dr_slid;
		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		hop_cnt = smp->hop_cnt;
		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
			data.details.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
		}
		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
		       hop_cnt);
	}

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
{
	struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = dd_from_dev(ibdev);
	struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.new_cap_mask =
		cpu_to_be32(ibp->rvp.port_cap_flags);
	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_145.lid = data.issuer_lid;
	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

	qib_send_trap(ibp, &data, sizeof(data));
}

/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof(data.details));
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.local_changes = 1;
	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

	qib_send_trap(ibp, &data, sizeof(data));
}

static int subn_get_nodedescription(struct ib_smp *smp,
				    struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}

static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 majrev, minrev;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = dd->pport[pidx].guid;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;	/* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_qib_sys_image_guid;
	nip->node_guid = dd->pport->guid; /* Use first-port guid as node */
	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->deviceid);
	majrev = dd->majrev;
	minrev = dd->minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	nip->vendor_id[0] = QIB_SRC_OUI_1;
	nip->vendor_id[1] = QIB_SRC_OUI_2;
	nip->vendor_id[2] = QIB_SRC_OUI_3;

	return reply(smp);
}

static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		__be64 g = ppd->guid;
		unsigned i;

		/* GUID 0 is illegal */
		if (g == 0)
			smp->status |= IB_SMP_INVALID_FIELD;
		else {
			/* The first is a copy of the read-only HW GUID. */
			p[0] = g;
			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
				p[i] = ibp->guids[i - 1];
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}

static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}

static int get_overrunthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}

/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
				     (u32)n);
	return 0;
}

static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}

/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
				     (u32)n);
	return 0;
}

/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
		IB_LINKINITCMD_SLEEP;
}

static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->rvp.mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->rvp.mkey_lease_timeout = 0;
		ibp->rvp.mkeyprot = 0;
	}

	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
	    ibp->rvp.mkey == smp->mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
	    (smp->method == IB_MGMT_METHOD_GET ||
	     smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->rvp.mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (smp->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->rvp.mkeyprot < 2)
				break;
			fallthrough;
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->rvp.mkey_violations != 0xFFFF)
				++ibp->rvp.mkey_violations;
			if (!ibp->rvp.mkey_lease_timeout &&
			    ibp->rvp.mkey_lease_period)
				ibp->rvp.mkey_lease_timeout = jiffies +
					ibp->rvp.mkey_lease_period * HZ;
			/* Generate a trap notice. */
			qib_bad_mkey(ibp, smp);
			ret = 1;
		}
	}

	return ret;
}
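
/*
 * Summary of the M_KeyProtectBits handling above (a reading of this
 * code against IBA ch. 14.2.4, not an authoritative spec quote): with
 * mkeyprot 0 or 1 a SubnGet with a mismatched M_Key is still answered
 * (check_mkey() returns 0 and no violation is counted); at level 2 or
 * 3 the mismatched Get falls through to the Set/TrapRepress path,
 * bumps mkey_violations, arms the lease timer, and sends trap 256 via
 * qib_bad_mkey().
 */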

static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret) {
				ret = IB_MAD_RESULT_FAILURE;
				goto bail;
			}
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->rvp.mkey != smp->mkey &&
	      ibp->rvp.mkeyprot == 1))
		pip->mkey = ibp->rvp.mkey;
	pip->gid_prefix = ibp->rvp.gid_prefix;
	pip->lid = cpu_to_be16(ppd->lid);
	pip->sm_lid = cpu_to_be16((u16)ibp->rvp.sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4;	/* InitType = 0 */
	pip->vl_high_limit = ibp->rvp.vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}

/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd = dd->pport + port - 1;
	/*
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that pd is valid.
	 */
	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

	return 0;
}

static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *p = (u16 *) smp->data;
	__be16 *q = (__be16 *) smp->data;

	/* 64 blocks of 32 16-bit P_Key entries */

	memset(smp->data, 0, sizeof(smp->data));
	if (startpx == 0) {
		struct qib_devdata *dd = dd_from_ibdev(ibdev);
		unsigned i, n = qib_get_npkeys(dd);

		get_pkeys(dd, port, p);

		for (i = 0; i < n; i++)
			q[i] = cpu_to_be16(p[i]);
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		unsigned i;

		/* The first entry is read-only. */
		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
			ibp->guids[i - 1] = p[i];
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_guidinfo(smp, ibdev, port);
}

/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
	unsigned long flags;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u8 vls;
	u8 msl;
	u16 lstate;
	int ret, ore, mtu;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt)
			goto err;
		/* Port attributes can only be set on the receiving port */
		if (port_num != port)
			goto get_only;
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ibp->rvp.mkey = pip->mkey;
	ibp->rvp.gid_prefix = pip->gid_prefix;
	ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	/* Must be a valid unicast LID address. */
	if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
		if (ppd->lid != lid)
			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	msl = pip->neighbormtu_mastersmsl & 0xF;
	/* Must be a valid unicast LID address. */
	if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
		spin_lock_irqsave(&ibp->rvp.lock, flags);
		if (ibp->rvp.sm_ah) {
			if (smlid != ibp->rvp.sm_lid)
				rdma_ah_set_dlid(&ibp->rvp.sm_ah->attr,
						 smlid);
			if (msl != ibp->rvp.sm_sl)
				rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
		}
		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
		if (smlid != ibp->rvp.sm_lid)
			ibp->rvp.sm_lid = smlid;
		if (msl != ibp->rvp.sm_sl)
			ibp->rvp.sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lwe != ppd->link_width_enabled)
			set_link_width_enabled(ppd, lwe);
	}

	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		/*
		 * The IB 1.2 spec. only allows link speed values
		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
		 * speeds.
		 */
		if (lse == 15)
			set_link_speed_enabled(ppd,
					       ppd->link_speed_supported);
		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lse != ppd->link_speed_enabled)
			set_link_speed_enabled(ppd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_SLEEP);
		break;
	case 2: /* POLL */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_POLL);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	ibp->rvp.vl_high_limit = pip->vl_high_limit;
	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
				ibp->rvp.vl_high_limit);

	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
	if (mtu == -1)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		qib_set_mtu(ppd, mtu);

	/* Set operational VLs */
	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
	if (vls) {
		if (vls > ppd->vls_supported)
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
	}

	if (pip->mkey_violations == 0)
		ibp->rvp.mkey_violations = 0;

	if (pip->pkey_violations == 0)
		ibp->rvp.pkey_violations = 0;

	if (pip->qkey_violations == 0)
		ibp->rvp.qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
		smp->status |= IB_SMP_INVALID_FIELD;

	if (set_overrunthreshold(ppd, (ore & 0xF)))
		smp->status |= IB_SMP_INVALID_FIELD;

	ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		smp->status |= IB_SMP_INVALID_FIELD;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		fallthrough;
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = QIB_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = QIB_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = QIB_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = QIB_IB_LINKDOWN_DISABLE;
		else {
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		qib_set_linkstate(ppd, lstate);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
		break;
	case IB_PORT_ARMED:
		qib_set_linkstate(ppd, QIB_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	if (clientrereg) {
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/* restore re-reg bit per o14-12.2.1 */
	pip->clientrereg_resv_subnetto |= clientrereg;

	goto get_only;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
get_only:
	ret = subn_get_portinfo(smp, ibdev, port);
done:
	return ret;
}

/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the qlogic_ib device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			ppd->pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		break;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the qlogic_ib device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register was modified.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}
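
/*
 * Example of the limited/full membership handling above (illustrative
 * only): a full-member key has bit 15 set, so adding 0x8001 while the
 * table already holds the limited key 0x0001 trips the
 * (entry & 0x7FFF) == lkey check and returns -EEXIST, since a full
 * key would make the limited one unreachable. The all-ones limited
 * key 0x7FFF is invalid and is silently treated as a no-op (ret 0).
 */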

/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.rdi.ibdev;
		event.element.port_num = port;
		ib_dispatch_event(&event);
	}
	return 0;
}

static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *p = (__be16 *) smp->data;
	u16 *q = (u16 *) smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned i, n = qib_get_npkeys(dd);

	for (i = 0; i < n; i++)
		q[i] = be16_to_cpu(p[i]);

	if (startpx != 0 || set_pkeys(dd, port, q) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}

static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
		smp->status |= IB_SMP_UNSUP_METHOD;
	else
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			*p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];

	return reply(smp);
}
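
/*
 * Layout sketch for the SL-to-VL mapping above (illustrative): each
 * byte of smp->data packs two table entries, the even SL in the high
 * nibble and the odd SL in the low nibble. With sl_to_vl[0] == 0 and
 * sl_to_vl[1] == 1, the first byte on the wire is 0x01; sixteen SLs
 * therefore occupy eight bytes. The Set handler below reverses the
 * same packing.
 */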

static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}

static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	memset(smp->data, 0, sizeof(smp->data));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}

static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));

	if (ppd->vls_supported == IB_VL_VL0)
		smp->status |= IB_SMP_UNSUP_METHOD;
	else if (which == IB_VLARB_LOWPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
					       smp->data);
	else if (which == IB_VLARB_HIGHPRI_0_31)
		(void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
					       smp->data);
	else
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_vl_arb(smp, ibdev, port);
}

static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o13-6, o13-7,
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}

static int pma_get_classportinfo(struct ib_pma_mad *pmp,
				 struct ib_device *ibdev)
{
	struct ib_class_port_info *p =
		(struct ib_class_port_info *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	if (pmp->mad_hdr.attr_mod != 0)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->capability_mask = cpu_to_be16(IB_PMA_CLASS_CAP_EXT_WIDTH);
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	ib_set_cpi_capmask2(p,
			    dd->psxmitwait_supported <<
			    (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	ib_set_cpi_resp_time(p, 18);

	return reply((struct ib_smp *) pmp);
}
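
/*
 * Encoding note for the two accessors above (illustrative, based on
 * the rdma/ib_mad.h helpers): CapabilityMask2 and the response-time
 * value share one 32-bit field, with the response time in the low
 * IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE (5) bits. That is why the
 * CM2 congestion-statistics bit is shifted by 31 - 5 = 26 before
 * ib_set_cpi_capmask2() merges it in: bit 26 is the most significant
 * bit of the 27-bit CM2 field.
 */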

static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4;	/* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	p->counter_select[0] = ibp->rvp.pma_counter_select[0];
	p->counter_select[1] = ibp->rvp.pma_counter_select[1];
	p->counter_select[2] = ibp->rvp.pma_counter_select[2];
	p->counter_select[3] = ibp->rvp.pma_counter_select[3];
	p->counter_select[4] = ibp->rvp.pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&ibp->rvp.lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->rvp.pma_tag = be16_to_cpu(p->tag);
		ibp->rvp.pma_counter_select[0] = p->counter_select[0];
		ibp->rvp.pma_counter_select[1] = p->counter_select[1];
		ibp->rvp.pma_counter_select[2] = p->counter_select[2];
		ibp->rvp.pma_counter_select[3] = p->counter_select[3];
		ibp->rvp.pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
				      ibp->rvp.pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}

static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
		       __be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
		break;
	default:
		ret = 0;
	}

	return ret;
}

/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
	u32 delta;

	delta = get_counter(&ppd->ibport_data, ppd,
			    IB_PMA_PORT_XMIT_WAIT);
	return ppd->cong_stats.counter + delta;
}
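
/*
 * Note on the accumulation above (an observation about this code, not
 * spec text): the hardware PortXmitWait sample counter is restarted
 * each sampling interval, so cong_stats.counter carries the running
 * total across completed intervals and the live hardware reading is
 * added as the delta for the interval currently in flight.
 */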

static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;

	ppd->cong_stats.counter_cache.psxmitdata =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
	ppd->cong_stats.counter_cache.psrcvdata =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
	ppd->cong_stats.counter_cache.psxmitpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
	ppd->cong_stats.counter_cache.psrcvpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
	ppd->cong_stats.counter_cache.psxmitwait =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}

static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
					__be16 sel)
{
	u64 ret;

	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		ret = ppd->cong_stats.counter_cache.psxmitdata;
		break;
	case IB_PMA_PORT_RCV_DATA:
		ret = ppd->cong_stats.counter_cache.psrcvdata;
		break;
	case IB_PMA_PORT_XMIT_PKTS:
		ret = ppd->cong_stats.counter_cache.psxmitpkts;
		break;
	case IB_PMA_PORT_RCV_PKTS:
		ret = ppd->cong_stats.counter_cache.psrcvpkts;
		break;
	case IB_PMA_PORT_XMIT_WAIT:
		ret = ppd->cong_stats.counter_cache.psxmitwait;
		break;
	default:
		ret = 0;
	}

	return ret;
}

static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
		p->counter[i] = cpu_to_be32(
			get_cache_hw_sample_counters(
				ppd, ibp->rvp.pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	/* Port Sampling code owns the PS* HW counters */
	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		/* 64 bits */
		p->extended_width = cpu_to_be32(0x80000000);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
		p->counter[i] = cpu_to_be64(
			get_cache_hw_sample_counters(
				ppd, ibp->rvp.pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;
	u8 port_select = p->port_select;

	qib_get_counters(ppd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}

static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	/* Congestion PMA packets start at offset 24 not 64 */
	struct ib_pma_portcounters_cong *p =
		(struct ib_pma_portcounters_cong *)pmp->reserved;
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
	u64 xmit_wait_counter;
	unsigned long flags;

	/*
	 * This check is performed only in the GET method because the
	 * SET method ends up calling this anyway.
	 */
	if (!dd->psxmitwait_supported)
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
	if (port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	qib_get_counters(ppd, &cntrs);
	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -=
		ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

	memset(pmp->reserved, 0, sizeof(pmp->reserved));
	memset(pmp->data, 0, sizeof(pmp->data));

	/*
	 * Set top 3 bits to indicate interval in picoseconds in
	 * remaining bits.
	 */
	p->port_check_rate =
		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
			    (dd->psxmitwait_check_rate &
			     ~(QIB_XMIT_RATE_PICO << 13)));
	p->port_adr_events = cpu_to_be64(0);
	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
	p->port_xmit_packets =
		cpu_to_be64(cntrs.port_xmit_packets);
	p->port_rcv_packets =
		cpu_to_be64(cntrs.port_rcv_packets);
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16(
				(u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter =
			(u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16(
				(u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}

static void qib_snapshot_pmacounters(
	struct qib_ibport *ibp,
	struct qib_pma_counters *pmacounters)
{
	struct qib_pma_counters *p;
	int cpu;

	memset(pmacounters, 0, sizeof(*pmacounters));
	for_each_possible_cpu(cpu) {
		p = per_cpu_ptr(ibp->pmastats, cpu);
		pmacounters->n_unicast_xmit += p->n_unicast_xmit;
		pmacounters->n_unicast_rcv += p->n_unicast_rcv;
		pmacounters->n_multicast_xmit += p->n_multicast_xmit;
		pmacounters->n_multicast_rcv += p->n_multicast_rcv;
	}
}
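
/*
 * The snapshot above sums per-CPU counters rather than taking a lock.
 * A minimal sketch of the matching hot-path update (illustrative; the
 * real increments live in the verbs TX/RX paths, not in this file):
 *
 *	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
 *
 * Each CPU bumps its own slot, and a reader tolerates the small skew
 * of summing slots that may still be changing.
 */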

static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	struct qib_pma_counters pma;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= ibp->z_port_xmit_data;
	rwords -= ibp->z_port_rcv_data;
	spkts -= ibp->z_port_xmit_packets;
	rpkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);

	qib_snapshot_pmacounters(ibp, &pma);

	p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
		- ibp->z_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
		- ibp->z_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
		- ibp->z_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
		- ibp->z_multicast_rcv);

bail:
	return reply((struct ib_smp *) pmp);
}

static int pma_set_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		ibp->rvp.n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	return pma_get_portcounters(pmp, ibdev, port);
}

static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	struct qib_verbs_counters cntrs;
	u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
	int ret = 0;
	unsigned long flags;

	qib_get_counters(ppd, &cntrs);
	/* Get counter values before we save them */
	ret = pma_get_portcounters_cong(pmp, ibdev, port);

	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
		spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
		ppd->cong_stats.counter = 0;
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
				      0x0);
		spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
	}
	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
		ibp->z_port_xmit_data = cntrs.port_xmit_data;
		ibp->z_port_rcv_data = cntrs.port_rcv_data;
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	if (counter_select & IB_PMA_SEL_CONG_ALL) {
		ibp->z_symbol_error_counter =
			cntrs.symbol_error_counter;
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		ibp->z_link_downed_counter =
			cntrs.link_downed_counter;
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		ibp->z_port_xmit_discards =
			cntrs.port_xmit_discards;
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;
		ibp->rvp.n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	return ret;
}

static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	struct qib_pma_counters pma;

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		ibp->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = rpkts;

	qib_snapshot_pmacounters(ibp, &pma);

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		ibp->z_unicast_xmit = pma.n_unicast_xmit;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		ibp->z_unicast_rcv = pma.n_unicast_rcv;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		ibp->z_multicast_xmit = pma.n_multicast_xmit;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		ibp->z_multicast_rcv = pma.n_multicast_rcv;

	return pma_get_portcounters_ext(pmp, ibdev, port);
}
1859
1860 static int process_subn(struct ib_device *ibdev, int mad_flags,
1861 u8 port, const struct ib_mad *in_mad,
1862 struct ib_mad *out_mad)
1863 {
1864 struct ib_smp *smp = (struct ib_smp *)out_mad;
1865 struct qib_ibport *ibp = to_iport(ibdev, port);
1866 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1867 int ret;
1868
1869 *out_mad = *in_mad;
1870 if (smp->class_version != 1) {
1871 smp->status |= IB_SMP_UNSUP_VERSION;
1872 ret = reply(smp);
1873 goto bail;
1874 }
1875
1876 ret = check_mkey(ibp, smp, mad_flags);
1877 if (ret) {
1878 u32 port_num = be32_to_cpu(smp->attr_mod);
1879
1880
1881
1882
1883
1884
1885
1886
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
                    (smp->method == IB_MGMT_METHOD_GET ||
                     smp->method == IB_MGMT_METHOD_SET) &&
                    port_num && port_num <= ibdev->phys_port_cnt &&
                    port != port_num)
                        (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
                ret = IB_MAD_RESULT_FAILURE;
                goto bail;
        }

        switch (smp->method) {
        case IB_MGMT_METHOD_GET:
                switch (smp->attr_id) {
                case IB_SMP_ATTR_NODE_DESC:
                        ret = subn_get_nodedescription(smp, ibdev);
                        goto bail;
                case IB_SMP_ATTR_NODE_INFO:
                        ret = subn_get_nodeinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_GUID_INFO:
                        ret = subn_get_guidinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PORT_INFO:
                        ret = subn_get_portinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PKEY_TABLE:
                        ret = subn_get_pkeytable(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SL_TO_VL_TABLE:
                        ret = subn_get_sl_to_vl(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_VL_ARB_TABLE:
                        ret = subn_get_vl_arb(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SM_INFO:
                        if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
                                ret = IB_MAD_RESULT_SUCCESS |
                                        IB_MAD_RESULT_CONSUMED;
                                goto bail;
                        }
                        if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
                                ret = IB_MAD_RESULT_SUCCESS;
                                goto bail;
                        }
                        fallthrough;
                default:
                        smp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply(smp);
                        goto bail;
                }

        case IB_MGMT_METHOD_SET:
                switch (smp->attr_id) {
                case IB_SMP_ATTR_GUID_INFO:
                        ret = subn_set_guidinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PORT_INFO:
                        ret = subn_set_portinfo(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_PKEY_TABLE:
                        ret = subn_set_pkeytable(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SL_TO_VL_TABLE:
                        ret = subn_set_sl_to_vl(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_VL_ARB_TABLE:
                        ret = subn_set_vl_arb(smp, ibdev, port);
                        goto bail;
                case IB_SMP_ATTR_SM_INFO:
                        if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
                                ret = IB_MAD_RESULT_SUCCESS |
                                        IB_MAD_RESULT_CONSUMED;
                                goto bail;
                        }
                        if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
                                ret = IB_MAD_RESULT_SUCCESS;
                                goto bail;
                        }
                        fallthrough;
                default:
                        smp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply(smp);
                        goto bail;
                }

        case IB_MGMT_METHOD_TRAP_REPRESS:
                if (smp->attr_id == IB_SMP_ATTR_NOTICE)
                        ret = subn_trap_repress(smp, ibdev, port);
                else {
                        smp->status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply(smp);
                }
                goto bail;

        case IB_MGMT_METHOD_TRAP:
        case IB_MGMT_METHOD_REPORT:
        case IB_MGMT_METHOD_REPORT_RESP:
        case IB_MGMT_METHOD_GET_RESP:
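                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */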
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        case IB_MGMT_METHOD_SEND:
                if (ib_get_smp_direction(smp) &&
                    smp->attr_id == QIB_VENDOR_IPG) {
                        ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
                                              smp->data[0]);
                        ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
                } else
                        ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        default:
                smp->status |= IB_SMP_UNSUP_METHOD;
                ret = reply(smp);
        }

bail:
        return ret;
}

static int process_perf(struct ib_device *ibdev, u8 port,
                        const struct ib_mad *in_mad,
                        struct ib_mad *out_mad)
{
        struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
        int ret;

        *out_mad = *in_mad;
        if (pmp->mad_hdr.class_version != 1) {
                pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
                ret = reply((struct ib_smp *) pmp);
                goto bail;
        }

        switch (pmp->mad_hdr.method) {
        case IB_MGMT_METHOD_GET:
                switch (pmp->mad_hdr.attr_id) {
                case IB_PMA_CLASS_PORT_INFO:
                        ret = pma_get_classportinfo(pmp, ibdev);
                        goto bail;
                case IB_PMA_PORT_SAMPLES_CONTROL:
                        ret = pma_get_portsamplescontrol(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_SAMPLES_RESULT:
                        ret = pma_get_portsamplesresult(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_SAMPLES_RESULT_EXT:
                        ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS:
                        ret = pma_get_portcounters(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_EXT:
                        ret = pma_get_portcounters_ext(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_CONG:
                        ret = pma_get_portcounters_cong(pmp, ibdev, port);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_smp *) pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_SET:
                switch (pmp->mad_hdr.attr_id) {
                case IB_PMA_PORT_SAMPLES_CONTROL:
                        ret = pma_set_portsamplescontrol(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS:
                        ret = pma_set_portcounters(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_EXT:
                        ret = pma_set_portcounters_ext(pmp, ibdev, port);
                        goto bail;
                case IB_PMA_PORT_COUNTERS_CONG:
                        ret = pma_set_portcounters_cong(pmp, ibdev, port);
                        goto bail;
                default:
                        pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
                        ret = reply((struct ib_smp *) pmp);
                        goto bail;
                }

        case IB_MGMT_METHOD_TRAP:
        case IB_MGMT_METHOD_GET_RESP:
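                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */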
                ret = IB_MAD_RESULT_SUCCESS;
                goto bail;

        default:
                pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
                ret = reply((struct ib_smp *) pmp);
        }

bail:
        return ret;
}

static int cc_get_classportinfo(struct ib_cc_mad *ccp,
                                struct ib_device *ibdev)
{
        struct ib_cc_classportinfo_attr *p =
                (struct ib_cc_classportinfo_attr *)ccp->mgmt_data;

        p->base_version = 1;
        p->class_version = 1;
        p->cap_mask = 0;

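        /*
         * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
         */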
        p->resp_time_value = 18;

        return reply((struct ib_smp *) ccp);
}

static int cc_get_congestion_info(struct ib_cc_mad *ccp,
                                  struct ib_device *ibdev, u8 port)
{
        struct ib_cc_info_attr *p =
                (struct ib_cc_info_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);

        p->congestion_info = 0;
        p->control_table_cap = ppd->cc_max_table_entries;

        return reply((struct ib_smp *) ccp);
}

static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
                                     struct ib_device *ibdev, u8 port)
{
        int i;
        struct ib_cc_congestion_setting_attr *p =
                (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct ib_cc_congestion_entry_shadow *entries;

        spin_lock(&ppd->cc_shadow_lock);

        entries = ppd->congestion_entries_shadow->entries;
        p->port_control = cpu_to_be16(
                ppd->congestion_entries_shadow->port_control);
        p->control_map = cpu_to_be16(
                ppd->congestion_entries_shadow->control_map);
        for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
                p->entries[i].ccti_increase = entries[i].ccti_increase;
                p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
                p->entries[i].trigger_threshold = entries[i].trigger_threshold;
                p->entries[i].ccti_min = entries[i].ccti_min;
        }

        spin_unlock(&ppd->cc_shadow_lock);

        return reply((struct ib_smp *) ccp);
}

static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
                                           struct ib_device *ibdev, u8 port)
{
        struct ib_cc_table_attr *p =
                (struct ib_cc_table_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
        u32 max_cct_block;
        u32 cct_entry;
        struct ib_cc_table_entry_shadow *entries;
        int i;

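        /* Is the table index more than what is supported? */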
        if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
                goto bail;

        spin_lock(&ppd->cc_shadow_lock);

        max_cct_block =
                (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
        max_cct_block = max_cct_block ? max_cct_block - 1 : 0;

        if (cct_block_index > max_cct_block) {
                spin_unlock(&ppd->cc_shadow_lock);
                goto bail;
        }

        ccp->attr_mod = cpu_to_be32(cct_block_index);

        cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);

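        /* Convert the entry count to the index of the last entry in this block. */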
        cct_entry--;

        p->ccti_limit = cpu_to_be16(cct_entry);

        entries = &ppd->ccti_entries_shadow->
                        entries[IB_CCT_ENTRIES * cct_block_index];
        cct_entry %= IB_CCT_ENTRIES;

        for (i = 0; i <= cct_entry; i++)
                p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);

        spin_unlock(&ppd->cc_shadow_lock);

        return reply((struct ib_smp *) ccp);

bail:
        return reply_failure((struct ib_smp *) ccp);
}

static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
                                     struct ib_device *ibdev, u8 port)
{
        struct ib_cc_congestion_setting_attr *p =
                (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        int i;

        ppd->cc_sl_control_map = be16_to_cpu(p->control_map);

        for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
                ppd->congestion_entries[i].ccti_increase =
                        p->entries[i].ccti_increase;

                ppd->congestion_entries[i].ccti_timer =
                        be16_to_cpu(p->entries[i].ccti_timer);

                ppd->congestion_entries[i].trigger_threshold =
                        p->entries[i].trigger_threshold;

                ppd->congestion_entries[i].ccti_min =
                        p->entries[i].ccti_min;
        }

        return reply((struct ib_smp *) ccp);
}

static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
                                           struct ib_device *ibdev, u8 port)
{
        struct ib_cc_table_attr *p =
                (struct ib_cc_table_attr *)ccp->mgmt_data;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
        u32 cct_entry;
        struct ib_cc_table_entry_shadow *entries;
        int i;

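        /* Is the table index more than what is supported? */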
        if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
                goto bail;

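        /* If this packet is the first in the sequence then
         * zero the total table entry count.
         */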
        if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
                ppd->total_cct_entry = 0;

        cct_entry = (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES;

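        /* ccti_limit is 0 to 63 */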
        ppd->total_cct_entry += (cct_entry + 1);

        if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
                goto bail;

        ppd->ccti_limit = be16_to_cpu(p->ccti_limit);

        entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);

        for (i = 0; i <= cct_entry; i++)
                entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);

        spin_lock(&ppd->cc_shadow_lock);

        ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
        memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
               (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));

        ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
        ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
        memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
               IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));

        spin_unlock(&ppd->cc_shadow_lock);

        return reply((struct ib_smp *) ccp);

bail:
        return reply_failure((struct ib_smp *) ccp);
}

static int process_cc(struct ib_device *ibdev, int mad_flags,
                      u8 port, const struct ib_mad *in_mad,
                      struct ib_mad *out_mad)
{
        struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;

        *out_mad = *in_mad;

        if (ccp->class_version != 2) {
                ccp->status |= IB_SMP_UNSUP_VERSION;
                return reply((struct ib_smp *)ccp);
        }

        switch (ccp->method) {
        case IB_MGMT_METHOD_GET:
                switch (ccp->attr_id) {
                case IB_CC_ATTR_CLASSPORTINFO:
                        return cc_get_classportinfo(ccp, ibdev);
                case IB_CC_ATTR_CONGESTION_INFO:
                        return cc_get_congestion_info(ccp, ibdev, port);
                case IB_CC_ATTR_CA_CONGESTION_SETTING:
                        return cc_get_congestion_setting(ccp, ibdev, port);
                case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
                        return cc_get_congestion_control_table(ccp, ibdev, port);
                default:
                        ccp->status |= IB_SMP_UNSUP_METH_ATTR;
                        return reply((struct ib_smp *) ccp);
                }
        case IB_MGMT_METHOD_SET:
                switch (ccp->attr_id) {
                case IB_CC_ATTR_CA_CONGESTION_SETTING:
                        return cc_set_congestion_setting(ccp, ibdev, port);
                case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
                        return cc_set_congestion_control_table(ccp, ibdev, port);
                default:
                        ccp->status |= IB_SMP_UNSUP_METH_ATTR;
                        return reply((struct ib_smp *) ccp);
                }
        case IB_MGMT_METHOD_GET_RESP:
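                /*
                 * The ib_mad module will call us to process responses
                 * before checking for other consumers.
                 * Just tell the caller to process it normally.
                 */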
                return IB_MAD_RESULT_SUCCESS;
        }

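        /* method is unsupported */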
        ccp->status |= IB_SMP_UNSUP_METHOD;
        return reply((struct ib_smp *) ccp);
}

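/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in: the incoming MAD
 * @out: any outgoing MAD reply
 * @out_mad_size: size of the outgoing MAD reply
 * @out_mad_pkey_index: unused
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */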
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
                    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                    const struct ib_mad *in, struct ib_mad *out,
                    size_t *out_mad_size, u16 *out_mad_pkey_index)
{
        int ret;
        struct qib_ibport *ibp = to_iport(ibdev, port);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);

        switch (in->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
                ret = process_subn(ibdev, mad_flags, port, in, out);
                goto bail;

        case IB_MGMT_CLASS_PERF_MGMT:
                ret = process_perf(ibdev, port, in, out);
                goto bail;

        case IB_MGMT_CLASS_CONG_MGMT:
                if (!ppd->congestion_entries_shadow ||
                    !qib_cc_table_size) {
                        ret = IB_MAD_RESULT_SUCCESS;
                        goto bail;
                }
                ret = process_cc(ibdev, mad_flags, port, in, out);
                goto bail;

        default:
                ret = IB_MAD_RESULT_SUCCESS;
        }

bail:
        return ret;
}

static void xmit_wait_timer_func(struct timer_list *t)
{
        struct qib_pportdata *ppd = from_timer(ppd, t, cong_stats.timer);
        struct qib_devdata *dd = dd_from_ppd(ppd);
        unsigned long flags;
        u8 status;

        spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
        if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
                status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
                if (status == IB_PMA_SAMPLE_STATUS_DONE) {
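                        /* save counter cache */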
                        cache_hw_sample_counters(ppd);
                        ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
                } else
                        goto done;
        }
        ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
        dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
        spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
        mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}

void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
        struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(ibdev,
                                              struct qib_devdata, verbs_dev);

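        /* Initialize xmit_wait structure */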
        dd->pport[port_idx].cong_stats.counter = 0;
        timer_setup(&dd->pport[port_idx].cong_stats.timer,
                    xmit_wait_timer_func, 0);
        dd->pport[port_idx].cong_stats.timer.expires = 0;
        add_timer(&dd->pport[port_idx].cong_stats.timer);
}

void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
        struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(ibdev,
                                              struct qib_devdata, verbs_dev);

        if (dd->pport[port_idx].cong_stats.timer.function)
                del_timer_sync(&dd->pport[port_idx].cong_stats.timer);

        if (dd->pport[port_idx].ibport_data.smi_ah)
                rdma_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah,
                                RDMA_DESTROY_AH_SLEEPABLE);
}