0038 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0039
0040 #include <linux/dma-mapping.h>
0041 #include <linux/slab.h>
0042 #include <linux/module.h>
0043 #include <linux/security.h>
0044 #include <linux/xarray.h>
0045 #include <rdma/ib_cache.h>
0046
0047 #include "mad_priv.h"
0048 #include "core_priv.h"
0049 #include "mad_rmpp.h"
0050 #include "smi.h"
0051 #include "opa_smi.h"
0052 #include "agent.h"
0053
0054 #define CREATE_TRACE_POINTS
0055 #include <trace/events/ib_mad.h>
0056
0057 #ifdef CONFIG_TRACEPOINTS
0058 static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
0059 struct ib_mad_qp_info *qp_info,
0060 struct trace_event_raw_ib_mad_send_template *entry)
0061 {
0062 u16 pkey;
0063 struct ib_device *dev = qp_info->port_priv->device;
0064 u32 pnum = qp_info->port_priv->port_num;
0065 struct ib_ud_wr *wr = &mad_send_wr->send_wr;
0066 struct rdma_ah_attr attr = {};
0067
0068 rdma_query_ah(wr->ah, &attr);
0069
0070
0071 entry->sl = attr.sl;
0072 ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
0073 entry->pkey = pkey;
0074 entry->rqpn = wr->remote_qpn;
0075 entry->rqkey = wr->remote_qkey;
0076 entry->dlid = rdma_ah_get_dlid(&attr);
0077 }
0078 #endif
0079
0080 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
0081 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
0082
0083 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
0084 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
0085 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
0086 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
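/*
 * Note: both queue sizes are read-only module parameters (mode 0444); they
 * bound the number of work requests posted on each special QP's send and
 * receive queues.
 */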
0087
0088 static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
0089 static u32 ib_mad_client_next;
0090 static struct list_head ib_mad_port_list;

/* Protects ib_mad_port_list */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
0096 static int method_in_use(struct ib_mad_mgmt_method_table **method,
0097 struct ib_mad_reg_req *mad_reg_req);
0098 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
0099 static struct ib_mad_agent_private *find_mad_agent(
0100 struct ib_mad_port_private *port_priv,
0101 const struct ib_mad_hdr *mad);
0102 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
0103 struct ib_mad_private *mad);
0104 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
0105 static void timeout_sends(struct work_struct *work);
0106 static void local_completions(struct work_struct *work);
0107 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
0108 struct ib_mad_agent_private *agent_priv,
0109 u8 mgmt_class);
0110 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
0111 struct ib_mad_agent_private *agent_priv);
0112 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
0113 struct ib_wc *wc);
0114 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Return the ib_mad_port_private for a device/port, or NULL if there is
 * none.  The caller must hold ib_mad_port_list_lock.
 */
0120 static inline struct ib_mad_port_private *
0121 __ib_get_mad_port(struct ib_device *device, u32 port_num)
0122 {
0123 struct ib_mad_port_private *entry;
0124
0125 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
0126 if (entry->device == device && entry->port_num == port_num)
0127 return entry;
0128 }
0129 return NULL;
0130 }

/*
 * Wrapper that takes ib_mad_port_list_lock and returns the
 * ib_mad_port_private for a device/port, or NULL.
 */
0136 static inline struct ib_mad_port_private *
0137 ib_get_mad_port(struct ib_device *device, u32 port_num)
0138 {
0139 struct ib_mad_port_private *entry;
0140 unsigned long flags;
0141
0142 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
0143 entry = __ib_get_mad_port(device, port_num);
0144 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
0145
0146 return entry;
0147 }
0148
0149 static inline u8 convert_mgmt_class(u8 mgmt_class)
0150 {
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to class 0 */
0152 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
0153 0 : mgmt_class;
0154 }
0155
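/*
 * Map a special QP type to its index in port_priv->qp_info[]: 0 for the
 * SMI QP (QP0), 1 for the GSI QP (QP1), or -1 if the type is not a
 * special QP.
 */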
0156 static int get_spl_qp_index(enum ib_qp_type qp_type)
0157 {
0158 switch (qp_type) {
0159 case IB_QPT_SMI:
0160 return 0;
0161 case IB_QPT_GSI:
0162 return 1;
0163 default:
0164 return -1;
0165 }
0166 }
0167
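/*
 * Helpers for the "new" vendor management class range
 * (IB_MGMT_CLASS_VENDOR_RANGE2_START..END), which is keyed by OUI.
 */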
0168 static int vendor_class_index(u8 mgmt_class)
0169 {
0170 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
0171 }
0172
0173 static int is_vendor_class(u8 mgmt_class)
0174 {
0175 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
0176 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
0177 return 0;
0178 return 1;
0179 }
0180
0181 static int is_vendor_oui(char *oui)
0182 {
0183 if (oui[0] || oui[1] || oui[2])
0184 return 1;
0185 return 0;
0186 }
0187
0188 static int is_vendor_method_in_use(
0189 struct ib_mad_mgmt_vendor_class *vendor_class,
0190 struct ib_mad_reg_req *mad_reg_req)
0191 {
0192 struct ib_mad_mgmt_method_table *method;
0193 int i;
0194
0195 for (i = 0; i < MAX_MGMT_OUI; i++) {
0196 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
0197 method = vendor_class->method_table[i];
0198 if (method) {
0199 if (method_in_use(&method, mad_reg_req))
0200 return 1;
0201 else
0202 break;
0203 }
0204 }
0205 }
0206 return 0;
0207 }
0208
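/*
 * Return non-zero if the MAD is a response: the response bit is set in the
 * method, the method is TrapRepress, or (for the BM class) the response
 * bit is set in the attribute modifier.
 */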
0209 int ib_response_mad(const struct ib_mad_hdr *hdr)
0210 {
0211 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
0212 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
0213 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
0214 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
0215 }
0216 EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register a client to send and receive MADs on
 * the given device/port and special QP type (SMI or GSI).
 *
 * Context: Process context (may sleep).
 */
0223 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
0224 u32 port_num,
0225 enum ib_qp_type qp_type,
0226 struct ib_mad_reg_req *mad_reg_req,
0227 u8 rmpp_version,
0228 ib_mad_send_handler send_handler,
0229 ib_mad_recv_handler recv_handler,
0230 void *context,
0231 u32 registration_flags)
0232 {
0233 struct ib_mad_port_private *port_priv;
0234 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
0235 struct ib_mad_agent_private *mad_agent_priv;
0236 struct ib_mad_reg_req *reg_req = NULL;
0237 struct ib_mad_mgmt_class_table *class;
0238 struct ib_mad_mgmt_vendor_class_table *vendor;
0239 struct ib_mad_mgmt_vendor_class *vendor_class;
0240 struct ib_mad_mgmt_method_table *method;
0241 int ret2, qpn;
0242 u8 mgmt_class, vclass;
0243
0244 if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
0245 (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
0246 return ERR_PTR(-EPROTONOSUPPORT);
0247
0248
0249 qpn = get_spl_qp_index(qp_type);
0250 if (qpn == -1) {
0251 dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
0252 __func__, qp_type);
0253 goto error1;
0254 }
0255
0256 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
0257 dev_dbg_ratelimited(&device->dev,
0258 "%s: invalid RMPP Version %u\n",
0259 __func__, rmpp_version);
0260 goto error1;
0261 }
0262
0263
0264 if (mad_reg_req) {
0265 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
0266 dev_dbg_ratelimited(&device->dev,
0267 "%s: invalid Class Version %u\n",
0268 __func__,
0269 mad_reg_req->mgmt_class_version);
0270 goto error1;
0271 }
0272 if (!recv_handler) {
0273 dev_dbg_ratelimited(&device->dev,
0274 "%s: no recv_handler\n", __func__);
0275 goto error1;
0276 }
0277 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * class outside the 0..MAX_MGMT_CLASS-1 range that
			 * may be registered.
			 */
0282 if (mad_reg_req->mgmt_class !=
0283 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
0284 dev_dbg_ratelimited(&device->dev,
0285 "%s: Invalid Mgmt Class 0x%x\n",
0286 __func__, mad_reg_req->mgmt_class);
0287 goto error1;
0288 }
0289 } else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved; it is only used internally as
			 * the alias for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 * (see convert_mgmt_class()).
			 */
0294 dev_dbg_ratelimited(&device->dev,
0295 "%s: Invalid Mgmt Class 0\n",
0296 __func__);
0297 goto error1;
0298 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * Management classes in the "new" vendor range must
			 * be registered with a non-zero OUI.
			 */
0303 if (!is_vendor_oui(mad_reg_req->oui)) {
0304 dev_dbg_ratelimited(&device->dev,
0305 "%s: No OUI specified for class 0x%x\n",
0306 __func__,
0307 mad_reg_req->mgmt_class);
0308 goto error1;
0309 }
0310 }
0311
0312 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
0313 if (rmpp_version) {
0314 dev_dbg_ratelimited(&device->dev,
0315 "%s: RMPP version for non-RMPP class 0x%x\n",
0316 __func__, mad_reg_req->mgmt_class);
0317 goto error1;
0318 }
0319 }

		/* Make sure the class supplied is consistent with the QP type */
0322 if (qp_type == IB_QPT_SMI) {
0323 if ((mad_reg_req->mgmt_class !=
0324 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
0325 (mad_reg_req->mgmt_class !=
0326 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
0327 dev_dbg_ratelimited(&device->dev,
0328 "%s: Invalid SM QP type: class 0x%x\n",
0329 __func__, mad_reg_req->mgmt_class);
0330 goto error1;
0331 }
0332 } else {
0333 if ((mad_reg_req->mgmt_class ==
0334 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
0335 (mad_reg_req->mgmt_class ==
0336 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
0337 dev_dbg_ratelimited(&device->dev,
0338 "%s: Invalid GS QP type: class 0x%x\n",
0339 __func__, mad_reg_req->mgmt_class);
0340 goto error1;
0341 }
0342 }
0343 } else {
0344
0345 if (!send_handler)
0346 goto error1;
0347 if (registration_flags & IB_MAD_USER_RMPP)
0348 goto error1;
0349 }
0350
0351
0352 port_priv = ib_get_mad_port(device, port_num);
0353 if (!port_priv) {
0354 dev_dbg_ratelimited(&device->dev, "%s: Invalid port %u\n",
0355 __func__, port_num);
0356 ret = ERR_PTR(-ENODEV);
0357 goto error1;
0358 }

	/*
	 * Verify that the requested special QP exists on this port; for
	 * example, Ethernet (RoCE) ports do not have QP0.
	 */
0363 if (!port_priv->qp_info[qpn].qp) {
0364 dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
0365 __func__, qpn);
0366 ret = ERR_PTR(-EPROTONOSUPPORT);
0367 goto error1;
0368 }
0369
0370
0371 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
0372 if (!mad_agent_priv) {
0373 ret = ERR_PTR(-ENOMEM);
0374 goto error1;
0375 }
0376
0377 if (mad_reg_req) {
0378 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
0379 if (!reg_req) {
0380 ret = ERR_PTR(-ENOMEM);
0381 goto error3;
0382 }
0383 }
0384
0385
0386 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
0387 mad_agent_priv->reg_req = reg_req;
0388 mad_agent_priv->agent.rmpp_version = rmpp_version;
0389 mad_agent_priv->agent.device = device;
0390 mad_agent_priv->agent.recv_handler = recv_handler;
0391 mad_agent_priv->agent.send_handler = send_handler;
0392 mad_agent_priv->agent.context = context;
0393 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
0394 mad_agent_priv->agent.port_num = port_num;
0395 mad_agent_priv->agent.flags = registration_flags;
0396 spin_lock_init(&mad_agent_priv->lock);
0397 INIT_LIST_HEAD(&mad_agent_priv->send_list);
0398 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
0399 INIT_LIST_HEAD(&mad_agent_priv->done_list);
0400 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
0401 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
0402 INIT_LIST_HEAD(&mad_agent_priv->local_list);
0403 INIT_WORK(&mad_agent_priv->local_work, local_completions);
0404 refcount_set(&mad_agent_priv->refcount, 1);
0405 init_completion(&mad_agent_priv->comp);
0406
0407 ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
0408 if (ret2) {
0409 ret = ERR_PTR(ret2);
0410 goto error4;
0411 }

	/*
	 * Allocate the client ID (the high 32 bits of the TID).  Only the
	 * low 24 bits are used here; the top byte is left free for drivers
	 * that encode extra routing information in it (e.g. which virtual
	 * function generated the MAD).
	 */
0417 ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
0418 mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
0419 &ib_mad_client_next, GFP_KERNEL);
0420 if (ret2 < 0) {
0421 ret = ERR_PTR(ret2);
0422 goto error5;
0423 }

	/*
	 * Make sure the MAD registration (if supplied) does not overlap with
	 * any existing registration on this port.
	 */
0429 spin_lock_irq(&port_priv->reg_lock);
0430 if (mad_reg_req) {
0431 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
0432 if (!is_vendor_class(mgmt_class)) {
0433 class = port_priv->version[mad_reg_req->
0434 mgmt_class_version].class;
0435 if (class) {
0436 method = class->method_table[mgmt_class];
0437 if (method) {
0438 if (method_in_use(&method,
0439 mad_reg_req))
0440 goto error6;
0441 }
0442 }
0443 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
0444 mgmt_class);
0445 } else {
0446
0447 vendor = port_priv->version[mad_reg_req->
0448 mgmt_class_version].vendor;
0449 if (vendor) {
0450 vclass = vendor_class_index(mgmt_class);
0451 vendor_class = vendor->vendor_class[vclass];
0452 if (vendor_class) {
0453 if (is_vendor_method_in_use(
0454 vendor_class,
0455 mad_reg_req))
0456 goto error6;
0457 }
0458 }
0459 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
0460 }
0461 if (ret2) {
0462 ret = ERR_PTR(ret2);
0463 goto error6;
0464 }
0465 }
0466 spin_unlock_irq(&port_priv->reg_lock);
0467
0468 trace_ib_mad_create_agent(mad_agent_priv);
0469 return &mad_agent_priv->agent;
0470 error6:
0471 spin_unlock_irq(&port_priv->reg_lock);
0472 xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
0473 error5:
0474 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
0475 error4:
0476 kfree(reg_req);
0477 error3:
0478 kfree(mad_agent_priv);
0479 error1:
0480 return ret;
0481 }
0482 EXPORT_SYMBOL(ib_register_mad_agent);
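/*
 * Minimal usage sketch (illustrative only; my_send_handler, my_recv_handler
 * and my_context are placeholders, and the class/version values are just an
 * example): a client that wants unsolicited GSI MADs of one class would
 * typically do something like
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class		= IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version	= 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      0, my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */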
0483
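/*
 * Drop a reference on a MAD agent.  When the last reference goes away the
 * agent's completion is signalled so that unregister_mad_agent() can finish
 * tearing the agent down.
 */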
0484 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
0485 {
0486 if (refcount_dec_and_test(&mad_agent_priv->refcount))
0487 complete(&mad_agent_priv->comp);
0488 }
0489
0490 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
0491 {
0492 struct ib_mad_port_private *port_priv;

	/* Note that received MADs may still be in flight at this point */
	trace_ib_mad_unregister_agent(mad_agent_priv);

	/*
	 * Canceling all sends results in dropping received response MADs,
	 * preventing us from queuing additional work.
	 */
0501 cancel_mads(mad_agent_priv);
0502 port_priv = mad_agent_priv->qp_info->port_priv;
0503 cancel_delayed_work(&mad_agent_priv->timed_work);
0504
0505 spin_lock_irq(&port_priv->reg_lock);
0506 remove_mad_reg_req(mad_agent_priv);
0507 spin_unlock_irq(&port_priv->reg_lock);
0508 xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
0509
0510 flush_workqueue(port_priv->wq);
0511
0512 deref_mad_agent(mad_agent_priv);
0513 wait_for_completion(&mad_agent_priv->comp);
0514 ib_cancel_rmpp_recvs(mad_agent_priv);
0515
0516 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
0517
0518 kfree(mad_agent_priv->reg_req);
0519 kfree_rcu(mad_agent_priv, rcu);
0520 }

/*
 * ib_unregister_mad_agent - Unregister a client from using MAD services.
 *
 * Context: Process context.
 */
0527 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
0528 {
0529 struct ib_mad_agent_private *mad_agent_priv;
0530
0531 mad_agent_priv = container_of(mad_agent,
0532 struct ib_mad_agent_private,
0533 agent);
0534 unregister_mad_agent(mad_agent_priv);
0535 }
0536 EXPORT_SYMBOL(ib_unregister_mad_agent);
0537
0538 static void dequeue_mad(struct ib_mad_list_head *mad_list)
0539 {
0540 struct ib_mad_queue *mad_queue;
0541 unsigned long flags;
0542
0543 mad_queue = mad_list->mad_queue;
0544 spin_lock_irqsave(&mad_queue->lock, flags);
0545 list_del(&mad_list->list);
0546 mad_queue->count--;
0547 spin_unlock_irqrestore(&mad_queue->lock, flags);
0548 }
0549
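/*
 * Fabricate a receive work completion for an SMP that is being processed
 * locally (loopback) rather than posted to the QP.
 */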
0550 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
0551 u16 pkey_index, u32 port_num, struct ib_wc *wc)
0552 {
0553 memset(wc, 0, sizeof *wc);
0554 wc->wr_cqe = cqe;
0555 wc->status = IB_WC_SUCCESS;
0556 wc->opcode = IB_WC_RECV;
0557 wc->pkey_index = pkey_index;
0558 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
0559 wc->src_qp = IB_QP0;
0560 wc->qp = qp;
0561 wc->slid = slid;
0562 wc->sl = 0;
0563 wc->dlid_path_bits = 0;
0564 wc->port_num = port_num;
0565 }
0566
0567 static size_t mad_priv_size(const struct ib_mad_private *mp)
0568 {
0569 return sizeof(struct ib_mad_private) + mp->mad_size;
0570 }
0571
0572 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
0573 {
0574 size_t size = sizeof(struct ib_mad_private) + mad_size;
0575 struct ib_mad_private *ret = kzalloc(size, flags);
0576
0577 if (ret)
0578 ret->mad_size = mad_size;
0579
0580 return ret;
0581 }
0582
0583 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
0584 {
0585 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
0586 }
0587
0588 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
0589 {
0590 return sizeof(struct ib_grh) + mp->mad_size;
0591 }

/*
 * Handle an outgoing directed route SMP that may need local processing.
 * Return 0 if the SMP should be posted to the QP, 1 if it was consumed
 * locally (whether or not solicited), or < 0 on error.
 */
0598 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
0599 struct ib_mad_send_wr_private *mad_send_wr)
0600 {
0601 int ret = 0;
0602 struct ib_smp *smp = mad_send_wr->send_buf.mad;
0603 struct opa_smp *opa_smp = (struct opa_smp *)smp;
0604 unsigned long flags;
0605 struct ib_mad_local_private *local;
0606 struct ib_mad_private *mad_priv;
0607 struct ib_mad_port_private *port_priv;
0608 struct ib_mad_agent_private *recv_mad_agent = NULL;
0609 struct ib_device *device = mad_agent_priv->agent.device;
0610 u32 port_num;
0611 struct ib_wc mad_wc;
0612 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
0613 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
0614 u16 out_mad_pkey_index = 0;
0615 u16 drslid;
0616 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
0617 mad_agent_priv->qp_info->port_priv->port_num);
0618
0619 if (rdma_cap_ib_switch(device) &&
0620 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
0621 port_num = send_wr->port_num;
0622 else
0623 port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts when the initial LID routed part of
	 * a request, or the ending LID routed part of a response, is empty
	 * (permissive LID).  At the start of the LID routed part the hop_ptr
	 * and hop_cnt are not updated.
	 */
0631 if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
0632 u32 opa_drslid;
0633
0634 trace_ib_mad_handle_out_opa_smi(opa_smp);
0635
0636 if ((opa_get_smp_direction(opa_smp)
0637 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
0638 OPA_LID_PERMISSIVE &&
0639 opa_smi_handle_dr_smp_send(opa_smp,
0640 rdma_cap_ib_switch(device),
0641 port_num) == IB_SMI_DISCARD) {
0642 ret = -EINVAL;
0643 dev_err(&device->dev, "OPA Invalid directed route\n");
0644 goto out;
0645 }
0646 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
0647 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
0648 opa_drslid & 0xffff0000) {
0649 ret = -EINVAL;
0650 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
0651 opa_drslid);
0652 goto out;
0653 }
0654 drslid = (u16)(opa_drslid & 0x0000ffff);
0655
0656
0657 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
0658 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
0659 goto out;
0660 } else {
0661 trace_ib_mad_handle_out_ib_smi(smp);
0662
0663 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
0664 IB_LID_PERMISSIVE &&
0665 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
0666 IB_SMI_DISCARD) {
0667 ret = -EINVAL;
0668 dev_err(&device->dev, "Invalid directed route\n");
0669 goto out;
0670 }
0671 drslid = be16_to_cpu(smp->dr_slid);
0672
0673
0674 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
0675 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
0676 goto out;
0677 }
0678
0679 local = kmalloc(sizeof *local, GFP_ATOMIC);
0680 if (!local) {
0681 ret = -ENOMEM;
0682 goto out;
0683 }
0684 local->mad_priv = NULL;
0685 local->recv_mad_agent = NULL;
0686 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
0687 if (!mad_priv) {
0688 ret = -ENOMEM;
0689 kfree(local);
0690 goto out;
0691 }
0692
0693 build_smp_wc(mad_agent_priv->agent.qp,
0694 send_wr->wr.wr_cqe, drslid,
0695 send_wr->pkey_index,
0696 send_wr->port_num, &mad_wc);
0697
0698 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
0699 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
0700 + mad_send_wr->send_buf.data_len
0701 + sizeof(struct ib_grh);
0702 }

	/* No GRH for DR SMPs */
0705 ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
0706 (const struct ib_mad *)smp,
0707 (struct ib_mad *)mad_priv->mad, &mad_size,
0708 &out_mad_pkey_index);
0709 switch (ret) {
0710 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
0711 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
0712 mad_agent_priv->agent.recv_handler) {
0713 local->mad_priv = mad_priv;
0714 local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference the MAD agent until the receive side of
			 * the local completion has been handled.
			 */
0719 refcount_inc(&mad_agent_priv->refcount);
0720 } else
0721 kfree(mad_priv);
0722 break;
0723 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
0724 kfree(mad_priv);
0725 break;
0726 case IB_MAD_RESULT_SUCCESS:
0727
0728 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
0729 mad_agent_priv->agent.port_num);
0730 if (port_priv) {
0731 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
0732 recv_mad_agent = find_mad_agent(port_priv,
0733 (const struct ib_mad_hdr *)mad_priv->mad);
0734 }
0735 if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent for this reply; drop the packet
			 * and just generate a send completion.
			 */
0740 kfree(mad_priv);
0741 break;
0742 }
0743 local->mad_priv = mad_priv;
0744 local->recv_mad_agent = recv_mad_agent;
0745 break;
0746 default:
0747 kfree(mad_priv);
0748 kfree(local);
0749 ret = -EINVAL;
0750 goto out;
0751 }
0752
0753 local->mad_send_wr = mad_send_wr;
0754 if (opa) {
0755 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
0756 local->return_wc_byte_len = mad_size;
0757 }

	/* Reference the MAD agent until the send side of the local completion is handled */
	refcount_inc(&mad_agent_priv->refcount);
	/* Queue the local completion on the agent's local list */
0761 spin_lock_irqsave(&mad_agent_priv->lock, flags);
0762 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
0763 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
0764 queue_work(mad_agent_priv->qp_info->port_priv->wq,
0765 &mad_agent_priv->local_work);
0766 ret = 1;
0767 out:
0768 return ret;
0769 }
0770
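/*
 * Amount of padding needed to round data_len up to a whole number of RMPP
 * segments (seg_size = mad_size - hdr_len).  A zero-length payload still
 * occupies one full segment.
 */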
0771 static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
0772 {
0773 int seg_size, pad;
0774
0775 seg_size = mad_size - hdr_len;
0776 if (data_len && seg_size) {
0777 pad = seg_size - data_len % seg_size;
0778 return pad == seg_size ? 0 : pad;
0779 } else
0780 return seg_size;
0781 }
0782
0783 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
0784 {
0785 struct ib_rmpp_segment *s, *t;
0786
0787 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
0788 list_del(&s->list);
0789 kfree(s);
0790 }
0791 }
0792
0793 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
0794 size_t mad_size, gfp_t gfp_mask)
0795 {
0796 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
0797 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
0798 struct ib_rmpp_segment *seg = NULL;
0799 int left, seg_size, pad;
0800
0801 send_buf->seg_size = mad_size - send_buf->hdr_len;
0802 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
0803 seg_size = send_buf->seg_size;
0804 pad = send_wr->pad;
0805
0806
0807 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
0808 seg = kmalloc(sizeof(*seg) + seg_size, gfp_mask);
0809 if (!seg) {
0810 free_send_rmpp_list(send_wr);
0811 return -ENOMEM;
0812 }
0813 seg->num = ++send_buf->seg_count;
0814 list_add_tail(&seg->list, &send_wr->rmpp_list);
0815 }

	/* Zero any padding at the end of the last segment */
0818 if (pad)
0819 memset(seg->data + seg_size - pad, 0, pad);
0820
0821 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
0822 agent.rmpp_version;
0823 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
0824 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
0825
0826 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
0827 struct ib_rmpp_segment, list);
0828 send_wr->last_ack_seg = send_wr->cur_seg;
0829 return 0;
0830 }
0831
0832 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
0833 {
0834 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
0835 }
0836 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
0837
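/*
 * Allocate and initialize a MAD send buffer.  For RMPP sends only the
 * header is allocated inline; the payload lives in the per-segment list
 * built by alloc_send_rmpp_list().
 */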
0838 struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
0839 u32 remote_qpn, u16 pkey_index,
0840 int rmpp_active, int hdr_len,
0841 int data_len, gfp_t gfp_mask,
0842 u8 base_version)
0843 {
0844 struct ib_mad_agent_private *mad_agent_priv;
0845 struct ib_mad_send_wr_private *mad_send_wr;
0846 int pad, message_size, ret, size;
0847 void *buf;
0848 size_t mad_size;
0849 bool opa;
0850
0851 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
0852 agent);
0853
0854 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
0855
0856 if (opa && base_version == OPA_MGMT_BASE_VERSION)
0857 mad_size = sizeof(struct opa_mad);
0858 else
0859 mad_size = sizeof(struct ib_mad);
0860
0861 pad = get_pad_size(hdr_len, data_len, mad_size);
0862 message_size = hdr_len + data_len + pad;
0863
0864 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
0865 if (!rmpp_active && message_size > mad_size)
0866 return ERR_PTR(-EINVAL);
0867 } else
0868 if (rmpp_active || message_size > mad_size)
0869 return ERR_PTR(-EINVAL);
0870
0871 size = rmpp_active ? hdr_len : mad_size;
0872 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
0873 if (!buf)
0874 return ERR_PTR(-ENOMEM);
0875
0876 mad_send_wr = buf + size;
0877 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
0878 mad_send_wr->send_buf.mad = buf;
0879 mad_send_wr->send_buf.hdr_len = hdr_len;
0880 mad_send_wr->send_buf.data_len = data_len;
0881 mad_send_wr->pad = pad;
0882
0883 mad_send_wr->mad_agent_priv = mad_agent_priv;
0884 mad_send_wr->sg_list[0].length = hdr_len;
0885 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
0888 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
0889 data_len < mad_size - hdr_len)
0890 mad_send_wr->sg_list[1].length = data_len;
0891 else
0892 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
0893
0894 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
0895
0896 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
0897
0898 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
0899 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
0900 mad_send_wr->send_wr.wr.num_sge = 2;
0901 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
0902 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
0903 mad_send_wr->send_wr.remote_qpn = remote_qpn;
0904 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
0905 mad_send_wr->send_wr.pkey_index = pkey_index;
0906
0907 if (rmpp_active) {
0908 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
0909 if (ret) {
0910 kfree(buf);
0911 return ERR_PTR(ret);
0912 }
0913 }
0914
0915 mad_send_wr->send_buf.mad_agent = mad_agent;
0916 refcount_inc(&mad_agent_priv->refcount);
0917 return &mad_send_wr->send_buf;
0918 }
0919 EXPORT_SYMBOL(ib_create_send_mad);
0920
0921 int ib_get_mad_data_offset(u8 mgmt_class)
0922 {
0923 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
0924 return IB_MGMT_SA_HDR;
0925 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
0926 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
0927 (mgmt_class == IB_MGMT_CLASS_BIS))
0928 return IB_MGMT_DEVICE_HDR;
0929 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
0930 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
0931 return IB_MGMT_VENDOR_HDR;
0932 else
0933 return IB_MGMT_MAD_HDR;
0934 }
0935 EXPORT_SYMBOL(ib_get_mad_data_offset);
0936
0937 int ib_is_mad_class_rmpp(u8 mgmt_class)
0938 {
0939 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
0940 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
0941 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
0942 (mgmt_class == IB_MGMT_CLASS_BIS) ||
0943 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
0944 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
0945 return 1;
0946 return 0;
0947 }
0948 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
0949
0950 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
0951 {
0952 struct ib_mad_send_wr_private *mad_send_wr;
0953 struct list_head *list;
0954
0955 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
0956 send_buf);
0957 list = &mad_send_wr->cur_seg->list;
0958
0959 if (mad_send_wr->cur_seg->num < seg_num) {
0960 list_for_each_entry(mad_send_wr->cur_seg, list, list)
0961 if (mad_send_wr->cur_seg->num == seg_num)
0962 break;
0963 } else if (mad_send_wr->cur_seg->num > seg_num) {
0964 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
0965 if (mad_send_wr->cur_seg->num == seg_num)
0966 break;
0967 }
0968 return mad_send_wr->cur_seg->data;
0969 }
0970 EXPORT_SYMBOL(ib_get_rmpp_segment);
0971
0972 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
0973 {
0974 if (mad_send_wr->send_buf.seg_count)
0975 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
0976 mad_send_wr->seg_num);
0977 else
0978 return mad_send_wr->send_buf.mad +
0979 mad_send_wr->send_buf.hdr_len;
0980 }
0981
0982 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
0983 {
0984 struct ib_mad_agent_private *mad_agent_priv;
0985 struct ib_mad_send_wr_private *mad_send_wr;
0986
0987 mad_agent_priv = container_of(send_buf->mad_agent,
0988 struct ib_mad_agent_private, agent);
0989 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
0990 send_buf);
0991
0992 free_send_rmpp_list(mad_send_wr);
0993 kfree(send_buf->mad);
0994 deref_mad_agent(mad_agent_priv);
0995 }
0996 EXPORT_SYMBOL(ib_free_send_mad);
0997
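/*
 * Map the header and payload for DMA and post the send WR.  If the send
 * queue is already full, the request is queued on the overflow list instead
 * and posted once earlier sends complete.
 */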
0998 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
0999 {
1000 struct ib_mad_qp_info *qp_info;
1001 struct list_head *list;
1002 struct ib_mad_agent *mad_agent;
1003 struct ib_sge *sge;
1004 unsigned long flags;
1005 int ret;

	/* Set the cqe so mad_send_wr can be recovered on completion */
1008 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1009 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1010 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1011 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1012
1013 mad_agent = mad_send_wr->send_buf.mad_agent;
1014 sge = mad_send_wr->sg_list;
1015 sge[0].addr = ib_dma_map_single(mad_agent->device,
1016 mad_send_wr->send_buf.mad,
1017 sge[0].length,
1018 DMA_TO_DEVICE);
1019 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1020 return -ENOMEM;
1021
1022 mad_send_wr->header_mapping = sge[0].addr;
1023
1024 sge[1].addr = ib_dma_map_single(mad_agent->device,
1025 ib_get_payload(mad_send_wr),
1026 sge[1].length,
1027 DMA_TO_DEVICE);
1028 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1029 ib_dma_unmap_single(mad_agent->device,
1030 mad_send_wr->header_mapping,
1031 sge[0].length, DMA_TO_DEVICE);
1032 return -ENOMEM;
1033 }
1034 mad_send_wr->payload_mapping = sge[1].addr;
1035
1036 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1037 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1038 trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
1039 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1040 NULL);
1041 list = &qp_info->send_queue.list;
1042 } else {
1043 ret = 0;
1044 list = &qp_info->overflow_list;
1045 }
1046
1047 if (!ret) {
1048 qp_info->send_queue.count++;
1049 list_add_tail(&mad_send_wr->mad_list.list, list);
1050 }
1051 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1052 if (ret) {
1053 ib_dma_unmap_single(mad_agent->device,
1054 mad_send_wr->header_mapping,
1055 sge[0].length, DMA_TO_DEVICE);
1056 ib_dma_unmap_single(mad_agent->device,
1057 mad_send_wr->payload_mapping,
1058 sge[1].length, DMA_TO_DEVICE);
1059 }
1060 return ret;
1061 }

/*
 * ib_post_send_mad - Post MAD(s) to the send queue of the QP associated
 * with the registered client.
 */
1067 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1068 struct ib_mad_send_buf **bad_send_buf)
1069 {
1070 struct ib_mad_agent_private *mad_agent_priv;
1071 struct ib_mad_send_buf *next_send_buf;
1072 struct ib_mad_send_wr_private *mad_send_wr;
1073 unsigned long flags;
1074 int ret = -EINVAL;

	/* Walk the chain of send buffers and post each one */
1077 for (; send_buf; send_buf = next_send_buf) {
1078 mad_send_wr = container_of(send_buf,
1079 struct ib_mad_send_wr_private,
1080 send_buf);
1081 mad_agent_priv = mad_send_wr->mad_agent_priv;
1082
1083 ret = ib_mad_enforce_security(mad_agent_priv,
1084 mad_send_wr->send_wr.pkey_index);
1085 if (ret)
1086 goto error;
1087
1088 if (!send_buf->mad_agent->send_handler ||
1089 (send_buf->timeout_ms &&
1090 !send_buf->mad_agent->recv_handler)) {
1091 ret = -EINVAL;
1092 goto error;
1093 }
1094
1095 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1096 if (mad_agent_priv->agent.rmpp_version) {
1097 ret = -EINVAL;
1098 goto error;
1099 }
1100 }
1101
		/*
		 * Save the pointer to the next work request before posting,
		 * in case the current one completes and the user modifies
		 * the work request associated with the completion.
		 */
1107 next_send_buf = send_buf->next;
1108 mad_send_wr->send_wr.ah = send_buf->ah;
1109
1110 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1111 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1112 ret = handle_outgoing_dr_smp(mad_agent_priv,
1113 mad_send_wr);
1114 if (ret < 0)
1115 goto error;
1116 else if (ret == 1)
1117 continue;
1118 }
1119
1120 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1121
1122 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1123 mad_send_wr->max_retries = send_buf->retries;
1124 mad_send_wr->retries_left = send_buf->retries;
1125 send_buf->retries = 0;
1126
1127 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1128 mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
1131 refcount_inc(&mad_agent_priv->refcount);
1132 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1133 list_add_tail(&mad_send_wr->agent_list,
1134 &mad_agent_priv->send_list);
1135 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1136
1137 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1138 ret = ib_send_rmpp_mad(mad_send_wr);
1139 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1140 ret = ib_send_mad(mad_send_wr);
1141 } else
1142 ret = ib_send_mad(mad_send_wr);
1143 if (ret < 0) {
1144
1145 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1146 list_del(&mad_send_wr->agent_list);
1147 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1148 deref_mad_agent(mad_agent_priv);
1149 goto error;
1150 }
1151 }
1152 return 0;
1153 error:
1154 if (bad_send_buf)
1155 *bad_send_buf = send_buf;
1156 return ret;
1157 }
1158 EXPORT_SYMBOL(ib_post_send_mad);

/*
 * ib_free_recv_mad - Return the data buffers used to receive a MAD to the
 * access layer.
 */
1164 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1165 {
1166 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1167 struct ib_mad_private_header *mad_priv_hdr;
1168 struct ib_mad_private *priv;
1169 struct list_head free_list;
1170
1171 INIT_LIST_HEAD(&free_list);
1172 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1173
1174 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1175 &free_list, list) {
1176 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1177 recv_buf);
1178 mad_priv_hdr = container_of(mad_recv_wc,
1179 struct ib_mad_private_header,
1180 recv_wc);
1181 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1182 header);
1183 kfree(priv);
1184 }
1185 }
1186 EXPORT_SYMBOL(ib_free_recv_mad);
1187
1188 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1189 struct ib_mad_reg_req *mad_reg_req)
1190 {
1191 int i;
1192
1193 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1194 if ((*method)->agent[i]) {
1195 pr_err("Method %d already in use\n", i);
1196 return -EINVAL;
1197 }
1198 }
1199 return 0;
1200 }
1201
1202 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1203 {
1204
1205 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1206 return (*method) ? 0 : (-ENOMEM);
1207 }

/*
 * Check to see if there are any methods still in use.
 */
1212 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1213 {
1214 int i;
1215
1216 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1217 if (method->agent[i])
1218 return 1;
1219 return 0;
1220 }

/*
 * Check to see if there are any method tables for this class still in use.
 */
1225 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1226 {
1227 int i;
1228
1229 for (i = 0; i < MAX_MGMT_CLASS; i++)
1230 if (class->method_table[i])
1231 return 1;
1232 return 0;
1233 }
1234
1235 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1236 {
1237 int i;
1238
1239 for (i = 0; i < MAX_MGMT_OUI; i++)
1240 if (vendor_class->method_table[i])
1241 return 1;
1242 return 0;
1243 }
1244
1245 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1246 const char *oui)
1247 {
1248 int i;
1249
1250 for (i = 0; i < MAX_MGMT_OUI; i++)
1251
1252 if (!memcmp(vendor_class->oui[i], oui, 3))
1253 return i;
1254
1255 return -1;
1256 }
1257
1258 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1259 {
1260 int i;
1261
1262 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1263 if (vendor->vendor_class[i])
1264 return 1;
1265
1266 return 0;
1267 }
1268
1269 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1270 struct ib_mad_agent_private *agent)
1271 {
1272 int i;
1273
1274
1275 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1276 if (method->agent[i] == agent)
1277 method->agent[i] = NULL;
1278 }
1279
1280 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1281 struct ib_mad_agent_private *agent_priv,
1282 u8 mgmt_class)
1283 {
1284 struct ib_mad_port_private *port_priv;
1285 struct ib_mad_mgmt_class_table **class;
1286 struct ib_mad_mgmt_method_table **method;
1287 int i, ret;
1288
1289 port_priv = agent_priv->qp_info->port_priv;
1290 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1291 if (!*class) {
1292
1293 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1294 if (!*class) {
1295 ret = -ENOMEM;
1296 goto error1;
1297 }
1298
1299
1300 method = &(*class)->method_table[mgmt_class];
1301 if ((ret = allocate_method_table(method)))
1302 goto error2;
1303 } else {
1304 method = &(*class)->method_table[mgmt_class];
1305 if (!*method) {
1306
1307 if ((ret = allocate_method_table(method)))
1308 goto error1;
1309 }
1310 }
1311
1312
1313 if (method_in_use(method, mad_reg_req))
1314 goto error3;
1315
1316
1317 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1318 (*method)->agent[i] = agent_priv;
1319
1320 return 0;
1321
1322 error3:
1323
1324 remove_methods_mad_agent(*method, agent_priv);
1325
1326 if (!check_method_table(*method)) {
1327
1328 kfree(*method);
1329 *method = NULL;
1330 }
1331 ret = -EINVAL;
1332 goto error1;
1333 error2:
1334 kfree(*class);
1335 *class = NULL;
1336 error1:
1337 return ret;
1338 }
1339
1340 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1341 struct ib_mad_agent_private *agent_priv)
1342 {
1343 struct ib_mad_port_private *port_priv;
1344 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1345 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1346 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1347 struct ib_mad_mgmt_method_table **method;
1348 int i, ret = -ENOMEM;
1349 u8 vclass;
1350
1351
1352 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1353 port_priv = agent_priv->qp_info->port_priv;
1354 vendor_table = &port_priv->version[
1355 mad_reg_req->mgmt_class_version].vendor;
1356 if (!*vendor_table) {
1357
1358 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1359 if (!vendor)
1360 goto error1;
1361
1362 *vendor_table = vendor;
1363 }
1364 if (!(*vendor_table)->vendor_class[vclass]) {
1365
1366 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1367 if (!vendor_class)
1368 goto error2;
1369
1370 (*vendor_table)->vendor_class[vclass] = vendor_class;
1371 }
1372 for (i = 0; i < MAX_MGMT_OUI; i++) {
1373
1374 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1375 mad_reg_req->oui, 3)) {
1376 method = &(*vendor_table)->vendor_class[
1377 vclass]->method_table[i];
1378 if (!*method)
1379 goto error3;
1380 goto check_in_use;
1381 }
1382 }
1383 for (i = 0; i < MAX_MGMT_OUI; i++) {
1384
1385 if (!is_vendor_oui((*vendor_table)->vendor_class[
1386 vclass]->oui[i])) {
1387 method = &(*vendor_table)->vendor_class[
1388 vclass]->method_table[i];
1389
1390 if (!*method) {
1391 ret = allocate_method_table(method);
1392 if (ret)
1393 goto error3;
1394 }
1395 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1396 mad_reg_req->oui, 3);
1397 goto check_in_use;
1398 }
1399 }
1400 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1401 goto error3;
1402
1403 check_in_use:
1404
1405 if (method_in_use(method, mad_reg_req))
1406 goto error4;
1407
1408
1409 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1410 (*method)->agent[i] = agent_priv;
1411
1412 return 0;
1413
1414 error4:
1415
1416 remove_methods_mad_agent(*method, agent_priv);
1417
1418 if (!check_method_table(*method)) {
1419
1420 kfree(*method);
1421 *method = NULL;
1422 }
1423 ret = -EINVAL;
1424 error3:
1425 if (vendor_class) {
1426 (*vendor_table)->vendor_class[vclass] = NULL;
1427 kfree(vendor_class);
1428 }
1429 error2:
1430 if (vendor) {
1431 *vendor_table = NULL;
1432 kfree(vendor);
1433 }
1434 error1:
1435 return ret;
1436 }
1437
1438 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1439 {
1440 struct ib_mad_port_private *port_priv;
1441 struct ib_mad_mgmt_class_table *class;
1442 struct ib_mad_mgmt_method_table *method;
1443 struct ib_mad_mgmt_vendor_class_table *vendor;
1444 struct ib_mad_mgmt_vendor_class *vendor_class;
1445 int index;
1446 u8 mgmt_class;

	/*
	 * Was a MAD registration request supplied with the original
	 * registration?
	 */
1452 if (!agent_priv->reg_req)
1453 goto out;
1454
1455 port_priv = agent_priv->qp_info->port_priv;
1456 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1457 class = port_priv->version[
1458 agent_priv->reg_req->mgmt_class_version].class;
1459 if (!class)
1460 goto vendor_check;
1461
1462 method = class->method_table[mgmt_class];
1463 if (method) {
1464
1465 remove_methods_mad_agent(method, agent_priv);
1466
1467 if (!check_method_table(method)) {
1468
1469 kfree(method);
1470 class->method_table[mgmt_class] = NULL;
1471
1472 if (!check_class_table(class)) {
1473
1474 kfree(class);
1475 port_priv->version[
1476 agent_priv->reg_req->
1477 mgmt_class_version].class = NULL;
1478 }
1479 }
1480 }
1481
1482 vendor_check:
1483 if (!is_vendor_class(mgmt_class))
1484 goto out;
1485
1486
1487 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1488 vendor = port_priv->version[
1489 agent_priv->reg_req->mgmt_class_version].vendor;
1490
1491 if (!vendor)
1492 goto out;
1493
1494 vendor_class = vendor->vendor_class[mgmt_class];
1495 if (vendor_class) {
1496 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1497 if (index < 0)
1498 goto out;
1499 method = vendor_class->method_table[index];
1500 if (method) {
1501
1502 remove_methods_mad_agent(method, agent_priv);
1503
1504
1505
1506
1507 if (!check_method_table(method)) {
1508
1509 kfree(method);
1510 vendor_class->method_table[index] = NULL;
1511 memset(vendor_class->oui[index], 0, 3);
1512
1513 if (!check_vendor_class(vendor_class)) {
1514
1515 kfree(vendor_class);
1516 vendor->vendor_class[mgmt_class] = NULL;
1517
1518 if (!check_vendor_table(vendor)) {
1519 kfree(vendor);
1520 port_priv->version[
1521 agent_priv->reg_req->
1522 mgmt_class_version].
1523 vendor = NULL;
1524 }
1525 }
1526 }
1527 }
1528 }
1529
1530 out:
1531 return;
1532 }
1533
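/*
 * Find the agent that should receive an incoming MAD: responses are looked
 * up by hi_tid in the ib_mad_clients xarray, requests by walking the
 * class/method (or vendor OUI) registration tables.  A reference is taken
 * on the returned agent.
 */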
1534 static struct ib_mad_agent_private *
1535 find_mad_agent(struct ib_mad_port_private *port_priv,
1536 const struct ib_mad_hdr *mad_hdr)
1537 {
1538 struct ib_mad_agent_private *mad_agent = NULL;
1539 unsigned long flags;
1540
1541 if (ib_response_mad(mad_hdr)) {
1542 u32 hi_tid;

		/*
		 * Responses are routed on the high 32 bits of the
		 * transaction ID, which hold the sending agent's hi_tid.
		 */
1548 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1549 rcu_read_lock();
1550 mad_agent = xa_load(&ib_mad_clients, hi_tid);
1551 if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount))
1552 mad_agent = NULL;
1553 rcu_read_unlock();
1554 } else {
1555 struct ib_mad_mgmt_class_table *class;
1556 struct ib_mad_mgmt_method_table *method;
1557 struct ib_mad_mgmt_vendor_class_table *vendor;
1558 struct ib_mad_mgmt_vendor_class *vendor_class;
1559 const struct ib_vendor_mad *vendor_mad;
1560 int index;
1561
1562 spin_lock_irqsave(&port_priv->reg_lock, flags);
		/*
		 * Requests are routed on version, class, and method; for
		 * "newer" vendor classes also on the OUI.
		 */
1567 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1568 goto out;
1569 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1570 class = port_priv->version[
1571 mad_hdr->class_version].class;
1572 if (!class)
1573 goto out;
1574 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1575 ARRAY_SIZE(class->method_table))
1576 goto out;
1577 method = class->method_table[convert_mgmt_class(
1578 mad_hdr->mgmt_class)];
1579 if (method)
1580 mad_agent = method->agent[mad_hdr->method &
1581 ~IB_MGMT_METHOD_RESP];
1582 } else {
1583 vendor = port_priv->version[
1584 mad_hdr->class_version].vendor;
1585 if (!vendor)
1586 goto out;
1587 vendor_class = vendor->vendor_class[vendor_class_index(
1588 mad_hdr->mgmt_class)];
1589 if (!vendor_class)
1590 goto out;
1591
1592 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1593 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1594 if (index == -1)
1595 goto out;
1596 method = vendor_class->method_table[index];
1597 if (method) {
1598 mad_agent = method->agent[mad_hdr->method &
1599 ~IB_MGMT_METHOD_RESP];
1600 }
1601 }
1602 if (mad_agent)
1603 refcount_inc(&mad_agent->refcount);
1604 out:
1605 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1606 }
1607
1608 if (mad_agent && !mad_agent->agent.recv_handler) {
1609 dev_notice(&port_priv->device->dev,
1610 "No receive handler for client %p on port %u\n",
1611 &mad_agent->agent, port_priv->port_num);
1612 deref_mad_agent(mad_agent);
1613 mad_agent = NULL;
1614 }
1615
1616 return mad_agent;
1617 }
1618
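/*
 * Basic sanity checks on an incoming MAD: the base version must be
 * understood and the management class must be appropriate for the QP it
 * arrived on (SMI classes on QP0, everything else on QP1).
 */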
1619 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1620 const struct ib_mad_qp_info *qp_info,
1621 bool opa)
1622 {
1623 int valid = 0;
1624 u32 qp_num = qp_info->qp->qp_num;
1625
1626
1627 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1628 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1629 pr_err("MAD received with unsupported base version %u %s\n",
1630 mad_hdr->base_version, opa ? "(opa)" : "");
1631 goto out;
1632 }

	/* Filter SMI packets sent to other than QP0 */
1635 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1636 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1637 if (qp_num == 0)
1638 valid = 1;
1639 } else {
		/* CM attributes other than ClassPortInfo only use the Send method */
1641 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1642 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1643 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1644 goto out;
		/* Filter GSI packets sent to QP0 */
1646 if (qp_num != 0)
1647 valid = 1;
1648 }
1649
1650 out:
1651 return valid;
1652 }
1653
1654 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1655 const struct ib_mad_hdr *mad_hdr)
1656 {
1657 struct ib_rmpp_mad *rmpp_mad;
1658
1659 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1660 return !mad_agent_priv->agent.rmpp_version ||
1661 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1662 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1663 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1664 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1665 }
1666
1667 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1668 const struct ib_mad_recv_wc *rwc)
1669 {
1670 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1671 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1672 }
1673
1674 static inline int
1675 rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1676 const struct ib_mad_send_wr_private *wr,
1677 const struct ib_mad_recv_wc *rwc)
1678 {
1679 struct rdma_ah_attr attr;
1680 u8 send_resp, rcv_resp;
1681 union ib_gid sgid;
1682 struct ib_device *device = mad_agent_priv->agent.device;
1683 u32 port_num = mad_agent_priv->agent.port_num;
1684 u8 lmc;
1685 bool has_grh;
1686
1687 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1688 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1689
1690 if (send_resp == rcv_resp)
		/* both requests or both responses: GIDs are not comparable */
1692 return 0;
1693
1694 if (rdma_query_ah(wr->send_buf.ah, &attr))
1695
1696 return 0;
1697
1698 has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
1699 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has a GRH, the other does not: assume different */
1701 return 0;
1702
1703 if (!send_resp && rcv_resp) {
1704
1705 if (!has_grh) {
1706 if (ib_get_cached_lmc(device, port_num, &lmc))
1707 return 0;
1708 return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
1709 rwc->wc->dlid_path_bits) &
1710 ((1 << lmc) - 1)));
1711 } else {
1712 const struct ib_global_route *grh =
1713 rdma_ah_read_grh(&attr);
1714
1715 if (rdma_query_gid(device, port_num,
1716 grh->sgid_index, &sgid))
1717 return 0;
1718 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1719 16);
1720 }
1721 }
1722
1723 if (!has_grh)
1724 return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
1725 else
1726 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
1727 rwc->recv_buf.grh->sgid.raw,
1728 16);
1729 }
1730
1731 static inline int is_direct(u8 class)
1732 {
1733 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1734 }
1735
1736 struct ib_mad_send_wr_private*
1737 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1738 const struct ib_mad_recv_wc *wc)
1739 {
1740 struct ib_mad_send_wr_private *wr;
1741 const struct ib_mad_hdr *mad_hdr;
1742
1743 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1744
1745 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1746 if ((wr->tid == mad_hdr->tid) &&
1747 rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check the GID for directed route MADs, which
		     * may carry permissive LIDs.
		     */
1752 (is_direct(mad_hdr->mgmt_class) ||
1753 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1754 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1755 }

	/*
	 * It's possible to receive the response before being notified that
	 * the send has completed.
	 */
1761 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1762 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1763 wr->tid == mad_hdr->tid &&
1764 wr->timeout &&
1765 rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check the GID for directed route MADs, which
		     * may carry permissive LIDs.
		     */
1770 (is_direct(mad_hdr->mgmt_class) ||
1771 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1772
1773 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1774 }
1775 return NULL;
1776 }
1777
1778 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1779 {
1780 mad_send_wr->timeout = 0;
1781 if (mad_send_wr->refcount == 1)
1782 list_move_tail(&mad_send_wr->agent_list,
1783 &mad_send_wr->mad_agent_priv->done_list);
1784 }
1785
1786 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1787 struct ib_mad_recv_wc *mad_recv_wc)
1788 {
1789 struct ib_mad_send_wr_private *mad_send_wr;
1790 struct ib_mad_send_wc mad_send_wc;
1791 unsigned long flags;
1792 int ret;
1793
1794 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1795 ret = ib_mad_enforce_security(mad_agent_priv,
1796 mad_recv_wc->wc->pkey_index);
1797 if (ret) {
1798 ib_free_recv_mad(mad_recv_wc);
1799 deref_mad_agent(mad_agent_priv);
1800 return;
1801 }
1802
1803 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1804 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1805 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1806 mad_recv_wc);
1807 if (!mad_recv_wc) {
1808 deref_mad_agent(mad_agent_priv);
1809 return;
1810 }
1811 }

	/* Complete the corresponding request, if any */
1814 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1815 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1816 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1817 if (!mad_send_wr) {
1818 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1819 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1820 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1821 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1822 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/*
				 * User-space RMPP is in effect and this is an
				 * active RMPP MAD: hand it to the agent as is.
				 */
1826 mad_agent_priv->agent.recv_handler(
1827 &mad_agent_priv->agent, NULL,
1828 mad_recv_wc);
1829 deref_mad_agent(mad_agent_priv);
1830 } else {
				/*
				 * Not user RMPP and no matching send: drop
				 * the MAD.
				 */
1834 ib_free_recv_mad(mad_recv_wc);
1835 deref_mad_agent(mad_agent_priv);
1836 return;
1837 }
1838 } else {
1839 ib_mark_mad_done(mad_send_wr);
1840 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Deliver the response to the client before completing the request */
1843 mad_agent_priv->agent.recv_handler(
1844 &mad_agent_priv->agent,
1845 &mad_send_wr->send_buf,
1846 mad_recv_wc);
1847 deref_mad_agent(mad_agent_priv);
1848
1849 mad_send_wc.status = IB_WC_SUCCESS;
1850 mad_send_wc.vendor_err = 0;
1851 mad_send_wc.send_buf = &mad_send_wr->send_buf;
1852 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
1853 }
1854 } else {
1855 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
1856 mad_recv_wc);
1857 deref_mad_agent(mad_agent_priv);
1858 }
1859 }
1860
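/*
 * Directed-route SMP receive processing: decide whether the SMP is handled
 * locally, forwarded (on a switch), or discarded.
 */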
1861 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
1862 const struct ib_mad_qp_info *qp_info,
1863 const struct ib_wc *wc,
1864 u32 port_num,
1865 struct ib_mad_private *recv,
1866 struct ib_mad_private *response)
1867 {
1868 enum smi_forward_action retsmi;
1869 struct ib_smp *smp = (struct ib_smp *)recv->mad;
1870
1871 trace_ib_mad_handle_ib_smi(smp);
1872
1873 if (smi_handle_dr_smp_recv(smp,
1874 rdma_cap_ib_switch(port_priv->device),
1875 port_num,
1876 port_priv->device->phys_port_cnt) ==
1877 IB_SMI_DISCARD)
1878 return IB_SMI_DISCARD;
1879
1880 retsmi = smi_check_forward_dr_smp(smp);
1881 if (retsmi == IB_SMI_LOCAL)
1882 return IB_SMI_HANDLE;
1883
1884 if (retsmi == IB_SMI_SEND) {
1885 if (smi_handle_dr_smp_send(smp,
1886 rdma_cap_ib_switch(port_priv->device),
1887 port_num) == IB_SMI_DISCARD)
1888 return IB_SMI_DISCARD;
1889
1890 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
1891 return IB_SMI_DISCARD;
1892 } else if (rdma_cap_ib_switch(port_priv->device)) {
1893
1894 memcpy(response, recv, mad_priv_size(response));
1895 response->header.recv_wc.wc = &response->header.wc;
1896 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
1897 response->header.recv_wc.recv_buf.grh = &response->grh;
1898
1899 agent_send_response((const struct ib_mad_hdr *)response->mad,
1900 &response->grh, wc,
1901 port_priv->device,
1902 smi_get_fwd_port(smp),
1903 qp_info->qp->qp_num,
1904 response->mad_size,
1905 false);
1906
1907 return IB_SMI_DISCARD;
1908 }
1909 return IB_SMI_HANDLE;
1910 }
1911
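/*
 * Build an error response (status "unsupported method/attribute") for a
 * Get/Set request that no agent claimed.
 */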
1912 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
1913 struct ib_mad_private *response,
1914 size_t *resp_len, bool opa)
1915 {
1916 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
1917 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
1918
1919 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
1920 recv_hdr->method == IB_MGMT_METHOD_SET) {
1921 memcpy(response, recv, mad_priv_size(response));
1922 response->header.recv_wc.wc = &response->header.wc;
1923 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
1924 response->header.recv_wc.recv_buf.grh = &response->grh;
1925 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
1926 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
1927 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1928 resp_hdr->status |= IB_SMP_DIRECTION;
1929
1930 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
1931 if (recv_hdr->mgmt_class ==
1932 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
1933 recv_hdr->mgmt_class ==
1934 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
1935 *resp_len = opa_get_smp_header_size(
1936 (struct opa_smp *)recv->mad);
1937 else
1938 *resp_len = sizeof(struct ib_mad_hdr);
1939 }
1940
1941 return true;
1942 } else {
1943 return false;
1944 }
1945 }
1946
1947 static enum smi_action
1948 handle_opa_smi(struct ib_mad_port_private *port_priv,
1949 struct ib_mad_qp_info *qp_info,
1950 struct ib_wc *wc,
1951 u32 port_num,
1952 struct ib_mad_private *recv,
1953 struct ib_mad_private *response)
1954 {
1955 enum smi_forward_action retsmi;
1956 struct opa_smp *smp = (struct opa_smp *)recv->mad;
1957
1958 trace_ib_mad_handle_opa_smi(smp);
1959
1960 if (opa_smi_handle_dr_smp_recv(smp,
1961 rdma_cap_ib_switch(port_priv->device),
1962 port_num,
1963 port_priv->device->phys_port_cnt) ==
1964 IB_SMI_DISCARD)
1965 return IB_SMI_DISCARD;
1966
1967 retsmi = opa_smi_check_forward_dr_smp(smp);
1968 if (retsmi == IB_SMI_LOCAL)
1969 return IB_SMI_HANDLE;
1970
1971 if (retsmi == IB_SMI_SEND) {
1972 if (opa_smi_handle_dr_smp_send(smp,
1973 rdma_cap_ib_switch(port_priv->device),
1974 port_num) == IB_SMI_DISCARD)
1975 return IB_SMI_DISCARD;
1976
1977 if (opa_smi_check_local_smp(smp, port_priv->device) ==
1978 IB_SMI_DISCARD)
1979 return IB_SMI_DISCARD;
1980
1981 } else if (rdma_cap_ib_switch(port_priv->device)) {
1982
1983 memcpy(response, recv, mad_priv_size(response));
1984 response->header.recv_wc.wc = &response->header.wc;
1985 response->header.recv_wc.recv_buf.opa_mad =
1986 (struct opa_mad *)response->mad;
1987 response->header.recv_wc.recv_buf.grh = &response->grh;
1988
1989 agent_send_response((const struct ib_mad_hdr *)response->mad,
1990 &response->grh, wc,
1991 port_priv->device,
1992 opa_smi_get_fwd_port(smp),
1993 qp_info->qp->qp_num,
1994 recv->header.wc.byte_len,
1995 true);
1996
1997 return IB_SMI_DISCARD;
1998 }
1999
2000 return IB_SMI_HANDLE;
2001 }
2002
2003 static enum smi_action
2004 handle_smi(struct ib_mad_port_private *port_priv,
2005 struct ib_mad_qp_info *qp_info,
2006 struct ib_wc *wc,
2007 u32 port_num,
2008 struct ib_mad_private *recv,
2009 struct ib_mad_private *response,
2010 bool opa)
2011 {
2012 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2013
2014 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2015 mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2016 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2017 response);
2018
2019 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2020 }
2021
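/*
 * Receive completion handler: unmap the buffer, validate the MAD, give the
 * driver first crack at it via process_mad(), run SMP processing for
 * directed-route MADs, then dispatch to the matching agent (or generate an
 * unmatched response) and repost a receive buffer.
 */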
2022 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2023 {
2024 struct ib_mad_port_private *port_priv = cq->cq_context;
2025 struct ib_mad_list_head *mad_list =
2026 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2027 struct ib_mad_qp_info *qp_info;
2028 struct ib_mad_private_header *mad_priv_hdr;
2029 struct ib_mad_private *recv, *response = NULL;
2030 struct ib_mad_agent_private *mad_agent;
2031 u32 port_num;
2032 int ret = IB_MAD_RESULT_SUCCESS;
2033 size_t mad_size;
2034 u16 resp_mad_pkey_index = 0;
2035 bool opa;
2036
2037 if (list_empty_careful(&port_priv->port_list))
2038 return;
2039
2040 if (wc->status != IB_WC_SUCCESS) {
/*
 * Receive errors indicate that the QP has entered the error
 * state - the error handling/shutdown path will clean up the
 * posted receive buffers.
 */
2045 return;
2046 }
2047
2048 qp_info = mad_list->mad_queue->qp_info;
2049 dequeue_mad(mad_list);
2050
2051 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2052 qp_info->port_priv->port_num);
2053
2054 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2055 mad_list);
2056 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2057 ib_dma_unmap_single(port_priv->device,
2058 recv->header.mapping,
2059 mad_priv_dma_size(recv),
2060 DMA_FROM_DEVICE);
2061
2062
2063 recv->header.wc = *wc;
2064 recv->header.recv_wc.wc = &recv->header.wc;
2065
2066 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2067 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2068 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2069 } else {
2070 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2071 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2072 }
2073
2074 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2075 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2076
2077
2078 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2079 goto out;
2080
2081 trace_ib_mad_recv_done_handler(qp_info, wc,
2082 (struct ib_mad_hdr *)recv->mad);
2083
2084 mad_size = recv->mad_size;
2085 response = alloc_mad_private(mad_size, GFP_KERNEL);
2086 if (!response)
2087 goto out;
2088
2089 if (rdma_cap_ib_switch(port_priv->device))
2090 port_num = wc->port_num;
2091 else
2092 port_num = port_priv->port_num;
2093
2094 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2095 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2096 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2097 response, opa)
2098 == IB_SMI_DISCARD)
2099 goto out;
2100 }
2101
2102
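/* Give the low-level driver a "right of first refusal" on the MAD */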
2103 if (port_priv->device->ops.process_mad) {
2104 ret = port_priv->device->ops.process_mad(
2105 port_priv->device, 0, port_priv->port_num, wc,
2106 &recv->grh, (const struct ib_mad *)recv->mad,
2107 (struct ib_mad *)response->mad, &mad_size,
2108 &resp_mad_pkey_index);
2109
2110 if (opa)
2111 wc->pkey_index = resp_mad_pkey_index;
2112
2113 if (ret & IB_MAD_RESULT_SUCCESS) {
2114 if (ret & IB_MAD_RESULT_CONSUMED)
2115 goto out;
2116 if (ret & IB_MAD_RESULT_REPLY) {
2117 agent_send_response((const struct ib_mad_hdr *)response->mad,
2118 &recv->grh, wc,
2119 port_priv->device,
2120 port_num,
2121 qp_info->qp->qp_num,
2122 mad_size, opa);
2123 goto out;
2124 }
2125 }
2126 }
2127
2128 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2129 if (mad_agent) {
2130 trace_ib_mad_recv_done_agent(mad_agent);
2131 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2132
/*
 * Ownership of recv passes to ib_mad_complete_recv() (or the
 * agent's recv_handler), so it must not be freed again here.
 */
2136 recv = NULL;
2137 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2138 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2139 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2140 port_priv->device, port_num,
2141 qp_info->qp->qp_num, mad_size, opa);
2142 }
2143
2144 out:
2145
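/* Post another receive request for this QP */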
2146 if (response) {
2147 ib_mad_post_receive_mads(qp_info, response);
2148 kfree(recv);
2149 } else
2150 ib_mad_post_receive_mads(qp_info, recv);
2151 }
2152
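/*
 * Re-arm (or cancel) the delayed timeout work based on the request at the
 * head of the agent's wait list.
 */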
2153 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2154 {
2155 struct ib_mad_send_wr_private *mad_send_wr;
2156 unsigned long delay;
2157
2158 if (list_empty(&mad_agent_priv->wait_list)) {
2159 cancel_delayed_work(&mad_agent_priv->timed_work);
2160 } else {
2161 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2162 struct ib_mad_send_wr_private,
2163 agent_list);
2164
2165 if (time_after(mad_agent_priv->timeout,
2166 mad_send_wr->timeout)) {
2167 mad_agent_priv->timeout = mad_send_wr->timeout;
2168 delay = mad_send_wr->timeout - jiffies;
2169 if ((long)delay <= 0)
2170 delay = 1;
2171 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2172 &mad_agent_priv->timed_work, delay);
2173 }
2174 }
2175 }
2176
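/*
 * Move a sent request onto the agent's wait list, kept sorted by absolute
 * timeout, and reschedule the timeout work if it is now first to expire.
 */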
2177 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2178 {
2179 struct ib_mad_agent_private *mad_agent_priv;
2180 struct ib_mad_send_wr_private *temp_mad_send_wr;
2181 struct list_head *list_item;
2182 unsigned long delay;
2183
2184 mad_agent_priv = mad_send_wr->mad_agent_priv;
2185 list_del(&mad_send_wr->agent_list);
2186
2187 delay = mad_send_wr->timeout;
2188 mad_send_wr->timeout += jiffies;
2189
2190 if (delay) {
2191 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2192 temp_mad_send_wr = list_entry(list_item,
2193 struct ib_mad_send_wr_private,
2194 agent_list);
2195 if (time_after(mad_send_wr->timeout,
2196 temp_mad_send_wr->timeout))
2197 break;
2198 }
2199 } else {
2200 list_item = &mad_agent_priv->wait_list;
2201 }
2202
2203 list_add(&mad_send_wr->agent_list, list_item);
2204
2205
2206 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2207 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2208 &mad_agent_priv->timed_work, delay);
2209 }
2210
2211 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2212 unsigned long timeout_ms)
2213 {
2214 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2215 wait_for_response(mad_send_wr);
2216 }
2217
/*
 * Process a completed send work request: hand it to the RMPP layer if
 * needed, drop the reference held for the expected response, and invoke
 * the client's send handler once the last reference is gone.
 */
2221 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2222 struct ib_mad_send_wc *mad_send_wc)
2223 {
2224 struct ib_mad_agent_private *mad_agent_priv;
2225 unsigned long flags;
2226 int ret;
2227
2228 mad_agent_priv = mad_send_wr->mad_agent_priv;
2229 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2230 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2231 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2232 if (ret == IB_RMPP_RESULT_CONSUMED)
2233 goto done;
2234 } else
2235 ret = IB_RMPP_RESULT_UNHANDLED;
2236
2237 if (mad_send_wc->status != IB_WC_SUCCESS &&
2238 mad_send_wr->status == IB_WC_SUCCESS) {
2239 mad_send_wr->status = mad_send_wc->status;
2240 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2241 }
2242
2243 if (--mad_send_wr->refcount > 0) {
2244 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2245 mad_send_wr->status == IB_WC_SUCCESS) {
2246 wait_for_response(mad_send_wr);
2247 }
2248 goto done;
2249 }
2250
2251
2252 list_del(&mad_send_wr->agent_list);
2253 adjust_timeout(mad_agent_priv);
2254 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2255
2256 if (mad_send_wr->status != IB_WC_SUCCESS)
2257 mad_send_wc->status = mad_send_wr->status;
2258 if (ret == IB_RMPP_RESULT_INTERNAL)
2259 ib_rmpp_send_handler(mad_send_wc);
2260 else
2261 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2262 mad_send_wc);
2263
2264
2265 deref_mad_agent(mad_agent_priv);
2266 return;
2267 done:
2268 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2269 }
2270
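/*
 * Completion handler for the send CQ: unmap the request, complete it
 * towards the client, and post any send that was waiting on the overflow
 * list.
 */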
2271 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2272 {
2273 struct ib_mad_port_private *port_priv = cq->cq_context;
2274 struct ib_mad_list_head *mad_list =
2275 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2276 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2277 struct ib_mad_qp_info *qp_info;
2278 struct ib_mad_queue *send_queue;
2279 struct ib_mad_send_wc mad_send_wc;
2280 unsigned long flags;
2281 int ret;
2282
2283 if (list_empty_careful(&port_priv->port_list))
2284 return;
2285
2286 if (wc->status != IB_WC_SUCCESS) {
2287 if (!ib_mad_send_error(port_priv, wc))
2288 return;
2289 }
2290
2291 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2292 mad_list);
2293 send_queue = mad_list->mad_queue;
2294 qp_info = send_queue->qp_info;
2295
2296 trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
2297 trace_ib_mad_send_done_handler(mad_send_wr, wc);
2298
2299 retry:
2300 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2301 mad_send_wr->header_mapping,
2302 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2303 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2304 mad_send_wr->payload_mapping,
2305 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2306 queued_send_wr = NULL;
2307 spin_lock_irqsave(&send_queue->lock, flags);
2308 list_del(&mad_list->list);
2309
2310
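/* A slot has freed up: pull the next send, if any, off the overflow list */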
2311 if (send_queue->count-- > send_queue->max_active) {
2312 mad_list = container_of(qp_info->overflow_list.next,
2313 struct ib_mad_list_head, list);
2314 queued_send_wr = container_of(mad_list,
2315 struct ib_mad_send_wr_private,
2316 mad_list);
2317 list_move_tail(&mad_list->list, &send_queue->list);
2318 }
2319 spin_unlock_irqrestore(&send_queue->lock, flags);
2320
2321 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2322 mad_send_wc.status = wc->status;
2323 mad_send_wc.vendor_err = wc->vendor_err;
2324 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2325
2326 if (queued_send_wr) {
2327 trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
2328 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2329 NULL);
2330 if (ret) {
2331 dev_err(&port_priv->device->dev,
2332 "ib_post_send failed: %d\n", ret);
2333 mad_send_wr = queued_send_wr;
2334 wc->status = IB_WC_LOC_QP_OP_ERR;
2335 goto retry;
2336 }
2337 }
2338 }
2339
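/*
 * Flag every send still on the send queue so it is reposted when its
 * flush completion arrives.
 */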
2340 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2341 {
2342 struct ib_mad_send_wr_private *mad_send_wr;
2343 struct ib_mad_list_head *mad_list;
2344 unsigned long flags;
2345
2346 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2347 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2348 mad_send_wr = container_of(mad_list,
2349 struct ib_mad_send_wr_private,
2350 mad_list);
2351 mad_send_wr->retry = 1;
2352 }
2353 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2354 }
2355
2356 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2357 struct ib_wc *wc)
2358 {
2359 struct ib_mad_list_head *mad_list =
2360 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2361 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2362 struct ib_mad_send_wr_private *mad_send_wr;
2363 int ret;
2364
/*
 * Send errors will transition the QP to SQE - move the QP back to
 * RTS and repost the flushed work requests.
 */
2369 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2370 mad_list);
2371 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2372 if (mad_send_wr->retry) {
2373
2374 mad_send_wr->retry = 0;
2375 trace_ib_mad_error_handler(mad_send_wr, qp_info);
2376 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2377 NULL);
2378 if (!ret)
2379 return false;
2380 }
2381 } else {
2382 struct ib_qp_attr *attr;
2383
2384
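/* Transition the QP back to RTS and let the offending send fail */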
2385 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2386 if (attr) {
2387 attr->qp_state = IB_QPS_RTS;
2388 attr->cur_qp_state = IB_QPS_SQE;
2389 ret = ib_modify_qp(qp_info->qp, attr,
2390 IB_QP_STATE | IB_QP_CUR_STATE);
2391 kfree(attr);
2392 if (ret)
2393 dev_err(&port_priv->device->dev,
2394 "%s - ib_modify_qp to RTS: %d\n",
2395 __func__, ret);
2396 else
2397 mark_sends_for_retry(qp_info);
2398 }
2399 }
2400
2401 return true;
2402 }
2403
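/*
 * Cancel all outstanding sends for an agent: mark active sends as flushed
 * and complete everything on the wait list with IB_WC_WR_FLUSH_ERR.
 */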
2404 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2405 {
2406 unsigned long flags;
2407 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2408 struct ib_mad_send_wc mad_send_wc;
2409 struct list_head cancel_list;
2410
2411 INIT_LIST_HEAD(&cancel_list);
2412
2413 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2414 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2415 &mad_agent_priv->send_list, agent_list) {
2416 if (mad_send_wr->status == IB_WC_SUCCESS) {
2417 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2418 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2419 }
2420 }
2421
/* Empty the wait list to prevent receives from finding a request */
2423 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2424 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2425
/* Report all cancelled requests to the client */
2427 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2428 mad_send_wc.vendor_err = 0;
2429
2430 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2431 &cancel_list, agent_list) {
2432 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2433 list_del(&mad_send_wr->agent_list);
2434 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2435 &mad_send_wc);
2436 deref_mad_agent(mad_agent_priv);
2437 }
2438 }
2439
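/*
 * Look up the send WR backing a client-visible send buffer, first on the
 * wait list and then among RMPP sends still on the send list.
 */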
2440 static struct ib_mad_send_wr_private*
2441 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2442 struct ib_mad_send_buf *send_buf)
2443 {
2444 struct ib_mad_send_wr_private *mad_send_wr;
2445
2446 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2447 agent_list) {
2448 if (&mad_send_wr->send_buf == send_buf)
2449 return mad_send_wr;
2450 }
2451
2452 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2453 agent_list) {
2454 if (is_rmpp_data_mad(mad_agent_priv,
2455 mad_send_wr->send_buf.mad) &&
2456 &mad_send_wr->send_buf == send_buf)
2457 return mad_send_wr;
2458 }
2459 return NULL;
2460 }
2461
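/*
 * Modify the timeout of an outstanding MAD. A timeout of zero cancels the
 * request, which then completes with IB_WC_WR_FLUSH_ERR status.
 */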
2462 int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2463 {
2464 struct ib_mad_agent_private *mad_agent_priv;
2465 struct ib_mad_send_wr_private *mad_send_wr;
2466 unsigned long flags;
2467 int active;
2468
2469 if (!send_buf)
2470 return -EINVAL;
2471
2472 mad_agent_priv = container_of(send_buf->mad_agent,
2473 struct ib_mad_agent_private, agent);
2474 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2475 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2476 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2477 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2478 return -EINVAL;
2479 }
2480
2481 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2482 if (!timeout_ms) {
2483 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2484 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2485 }
2486
2487 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2488 if (active)
2489 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2490 else
2491 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2492
2493 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2494 return 0;
2495 }
2496 EXPORT_SYMBOL(ib_modify_mad);
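
/*
 * Illustrative sketch only (caller names and values are assumptions, not
 * taken from this file): a client that posted a request with
 * ib_post_send_mad() and kept its ib_mad_send_buf pointer can adjust the
 * response timeout, or cancel the request outright with a timeout of 0:
 *
 *	err = ib_modify_mad(send_buf, 2000);	allow ~2 more seconds
 *	err = ib_modify_mad(send_buf, 0);	cancel; the send completes
 *						with IB_WC_WR_FLUSH_ERR
 */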
2497
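/*
 * Deliver locally routed (loopback) MADs: fake a receive completion for
 * the destination agent and then complete the send for the originator.
 */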
2498 static void local_completions(struct work_struct *work)
2499 {
2500 struct ib_mad_agent_private *mad_agent_priv;
2501 struct ib_mad_local_private *local;
2502 struct ib_mad_agent_private *recv_mad_agent;
2503 unsigned long flags;
2504 int free_mad;
2505 struct ib_wc wc;
2506 struct ib_mad_send_wc mad_send_wc;
2507 bool opa;
2508
2509 mad_agent_priv =
2510 container_of(work, struct ib_mad_agent_private, local_work);
2511
2512 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2513 mad_agent_priv->qp_info->port_priv->port_num);
2514
2515 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2516 while (!list_empty(&mad_agent_priv->local_list)) {
2517 local = list_entry(mad_agent_priv->local_list.next,
2518 struct ib_mad_local_private,
2519 completion_list);
2520 list_del(&local->completion_list);
2521 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2522 free_mad = 0;
2523 if (local->mad_priv) {
2524 u8 base_version;
2525 recv_mad_agent = local->recv_mad_agent;
2526 if (!recv_mad_agent) {
2527 dev_err(&mad_agent_priv->agent.device->dev,
2528 "No receive MAD agent for local completion\n");
2529 free_mad = 1;
2530 goto local_send_completion;
2531 }
2532
/*
 * Defined behavior is to complete the response before the request:
 * build a synthetic work completion so the receiving agent sees the
 * loopback MAD as a normal receive.
 */
2537 build_smp_wc(recv_mad_agent->agent.qp,
2538 local->mad_send_wr->send_wr.wr.wr_cqe,
2539 be16_to_cpu(IB_LID_PERMISSIVE),
2540 local->mad_send_wr->send_wr.pkey_index,
2541 recv_mad_agent->agent.port_num, &wc);
2542
2543 local->mad_priv->header.recv_wc.wc = &wc;
2544
2545 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2546 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2547 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2548 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2549 } else {
2550 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2551 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2552 }
2553
2554 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2555 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2556 &local->mad_priv->header.recv_wc.rmpp_list);
2557 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2558 local->mad_priv->header.recv_wc.recv_buf.mad =
2559 (struct ib_mad *)local->mad_priv->mad;
2560 recv_mad_agent->agent.recv_handler(
2561 &recv_mad_agent->agent,
2562 &local->mad_send_wr->send_buf,
2563 &local->mad_priv->header.recv_wc);
2564 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2565 deref_mad_agent(recv_mad_agent);
2566 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2567 }
2568
2569 local_send_completion:
2570
2571 mad_send_wc.status = IB_WC_SUCCESS;
2572 mad_send_wc.vendor_err = 0;
2573 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2574 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2575 &mad_send_wc);
2576
2577 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2578 deref_mad_agent(mad_agent_priv);
2579 if (free_mad)
2580 kfree(local->mad_priv);
2581 kfree(local);
2582 }
2583 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2584 }
2585
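/*
 * Retry a timed-out send if it has retries left; returns 0 when the MAD
 * was reposted and a negative errno otherwise.
 */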
2586 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2587 {
2588 int ret;
2589
2590 if (!mad_send_wr->retries_left)
2591 return -ETIMEDOUT;
2592
2593 mad_send_wr->retries_left--;
2594 mad_send_wr->send_buf.retries++;
2595
2596 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2597
2598 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2599 ret = ib_retry_rmpp(mad_send_wr);
2600 switch (ret) {
2601 case IB_RMPP_RESULT_UNHANDLED:
2602 ret = ib_send_mad(mad_send_wr);
2603 break;
2604 case IB_RMPP_RESULT_CONSUMED:
2605 ret = 0;
2606 break;
2607 default:
2608 ret = -ECOMM;
2609 break;
2610 }
2611 } else
2612 ret = ib_send_mad(mad_send_wr);
2613
2614 if (!ret) {
2615 mad_send_wr->refcount++;
2616 list_add_tail(&mad_send_wr->agent_list,
2617 &mad_send_wr->mad_agent_priv->send_list);
2618 }
2619 return ret;
2620 }
2621
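/*
 * Delayed-work handler that expires requests on the wait list, retrying
 * them when possible and otherwise reporting IB_WC_RESP_TIMEOUT_ERR.
 */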
2622 static void timeout_sends(struct work_struct *work)
2623 {
2624 struct ib_mad_agent_private *mad_agent_priv;
2625 struct ib_mad_send_wr_private *mad_send_wr;
2626 struct ib_mad_send_wc mad_send_wc;
2627 unsigned long flags, delay;
2628
2629 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2630 timed_work.work);
2631 mad_send_wc.vendor_err = 0;
2632
2633 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2634 while (!list_empty(&mad_agent_priv->wait_list)) {
2635 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2636 struct ib_mad_send_wr_private,
2637 agent_list);
2638
2639 if (time_after(mad_send_wr->timeout, jiffies)) {
2640 delay = mad_send_wr->timeout - jiffies;
2641 if ((long)delay <= 0)
2642 delay = 1;
2643 queue_delayed_work(mad_agent_priv->qp_info->
2644 port_priv->wq,
2645 &mad_agent_priv->timed_work, delay);
2646 break;
2647 }
2648
2649 list_del(&mad_send_wr->agent_list);
2650 if (mad_send_wr->status == IB_WC_SUCCESS &&
2651 !retry_send(mad_send_wr))
2652 continue;
2653
2654 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2655
2656 if (mad_send_wr->status == IB_WC_SUCCESS)
2657 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2658 else
2659 mad_send_wc.status = mad_send_wr->status;
2660 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2661 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2662 &mad_send_wc);
2663
2664 deref_mad_agent(mad_agent_priv);
2665 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2666 }
2667 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2668 }
2669
/*
 * Allocate receive MADs and post receive WRs for them, reusing the
 * passed-in buffer (if any) for the first post.
 */
2673 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2674 struct ib_mad_private *mad)
2675 {
2676 unsigned long flags;
2677 int post, ret;
2678 struct ib_mad_private *mad_priv;
2679 struct ib_sge sg_list;
2680 struct ib_recv_wr recv_wr;
2681 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2682
2683
2684 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2685
2686
2687 recv_wr.next = NULL;
2688 recv_wr.sg_list = &sg_list;
2689 recv_wr.num_sge = 1;
2690
2691 do {
2692
2693 if (mad) {
2694 mad_priv = mad;
2695 mad = NULL;
2696 } else {
2697 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2698 GFP_ATOMIC);
2699 if (!mad_priv) {
2700 ret = -ENOMEM;
2701 break;
2702 }
2703 }
2704 sg_list.length = mad_priv_dma_size(mad_priv);
2705 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2706 &mad_priv->grh,
2707 mad_priv_dma_size(mad_priv),
2708 DMA_FROM_DEVICE);
2709 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2710 sg_list.addr))) {
2711 kfree(mad_priv);
2712 ret = -ENOMEM;
2713 break;
2714 }
2715 mad_priv->header.mapping = sg_list.addr;
2716 mad_priv->header.mad_list.mad_queue = recv_queue;
2717 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2718 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2719
2720
2721 spin_lock_irqsave(&recv_queue->lock, flags);
2722 post = (++recv_queue->count < recv_queue->max_active);
2723 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2724 spin_unlock_irqrestore(&recv_queue->lock, flags);
2725 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
2726 if (ret) {
2727 spin_lock_irqsave(&recv_queue->lock, flags);
2728 list_del(&mad_priv->header.mad_list.list);
2729 recv_queue->count--;
2730 spin_unlock_irqrestore(&recv_queue->lock, flags);
2731 ib_dma_unmap_single(qp_info->port_priv->device,
2732 mad_priv->header.mapping,
2733 mad_priv_dma_size(mad_priv),
2734 DMA_FROM_DEVICE);
2735 kfree(mad_priv);
2736 dev_err(&qp_info->port_priv->device->dev,
2737 "ib_post_recv failed: %d\n", ret);
2738 break;
2739 }
2740 } while (post);
2741
2742 return ret;
2743 }
2744
/*
 * Return all posted receive MADs: unmap and free every buffer still
 * sitting on the receive queue.
 */
2748 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2749 {
2750 struct ib_mad_private_header *mad_priv_hdr;
2751 struct ib_mad_private *recv;
2752 struct ib_mad_list_head *mad_list;
2753
2754 if (!qp_info->qp)
2755 return;
2756
2757 while (!list_empty(&qp_info->recv_queue.list)) {
2758
2759 mad_list = list_entry(qp_info->recv_queue.list.next,
2760 struct ib_mad_list_head, list);
2761 mad_priv_hdr = container_of(mad_list,
2762 struct ib_mad_private_header,
2763 mad_list);
2764 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2765 header);
2766
2767
2768 list_del(&mad_list->list);
2769
2770 ib_dma_unmap_single(qp_info->port_priv->device,
2771 recv->header.mapping,
2772 mad_priv_dma_size(recv),
2773 DMA_FROM_DEVICE);
2774 kfree(recv);
2775 }
2776
2777 qp_info->recv_queue.count = 0;
2778 }
2779
/*
 * Start the port: move the special QPs to RTS, request CQ notification,
 * and post the initial set of receive WRs.
 */
2783 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2784 {
2785 int ret, i;
2786 struct ib_qp_attr *attr;
2787 struct ib_qp *qp;
2788 u16 pkey_index;
2789
2790 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2791 if (!attr)
2792 return -ENOMEM;
2793
2794 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2795 IB_DEFAULT_PKEY_FULL, &pkey_index);
2796 if (ret)
2797 pkey_index = 0;
2798
2799 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2800 qp = port_priv->qp_info[i].qp;
2801 if (!qp)
2802 continue;
2803
/*
 * A pkey index is required for the Reset-to-Init transition even
 * where it is otherwise irrelevant.
 */
2808 attr->qp_state = IB_QPS_INIT;
2809 attr->pkey_index = pkey_index;
2810 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2811 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2812 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2813 if (ret) {
2814 dev_err(&port_priv->device->dev,
2815 "Couldn't change QP%d state to INIT: %d\n",
2816 i, ret);
2817 goto out;
2818 }
2819
2820 attr->qp_state = IB_QPS_RTR;
2821 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
2822 if (ret) {
2823 dev_err(&port_priv->device->dev,
2824 "Couldn't change QP%d state to RTR: %d\n",
2825 i, ret);
2826 goto out;
2827 }
2828
2829 attr->qp_state = IB_QPS_RTS;
2830 attr->sq_psn = IB_MAD_SEND_Q_PSN;
2831 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
2832 if (ret) {
2833 dev_err(&port_priv->device->dev,
2834 "Couldn't change QP%d state to RTS: %d\n",
2835 i, ret);
2836 goto out;
2837 }
2838 }
2839
2840 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
2841 if (ret) {
2842 dev_err(&port_priv->device->dev,
2843 "Failed to request completion notification: %d\n",
2844 ret);
2845 goto out;
2846 }
2847
2848 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2849 if (!port_priv->qp_info[i].qp)
2850 continue;
2851
2852 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2853 if (ret) {
2854 dev_err(&port_priv->device->dev,
2855 "Couldn't post receive WRs\n");
2856 goto out;
2857 }
2858 }
2859 out:
2860 kfree(attr);
2861 return ret;
2862 }
2863
2864 static void qp_event_handler(struct ib_event *event, void *qp_context)
2865 {
2866 struct ib_mad_qp_info *qp_info = qp_context;
2867
2868
2869 dev_err(&qp_info->port_priv->device->dev,
2870 "Fatal error (%d) on MAD QP (%u)\n",
2871 event->event, qp_info->qp->qp_num);
2872 }
2873
2874 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
2875 struct ib_mad_queue *mad_queue)
2876 {
2877 mad_queue->qp_info = qp_info;
2878 mad_queue->count = 0;
2879 spin_lock_init(&mad_queue->lock);
2880 INIT_LIST_HEAD(&mad_queue->list);
2881 }
2882
2883 static void init_mad_qp(struct ib_mad_port_private *port_priv,
2884 struct ib_mad_qp_info *qp_info)
2885 {
2886 qp_info->port_priv = port_priv;
2887 init_mad_queue(qp_info, &qp_info->send_queue);
2888 init_mad_queue(qp_info, &qp_info->recv_queue);
2889 INIT_LIST_HEAD(&qp_info->overflow_list);
2890 }
2891
2892 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2893 enum ib_qp_type qp_type)
2894 {
2895 struct ib_qp_init_attr qp_init_attr;
2896 int ret;
2897
2898 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2899 qp_init_attr.send_cq = qp_info->port_priv->cq;
2900 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2901 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2902 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2903 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2904 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2905 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2906 qp_init_attr.qp_type = qp_type;
2907 qp_init_attr.port_num = qp_info->port_priv->port_num;
2908 qp_init_attr.qp_context = qp_info;
2909 qp_init_attr.event_handler = qp_event_handler;
2910 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
2911 if (IS_ERR(qp_info->qp)) {
2912 dev_err(&qp_info->port_priv->device->dev,
2913 "Couldn't create ib_mad QP%d\n",
2914 get_spl_qp_index(qp_type));
2915 ret = PTR_ERR(qp_info->qp);
2916 goto error;
2917 }
2918
2919 qp_info->send_queue.max_active = mad_sendq_size;
2920 qp_info->recv_queue.max_active = mad_recvq_size;
2921 return 0;
2922
2923 error:
2924 return ret;
2925 }
2926
2927 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
2928 {
2929 if (!qp_info->qp)
2930 return;
2931
2932 ib_destroy_qp(qp_info->qp);
2933 }
2934
/*
 * Open the port: allocate the PD and CQ, create the special QPs (QP0
 * where the port supports SMI, and QP1), create the per-port workqueue,
 * and start MAD processing.
 */
2939 static int ib_mad_port_open(struct ib_device *device,
2940 u32 port_num)
2941 {
2942 int ret, cq_size;
2943 struct ib_mad_port_private *port_priv;
2944 unsigned long flags;
2945 char name[sizeof "ib_mad123"];
2946 int has_smi;
2947
2948 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
2949 return -EFAULT;
2950
2951 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
2952 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
2953 return -EFAULT;
2954
2955
2956 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2957 if (!port_priv)
2958 return -ENOMEM;
2959
2960 port_priv->device = device;
2961 port_priv->port_num = port_num;
2962 spin_lock_init(&port_priv->reg_lock);
2963 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2964 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2965
2966 cq_size = mad_sendq_size + mad_recvq_size;
2967 has_smi = rdma_cap_ib_smi(device, port_num);
2968 if (has_smi)
2969 cq_size *= 2;
2970
2971 port_priv->pd = ib_alloc_pd(device, 0);
2972 if (IS_ERR(port_priv->pd)) {
2973 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
2974 ret = PTR_ERR(port_priv->pd);
2975 goto error3;
2976 }
2977
2978 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
2979 IB_POLL_UNBOUND_WORKQUEUE);
2980 if (IS_ERR(port_priv->cq)) {
2981 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
2982 ret = PTR_ERR(port_priv->cq);
2983 goto error4;
2984 }
2985
2986 if (has_smi) {
2987 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
2988 if (ret)
2989 goto error6;
2990 }
2991 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
2992 if (ret)
2993 goto error7;
2994
2995 snprintf(name, sizeof(name), "ib_mad%u", port_num);
2996 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2997 if (!port_priv->wq) {
2998 ret = -ENOMEM;
2999 goto error8;
3000 }
3001
3002 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3003 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3004 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3005
3006 ret = ib_mad_port_start(port_priv);
3007 if (ret) {
3008 dev_err(&device->dev, "Couldn't start port\n");
3009 goto error9;
3010 }
3011
3012 return 0;
3013
3014 error9:
3015 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3016 list_del_init(&port_priv->port_list);
3017 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3018
3019 destroy_workqueue(port_priv->wq);
3020 error8:
3021 destroy_mad_qp(&port_priv->qp_info[1]);
3022 error7:
3023 destroy_mad_qp(&port_priv->qp_info[0]);
3024 error6:
3025 ib_free_cq(port_priv->cq);
3026 cleanup_recv_queue(&port_priv->qp_info[1]);
3027 cleanup_recv_queue(&port_priv->qp_info[0]);
3028 error4:
3029 ib_dealloc_pd(port_priv->pd);
3030 error3:
3031 kfree(port_priv);
3032
3033 return ret;
3034 }
3035
/*
 * Close the port: stop the workqueue, destroy the special QPs, and free
 * the CQ, PD, and any remaining receive buffers.
 */
3041 static int ib_mad_port_close(struct ib_device *device, u32 port_num)
3042 {
3043 struct ib_mad_port_private *port_priv;
3044 unsigned long flags;
3045
3046 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3047 port_priv = __ib_get_mad_port(device, port_num);
3048 if (port_priv == NULL) {
3049 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3050 dev_err(&device->dev, "Port %u not found\n", port_num);
3051 return -ENODEV;
3052 }
3053 list_del_init(&port_priv->port_list);
3054 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3055
3056 destroy_workqueue(port_priv->wq);
3057 destroy_mad_qp(&port_priv->qp_info[1]);
3058 destroy_mad_qp(&port_priv->qp_info[0]);
3059 ib_free_cq(port_priv->cq);
3060 ib_dealloc_pd(port_priv->pd);
3061 cleanup_recv_queue(&port_priv->qp_info[1]);
3062 cleanup_recv_queue(&port_priv->qp_info[0]);
3063
3064
3065 kfree(port_priv);
3066
3067 return 0;
3068 }
3069
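/*
 * Client "add" callback: open MAD and agent services on every port of the
 * device that supports MADs.
 */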
3070 static int ib_mad_init_device(struct ib_device *device)
3071 {
3072 int start, i;
3073 unsigned int count = 0;
3074 int ret;
3075
3076 start = rdma_start_port(device);
3077
3078 for (i = start; i <= rdma_end_port(device); i++) {
3079 if (!rdma_cap_ib_mad(device, i))
3080 continue;
3081
3082 ret = ib_mad_port_open(device, i);
3083 if (ret) {
3084 dev_err(&device->dev, "Couldn't open port %d\n", i);
3085 goto error;
3086 }
3087 ret = ib_agent_port_open(device, i);
3088 if (ret) {
3089 dev_err(&device->dev,
3090 "Couldn't open port %d for agents\n", i);
3091 goto error_agent;
3092 }
3093 count++;
3094 }
3095 if (!count)
3096 return -EOPNOTSUPP;
3097
3098 return 0;
3099
3100 error_agent:
3101 if (ib_mad_port_close(device, i))
3102 dev_err(&device->dev, "Couldn't close port %d\n", i);
3103
3104 error:
3105 while (--i >= start) {
3106 if (!rdma_cap_ib_mad(device, i))
3107 continue;
3108
3109 if (ib_agent_port_close(device, i))
3110 dev_err(&device->dev,
3111 "Couldn't close port %d for agents\n", i);
3112 if (ib_mad_port_close(device, i))
3113 dev_err(&device->dev, "Couldn't close port %d\n", i);
3114 }
3115 return ret;
3116 }
3117
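/*
 * Client "remove" callback: tear down agent and MAD services on every
 * MAD-capable port of the device.
 */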
3118 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3119 {
3120 unsigned int i;
3121
3122 rdma_for_each_port (device, i) {
3123 if (!rdma_cap_ib_mad(device, i))
3124 continue;
3125
3126 if (ib_agent_port_close(device, i))
3127 dev_err(&device->dev,
3128 "Couldn't close port %u for agents\n", i);
3129 if (ib_mad_port_close(device, i))
3130 dev_err(&device->dev, "Couldn't close port %u\n", i);
3131 }
3132 }
3133
3134 static struct ib_client mad_client = {
3135 .name = "mad",
3136 .add = ib_mad_init_device,
3137 .remove = ib_mad_remove_device
3138 };
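
/*
 * Note (behavior of the wider IB core, not shown in this file):
 * ib_register_client() invokes .add for every existing RDMA device and
 * for each device registered later; .remove runs symmetrically when a
 * device is unregistered or the client is removed.
 */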
3139
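/*
 * Module initialization: clamp the queue-size module parameters to sane
 * bounds and register as an IB client so ports are set up as devices
 * appear.
 */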
3140 int ib_mad_init(void)
3141 {
3142 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3143 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3144
3145 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3146 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3147
3148 INIT_LIST_HEAD(&ib_mad_port_list);
3149
3150 if (ib_register_client(&mad_client)) {
3151 pr_err("Couldn't register ib_mad client\n");
3152 return -EINVAL;
3153 }
3154
3155 return 0;
3156 }
3157
3158 void ib_mad_cleanup(void)
3159 {
3160 ib_unregister_client(&mad_client);
3161 }