0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035 #include <linux/string.h>
0036 #include <linux/slab.h>
0037
0038 #include <rdma/ib_verbs.h>
0039 #include <rdma/ib_mad.h>
0040 #include <rdma/ib_smi.h>
0041
0042 #include "mthca_dev.h"
0043 #include "mthca_cmd.h"
0044
/* Mellanox vendor-specific MAD management classes handled by this driver. */
enum {
	MTHCA_VENDOR_CLASS1 = 0x9,
	MTHCA_VENDOR_CLASS2 = 0xa
};
0049
0050 static int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
0051 {
0052 struct ib_port_attr *tprops = NULL;
0053 int ret;
0054
0055 tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
0056 if (!tprops)
0057 return -ENOMEM;
0058
0059 ret = ib_query_port(&dev->ib_dev, port_num, tprops);
0060 if (ret) {
0061 dev_warn(&dev->ib_dev.dev,
0062 "ib_query_port failed (%d) forport %d\n", ret,
0063 port_num);
0064 goto out;
0065 }
0066
0067 dev->rate[port_num - 1] = tprops->active_speed *
0068 ib_width_enum_to_int(tprops->active_width);
0069
0070 out:
0071 kfree(tprops);
0072 return ret;
0073 }
0074
0075 static void update_sm_ah(struct mthca_dev *dev,
0076 u8 port_num, u16 lid, u8 sl)
0077 {
0078 struct ib_ah *new_ah;
0079 struct rdma_ah_attr ah_attr;
0080 unsigned long flags;
0081
0082 if (!dev->send_agent[port_num - 1][0])
0083 return;
0084
0085 memset(&ah_attr, 0, sizeof ah_attr);
0086 ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
0087 rdma_ah_set_dlid(&ah_attr, lid);
0088 rdma_ah_set_sl(&ah_attr, sl);
0089 rdma_ah_set_port_num(&ah_attr, port_num);
0090
0091 new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
0092 &ah_attr, 0);
0093 if (IS_ERR(new_ah))
0094 return;
0095
0096 spin_lock_irqsave(&dev->sm_lock, flags);
0097 if (dev->sm_ah[port_num - 1])
0098 rdma_destroy_ah(dev->sm_ah[port_num - 1], 0);
0099 dev->sm_ah[port_num - 1] = new_ah;
0100 spin_unlock_irqrestore(&dev->sm_lock, flags);
0101 }
0102
0103
0104
0105
0106
0107 static void smp_snoop(struct ib_device *ibdev,
0108 u8 port_num,
0109 const struct ib_mad *mad,
0110 u16 prev_lid)
0111 {
0112 struct ib_event event;
0113
0114 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
0115 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
0116 mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
0117 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
0118 struct ib_port_info *pinfo =
0119 (struct ib_port_info *) ((struct ib_smp *) mad)->data;
0120 u16 lid = be16_to_cpu(pinfo->lid);
0121
0122 mthca_update_rate(to_mdev(ibdev), port_num);
0123 update_sm_ah(to_mdev(ibdev), port_num,
0124 be16_to_cpu(pinfo->sm_lid),
0125 pinfo->neighbormtu_mastersmsl & 0xf);
0126
0127 event.device = ibdev;
0128 event.element.port_num = port_num;
0129
0130 if (pinfo->clientrereg_resv_subnetto & 0x80) {
0131 event.event = IB_EVENT_CLIENT_REREGISTER;
0132 ib_dispatch_event(&event);
0133 }
0134
0135 if (prev_lid != lid) {
0136 event.event = IB_EVENT_LID_CHANGE;
0137 ib_dispatch_event(&event);
0138 }
0139 }
0140
0141 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
0142 event.device = ibdev;
0143 event.event = IB_EVENT_PKEY_CHANGE;
0144 event.element.port_num = port_num;
0145 ib_dispatch_event(&event);
0146 }
0147 }
0148 }
0149
0150 static void node_desc_override(struct ib_device *dev,
0151 struct ib_mad *mad)
0152 {
0153 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
0154 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
0155 mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
0156 mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
0157 mutex_lock(&to_mdev(dev)->cap_mask_mutex);
0158 memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
0159 IB_DEVICE_NODE_DESC_MAX);
0160 mutex_unlock(&to_mdev(dev)->cap_mask_mutex);
0161 }
0162 }
0163
/*
 * Forward a trap MAD received on @port_num to the subnet manager using
 * the cached SM address handle.  Used for traps the firmware delivers
 * to the driver rather than sending on the wire itself.
 */
static void forward_trap(struct mthca_dev *dev,
			 u32 port_num,
			 const struct ib_mad *mad)
{
	/* QP0 for SMI-class traps, QP1 for everything else. */
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		/* GFP_ATOMIC: may be called from a context that cannot
		 * sleep (MAD receive path).
		 */
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC,
					      IB_MGMT_BASE_VERSION);
		if (IS_ERR(send_buf))
			return;

		/*
		 * The copy and the sm_ah read must both happen under
		 * sm_lock so the AH cannot be destroyed out from under
		 * the posted send.  The assignment inside the if () is
		 * intentional: it both caches and tests the AH pointer.
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;	/* no SM AH cached yet */
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		/* On failure the buffer was never handed to the MAD
		 * layer, so free it here; otherwise send_handler()
		 * frees it on completion.
		 */
		if (ret)
			ib_free_send_mad(send_buf);
	}
}
0198
/*
 * Process an incoming MAD for @port_num.
 *
 * Filters out methods/attributes the hardware should not see, executes
 * the MAD via the MAD_IFC firmware command, snoops SET PortInfo /
 * P_Key changes to raise events, and patches NodeDescription replies.
 *
 * Returns an IB_MAD_RESULT_* bitmask: SUCCESS alone means "consumed,
 * no reply"; | REPLY means @out holds a response; | CONSUMED means no
 * further processing; FAILURE on firmware error.
 */
int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
		      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		      const struct ib_mad *in, struct ib_mad *out,
		      size_t *out_mad_size, u16 *out_mad_pkey_index)
{
	int err;
	u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
	u16 prev_lid = 0;
	struct ib_port_attr pattr;

	/* A trap with SLID 0 was locally generated by the firmware;
	 * forward it to the SM instead of executing it.
	 */
	if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP && !slid) {
		forward_trap(to_mdev(ibdev), port_num, in);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	/*
	 * Filter by management class: for SMP classes only Get, Set and
	 * TrapRepress reach the firmware; other SMP methods are
	 * silently consumed.
	 */
	if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't pass SMInfo or vendor-range SMP attributes to
		 * the firmware; they are handled (or ignored) in
		 * software.
		 */
		if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    ((in->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
		     IB_SMP_ATTR_VENDOR_MASK))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
		   in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
		/* Perf-mgmt and Mellanox vendor classes: Get/Set only. */
		if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		/* Any other class is not handled by this driver. */
		return IB_MAD_RESULT_SUCCESS;
	/* Capture the current LID before a SET PortInfo executes, so
	 * smp_snoop() can tell whether the LID actually changed.
	 */
	if ((in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = ib_lid_cpu16(pattr.lid);

	/* Execute the MAD in firmware. */
	err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
			    in_grh, in, out);
	if (err == -EBADMSG)
		/* Firmware rejected the MAD itself; consume quietly. */
		return IB_MAD_RESULT_SUCCESS;
	else if (err) {
		mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err);
		return IB_MAD_RESULT_FAILURE;
	}

	if (!out->mad_hdr.status) {
		smp_snoop(ibdev, port_num, in, prev_lid);
		node_desc_override(ibdev, out);
	}

	/* Direct-route responses must have the D bit (bit 15) set. */
	if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* No response is sent for a TrapRepress. */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
0276
0277 static void send_handler(struct ib_mad_agent *agent,
0278 struct ib_mad_send_wc *mad_send_wc)
0279 {
0280 ib_free_send_mad(mad_send_wc->send_buf);
0281 }
0282
/*
 * Register one SMI (QP0) and one GSI (QP1) MAD send agent per port and
 * prime the cached per-port rates.  Returns 0 on success or a negative
 * errno; on failure all agents registered so far are torn down.
 */
int mthca_create_agents(struct mthca_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;

	spin_lock_init(&dev->sm_lock);

	/* q == 0 -> SMI agent (QP0), q == 1 -> GSI agent (QP1). */
	for (p = 0; p < dev->limits.num_ports; ++p)
		for (q = 0; q <= 1; ++q) {
			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
						      q ? IB_QPT_GSI : IB_QPT_SMI,
						      NULL, 0, send_handler,
						      NULL, NULL, 0);
			if (IS_ERR(agent)) {
				ret = PTR_ERR(agent);
				goto err;
			}
			dev->send_agent[p][q] = agent;
		}

	/* Initialize the cached link rate for every port. */
	for (p = 1; p <= dev->limits.num_ports; ++p) {
		ret = mthca_update_rate(dev, p);
		if (ret) {
			mthca_err(dev, "Failed to obtain port %d rate."
				  " aborting.\n", p);
			goto err;
		}
	}

	return 0;

err:
	/* Unregister whatever was registered before the failure.
	 * NOTE(review): relies on send_agent[][] being zero-initialized
	 * so unset slots are skipped — confirm dev is zero-allocated.
	 */
	for (p = 0; p < dev->limits.num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}
0324
0325 void mthca_free_agents(struct mthca_dev *dev)
0326 {
0327 struct ib_mad_agent *agent;
0328 int p, q;
0329
0330 for (p = 0; p < dev->limits.num_ports; ++p) {
0331 for (q = 0; q <= 1; ++q) {
0332 agent = dev->send_agent[p][q];
0333 dev->send_agent[p][q] = NULL;
0334 ib_unregister_mad_agent(agent);
0335 }
0336
0337 if (dev->sm_ah[p])
0338 rdma_destroy_ah(dev->sm_ah[p],
0339 RDMA_DESTROY_AH_SLEEPABLE);
0340 }
0341 }