#define pr_fmt(fmt) "user_mad: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UMAD_MAX_PORTS	  = RDMA_MAX_PORTS,
	IB_UMAD_MAX_AGENTS	  = 32,

	IB_UMAD_MAJOR		  = 231,
	IB_UMAD_MINOR_BASE	  = 0,
	IB_UMAD_NUM_FIXED_MINOR	  = 64,
	IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR,
	IB_ISSM_MINOR_BASE	  = IB_UMAD_NUM_FIXED_MINOR,
};
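
/*
 * Each port gets two character devices: umadN for MAD send/receive and
 * issmN for claiming the SM role.  The first IB_UMAD_NUM_FIXED_MINOR
 * ports use minors under the historic static major above; any ports
 * beyond that fall back to the dynamically allocated region reserved
 * in ib_umad_init().
 */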

/*
 * Our lifetime rules for these structs are the following:
 * struct ib_umad_device is reference counted with a kref.  The initial
 * reference is taken in ib_umad_add_one() and dropped in
 * ib_umad_remove_one(); in addition, each port's umad and issm
 * struct device holds a reference, taken in ib_umad_init_port_dev()
 * and dropped from the device release function,
 * ib_umad_release_port().  Open files pin the embedded cdevs, and
 * thus their parent devices, so the ib_umad_device cannot be freed
 * while any special file is still open.
 */
struct ib_umad_port {
	struct cdev cdev;
	struct device dev;
	struct cdev sm_cdev;
	struct device sm_dev;
	struct semaphore sm_sem;

	struct mutex file_mutex;
	struct list_head file_list;

	struct ib_device *ib_dev;
	struct ib_umad_device *umad_dev;
	int dev_num;
	u32 port_num;
};

struct ib_umad_device {
	struct kref kref;
	struct ib_umad_port ports[];
};

struct ib_umad_file {
	struct mutex mutex;
	struct ib_umad_port *port;
	struct list_head recv_list;
	struct list_head send_list;
	struct list_head port_list;
	spinlock_t send_lock;
	wait_queue_head_t recv_wait;
	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
	int agents_dead;
	u8 use_pkey_index;
	u8 already_used;
};

struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc *recv_wc;
	struct list_head list;
	int length;
	struct ib_user_mad mad;
};

#define CREATE_TRACE_POINTS
#include <trace/events/ib_umad.h>

static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
				   IB_UMAD_NUM_FIXED_MINOR;
static dev_t dynamic_umad_dev;
static dev_t dynamic_issm_dev;

static DEFINE_IDA(umad_ida);

static int ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);

static void ib_umad_dev_free(struct kref *kref)
{
	struct ib_umad_device *dev =
		container_of(kref, struct ib_umad_device, kref);

	kfree(dev);
}

static void ib_umad_dev_get(struct ib_umad_device *dev)
{
	kref_get(&dev->kref);
}

static void ib_umad_dev_put(struct ib_umad_device *dev)
{
	kref_put(&dev->kref, ib_umad_dev_free);
}

static int hdr_size(struct ib_umad_file *file)
{
	return file->use_pkey_index ? sizeof(struct ib_user_mad_hdr) :
				      sizeof(struct ib_user_mad_hdr_old);
}

/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}

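/*
 * Hand a packet to the owning file's recv_list.  The loop also stamps
 * packet->mad.hdr.id with the slot of the receiving agent.  Returns 0
 * once the packet is queued and the reader woken, or 1 if the agent is
 * no longer registered with this file (the caller then still owns the
 * packet).
 */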
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}

static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}

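/*
 * Send completion: drop the packet from the file's send_list and free
 * the AH and send buffer.  A send that timed out waiting for a
 * response is reported back to userspace as a header-only MAD with
 * hdr.status == ETIMEDOUT, requeued on recv_list so the next read()
 * picks it up; otherwise the packet is freed here.
 */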
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	rdma_destroy_ah(packet->msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}

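/*
 * Receive completion: wrap the ib_mad_recv_wc in an ib_umad_packet,
 * fill in the userspace-visible header from the work completion (and,
 * for GRH-bearing MADs, from the reconstructed address handle
 * attributes), then queue it for read().  On any failure the received
 * MAD is freed here.
 */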
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status = 0;
	packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);

	/*
	 * On OPA devices it is okay to lose the upper 16 bits of LID as this
	 * information is obtained elsewhere. Mask off the upper 16 bits.
	 */
	if (rdma_cap_opa_mad(agent->device, agent->port_num))
		packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
						  mad_recv_wc->wc->slid);
	else
		packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct rdma_ah_attr ah_attr;
		const struct ib_global_route *grh;
		int ret;

		ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num,
					      mad_recv_wc->wc,
					      mad_recv_wc->recv_buf.grh,
					      &ah_attr);
		if (ret)
			goto err2;

		grh = rdma_ah_read_grh(&ah_attr);
		packet->mad.hdr.gid_index = grh->sgid_index;
		packet->mad.hdr.hop_limit = grh->hop_limit;
		packet->mad.hdr.traffic_class = grh->traffic_class;
		memcpy(packet->mad.hdr.gid, &grh->dgid, 16);
		packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label);
		rdma_destroy_ah_attr(&ah_attr);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}

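/*
 * Copy a received MAD to userspace.  Single-segment MADs are copied in
 * one go; RMPP messages larger than one segment are walked segment by
 * segment, skipping the per-segment class header after the first
 * segment.  Returns the number of bytes copied or a negative errno.
 */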
static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;
	size_t seg_size;

	recv_buf = &packet->recv_wc->recv_buf;
	seg_size = packet->recv_wc->mad_seg_size;

	/* We need enough room to copy the first (or only) MAD segment. */
	if ((packet->length <= seg_size &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > seg_size &&
	     count < hdr_size(file) + seg_size))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);
	seg_payload = min_t(int, packet->length, seg_size);
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message. Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The user buffer cannot hold the rest.  The header
			 * already copied out reports the full size needed;
			 * return -ENOSPC so the caller requeues the packet.
			 */
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = seg_size - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
				return -EFAULT;
		}
	}

	trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr);

	return hdr_size(file) + packet->length;
}

static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t size = hdr_size(file) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	if (copy_to_user(buf, packet->mad.data, packet->length))
		return -EFAULT;

	trace_ib_umad_read_send(file, &packet->mad.hdr,
				(struct ib_mad_hdr *)&packet->mad.data);

	return size;
}

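/*
 * read() for /dev/infiniband/umadN: block (unless O_NONBLOCK) until a
 * packet is available, then copy it out.  A packet that does not fit
 * in the caller's buffer is requeued so a retry with a larger buffer
 * can still retrieve it.
 */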
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	if (file->agents_dead) {
		mutex_unlock(&file->mutex);
		return -EIO;
	}

	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mutex);
	}

	if (file->agents_dead) {
		mutex_unlock(&file->mutex);
		return -EIO;
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}

static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}

static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	if (!hdr1->grh_present && !hdr2->grh_present)
		return (hdr1->lid == hdr2->lid);

	if (hdr1->grh_present && hdr2->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return 0;
}

static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

		if ((hdr->tid != sent_hdr->tid) ||
		    (hdr->mgmt_class != sent_hdr->mgmt_class))
			continue;

		/*
		 * No need to be overly clever here.  If two new operations have
		 * the same TID, reject the second as a duplicate.  This is more
		 * restrictive than required by the spec.
		 */
		if (!ib_response_mad(hdr)) {
			if (!ib_response_mad(sent_hdr))
				return 1;
			continue;
		} else if (!ib_response_mad(sent_hdr))
			continue;

		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}

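/*
 * write() for /dev/infiniband/umadN: build an address handle and a MAD
 * send buffer from the userspace header, copy in the payload (segment
 * by segment for RMPP), stamp the high-order TID for requests, reject
 * duplicates already on the send_list, and post the send.  The packet
 * is freed from send_handler() on completion.
 */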
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;
	u8 base_version;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	trace_ib_umad_write(file, &packet->mad.hdr,
			    (struct ib_mad_hdr *)&packet->mad.data);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EIO;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.type = rdma_ah_find_type(agent->device,
					 file->port->port_num);
	rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
	rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
	rdma_ah_set_path_bits(&ah_attr, packet->mad.hdr.path_bits);
	rdma_ah_set_port_num(&ah_attr, file->port->port_num);
	if (packet->mad.hdr.grh_present) {
		rdma_ah_set_grh(&ah_attr, NULL,
				be32_to_cpu(packet->mad.hdr.flow_label),
				packet->mad.hdr.gid_index,
				packet->mad.hdr.hop_limit,
				packet->mad.hdr.traffic_class);
		rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid);
	}

	ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);

	if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && ib_mad_kernel_rmpp_agent(agent)) {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	} else {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	}

	base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version;
	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL,
					 base_version);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah = ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries = packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * Set the high-order part of the transaction ID to make MADs from
	 * different agents unique, and allow routing responses back to the
	 * original requestor.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		rmpp_mad->mad_hdr.tid = *tid;
	}

	if (!ib_mad_kernel_rmpp_agent(agent)
	    && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
		spin_lock_irq(&file->send_lock);
		list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
	} else {
		spin_lock_irq(&file->send_lock);
		ret = is_duplicate(file, packet);
		if (!ret)
			list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
		if (ret) {
			ret = -EINVAL;
			goto err_msg;
		}
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}

static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	__poll_t mask = EPOLLOUT | EPOLLWRNORM;

	mutex_lock(&file->mutex);
	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (file->agents_dead)
		mask = EPOLLERR;
	mutex_unlock(&file->mutex);

	return mask;
}

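/*
 * IB_USER_MAD_REGISTER_AGENT: old-ABI agent registration.
 * compat_method_mask is set when called from the 32-bit compat ioctl,
 * where the method mask arrives as an array of u32s that must be
 * folded back into the kernel's unsigned-long bitmap.
 */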
static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(&file->port->dev,
			   "%s: invalid QPN %u specified\n", __func__,
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
		   IB_UMAD_MAX_AGENTS);

	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file, 0);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			dev_warn(&file->port->dev,
				 "process %s did not enable P_Key index support.\n",
				 current->comm);
			dev_warn(&file->port->dev,
				 " Documentation/infiniband/user_mad.rst has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

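/*
 * IB_USER_MAD_REGISTER_AGENT2: new-ABI registration.  Differs from the
 * old ioctl in taking the OUI as a single u32, carrying registration
 * flags (validated against IB_USER_MAD_REG_FLAGS_CAP, which is also
 * reported back to the caller on failure), and forcing P_Key index
 * reporting on for the file.
 */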
static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
{
	struct ib_user_mad_reg_req2 ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(&file->port->dev, "%s: invalid device\n", __func__);
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof(ureq))) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(&file->port->dev, "%s: invalid QPN %u specified\n",
			   __func__, ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
		dev_notice(&file->port->dev,
			   "%s failed: invalid registration flags specified 0x%x; supported 0x%x\n",
			   __func__, ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
		ret = -EINVAL;

		if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
			     (u32 __user *) (arg + offsetof(struct
					     ib_user_mad_reg_req2, flags))))
			ret = -EFAULT;

		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(&file->port->dev, "%s: Max Agents (%u) reached\n", __func__,
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		if (ureq.oui & 0xff000000) {
			dev_notice(&file->port->dev,
				   "%s failed: oui invalid 0x%08x\n", __func__,
				   ureq.oui);
			ret = -EINVAL;
			goto out;
		}
		req.oui[2] =  ureq.oui & 0x0000ff;
		req.oui[1] = (ureq.oui & 0x00ff00) >> 8;
		req.oui[0] = (ureq.oui & 0xff0000) >> 16;
		memcpy(req.method_mask, ureq.method_mask,
		       sizeof(req.method_mask));
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file,
				      ureq.flags);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *)(arg +
				    offsetof(struct ib_user_mad_reg_req2, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		file->use_pkey_index = 1;
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;
	if (id >= IB_UMAD_MAX_AGENTS)
		return -EINVAL;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
	if (!__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
	int ret = 0;

	mutex_lock(&file->mutex);
	if (file->already_used)
		ret = -EINVAL;
	else
		file->use_pkey_index = 1;
	mutex_unlock(&file->mutex);

	return ret;
}

static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, (void __user *) arg);
	default:
		return -ENOIOCTLCMD;
	}
}
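
/*
 * For orientation, userspace drives this ioctl interface roughly as in
 * the sketch below (a hedged illustration, not part of this driver;
 * real applications normally go through libibumad instead):
 *
 *	struct ib_user_mad_reg_req2 req = {
 *		.qpn		    = 1,	// GSI
 *		.mgmt_class	    = 0x01,	// example class, assumption
 *		.mgmt_class_version = 1,
 *	};
 *	int fd = open("/dev/infiniband/umad0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, IB_USER_MAD_REGISTER_AGENT2, &req) == 0) {
 *		// req.id now names the agent slot; place it in
 *		// struct ib_user_mad_hdr.id on every write().
 *	}
 */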

#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg));
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

/*
 * ib_umad_open() needs no serialization beyond port->file_mutex:
 *
 *  - the ib_umad_port structures are properly reference counted, and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - the ioctl/read/write methods do not touch any global state
 *    outside of the file structure being operated on.
 */
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = 0;

	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);

	mutex_lock(&port->file_mutex);

	if (!port->ib_dev) {
		ret = -ENXIO;
		goto out;
	}

	if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
		ret = -EPERM;
		goto out;
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&file->mutex);
	spin_lock_init(&file->send_lock);
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

	stream_open(inode, filp);
out:
	mutex_unlock(&port->file_mutex);
	return ret;
}

static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	mutex_unlock(&file->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	mutex_unlock(&file->port->file_mutex);
	mutex_destroy(&file->mutex);
	kfree(file);
	return 0;
}

static const struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ib_umad_compat_ioctl,
#endif
	.open		= ib_umad_open,
	.release	= ib_umad_close,
	.llseek		= no_llseek,
};

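/*
 * /dev/infiniband/issmN: opening the file claims the port's SM role.
 * The sm_sem semaphore allows only one holder at a time; IB_PORT_SM is
 * set in the port capability mask on open and cleared again on close.
 */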
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
		ret = -EPERM;
		goto err_up_sem;
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret)
		goto err_up_sem;

	filp->private_data = port;

	nonseekable_open(inode, filp);
	return 0;

err_up_sem:
	up(&port->sm_sem);

fail:
	return ret;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	mutex_lock(&port->file_mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);

	return ret;
}

static const struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close,
	.llseek	 = no_llseek,
};

static struct ib_umad_port *get_port(struct ib_device *ibdev,
				     struct ib_umad_device *umad_dev,
				     u32 port)
{
	if (!umad_dev)
		return ERR_PTR(-EOPNOTSUPP);
	if (!rdma_is_port_valid(ibdev, port))
		return ERR_PTR(-EINVAL);
	if (!rdma_cap_ib_mad(ibdev, port))
		return ERR_PTR(-EOPNOTSUPP);

	return &umad_dev->ports[port - rdma_start_port(ibdev)];
}

static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data,
			       struct ib_client_nl_info *res)
{
	struct ib_umad_port *port = get_port(ibdev, client_data, res->port);

	if (IS_ERR(port))
		return PTR_ERR(port);

	res->abi = IB_USER_MAD_ABI_VERSION;
	res->cdev = &port->dev;
	return 0;
}

static struct ib_client umad_client = {
	.name	     = "umad",
	.add	     = ib_umad_add_one,
	.remove	     = ib_umad_remove_one,
	.get_nl_info = ib_umad_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("umad");

static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data,
			       struct ib_client_nl_info *res)
{
	struct ib_umad_port *port = get_port(ibdev, client_data, res->port);

	if (IS_ERR(port))
		return PTR_ERR(port);

	res->abi = IB_USER_MAD_ABI_VERSION;
	res->cdev = &port->sm_dev;
	return 0;
}

static struct ib_client issm_client = {
	.name	     = "issm",
	.get_nl_info = ib_issm_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("issm");

static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sysfs_emit(buf, "%s\n", dev_name(&port->ib_dev->dev));
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t port_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR_RO(port);

static struct attribute *umad_class_dev_attrs[] = {
	&dev_attr_ibdev.attr,
	&dev_attr_port.attr,
	NULL,
};
ATTRIBUTE_GROUPS(umad_class_dev);

static char *umad_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static ssize_t abi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR_RO(abi_version);

static struct attribute *umad_class_attrs[] = {
	&class_attr_abi_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(umad_class);

static struct class umad_class = {
	.name		= "infiniband_mad",
	.devnode	= umad_devnode,
	.class_groups	= umad_class_groups,
	.dev_groups	= umad_class_dev_groups,
};

static void ib_umad_release_port(struct device *device)
{
	struct ib_umad_port *port = dev_get_drvdata(device);
	struct ib_umad_device *umad_dev = port->umad_dev;

	ib_umad_dev_put(umad_dev);
}

static void ib_umad_init_port_dev(struct device *dev,
				  struct ib_umad_port *port,
				  const struct ib_device *device)
{
	device_initialize(dev);
	ib_umad_dev_get(port->umad_dev);
	dev->class = &umad_class;
	dev->parent = device->dev.parent;
	dev_set_drvdata(dev, port);
	dev->release = ib_umad_release_port;
}

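/*
 * Create the umad and issm character devices for one port: reserve a
 * device number (the fixed minor range first, then the dynamic
 * region), initialize the port state, and register both cdev/device
 * pairs.  Everything is unwound on failure.
 */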
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_device *umad_dev,
			     struct ib_umad_port *port)
{
	int devnum;
	dev_t base_umad;
	dev_t base_issm;
	int ret;

	devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
	if (devnum < 0)
		return devnum;
	port->dev_num = devnum;
	if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
		base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
		base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
	} else {
		base_umad = devnum + base_umad_dev;
		base_issm = devnum + base_issm_dev;
	}

	port->ib_dev   = device;
	port->umad_dev = umad_dev;
	port->port_num = port_num;
	sema_init(&port->sm_sem, 1);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);

	ib_umad_init_port_dev(&port->dev, port, device);
	port->dev.devt = base_umad;
	dev_set_name(&port->dev, "umad%d", port->dev_num);
	cdev_init(&port->cdev, &umad_fops);
	port->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&port->cdev, &port->dev);
	if (ret)
		goto err_cdev;

	ib_umad_init_port_dev(&port->sm_dev, port, device);
	port->sm_dev.devt = base_issm;
	dev_set_name(&port->sm_dev, "issm%d", port->dev_num);
	cdev_init(&port->sm_cdev, &umad_sm_fops);
	port->sm_cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&port->sm_cdev, &port->sm_dev);
	if (ret)
		goto err_dev;

	return 0;

err_dev:
	put_device(&port->sm_dev);
	cdev_device_del(&port->cdev, &port->dev);
err_cdev:
	put_device(&port->dev);
	ida_free(&umad_ida, devnum);
	return ret;
}

static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	cdev_device_del(&port->sm_cdev, &port->sm_dev);
	cdev_device_del(&port->cdev, &port->dev);

	mutex_lock(&port->file_mutex);

	/*
	 * Mark ib_dev NULL so that ioctls and other file ops fail instead
	 * of touching the departing device.
	 */
	port->ib_dev = NULL;

	list_for_each_entry(file, &port->file_list, port_list) {
		mutex_lock(&file->mutex);
		file->agents_dead = 1;
		wake_up_interruptible(&file->recv_wait);
		mutex_unlock(&file->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);
	}

	mutex_unlock(&port->file_mutex);

	ida_free(&umad_ida, port->dev_num);

	/* balances device_initialize() */
	put_device(&port->sm_dev);
	put_device(&port->dev);
}

static int ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;
	int count = 0;
	int ret;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
	if (!umad_dev)
		return -ENOMEM;

	kref_init(&umad_dev->kref);
	for (i = s; i <= e; ++i) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		ret = ib_umad_init_port(device, i, umad_dev,
					&umad_dev->ports[i - s]);
		if (ret)
			goto err;

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(device, &umad_client, umad_dev);

	return 0;

err:
	while (--i >= s) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		ib_umad_kill_port(&umad_dev->ports[i - s]);
	}
free:
	/* balances kref_init() */
	ib_umad_dev_put(umad_dev);
	return ret;
}

static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_umad_device *umad_dev = client_data;
	unsigned int i;

	rdma_for_each_port (device, i) {
		if (rdma_cap_ib_mad(device, i))
			ib_umad_kill_port(
				&umad_dev->ports[i - rdma_start_port(device)]);
	}

	ib_umad_dev_put(umad_dev);
}

static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_umad_dev,
				     IB_UMAD_NUM_FIXED_MINOR * 2,
				     umad_class.name);
	if (ret) {
		pr_err("couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_umad_dev, 0,
				  IB_UMAD_NUM_DYNAMIC_MINOR * 2,
				  umad_class.name);
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}
	dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR;

	ret = class_register(&umad_class);
	if (ret) {
		pr_err("couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	ret = ib_register_client(&umad_client);
	if (ret)
		goto out_class;

	ret = ib_register_client(&issm_client);
	if (ret)
		goto out_client;

	return 0;

out_client:
	ib_unregister_client(&umad_client);
out_class:
	class_unregister(&umad_class);

out_chrdev:
	unregister_chrdev_region(dynamic_umad_dev,
				 IB_UMAD_NUM_DYNAMIC_MINOR * 2);

out_alloc:
	unregister_chrdev_region(base_umad_dev,
				 IB_UMAD_NUM_FIXED_MINOR * 2);

out:
	return ret;
}

static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&issm_client);
	ib_unregister_client(&umad_client);
	class_unregister(&umad_class);
	unregister_chrdev_region(base_umad_dev,
				 IB_UMAD_NUM_FIXED_MINOR * 2);
	unregister_chrdev_region(dynamic_umad_dev,
				 IB_UMAD_NUM_DYNAMIC_MINOR * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);