0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038 #include <linux/dma-mapping.h>
0039 #include <linux/err.h>
0040 #include <linux/idr.h>
0041 #include <linux/interrupt.h>
0042 #include <linux/rbtree.h>
0043 #include <linux/sched.h>
0044 #include <linux/spinlock.h>
0045 #include <linux/workqueue.h>
0046 #include <linux/completion.h>
0047 #include <linux/slab.h>
0048 #include <linux/module.h>
0049 #include <linux/sysctl.h>
0050
0051 #include <rdma/iw_cm.h>
0052 #include <rdma/ib_addr.h>
0053 #include <rdma/iw_portmap.h>
0054 #include <rdma/rdma_netlink.h>
0055
0056 #include "iwcm.h"
0057
0058 MODULE_AUTHOR("Tom Tucker");
0059 MODULE_DESCRIPTION("iWARP CM");
0060 MODULE_LICENSE("Dual BSD/GPL");
0061
/*
 * Human-readable strings for the errno-style reject reasons passed up in
 * CONNECT_REPLY events; indexed by the positive errno value.
 */
static const char * const iwcm_rej_reason_strs[] = {
	[ECONNRESET]			= "reset by remote host",
	[ECONNREFUSED]			= "refused by remote application",
	[ETIMEDOUT]			= "setup timeout",
};
0067
0068 const char *__attribute_const__ iwcm_reject_msg(int reason)
0069 {
0070 size_t index;
0071
0072
0073 index = -reason;
0074
0075 if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
0076 iwcm_rej_reason_strs[index])
0077 return iwcm_rej_reason_strs[index];
0078 else
0079 return "unrecognized reason";
0080 }
0081 EXPORT_SYMBOL(iwcm_reject_msg);
0082
/*
 * Netlink callback table for the iWARP port-mapper protocol: dispatches
 * replies/notifications from the userspace port-mapper daemon.
 */
static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb},
	[RDMA_NL_IWPM_HELLO] = {.dump = iwpm_hello_cb}
};
0093
/* Ordered workqueue on which all deferred CM events are processed. */
static struct workqueue_struct *iwcm_wq;
/*
 * One unit of deferred event work.  Entries are pre-allocated per cm_id
 * (see alloc_work_entries()) and recycled through work_free_list so that
 * event delivery never allocates in atomic context.
 */
struct iwcm_work {
	struct work_struct work;	/* workqueue hook */
	struct iwcm_id_private *cm_id;	/* owning connection id */
	struct list_head list;		/* entry on cm_id's pending work_list */
	struct iw_cm_event event;	/* copied event payload */
	struct list_head free_list;	/* entry on cm_id's work_free_list */
};
0102
/* Listen backlog used when the caller passes 0; tunable via sysctl. */
static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
/* net/iw_cm/default_backlog */
static struct ctl_table iwcm_ctl_table[] = {
	{
		.procname	= "default_backlog",
		.data		= &default_backlog,
		.maxlen		= sizeof(default_backlog),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141 static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
0142 {
0143 struct iwcm_work *work;
0144
0145 if (list_empty(&cm_id_priv->work_free_list))
0146 return NULL;
0147 work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
0148 free_list);
0149 list_del_init(&work->free_list);
0150 return work;
0151 }
0152
/*
 * Return a work entry to its cm_id's free list.  Callers either hold
 * cm_id_priv->lock (cm_event_handler/cm_work_handler) or have exclusive
 * access during initial allocation (alloc_work_entries).
 */
static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}
0157
0158 static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
0159 {
0160 struct list_head *e, *tmp;
0161
0162 list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
0163 list_del(e);
0164 kfree(list_entry(e, struct iwcm_work, free_list));
0165 }
0166 }
0167
0168 static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
0169 {
0170 struct iwcm_work *work;
0171
0172 BUG_ON(!list_empty(&cm_id_priv->work_free_list));
0173 while (count--) {
0174 work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
0175 if (!work) {
0176 dealloc_work_entries(cm_id_priv);
0177 return -ENOMEM;
0178 }
0179 work->cm_id = cm_id_priv;
0180 INIT_LIST_HEAD(&work->list);
0181 put_work(work);
0182 }
0183 return 0;
0184 }
0185
0186
0187
0188
0189
0190
0191 static int copy_private_data(struct iw_cm_event *event)
0192 {
0193 void *p;
0194
0195 p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
0196 if (!p)
0197 return -ENOMEM;
0198 event->private_data = p;
0199 return 0;
0200 }
0201
/*
 * Release a cm_id and its pre-allocated work entries.  Called only when
 * the last reference is dropped (see iwcm_deref_id()).
 */
static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}
0207
0208
0209
0210
0211
/*
 * Release a reference on cm_id.  If this is the last reference, the cm_id
 * is freed; by then no events may still be queued (BUG otherwise).
 * Returns 1 if the cm_id was freed so callers (e.g. cm_work_handler())
 * know to stop touching it, 0 otherwise.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
		return 1;
	}

	return 0;
}
0222
0223 static void add_ref(struct iw_cm_id *cm_id)
0224 {
0225 struct iwcm_id_private *cm_id_priv;
0226 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
0227 refcount_inc(&cm_id_priv->refcount);
0228 }
0229
0230 static void rem_ref(struct iw_cm_id *cm_id)
0231 {
0232 struct iwcm_id_private *cm_id_priv;
0233
0234 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
0235
0236 (void)iwcm_deref_id(cm_id_priv);
0237 }
0238
0239 static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
0240
/**
 * iw_create_cm_id - create a new iWARP connection manager id.
 * @device: RDMA device the id is bound to
 * @cm_handler: client callback invoked for CM events on this id
 * @context: opaque client context stored in the id
 *
 * The id starts in IW_CM_STATE_IDLE holding one reference.  Returns the
 * embedded public iw_cm_id, or ERR_PTR(-ENOMEM).
 */
struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	refcount_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);
0268
0269
0270 static int iwcm_modify_qp_err(struct ib_qp *qp)
0271 {
0272 struct ib_qp_attr qp_attr;
0273
0274 if (!qp)
0275 return -EINVAL;
0276
0277 qp_attr.qp_state = IB_QPS_ERR;
0278 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
0279 }
0280
0281
0282
0283
0284
0285 static int iwcm_modify_qp_sqd(struct ib_qp *qp)
0286 {
0287 struct ib_qp_attr qp_attr;
0288
0289 BUG_ON(qp == NULL);
0290 qp_attr.qp_state = IB_QPS_SQD;
0291 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
0292 }
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
/**
 * iw_cm_disconnect - close the physical connection of an established cm_id.
 * @cm_id: connection to tear down
 * @abrupt: nonzero to abort (QP -> ERR), zero for graceful close (QP -> SQD)
 *
 * Blocks until any in-flight connect/accept downcall finishes
 * (IWCM_F_CONNECT_WAIT), then transitions ESTABLISHED -> CLOSING and moves
 * the associated QP.  Returns -EINVAL for states where disconnect makes no
 * sense (LISTEN, or ESTABLISHED without a QP); other already-closing states
 * are treated as success.
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP may be NULL if a prior accept/connect failed */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first; nothing more to do */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0; already back at idle */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before or instead of accept after
		 * receiving the connect_request event; treat as a no-op.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* only reachable if the wait above were bypassed */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP may already be in
		 * ERR or SQD; ignore the modify result.
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);
0366
0367
0368
0369
0370
0371
0372
/*
 * Tear down a cm_id regardless of its current state: stop listening,
 * abort an established connection, or reject a pending request, then
 * release the QP reference, remove any port-mapper mapping, and drop the
 * reference taken at creation.  Any events still queued after
 * IWCM_F_DROP_EVENTS is set are discarded by cm_work_handler().
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	/*
	 * Since we're deleting the cm_id, drop any events that
	 * might arrive before the last dereference.
	 */
	set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/* Detach the QP under the lock; released after the state switch. */
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;

	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint (provider downcall) */
		cm_id->device->ops.iw_destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before or instead of accept/reject
		 * after receiving the connect_request event; reject the
		 * pending request on its behalf.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->ops.iw_reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);

	if (cm_id->mapped) {
		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
	}

	/* drop the creation reference; may free the cm_id */
	(void)iwcm_deref_id(cm_id_priv);
}
0445
0446
0447
0448
0449
0450
0451
/**
 * iw_destroy_cm_id - destroy a cm_id.
 * @cm_id: connection id to destroy
 *
 * Thin public wrapper around destroy_cm_id(); after this call the caller
 * must no longer reference @cm_id.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466
0467 static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
0468 struct sockaddr_storage *cm_addr,
0469 struct sockaddr_storage *cm_outaddr)
0470 {
0471 if (pm_addr->ss_family == AF_INET) {
0472 struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
0473
0474 if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
0475 struct sockaddr_in *cm4_addr =
0476 (struct sockaddr_in *)cm_addr;
0477 struct sockaddr_in *cm4_outaddr =
0478 (struct sockaddr_in *)cm_outaddr;
0479
0480 cm4_outaddr->sin_addr = cm4_addr->sin_addr;
0481 }
0482 } else {
0483 struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;
0484
0485 if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
0486 struct sockaddr_in6 *cm6_addr =
0487 (struct sockaddr_in6 *)cm_addr;
0488 struct sockaddr_in6 *cm6_outaddr =
0489 (struct sockaddr_in6 *)cm_outaddr;
0490
0491 cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
0492 }
0493 }
0494 }
0495
0496
0497
0498
0499
0500
0501
0502
0503
0504
0505
0506
/*
 * iw_cm_map - set up port-mapper translations for a cm_id.
 * @cm_id: id whose local/remote addresses should be mapped
 * @active: true on the connecting (active) side, false when listening
 *
 * Best effort: if the userspace port mapper is not registered or not
 * valid, the mapped addresses simply mirror the originals and 0 is
 * returned.  Otherwise the mapped addresses returned by the mapper are
 * installed (with wildcard fixup on the active side) and the mapping is
 * recorded via iwpm_create_mapinfo().
 */
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
	const char *devname = dev_name(&cm_id->device->dev);
	const char *ifname = cm_id->device->iw_ifname;
	struct iwpm_dev_data pm_reg_msg = {};
	struct iwpm_sa_data pm_msg;
	int status;

	/* Names are strcpy'd below; reject anything that would not fit. */
	if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
	    strlen(ifname) >= sizeof(pm_reg_msg.if_name))
		return -EINVAL;

	/* Default the mapped addresses to the unmapped ones. */
	cm_id->m_local_addr = cm_id->local_addr;
	cm_id->m_remote_addr = cm_id->remote_addr;

	strcpy(pm_reg_msg.dev_name, devname);
	strcpy(pm_reg_msg.if_name, ifname);

	/* No usable port mapper: proceed unmapped. */
	if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
	    !iwpm_valid_pid())
		return 0;

	cm_id->mapped = true;
	pm_msg.loc_addr = cm_id->local_addr;
	pm_msg.rem_addr = cm_id->remote_addr;
	pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ?
		       IWPM_FLAGS_NO_PORT_MAP : 0;
	if (active)
		status = iwpm_add_and_query_mapping(&pm_msg,
						    RDMA_NL_IWCM);
	else
		status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);

	if (!status) {
		cm_id->m_local_addr = pm_msg.mapped_loc_addr;
		if (active) {
			cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
			/* Replace wildcard mapped addrs with the real peer. */
			iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
					     &cm_id->remote_addr,
					     &cm_id->m_remote_addr);
		}
	}

	return iwpm_create_mapinfo(&cm_id->local_addr,
				   &cm_id->m_local_addr,
				   RDMA_NL_IWCM, pm_msg.flags);
}
0554
0555
0556
0557
0558
0559
0560
0561 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
0562 {
0563 struct iwcm_id_private *cm_id_priv;
0564 unsigned long flags;
0565 int ret;
0566
0567 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
0568
0569 if (!backlog)
0570 backlog = default_backlog;
0571
0572 ret = alloc_work_entries(cm_id_priv, backlog);
0573 if (ret)
0574 return ret;
0575
0576 spin_lock_irqsave(&cm_id_priv->lock, flags);
0577 switch (cm_id_priv->state) {
0578 case IW_CM_STATE_IDLE:
0579 cm_id_priv->state = IW_CM_STATE_LISTEN;
0580 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
0581 ret = iw_cm_map(cm_id, false);
0582 if (!ret)
0583 ret = cm_id->device->ops.iw_create_listen(cm_id,
0584 backlog);
0585 if (ret)
0586 cm_id_priv->state = IW_CM_STATE_IDLE;
0587 spin_lock_irqsave(&cm_id_priv->lock, flags);
0588 break;
0589 default:
0590 ret = -EINVAL;
0591 }
0592 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
0593
0594 return ret;
0595 }
0596 EXPORT_SYMBOL(iw_cm_listen);
0597
0598
0599
0600
0601
0602
/**
 * iw_cm_reject - reject a pending connection request.
 * @cm_id: id in IW_CM_STATE_CONN_RECV
 * @private_data: optional data carried back to the peer
 * @private_data_len: length of @private_data in bytes
 *
 * Transitions the id back to IDLE and issues the provider's reject
 * downcall.  IWCM_F_CONNECT_WAIT is held across the downcall so that
 * disconnect/destroy block until it completes.  Returns -EINVAL if the id
 * is not in CONN_RECV, otherwise the provider's result.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->ops.iw_reject(cm_id, private_data,
					   private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);
0633
0634
0635
0636
0637
0638
0639
0640
0641 int iw_cm_accept(struct iw_cm_id *cm_id,
0642 struct iw_cm_conn_param *iw_param)
0643 {
0644 struct iwcm_id_private *cm_id_priv;
0645 struct ib_qp *qp;
0646 unsigned long flags;
0647 int ret;
0648
0649 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
0650 set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
0651
0652 spin_lock_irqsave(&cm_id_priv->lock, flags);
0653 if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
0654 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
0655 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
0656 wake_up_all(&cm_id_priv->connect_wait);
0657 return -EINVAL;
0658 }
0659
0660 qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
0661 if (!qp) {
0662 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
0663 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
0664 wake_up_all(&cm_id_priv->connect_wait);
0665 return -EINVAL;
0666 }
0667 cm_id->device->ops.iw_add_ref(qp);
0668 cm_id_priv->qp = qp;
0669 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
0670
0671 ret = cm_id->device->ops.iw_accept(cm_id, iw_param);
0672 if (ret) {
0673
0674 BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
0675 cm_id_priv->state = IW_CM_STATE_IDLE;
0676 spin_lock_irqsave(&cm_id_priv->lock, flags);
0677 qp = cm_id_priv->qp;
0678 cm_id_priv->qp = NULL;
0679 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
0680 if (qp)
0681 cm_id->device->ops.iw_rem_ref(qp);
0682 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
0683 wake_up_all(&cm_id_priv->connect_wait);
0684 }
0685
0686 return ret;
0687 }
0688 EXPORT_SYMBOL(iw_cm_accept);
0689
0690
0691
0692
0693
0694
0695
0696
/**
 * iw_cm_connect - initiate an active-side connection.
 * @cm_id: id in IW_CM_STATE_IDLE
 * @iw_param: connection parameters, including the QP number to bind
 *
 * Pre-allocates event work entries, references the QP, transitions to
 * CONN_SENT, maps addresses via the port mapper, and issues the
 * provider's connect downcall.  IWCM_F_CONNECT_WAIT stays set until the
 * CONNECT_REPLY event arrives (success) or the error path below clears
 * it.  On failure the id is rolled back to IDLE and the QP reference
 * released.  Returns 0 on success or a negative errno.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		ret = -EINVAL;
		goto err;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		ret = -EINVAL;
		goto err;
	}
	cm_id->device->ops.iw_add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = iw_cm_map(cm_id, true);
	if (!ret)
		ret = cm_id->device->ops.iw_connect(cm_id, iw_param);
	if (!ret)
		return 0;	/* success: reply event completes the setup */

	/* Failure after the downcall: detach the QP and return to IDLE. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;
	cm_id_priv->state = IW_CM_STATE_IDLE;
err:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id->device->ops.iw_rem_ref(qp);
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);
	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
0748
0749
0750
0751
0752
0753
0754
0755
0756
0757
0758
0759
0760
0761
0762
0763
/*
 * Passive-side connection request handler.
 *
 * Creates a fresh cm_id on behalf of the listening id for the incoming
 * request, resolves port-mapper address info, and delivers the
 * CONNECT_REQUEST event to the listener's callback.  Any failure rejects
 * the request and destroys the new id.  The event's private-data copy is
 * always freed on exit.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request event
	 * with a bad status.
	 */
	BUG_ON(iw_event->status);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request. */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->m_local_addr = iw_event->local_addr;
	cm_id->m_remote_addr = iw_event->remote_addr;
	cm_id->local_addr = listen_id_priv->id.local_addr;

	ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
				   &iw_event->remote_addr,
				   &cm_id->remote_addr,
				   RDMA_NL_IWCM);
	if (ret) {
		/* No port-mapper info; use the address as reported. */
		cm_id->remote_addr = iw_event->remote_addr;
	} else {
		iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
				     &iw_event->local_addr,
				     &cm_id->local_addr);
		iw_event->local_addr = cm_id->local_addr;
		iw_event->remote_addr = cm_id->remote_addr;
	}

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}
0838
0839
0840
0841
0842
0843
0844
0845
0846
0847
0848
0849
0850
/*
 * Passive-side established handler: the provider signals that the
 * connection accepted via iw_cm_accept() is now up.  Transitions
 * CONN_RECV -> ESTABLISHED, clears IWCM_F_CONNECT_WAIT (set by
 * iw_cm_accept()), forwards the event to the client handler, and wakes
 * anyone blocked in disconnect/destroy.  Returns the client handler's
 * result.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from the callback handler is still not allowed (it would
	 * deadlock on connect_wait otherwise).
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
0873
0874
0875
0876
0877
0878
0879
0880
/*
 * Active-side connect reply handler: the peer accepted (status == 0) or
 * refused our iw_cm_connect().  On success install the mapped addresses
 * and go CONN_SENT -> ESTABLISHED; on failure detach the QP and return to
 * IDLE.  Clears IWCM_F_CONNECT_WAIT (set by iw_cm_connect()), forwards
 * the event, frees the private-data copy, and wakes waiters.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	struct ib_qp *qp = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == 0) {
		cm_id_priv->id.m_local_addr = iw_event->local_addr;
		cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
		iw_event->local_addr = cm_id_priv->id.local_addr;
		iw_event->remote_addr = cm_id_priv->id.remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET: release the QP reference below. */
		qp = cm_id_priv->qp;
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
0920
0921
0922
0923
0924
0925
/*
 * Provider disconnect indication: if the connection is still established,
 * move it to CLOSING.  Any other state is left untouched (the teardown is
 * already in progress elsewhere).
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
0936
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946
0947
/*
 * Provider close indication: the underlying connection is fully gone.
 * Detach and release the QP, transition ESTABLISHED/CLOSING -> IDLE, and
 * notify the client handler (unless we're already DESTROYING, in which
 * case the event is consumed silently).  Returns the client handler's
 * result, or 0 when no notification was delivered.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	struct ib_qp *qp;
	unsigned long flags;
	int ret = 0, notify_event = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;

	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		notify_event = 1;
		break;
	case IW_CM_STATE_DESTROYING:
		/* destroy_cm_id() already owns the teardown */
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);
	if (notify_event)
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	return ret;
}
0977
0978 static int process_event(struct iwcm_id_private *cm_id_priv,
0979 struct iw_cm_event *iw_event)
0980 {
0981 int ret = 0;
0982
0983 switch (iw_event->event) {
0984 case IW_CM_EVENT_CONNECT_REQUEST:
0985 cm_conn_req_handler(cm_id_priv, iw_event);
0986 break;
0987 case IW_CM_EVENT_CONNECT_REPLY:
0988 ret = cm_conn_rep_handler(cm_id_priv, iw_event);
0989 break;
0990 case IW_CM_EVENT_ESTABLISHED:
0991 ret = cm_conn_est_handler(cm_id_priv, iw_event);
0992 break;
0993 case IW_CM_EVENT_DISCONNECT:
0994 cm_disconnect_handler(cm_id_priv, iw_event);
0995 break;
0996 case IW_CM_EVENT_CLOSE:
0997 ret = cm_close_handler(cm_id_priv, iw_event);
0998 break;
0999 default:
1000 BUG();
1001 }
1002
1003 return ret;
1004 }
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
/*
 * Workqueue callback: drain the cm_id's pending event list.  Each event
 * carries one reference (taken in cm_event_handler()); the reference is
 * dropped after the event is processed, and processing stops immediately
 * if that drop frees the cm_id.  Events are discarded once
 * IWCM_F_DROP_EVENTS is set by destroy_cm_id().
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		/* Copy the event out before recycling the work entry. */
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
			ret = process_event(cm_id_priv, &levent);
			/* Nonzero means the client asked for teardown. */
			if (ret)
				destroy_cm_id(&cm_id_priv->id);
		} else
			pr_debug("dropping event %d\n", levent.event);
		/* Drop this event's reference; stop if the cm_id is gone. */
		if (iwcm_deref_id(cm_id_priv))
			return;
		if (empty)
			return;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065 static int cm_event_handler(struct iw_cm_id *cm_id,
1066 struct iw_cm_event *iw_event)
1067 {
1068 struct iwcm_work *work;
1069 struct iwcm_id_private *cm_id_priv;
1070 unsigned long flags;
1071 int ret = 0;
1072
1073 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
1074
1075 spin_lock_irqsave(&cm_id_priv->lock, flags);
1076 work = get_work(cm_id_priv);
1077 if (!work) {
1078 ret = -ENOMEM;
1079 goto out;
1080 }
1081
1082 INIT_WORK(&work->work, cm_work_handler);
1083 work->cm_id = cm_id_priv;
1084 work->event = *iw_event;
1085
1086 if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
1087 work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
1088 work->event.private_data_len) {
1089 ret = copy_private_data(&work->event);
1090 if (ret) {
1091 put_work(work);
1092 goto out;
1093 }
1094 }
1095
1096 refcount_inc(&cm_id_priv->refcount);
1097 if (list_empty(&cm_id_priv->work_list)) {
1098 list_add_tail(&work->list, &cm_id_priv->work_list);
1099 queue_work(iwcm_wq, &work->work);
1100 } else
1101 list_add_tail(&work->list, &cm_id_priv->work_list);
1102 out:
1103 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1104 return ret;
1105 }
1106
1107 static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
1108 struct ib_qp_attr *qp_attr,
1109 int *qp_attr_mask)
1110 {
1111 unsigned long flags;
1112 int ret;
1113
1114 spin_lock_irqsave(&cm_id_priv->lock, flags);
1115 switch (cm_id_priv->state) {
1116 case IW_CM_STATE_IDLE:
1117 case IW_CM_STATE_CONN_SENT:
1118 case IW_CM_STATE_CONN_RECV:
1119 case IW_CM_STATE_ESTABLISHED:
1120 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
1121 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE|
1122 IB_ACCESS_REMOTE_READ;
1123 ret = 0;
1124 break;
1125 default:
1126 ret = -EINVAL;
1127 break;
1128 }
1129 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1130 return ret;
1131 }
1132
1133 static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
1134 struct ib_qp_attr *qp_attr,
1135 int *qp_attr_mask)
1136 {
1137 unsigned long flags;
1138 int ret;
1139
1140 spin_lock_irqsave(&cm_id_priv->lock, flags);
1141 switch (cm_id_priv->state) {
1142 case IW_CM_STATE_IDLE:
1143 case IW_CM_STATE_CONN_SENT:
1144 case IW_CM_STATE_CONN_RECV:
1145 case IW_CM_STATE_ESTABLISHED:
1146 *qp_attr_mask = 0;
1147 ret = 0;
1148 break;
1149 default:
1150 ret = -EINVAL;
1151 break;
1152 }
1153 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1154 return ret;
1155 }
1156
1157 int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
1158 struct ib_qp_attr *qp_attr,
1159 int *qp_attr_mask)
1160 {
1161 struct iwcm_id_private *cm_id_priv;
1162 int ret;
1163
1164 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
1165 switch (qp_attr->qp_state) {
1166 case IB_QPS_INIT:
1167 case IB_QPS_RTR:
1168 ret = iwcm_init_qp_init_attr(cm_id_priv,
1169 qp_attr, qp_attr_mask);
1170 break;
1171 case IB_QPS_RTS:
1172 ret = iwcm_init_qp_rts_attr(cm_id_priv,
1173 qp_attr, qp_attr_mask);
1174 break;
1175 default:
1176 ret = -EINVAL;
1177 break;
1178 }
1179 return ret;
1180 }
1181 EXPORT_SYMBOL(iw_cm_init_qp_attr);
1182
/*
 * Module init: bring up the port-mapper client, the ordered event
 * workqueue, the net/iw_cm sysctl directory, and the netlink callbacks.
 * Unwinds in reverse order on failure; both failure paths report -ENOMEM
 * (alloc_ordered_workqueue and register_net_sysctl signal failure by
 * returning NULL only).
 */
static int __init iw_cm_init(void)
{
	int ret;

	ret = iwpm_init(RDMA_NL_IWCM);
	if (ret)
		return ret;

	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
	if (!iwcm_wq)
		goto err_alloc;

	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
						 iwcm_ctl_table);
	if (!iwcm_ctl_table_hdr) {
		pr_err("iw_cm: couldn't register sysctl paths\n");
		goto err_sysctl;
	}

	rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
	return 0;

err_sysctl:
	destroy_workqueue(iwcm_wq);
err_alloc:
	iwpm_exit(RDMA_NL_IWCM);
	return -ENOMEM;
}
1211
/* Module exit: tear down everything iw_cm_init() set up, in reverse order. */
static void __exit iw_cm_cleanup(void)
{
	rdma_nl_unregister(RDMA_NL_IWCM);
	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
	destroy_workqueue(iwcm_wq);
	iwpm_exit(RDMA_NL_IWCM);
}
1219
1220 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);
1221
1222 module_init(iw_cm_init);
1223 module_exit(iw_cm_cleanup);