0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 #include <linux/security.h>
0034 #include <linux/completion.h>
0035 #include <linux/list.h>
0036
0037 #include <rdma/ib_verbs.h>
0038 #include <rdma/ib_cache.h>
0039 #include "core_priv.h"
0040 #include "mad_priv.h"
0041
/* All registered SMI MAD agents; entries are linked via mad_agent_sec_list. */
static LIST_HEAD(mad_agent_list);

/* Protects mad_agent_list and the smp_allowed updates made while walking it. */
static DEFINE_SPINLOCK(mad_agent_list_lock);
0045
0046 static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
0047 {
0048 struct pkey_index_qp_list *pkey = NULL;
0049 struct pkey_index_qp_list *tmp_pkey;
0050 struct ib_device *dev = pp->sec->dev;
0051
0052 spin_lock(&dev->port_data[pp->port_num].pkey_list_lock);
0053 list_for_each_entry (tmp_pkey, &dev->port_data[pp->port_num].pkey_list,
0054 pkey_index_list) {
0055 if (tmp_pkey->pkey_index == pp->pkey_index) {
0056 pkey = tmp_pkey;
0057 break;
0058 }
0059 }
0060 spin_unlock(&dev->port_data[pp->port_num].pkey_list_lock);
0061 return pkey;
0062 }
0063
0064 static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
0065 u16 *pkey,
0066 u64 *subnet_prefix)
0067 {
0068 struct ib_device *dev = pp->sec->dev;
0069 int ret;
0070
0071 ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
0072 if (ret)
0073 return ret;
0074
0075 ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);
0076
0077 return ret;
0078 }
0079
0080 static int enforce_qp_pkey_security(u16 pkey,
0081 u64 subnet_prefix,
0082 struct ib_qp_security *qp_sec)
0083 {
0084 struct ib_qp_security *shared_qp_sec;
0085 int ret;
0086
0087 ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
0088 if (ret)
0089 return ret;
0090
0091 list_for_each_entry(shared_qp_sec,
0092 &qp_sec->shared_qp_list,
0093 shared_qp_list) {
0094 ret = security_ib_pkey_access(shared_qp_sec->security,
0095 subnet_prefix,
0096 pkey);
0097 if (ret)
0098 return ret;
0099 }
0100 return 0;
0101 }
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111 static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
0112 struct ib_qp_security *sec)
0113 {
0114 u64 subnet_prefix;
0115 u16 pkey;
0116 int ret = 0;
0117
0118 if (!pps)
0119 return 0;
0120
0121 if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
0122 ret = get_pkey_and_subnet_prefix(&pps->main,
0123 &pkey,
0124 &subnet_prefix);
0125 if (ret)
0126 return ret;
0127
0128 ret = enforce_qp_pkey_security(pkey,
0129 subnet_prefix,
0130 sec);
0131 if (ret)
0132 return ret;
0133 }
0134
0135 if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
0136 ret = get_pkey_and_subnet_prefix(&pps->alt,
0137 &pkey,
0138 &subnet_prefix);
0139 if (ret)
0140 return ret;
0141
0142 ret = enforce_qp_pkey_security(pkey,
0143 subnet_prefix,
0144 sec);
0145 }
0146
0147 return ret;
0148 }
0149
0150
0151
0152
/* Move @sec's QP to the error state because its pkey is no longer
 * permitted, and deliver IB_EVENT_QP_FATAL to the owner and to every
 * QP sharing the real QP.  Caller must hold sec->mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is mid-destroy its state must not be touched here;
	 * the destroy path owns the teardown (see the destroying flag
	 * set in ib_destroy_qp_security_begin()).
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	/* NOTE(review): event.device is only assigned in the shared loop
	 * below; the owner's handler gets a zero-initialized device
	 * pointer - confirm handlers tolerate this.
	 */
	if (sec->qp->event_handler && sec->qp->qp_context) {
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	/* Fan the fatal event out to every open handle on this real QP. */
	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}
0193
/* Re-validate every QP tracked under @pkey on @port_num after a cache
 * or policy change.  QPs that fail the check are collected on a local
 * to_error_list while the qp_list spinlock is held (qp_to_error() takes
 * a mutex, so it cannot run under the spinlock), then moved to the
 * error state in a second pass.
 */
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u32 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			/* Non-zero error_list_count means the QP is already
			 * queued on some to_error_list; don't queue it twice.
			 */
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	/* Second pass, outside the spinlock: error out each offender. */
	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		/* A concurrent destroy waits on this completion - see
		 * ib_destroy_qp_security_abort()/_end().
		 */
		if (comp)
			complete(&pp->sec->error_complete);
	}
}
0239
0240
0241
0242
/* Link @pp onto the per-device tracking list for its pkey index and
 * port, transitioning it VALID -> LISTED.  Entries not in the VALID
 * state are ignored.  Returns 0 on success or -ENOMEM if a new tracking
 * node was needed and could not be allocated.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u32 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		/* Allocate optimistically outside the lock... */
		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_data[port_num].pkey_list_lock);
		/* ...then re-scan under the lock in case another thread
		 * inserted a node for this pkey index in the meantime;
		 * if so, drop ours and reuse theirs.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_data[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_data[port_num].pkey_list);
		}
		spin_unlock(&dev->port_data[port_num].pkey_list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}
0298
0299
0300
0301
0302 static void port_pkey_list_remove(struct ib_port_pkey *pp)
0303 {
0304 struct pkey_index_qp_list *pkey;
0305
0306 if (pp->state != IB_PORT_PKEY_LISTED)
0307 return;
0308
0309 pkey = get_pkey_idx_qp_list(pp);
0310
0311 spin_lock(&pkey->qp_list_lock);
0312 list_del(&pp->qp_list);
0313 spin_unlock(&pkey->qp_list_lock);
0314
0315
0316
0317
0318 pp->state = IB_PORT_PKEY_VALID;
0319 }
0320
/* Final teardown of a QP security structure: release the LSM blob,
 * the ports/pkeys state, then the structure itself - in that order,
 * since the latter frees hold the former.
 */
static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}
0327
0328
0329
0330
/* Build the ports_pkeys state that applying @qp_attr (as selected by
 * @qp_attr_mask) would leave the QP in: fields not being modified are
 * inherited from the QP's current state.  Returns a newly allocated
 * structure (caller frees), or NULL on allocation failure.  Called with
 * the QP security mutex held (see ib_security_modify_qp()).
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
					  const struct ib_qp_attr *qp_attr,
					  int qp_attr_mask)
{
	struct ib_ports_pkeys *new_pps;
	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
	if (!new_pps)
		return NULL;

	/* Main-path port: new value if being set, otherwise inherited. */
	if (qp_attr_mask & IB_QP_PORT)
		new_pps->main.port_num = qp_attr->port_num;
	else if (qp_pps)
		new_pps->main.port_num = qp_pps->main.port_num;

	/* Main-path pkey index: new value if being set, else inherited. */
	if (qp_attr_mask & IB_QP_PKEY_INDEX)
		new_pps->main.pkey_index = qp_attr->pkey_index;
	else if (qp_pps)
		new_pps->main.pkey_index = qp_pps->main.pkey_index;

	/* The main state becomes VALID only when both port and pkey are
	 * known: either both are supplied in this modify, or the previous
	 * state had already established them.
	 */
	if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
	     (qp_attr_mask & IB_QP_PORT)) ||
	    (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
		new_pps->main.state = IB_PORT_PKEY_VALID;

	/* Alternate path: port and pkey always arrive together via
	 * IB_QP_ALT_PATH, or are carried over from the previous state.
	 */
	if (qp_attr_mask & IB_QP_ALT_PATH) {
		new_pps->alt.port_num = qp_attr->alt_port_num;
		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
		new_pps->alt.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->alt.port_num = qp_pps->alt.port_num;
		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->alt.state = IB_PORT_PKEY_VALID;
	}

	new_pps->main.sec = qp->qp_sec;
	new_pps->alt.sec = qp->qp_sec;
	return new_pps;
}
0372
0373 int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
0374 {
0375 struct ib_qp *real_qp = qp->real_qp;
0376 int ret;
0377
0378 ret = ib_create_qp_security(qp, dev);
0379
0380 if (ret)
0381 return ret;
0382
0383 if (!qp->qp_sec)
0384 return 0;
0385
0386 mutex_lock(&real_qp->qp_sec->mutex);
0387 ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
0388 qp->qp_sec);
0389
0390 if (ret)
0391 goto ret;
0392
0393 if (qp != real_qp)
0394 list_add(&qp->qp_sec->shared_qp_list,
0395 &real_qp->qp_sec->shared_qp_list);
0396 ret:
0397 mutex_unlock(&real_qp->qp_sec->mutex);
0398 if (ret)
0399 destroy_qp_security(qp->qp_sec);
0400
0401 return ret;
0402 }
0403
0404 void ib_close_shared_qp_security(struct ib_qp_security *sec)
0405 {
0406 struct ib_qp *real_qp = sec->qp->real_qp;
0407
0408 mutex_lock(&real_qp->qp_sec->mutex);
0409 list_del(&sec->shared_qp_list);
0410 mutex_unlock(&real_qp->qp_sec->mutex);
0411
0412 destroy_qp_security(sec);
0413 }
0414
0415 int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
0416 {
0417 unsigned int i;
0418 bool is_ib = false;
0419 int ret;
0420
0421 rdma_for_each_port (dev, i) {
0422 is_ib = rdma_protocol_ib(dev, i);
0423 if (is_ib)
0424 break;
0425 }
0426
0427
0428 if (!is_ib)
0429 return 0;
0430
0431 qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
0432 if (!qp->qp_sec)
0433 return -ENOMEM;
0434
0435 qp->qp_sec->qp = qp;
0436 qp->qp_sec->dev = dev;
0437 mutex_init(&qp->qp_sec->mutex);
0438 INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
0439 atomic_set(&qp->qp_sec->error_list_count, 0);
0440 init_completion(&qp->qp_sec->error_complete);
0441 ret = security_ib_alloc_security(&qp->qp_sec->security);
0442 if (ret) {
0443 kfree(qp->qp_sec);
0444 qp->qp_sec = NULL;
0445 }
0446
0447 return ret;
0448 }
0449 EXPORT_SYMBOL(ib_create_qp_security);
0450
/* First phase of QP security teardown: detach the QP from the pkey
 * tracking lists and flag it as destroying so concurrent cache-change
 * flows leave it alone.  Paired with either _abort() or _end().
 */
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	/* sec is NULL for devices with no IB ports - see
	 * ib_create_qp_security().
	 */
	if (!sec)
		return;

	mutex_lock(&sec->mutex);

	/* Remove the QP from the pkey lists so it cannot be picked up
	 * by check_pkey_qps() and added to a to_error_list while it is
	 * being destroyed.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already on one or more to_error_lists, the
	 * destroying flag keeps qp_to_error() from modifying a QP that
	 * is mid-destroy.
	 */
	sec->destroying = true;

	/* Snapshot how many error-flow completions the destroy path must
	 * wait for - one per outstanding to_error_list reference.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}
0480
/* Undo ib_destroy_qp_security_begin() when the QP destroy fails:
 * wait out any in-flight error flows, clear the destroying flag, and
 * restore the QP's pkey list membership.  Since the security policy
 * may have changed while the QP was unlisted, the settings are
 * re-checked and the QP is moved to error if they no longer pass.
 */
void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	if (!sec)
		return;

	/* Each error flow that had queued this QP before _begin() will
	 * signal error_complete once - wait for all of them (count was
	 * snapshotted into error_comps_pending).
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the pre-_begin() state: put the QP back on the pkey
	 * tracking lists so future cache changes see it again.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	/* The policy may have changed while the QP was unlisted; if the
	 * current settings no longer pass, force the QP to error.
	 */
	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}
0520
/* Final phase of QP security teardown after a successful QP destroy:
 * wait out any error flows still referencing this structure, then
 * free it.
 */
void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
	int i;

	if (!sec)
		return;

	/* Error flows that queued this QP before _begin() still hold a
	 * reference to sec; each signals error_complete once.  Wait for
	 * all of them before freeing.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	destroy_qp_security(sec);
}
0539
0540 void ib_security_cache_change(struct ib_device *device,
0541 u32 port_num,
0542 u64 subnet_prefix)
0543 {
0544 struct pkey_index_qp_list *pkey;
0545
0546 list_for_each_entry (pkey, &device->port_data[port_num].pkey_list,
0547 pkey_index_list) {
0548 check_pkey_qps(pkey,
0549 device,
0550 port_num,
0551 subnet_prefix);
0552 }
0553 }
0554
0555 void ib_security_release_port_pkey_list(struct ib_device *device)
0556 {
0557 struct pkey_index_qp_list *pkey, *tmp_pkey;
0558 unsigned int i;
0559
0560 rdma_for_each_port (device, i) {
0561 list_for_each_entry_safe(pkey,
0562 tmp_pkey,
0563 &device->port_data[i].pkey_list,
0564 pkey_index_list) {
0565 list_del(&pkey->pkey_index_list);
0566 kfree(pkey);
0567 }
0568 }
0569 }
0570
/* Security-aware wrapper around the driver's modify_qp: when the modify
 * touches port/pkey state, build the would-be new settings, register
 * and validate them before the modify, and only commit (or roll back)
 * the tracking state once the driver call has succeeded (or failed).
 */
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata)
{
	int ret = 0;
	struct ib_ports_pkeys *tmp_pps;
	struct ib_ports_pkeys *new_pps = NULL;
	struct ib_qp *real_qp = qp->real_qp;
	/* SMI/GSI and reserved QP types are exempt from pkey enforcement. */
	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
			   real_qp->qp_type == IB_QPT_GSI ||
			   real_qp->qp_type >= IB_QPT_RESERVED1);
	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
			   (qp_attr_mask & IB_QP_ALT_PATH));

	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
		   !real_qp->qp_sec),
		  "%s: QP security is not initialized for IB QP: %u\n",
		  __func__, real_qp->qp_num);

	/* Port/pkey settings are maintained only for the real QP; open
	 * handles on it sit on shared_qp_list and are checked together
	 * with the real QP by enforce_qp_pkey_security().
	 */
	if (pps_change && !special_qp && real_qp->qp_sec) {
		mutex_lock(&real_qp->qp_sec->mutex);
		new_pps = get_new_pps(real_qp,
				      qp_attr,
				      qp_attr_mask);
		if (!new_pps) {
			mutex_unlock(&real_qp->qp_sec->mutex);
			return -ENOMEM;
		}

		/* Insert the new settings into the pkey lists BEFORE
		 * checking them, so a cache change landing in between
		 * cannot miss this QP.  The old settings are removed
		 * only after the driver modify has been attempted.
		 */
		ret = port_pkey_list_insert(&new_pps->main);

		if (!ret)
			ret = port_pkey_list_insert(&new_pps->alt);

		if (!ret)
			ret = check_qp_port_pkey_settings(new_pps,
							  real_qp->qp_sec);
	}

	if (!ret)
		ret = real_qp->device->ops.modify_qp(real_qp,
						     qp_attr,
						     qp_attr_mask,
						     udata);

	if (new_pps) {
		/* On failure discard the new settings; on success commit
		 * them and retire the old ones.  Either way, whichever
		 * set is being dropped is delisted and freed.
		 */
		if (ret) {
			tmp_pps = new_pps;
		} else {
			tmp_pps = real_qp->qp_sec->ports_pkeys;
			real_qp->qp_sec->ports_pkeys = new_pps;
		}

		if (tmp_pps) {
			port_pkey_list_remove(&tmp_pps->main);
			port_pkey_list_remove(&tmp_pps->alt);
		}
		kfree(tmp_pps);
		mutex_unlock(&real_qp->qp_sec->mutex);
	}
	return ret;
}
0650
0651 static int ib_security_pkey_access(struct ib_device *dev,
0652 u32 port_num,
0653 u16 pkey_index,
0654 void *sec)
0655 {
0656 u64 subnet_prefix;
0657 u16 pkey;
0658 int ret;
0659
0660 if (!rdma_protocol_ib(dev, port_num))
0661 return 0;
0662
0663 ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
0664 if (ret)
0665 return ret;
0666
0667 ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
0668
0669 return security_ib_pkey_access(sec, subnet_prefix, pkey);
0670 }
0671
0672 void ib_mad_agent_security_change(void)
0673 {
0674 struct ib_mad_agent *ag;
0675
0676 spin_lock(&mad_agent_list_lock);
0677 list_for_each_entry(ag,
0678 &mad_agent_list,
0679 mad_agent_sec_list)
0680 WRITE_ONCE(ag->smp_allowed,
0681 !security_ib_endport_manage_subnet(ag->security,
0682 dev_name(&ag->device->dev), ag->port_num));
0683 spin_unlock(&mad_agent_list_lock);
0684 }
0685
/* Allocate the LSM security blob for a MAD agent and, for SMI agents,
 * perform the initial manage-subnet check and register the agent for
 * future policy-change updates.  Returns 0 on success or an LSM error.
 */
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type)
{
	int ret;

	/* MAD security only applies to IB protocol ports. */
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return 0;

	INIT_LIST_HEAD(&agent->mad_agent_sec_list);

	ret = security_ib_alloc_security(&agent->security);
	if (ret)
		return ret;

	/* Only SMI agents need the manage-subnet permission and a spot
	 * on mad_agent_list.
	 */
	if (qp_type != IB_QPT_SMI)
		return 0;

	/* Hold the list lock across both the check and the list_add so
	 * a concurrent ib_mad_agent_security_change() cannot run in
	 * between and leave smp_allowed stale.
	 */
	spin_lock(&mad_agent_list_lock);
	ret = security_ib_endport_manage_subnet(agent->security,
						dev_name(&agent->device->dev),
						agent->port_num);
	if (ret)
		goto free_security;

	WRITE_ONCE(agent->smp_allowed, true);
	list_add(&agent->mad_agent_sec_list, &mad_agent_list);
	spin_unlock(&mad_agent_list_lock);
	return 0;

free_security:
	spin_unlock(&mad_agent_list_lock);
	security_ib_free_security(agent->security);
	return ret;
}
0720
/* Undo ib_mad_agent_security_setup(): delist SMI agents from
 * mad_agent_list and free the agent's LSM security blob.
 */
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
	/* Nothing was allocated for non-IB ports in setup. */
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return;

	/* Only SMI agents were added to mad_agent_list at setup time. */
	if (agent->qp->qp_type == IB_QPT_SMI) {
		spin_lock(&mad_agent_list_lock);
		list_del(&agent->mad_agent_sec_list);
		spin_unlock(&mad_agent_list_lock);
	}

	security_ib_free_security(agent->security);
}
0734
0735 int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
0736 {
0737 if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
0738 return 0;
0739
0740 if (map->agent.qp->qp_type == IB_QPT_SMI) {
0741 if (!READ_ONCE(map->agent.smp_allowed))
0742 return -EACCES;
0743 return 0;
0744 }
0745
0746 return ib_security_pkey_access(map->agent.device,
0747 map->agent.port_num,
0748 pkey_index,
0749 map->agent.security);
0750 }