/*
 * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

static LIST_HEAD(mad_agent_list);
/* Lock to protect mad_agent_list */
static DEFINE_SPINLOCK(mad_agent_list_lock);

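/* Find the pkey_index_qp_list entry on pp's port that tracks the QPs
 * using pp->pkey_index, or NULL if no QP is listed for that index yet.
 */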
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey = NULL;
	struct pkey_index_qp_list *tmp_pkey;
	struct ib_device *dev = pp->sec->dev;

	spin_lock(&dev->port_data[pp->port_num].pkey_list_lock);
	list_for_each_entry(tmp_pkey, &dev->port_data[pp->port_num].pkey_list,
			    pkey_index_list) {
		if (tmp_pkey->pkey_index == pp->pkey_index) {
			pkey = tmp_pkey;
			break;
		}
	}
	spin_unlock(&dev->port_data[pp->port_num].pkey_list_lock);
	return pkey;
}

static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
				      u16 *pkey,
				      u64 *subnet_prefix)
{
	struct ib_device *dev = pp->sec->dev;
	int ret;

	ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
	if (ret)
		return ret;

	ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

	return ret;
}

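/* Ask the LSM whether this security context may use the given PKey on
 * the given subnet, for the QP itself and for every QP sharing its
 * security structure via shared_qp_list.
 */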
static int enforce_qp_pkey_security(u16 pkey,
				    u64 subnet_prefix,
				    struct ib_qp_security *qp_sec)
{
	struct ib_qp_security *shared_qp_sec;
	int ret;

	ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
	if (ret)
		return ret;

	list_for_each_entry(shared_qp_sec,
			    &qp_sec->shared_qp_list,
			    shared_qp_list) {
		ret = security_ib_pkey_access(shared_qp_sec->security,
					      subnet_prefix,
					      pkey);
		if (ret)
			return ret;
	}
	return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP owning the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP while the security
 * structure is for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
				       struct ib_qp_security *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret = 0;

	if (!pps)
		return 0;

	if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->main,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
		if (ret)
			return ret;
	}

	if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->alt,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
	}

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is in the process of being destroyed
	 * the qp pointer in the security structure is
	 * undefined.  It cannot be modified now.
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	if (sec->qp->event_handler && sec->qp->qp_context) {
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}

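/* Revalidate every QP listed under this PKey index after a cache
 * change.  QPs that fail the LSM check are collected on a local
 * to_error_list under the spinlock (the per-QP mutex can't be taken
 * there) and are then moved to the error state one at a time under
 * their own security mutex.
 */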
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u32 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		if (comp)
			complete(&pp->sec->error_complete);
	}
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u32 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_data[port_num].pkey_list_lock);
		/* Check for the PKey again.  A racing process may
		 * have created it.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_data[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_data[port_num].pkey_list);
		}
		spin_unlock(&dev->port_data[port_num].pkey_list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey;

	if (pp->state != IB_PORT_PKEY_LISTED)
		return;

	pkey = get_pkey_idx_qp_list(pp);

	spin_lock(&pkey->qp_list_lock);
	list_del(&pp->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	/* The setting may still be valid, i.e. after
	 * a destroy has failed for example.
	 */
	pp->state = IB_PORT_PKEY_VALID;
}

static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
					  const struct ib_qp_attr *qp_attr,
					  int qp_attr_mask)
{
	struct ib_ports_pkeys *new_pps;
	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
	if (!new_pps)
		return NULL;

	if (qp_attr_mask & IB_QP_PORT)
		new_pps->main.port_num = qp_attr->port_num;
	else if (qp_pps)
		new_pps->main.port_num = qp_pps->main.port_num;

	if (qp_attr_mask & IB_QP_PKEY_INDEX)
		new_pps->main.pkey_index = qp_attr->pkey_index;
	else if (qp_pps)
		new_pps->main.pkey_index = qp_pps->main.pkey_index;

	if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
	     (qp_attr_mask & IB_QP_PORT)) ||
	    (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
		new_pps->main.state = IB_PORT_PKEY_VALID;

	if (qp_attr_mask & IB_QP_ALT_PATH) {
		new_pps->alt.port_num = qp_attr->alt_port_num;
		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
		new_pps->alt.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->alt.port_num = qp_pps->alt.port_num;
		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->alt.state = IB_PORT_PKEY_VALID;
	}

	new_pps->main.sec = qp->qp_sec;
	new_pps->alt.sec = qp->qp_sec;
	return new_pps;
}

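/* A minimal usage sketch, assuming the __ib_open_qp() flow in the
 * verbs core as the caller: when a new handle is opened on a shareable
 * (e.g. XRC target) QP, the handle gets its own security structure and
 * is linked into the real QP's shared_qp_list so that later PKey
 * checks cover it as well:
 *
 *	ret = ib_open_shared_qp_security(qp, real_qp->device);
 *	if (ret)
 *		goto err;	// hypothetical error label
 *	...
 *	ib_close_shared_qp_security(qp->qp_sec);	// on teardown
 */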
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	struct ib_qp *real_qp = qp->real_qp;
	int ret;

	ret = ib_create_qp_security(qp, dev);

	if (ret)
		return ret;

	if (!qp->qp_sec)
		return 0;

	mutex_lock(&real_qp->qp_sec->mutex);
	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
					  qp->qp_sec);

	if (ret)
		goto ret;

	if (qp != real_qp)
		list_add(&qp->qp_sec->shared_qp_list,
			 &real_qp->qp_sec->shared_qp_list);
ret:
	mutex_unlock(&real_qp->qp_sec->mutex);
	if (ret)
		destroy_qp_security(qp->qp_sec);

	return ret;
}

void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
	struct ib_qp *real_qp = sec->qp->real_qp;

	mutex_lock(&real_qp->qp_sec->mutex);
	list_del(&sec->shared_qp_list);
	mutex_unlock(&real_qp->qp_sec->mutex);

	destroy_qp_security(sec);
}

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	unsigned int i;
	bool is_ib = false;
	int ret;

	rdma_for_each_port(dev, i) {
		is_ib = rdma_protocol_ib(dev, i);
		if (is_ib)
			break;
	}

	/* If this isn't an IB device don't create the security context */
	if (!is_ib)
		return 0;

	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
	if (!qp->qp_sec)
		return -ENOMEM;

	qp->qp_sec->qp = qp;
	qp->qp_sec->dev = dev;
	mutex_init(&qp->qp_sec->mutex);
	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
	atomic_set(&qp->qp_sec->error_list_count, 0);
	init_completion(&qp->qp_sec->error_complete);
	ret = security_ib_alloc_security(&qp->qp_sec->security);
	if (ret) {
		kfree(qp->qp_sec);
		qp->qp_sec = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

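/* QP teardown is split into three phases so that a concurrent cache
 * change never walks a half-destroyed QP.  A minimal sketch of the
 * expected call sequence, assuming the ib_destroy_qp() flow in the
 * verbs core as the caller:
 *
 *	ib_destroy_qp_security_begin(sec);
 *	ret = qp->device->ops.destroy_qp(qp, udata);
 *	if (ret)
 *		ib_destroy_qp_security_abort(sec);
 *	else
 *		ib_destroy_qp_security_end(sec);
 */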
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	/* Return if not IB */
	if (!sec)
		return;

	mutex_lock(&sec->mutex);

	/* Remove the QP from the lists so it won't get added to
	 * a to_error_list during the destroy process.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already in one or more of those lists
	 * the destroying flag will ensure the to-error flow
	 * doesn't operate on an undefined QP.
	 */
	sec->destroying = true;

	/* Record the error list count to know how many completions
	 * to wait for.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is in progress this
	 * QP security could be marked for an error state
	 * transition.  Wait for this to complete.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the position in the lists and verify
	 * access is still allowed in case a cache update
	 * occurred while attempting to destroy.
	 *
	 * Because these settings were already listed and
	 * then removed during ib_destroy_qp_security_begin
	 * we know the pkey_index_qp_list for the PKey
	 * already exists so port_pkey_list_insert won't fail.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is occurring we must
	 * wait until this QP security structure is processed
	 * in the QP to-error flow before destroying it because
	 * the to_error_list is in use.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	destroy_qp_security(sec);
}

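/* Entered from the IB cache update path when a port's PKey table or
 * subnet prefix may have changed; every QP listed under each PKey
 * index on that port is revalidated against the new values.
 */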
void ib_security_cache_change(struct ib_device *device,
			      u32 port_num,
			      u64 subnet_prefix)
{
	struct pkey_index_qp_list *pkey;

	list_for_each_entry(pkey, &device->port_data[port_num].pkey_list,
			    pkey_index_list) {
		check_pkey_qps(pkey,
			       device,
			       port_num,
			       subnet_prefix);
	}
}

void ib_security_release_port_pkey_list(struct ib_device *device)
{
	struct pkey_index_qp_list *pkey, *tmp_pkey;
	unsigned int i;

	rdma_for_each_port(device, i) {
		list_for_each_entry_safe(pkey,
					 tmp_pkey,
					 &device->port_data[i].pkey_list,
					 pkey_index_list) {
			list_del(&pkey->pkey_index_list);
			kfree(pkey);
		}
	}
}

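/* QP state transitions on IB devices are expected to funnel through
 * here (see ib_modify_qp() in the verbs core) so that new port/PKey
 * settings are listed and permission-checked before the driver is
 * asked to apply them.
 */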
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata)
{
	int ret = 0;
	struct ib_ports_pkeys *tmp_pps;
	struct ib_ports_pkeys *new_pps = NULL;
	struct ib_qp *real_qp = qp->real_qp;
	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
			   real_qp->qp_type == IB_QPT_GSI ||
			   real_qp->qp_type >= IB_QPT_RESERVED1);
	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
			   (qp_attr_mask & IB_QP_ALT_PATH));

	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
		   !real_qp->qp_sec),
		  "%s: QP security is not initialized for IB QP: %u\n",
		  __func__, real_qp->qp_num);

	/* The port/pkey settings are maintained only for the real QP. Open
	 * handles on the real QP will be in the shared_qp_list. When
	 * enforcing security on the real QP all the shared QPs will be
	 * checked as well.
	 */

	if (pps_change && !special_qp && real_qp->qp_sec) {
		mutex_lock(&real_qp->qp_sec->mutex);
		new_pps = get_new_pps(real_qp,
				      qp_attr,
				      qp_attr_mask);
		if (!new_pps) {
			mutex_unlock(&real_qp->qp_sec->mutex);
			return -ENOMEM;
		}
		/* Add this QP to the lists for the new port
		 * and pkey settings before checking for permission
		 * in case there is a concurrent cache update
		 * occurring.  Walking the list for a cache change
		 * doesn't acquire the security mutex unless it's
		 * sending the QP to error.
		 */
		ret = port_pkey_list_insert(&new_pps->main);

		if (!ret)
			ret = port_pkey_list_insert(&new_pps->alt);

		if (!ret)
			ret = check_qp_port_pkey_settings(new_pps,
							  real_qp->qp_sec);
	}

	if (!ret)
		ret = real_qp->device->ops.modify_qp(real_qp,
						     qp_attr,
						     qp_attr_mask,
						     udata);

	if (new_pps) {
		/* Clean up the lists and free the appropriate
		 * ports_pkeys structure.
		 */
		if (ret) {
			tmp_pps = new_pps;
		} else {
			tmp_pps = real_qp->qp_sec->ports_pkeys;
			real_qp->qp_sec->ports_pkeys = new_pps;
		}

		if (tmp_pps) {
			port_pkey_list_remove(&tmp_pps->main);
			port_pkey_list_remove(&tmp_pps->alt);
		}
		kfree(tmp_pps);
		mutex_unlock(&real_qp->qp_sec->mutex);
	}
	return ret;
}

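/* Resolve a (port, pkey_index) pair to its PKey value and subnet
 * prefix and ask the LSM whether the given security context may use
 * it.  Non-IB ports always pass.
 */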
static int ib_security_pkey_access(struct ib_device *dev,
				   u32 port_num,
				   u16 pkey_index,
				   void *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret;

	if (!rdma_protocol_ib(dev, port_num))
		return 0;

	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
	if (ret)
		return ret;

	ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

	return security_ib_pkey_access(sec, subnet_prefix, pkey);
}

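/* Presumably invoked on an LSM policy change (the core registers an
 * LSM notifier for this); recompute the cached smp_allowed verdict for
 * every registered SMI agent.
 */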
void ib_mad_agent_security_change(void)
{
	struct ib_mad_agent *ag;

	spin_lock(&mad_agent_list_lock);
	list_for_each_entry(ag,
			    &mad_agent_list,
			    mad_agent_sec_list)
		WRITE_ONCE(ag->smp_allowed,
			   !security_ib_endport_manage_subnet(ag->security,
				dev_name(&ag->device->dev), ag->port_num));
	spin_unlock(&mad_agent_list_lock);
}

int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type)
{
	int ret;

	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return 0;

	INIT_LIST_HEAD(&agent->mad_agent_sec_list);

	ret = security_ib_alloc_security(&agent->security);
	if (ret)
		return ret;

	if (qp_type != IB_QPT_SMI)
		return 0;

	spin_lock(&mad_agent_list_lock);
	ret = security_ib_endport_manage_subnet(agent->security,
						dev_name(&agent->device->dev),
						agent->port_num);
	if (ret)
		goto free_security;

	WRITE_ONCE(agent->smp_allowed, true);
	list_add(&agent->mad_agent_sec_list, &mad_agent_list);
	spin_unlock(&mad_agent_list_lock);
	return 0;

free_security:
	spin_unlock(&mad_agent_list_lock);
	security_ib_free_security(agent->security);
	return ret;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return;

	if (agent->qp->qp_type == IB_QPT_SMI) {
		spin_lock(&mad_agent_list_lock);
		list_del(&agent->mad_agent_sec_list);
		spin_unlock(&mad_agent_list_lock);
	}

	security_ib_free_security(agent->security);
}

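/* Per-MAD permission check, assuming the MAD send/receive paths as
 * callers: SMI agents are gated by the cached smp_allowed flag, while
 * all other agents get a full PKey access check for the index in use.
 */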
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
		return 0;

	if (map->agent.qp->qp_type == IB_QPT_SMI) {
		if (!READ_ONCE(map->agent.smp_allowed))
			return -EACCES;
		return 0;
	}

	return ib_security_pkey_access(map->agent.device,
				       map->agent.port_num,
				       pkey_index,
				       map->agent.security);
}