// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

/*  __core_tpg_get_initiator_node_acl():
 *
 *  mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
    struct se_portal_group *tpg,
    const char *initiatorname)
{
    struct se_node_acl *acl;

    list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
        if (!strcmp(acl->initiatorname, initiatorname))
            return acl;
    }

    return NULL;
}

/*  core_tpg_get_initiator_node_acl():
 *
 *
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
    struct se_portal_group *tpg,
    unsigned char *initiatorname)
{
    struct se_node_acl *acl;
    /*
     * Obtain se_node_acl->acl_kref using fabric driver provided
     * initiatorname[] during node acl endpoint lookup driven by
     * new se_session login.
     *
     * The reference is held until se_session shutdown -> release
     * occurs via fabric driver invoked transport_deregister_session()
     * or transport_free_session() code.
     */
    mutex_lock(&tpg->acl_node_mutex);
    acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
    if (acl) {
        if (!kref_get_unless_zero(&acl->acl_kref))
            acl = NULL;
    }
    mutex_unlock(&tpg->acl_node_mutex);

    return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

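/*
 * Queue a NEXUS LOSS OCCURRED (ASC 0x29) Unit Attention on every
 * se_dev_entry mapped to this node ACL.  Safe to call with a NULL
 * nacl; the lun_entry_hlist walk runs under rcu_read_lock().
 */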
void core_allocate_nexus_loss_ua(
    struct se_node_acl *nacl)
{
    struct se_dev_entry *deve;

    if (!nacl)
        return;

    rcu_read_lock();
    hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
        core_scsi3_ua_allocate(deve, 0x29,
            ASCQ_29H_NEXUS_LOSS_OCCURRED);
    rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*  core_tpg_add_node_to_devs():
 *
 *
 */
void core_tpg_add_node_to_devs(
    struct se_node_acl *acl,
    struct se_portal_group *tpg,
    struct se_lun *lun_orig)
{
    bool lun_access_ro = true;
    struct se_lun *lun;
    struct se_device *dev;

    mutex_lock(&tpg->tpg_lun_mutex);
    hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
        if (lun_orig && lun != lun_orig)
            continue;

        dev = rcu_dereference_check(lun->lun_se_dev,
                        lockdep_is_held(&tpg->tpg_lun_mutex));
        /*
         * By default in LIO-Target $FABRIC_MOD,
         * demo_mode_write_protect is ON, or READ_ONLY;
         */
        if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
            lun_access_ro = false;
        } else {
            /*
             * Allow only optical drives to issue R/W in default RO
             * demo mode.
             */
            if (dev->transport->get_device_type(dev) == TYPE_DISK)
                lun_access_ro = true;
            else
                lun_access_ro = false;
        }

        pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
            " access for LUN in Demo Mode\n",
            tpg->se_tpg_tfo->fabric_name,
            tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
            lun_access_ro ? "READ-ONLY" : "READ-WRITE");

        core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
                         lun_access_ro, acl, tpg);
        /*
         * Check to see if there are any existing persistent reservation
         * APTPL pre-registrations that need to be enabled for this dynamic
         * LUN ACL now..
         */
        core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
                            lun->unpacked_lun);
    }
    mutex_unlock(&tpg->tpg_lun_mutex);
}

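/*
 * Apply the requested queue depth to a node ACL, warning and falling
 * back to a depth of 1 when the fabric requests zero.
 */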
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
                struct se_node_acl *acl, u32 queue_depth)
{
    acl->queue_depth = queue_depth;

    if (!acl->queue_depth) {
        pr_warn("Queue depth for %s Initiator Node: %s is 0, "
            "defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
            acl->initiatorname);
        acl->queue_depth = 1;
    }
}

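/*
 * Allocate and initialize a struct se_node_acl sized to cover the fabric
 * driver's node_acl_size.  The default queue depth comes from the fabric's
 * tpg_get_default_depth() callback when implemented, otherwise 1.
 */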
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
        const unsigned char *initiatorname)
{
    struct se_node_acl *acl;
    u32 queue_depth;

    acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
            GFP_KERNEL);
    if (!acl)
        return NULL;

    INIT_LIST_HEAD(&acl->acl_list);
    INIT_LIST_HEAD(&acl->acl_sess_list);
    INIT_HLIST_HEAD(&acl->lun_entry_hlist);
    kref_init(&acl->acl_kref);
    init_completion(&acl->acl_free_comp);
    spin_lock_init(&acl->nacl_sess_lock);
    mutex_init(&acl->lun_entry_mutex);
    atomic_set(&acl->acl_pr_ref_count, 0);

    if (tpg->se_tpg_tfo->tpg_get_default_depth)
        queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
    else
        queue_depth = 1;
    target_set_nacl_queue_depth(tpg, acl, queue_depth);

    snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
    acl->se_tpg = tpg;
    acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

    tpg->se_tpg_tfo->set_default_node_attributes(acl);

    return acl;
}

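/*
 * Link a freshly allocated ACL onto tpg->acl_node_list under
 * acl_node_mutex and log whether it was created dynamically.
 */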
static void target_add_node_acl(struct se_node_acl *acl)
{
    struct se_portal_group *tpg = acl->se_tpg;

    mutex_lock(&tpg->acl_node_mutex);
    list_add_tail(&acl->acl_list, &tpg->acl_node_list);
    mutex_unlock(&tpg->acl_node_mutex);

    pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
        " Initiator Node: %s\n",
        tpg->se_tpg_tfo->fabric_name,
        tpg->se_tpg_tfo->tpg_get_tag(tpg),
        acl->dynamic_node_acl ? "DYNAMIC" : "",
        acl->queue_depth,
        tpg->se_tpg_tfo->fabric_name,
        acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
                 const char *initiatorname)
{
    struct se_node_acl *acl;
    bool found = false;

    mutex_lock(&tpg->acl_node_mutex);
    list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
        if (!strcmp(acl->initiatorname, initiatorname)) {
            found = true;
            break;
        }
    }
    mutex_unlock(&tpg->acl_node_mutex);

    return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

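/*
 * Called by fabric drivers at session login.  Returns an existing node ACL
 * matching initiatorname, or - when the fabric permits demo mode - allocates
 * a dynamic ACL and (unless tpg_check_demo_mode_login_only() says otherwise)
 * exports all active TPG LUNs to it via core_tpg_add_node_to_devs().
 */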
struct se_node_acl *core_tpg_check_initiator_node_acl(
    struct se_portal_group *tpg,
    unsigned char *initiatorname)
{
    struct se_node_acl *acl;

    acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
    if (acl)
        return acl;

    if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
        return NULL;

    acl = target_alloc_node_acl(tpg, initiatorname);
    if (!acl)
        return NULL;
    /*
     * When allocating a dynamically generated node_acl, go ahead
     * and take the extra kref now before returning to the fabric
     * driver caller.
     *
     * Note this reference will be released at session shutdown
     * time within transport_free_session() code.
     */
    kref_get(&acl->acl_kref);
    acl->dynamic_node_acl = 1;

    /*
     * Here we only create demo-mode MappedLUNs from the active
     * TPG LUNs if the fabric is not explicitly asking for
     * tpg_check_demo_mode_login_only() == 1.
     */
    if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
        (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
        core_tpg_add_node_to_devs(acl, tpg, NULL);

    target_add_node_acl(acl);
    return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

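/*
 * Busy-wait until all outstanding persistent reservation references
 * to this node ACL have been dropped.
 */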
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
    while (atomic_read(&nacl->acl_pr_ref_count) != 0)
        cpu_relax();
}

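/*
 * Create an explicit node ACL (the configfs path).  An existing dynamic ACL
 * for the same initiator is converted in place; an existing explicit ACL is
 * rejected with -EEXIST.
 */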
struct se_node_acl *core_tpg_add_initiator_node_acl(
    struct se_portal_group *tpg,
    const char *initiatorname)
{
    struct se_node_acl *acl;

    mutex_lock(&tpg->acl_node_mutex);
    acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
    if (acl) {
        if (acl->dynamic_node_acl) {
            acl->dynamic_node_acl = 0;
            pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
                " for %s\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
            mutex_unlock(&tpg->acl_node_mutex);
            return acl;
        }

        pr_err("ACL entry for %s Initiator"
            " Node %s already exists for TPG %u, ignoring"
            " request.\n",  tpg->se_tpg_tfo->fabric_name,
            initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
        mutex_unlock(&tpg->acl_node_mutex);
        return ERR_PTR(-EEXIST);
    }
    mutex_unlock(&tpg->acl_node_mutex);

    acl = target_alloc_node_acl(tpg, initiatorname);
    if (!acl)
        return ERR_PTR(-ENOMEM);

    target_add_node_acl(acl);
    return acl;
}

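/*
 * Close every session still active on this ACL.  The nacl_sess_lock is
 * dropped around each close_session() callback, so the list walk restarts
 * from the head after every session torn down.
 */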
static void target_shutdown_sessions(struct se_node_acl *acl)
{
    struct se_session *sess;
    unsigned long flags;

restart:
    spin_lock_irqsave(&acl->nacl_sess_lock, flags);
    list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
        if (atomic_read(&sess->stopped))
            continue;

        list_del_init(&sess->sess_acl_list);
        spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

        if (acl->se_tpg->se_tpg_tfo->close_session)
            acl->se_tpg->se_tpg_tfo->close_session(sess);
        goto restart;
    }
    spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

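/*
 * Tear down a node ACL: unlink it from the TPG, shut down any remaining
 * sessions, wait for the final acl_kref and PR references to drop, release
 * its mapped LUN entries, and free the memory.
 */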
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
    struct se_portal_group *tpg = acl->se_tpg;

    mutex_lock(&tpg->acl_node_mutex);
    if (acl->dynamic_node_acl)
        acl->dynamic_node_acl = 0;
    list_del_init(&acl->acl_list);
    mutex_unlock(&tpg->acl_node_mutex);

    target_shutdown_sessions(acl);

    target_put_nacl(acl);
    /*
     * Wait for last target_put_nacl() to complete in target_complete_nacl()
     * for active fabric session transport_deregister_session() callbacks.
     */
    wait_for_completion(&acl->acl_free_comp);

    core_tpg_wait_for_nacl_pr_ref(acl);
    core_free_device_list_for_node(acl, tpg);

    pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
        " Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
        tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
        tpg->se_tpg_tfo->fabric_name, acl->initiatorname);

    kfree(acl);
}

/*  core_tpg_set_initiator_node_queue_depth():
 *
 *
 */
int core_tpg_set_initiator_node_queue_depth(
    struct se_node_acl *acl,
    u32 queue_depth)
{
    struct se_portal_group *tpg = acl->se_tpg;

    /*
     * Allow the setting of se_node_acl queue_depth to be idempotent,
     * and not force a session shutdown event if the value is not
     * changing.
     */
    if (acl->queue_depth == queue_depth)
        return 0;
    /*
     * User has requested to change the queue depth for an Initiator Node.
     * Change the value in the Node's struct se_node_acl, and call
     * target_set_nacl_queue_depth() to set the new queue depth.
     */
    target_set_nacl_queue_depth(tpg, acl, queue_depth);

    /*
     * Shutdown all pending sessions to force session reinstatement.
     */
    target_shutdown_sessions(acl);

    pr_debug("Successfully changed queue depth to: %d for Initiator"
        " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
        acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
        tpg->se_tpg_tfo->tpg_get_tag(tpg));

    return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*  core_tpg_set_initiator_node_tag():
 *
 *  Initiator nodeacl tags are not used internally, but may be used by
 *  userspace to emulate aliases or groups.
 *  Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
    struct se_portal_group *tpg,
    struct se_node_acl *acl,
    const char *new_tag)
{
    if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
        return -EINVAL;

    if (!strncmp("NULL", new_tag, 4)) {
        acl->acl_tag[0] = '\0';
        return 0;
    }

    return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
    struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

    complete(&lun->lun_shutdown_comp);
}

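/*
 * Typical registration call from a fabric driver's ->fabric_make_tpg()
 * handler (a minimal sketch; the "tpg" container and the iSCSI proto_id
 * choice are illustrative, not taken from this file):
 *
 *     ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *     if (ret)
 *             return ERR_PTR(ret);
 */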
/* Does not change se_wwn->priv. */
int core_tpg_register(
    struct se_wwn *se_wwn,
    struct se_portal_group *se_tpg,
    int proto_id)
{
    int ret;

    if (!se_tpg)
        return -EINVAL;
    /*
     * For the typical case where core_tpg_register() is called by a
     * fabric driver from target_core_fabric_ops->fabric_make_tpg()
     * configfs context, use the original tf_ops pointer already saved
     * by target-core in target_fabric_make_wwn().
     *
     * Otherwise, for special cases like iscsi-target discovery TPGs
     * the caller is responsible for setting ->se_tpg_tfo ahead of
     * calling core_tpg_register().
     */
    if (se_wwn)
        se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

    if (!se_tpg->se_tpg_tfo) {
        pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
        return -EINVAL;
    }

    INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
    se_tpg->proto_id = proto_id;
    se_tpg->se_tpg_wwn = se_wwn;
    atomic_set(&se_tpg->tpg_pr_ref_count, 0);
    INIT_LIST_HEAD(&se_tpg->acl_node_list);
    INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
    spin_lock_init(&se_tpg->session_lock);
    mutex_init(&se_tpg->tpg_lun_mutex);
    mutex_init(&se_tpg->acl_node_mutex);

    if (se_tpg->proto_id >= 0) {
        se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
        if (IS_ERR(se_tpg->tpg_virt_lun0))
            return PTR_ERR(se_tpg->tpg_virt_lun0);

        ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
                true, g_lun0_dev);
        if (ret < 0) {
            kfree(se_tpg->tpg_virt_lun0);
            return ret;
        }
    }

    pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
         "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
        se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
        se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
        se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

    return 0;
}
EXPORT_SYMBOL(core_tpg_register);

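/*
 * Undo core_tpg_register(): wait for outstanding PR references, release any
 * remaining demo-mode generated ACLs, and remove the virtual LUN 0 when the
 * TPG was registered with a real proto_id.
 */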
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
    const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
    struct se_node_acl *nacl, *nacl_tmp;
    LIST_HEAD(node_list);

    pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
         "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
        tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
        se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

    while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
        cpu_relax();

    mutex_lock(&se_tpg->acl_node_mutex);
    list_splice_init(&se_tpg->acl_node_list, &node_list);
    mutex_unlock(&se_tpg->acl_node_mutex);
    /*
     * Release any remaining demo-mode generated se_node_acl that have
     * not been released because of TFO->tpg_check_demo_mode_cache() == 1
     * in transport_deregister_session().
     */
    list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
        list_del_init(&nacl->acl_list);

        core_tpg_wait_for_nacl_pr_ref(nacl);
        core_free_device_list_for_node(nacl, se_tpg);
        kfree(nacl);
    }

    if (se_tpg->proto_id >= 0) {
        core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
        kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
    }

    return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

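/*
 * Allocate and initialize a struct se_lun for the given unpacked LUN; the
 * LUN does not become visible in the TPG until core_tpg_add_lun() completes.
 */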
struct se_lun *core_tpg_alloc_lun(
    struct se_portal_group *tpg,
    u64 unpacked_lun)
{
    struct se_lun *lun;

    lun = kzalloc(sizeof(*lun), GFP_KERNEL);
    if (!lun) {
        pr_err("Unable to allocate se_lun memory\n");
        return ERR_PTR(-ENOMEM);
    }
    lun->unpacked_lun = unpacked_lun;
    atomic_set(&lun->lun_acl_count, 0);
    init_completion(&lun->lun_shutdown_comp);
    INIT_LIST_HEAD(&lun->lun_deve_list);
    INIT_LIST_HEAD(&lun->lun_dev_link);
    atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
    spin_lock_init(&lun->lun_deve_lock);
    mutex_init(&lun->lun_tg_pt_md_mutex);
    INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
    spin_lock_init(&lun->lun_tg_pt_gp_lock);
    lun->lun_tpg = tpg;

    return lun;
}

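/*
 * Export a backend device as a LUN within the TPG: initialize the percpu
 * lun_ref, allocate a relative target port identifier, attach the default
 * ALUA target port group, and link the LUN onto the device's dev_sep_list
 * and (for non-internal HBAs) the TPG's tpg_lun_hlist.
 */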
int core_tpg_add_lun(
    struct se_portal_group *tpg,
    struct se_lun *lun,
    bool lun_access_ro,
    struct se_device *dev)
{
    int ret;

    ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
                  GFP_KERNEL);
    if (ret < 0)
        goto out;

    ret = core_alloc_rtpi(lun, dev);
    if (ret)
        goto out_kill_ref;

    if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
        !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
        target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

    mutex_lock(&tpg->tpg_lun_mutex);

    spin_lock(&dev->se_port_lock);
    lun->lun_index = dev->dev_index;
    rcu_assign_pointer(lun->lun_se_dev, dev);
    dev->export_count++;
    list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
    spin_unlock(&dev->se_port_lock);

    if (dev->dev_flags & DF_READ_ONLY)
        lun->lun_access_ro = true;
    else
        lun->lun_access_ro = lun_access_ro;
    if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
        hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
    mutex_unlock(&tpg->tpg_lun_mutex);

    return 0;

out_kill_ref:
    percpu_ref_exit(&lun->lun_ref);
out:
    return ret;
}

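/*
 * Reverse of core_tpg_add_lun(): clear the LUN from the TPG, wait for
 * outstanding lun_ref I/O references to drain, detach the ALUA target port
 * group, and unlink the LUN from the device and TPG lists.
 */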
void core_tpg_remove_lun(
    struct se_portal_group *tpg,
    struct se_lun *lun)
{
    /*
     * rcu_dereference_raw protected by se_lun->lun_group symlink
     * reference to se_device->dev_group.
     */
    struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

    lun->lun_shutdown = true;

    core_clear_lun_from_tpg(lun, tpg);
    /*
     * Wait for any active I/O references to percpu se_lun->lun_ref to
     * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
     * logic when referencing a remote target port during ALL_TGT_PT=1
     * and generating UNIT_ATTENTIONs for ALUA access state transition.
     */
    transport_clear_lun_ref(lun);

    mutex_lock(&tpg->tpg_lun_mutex);
    if (lun->lun_se_dev) {
        target_detach_tg_pt_gp(lun);

        spin_lock(&dev->se_port_lock);
        list_del(&lun->lun_dev_link);
        dev->export_count--;
        rcu_assign_pointer(lun->lun_se_dev, NULL);
        spin_unlock(&dev->se_port_lock);
    }
    if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
        hlist_del_rcu(&lun->link);

    lun->lun_shutdown = false;
    mutex_unlock(&tpg->tpg_lun_mutex);

    percpu_ref_exit(&lun->lun_ref);
}