// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_hba.c
 *
 * This file contains the TCM HBA Transport related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

static LIST_HEAD(backend_list);
static DEFINE_MUTEX(backend_mutex);

static u32 hba_id_counter;

static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);

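/*
 * transport_backend_register() - register a storage backend with the core
 *
 * Wraps @ops in a struct target_backend, rejects duplicate backend names
 * with -EEXIST, sets up the backend's configfs item types and links it
 * into the global backend_list.
 *
 * A backend driver would typically call this from its module init hook;
 * a minimal sketch (the "foo" names here are hypothetical):
 *
 *	static const struct target_backend_ops foo_ops = {
 *		.name		= "foo",
 *		.owner		= THIS_MODULE,
 *		.attach_hba	= foo_attach_hba,
 *		.detach_hba	= foo_detach_hba,
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return transport_backend_register(&foo_ops);
 *	}
 */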
int transport_backend_register(const struct target_backend_ops *ops)
{
	struct target_backend *tb, *old;

	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return -ENOMEM;
	tb->ops = ops;

	mutex_lock(&backend_mutex);
	list_for_each_entry(old, &backend_list, list) {
		if (!strcmp(old->ops->name, ops->name)) {
			pr_err("backend %s already registered.\n", ops->name);
			mutex_unlock(&backend_mutex);
			kfree(tb);
			return -EEXIST;
		}
	}
	target_setup_backend_cits(tb);
	list_add_tail(&tb->list, &backend_list);
	mutex_unlock(&backend_mutex);

	pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
			ops->name, ops->owner);
	return 0;
}
EXPORT_SYMBOL(transport_backend_register);

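/*
 * target_backend_unregister() - remove a previously registered backend
 *
 * Unlinks @ops from backend_list and waits in rcu_barrier() for any
 * call_rcu() callbacks queued by the backend's ->free_device() to run,
 * so the backend module's text cannot be unloaded while callbacks are
 * still pending.
 */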
void target_backend_unregister(const struct target_backend_ops *ops)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (tb->ops == ops) {
			list_del(&tb->list);
			mutex_unlock(&backend_mutex);
			/*
			 * Wait for any outstanding backend driver ->rcu_head
			 * callbacks to complete post TBO->free_device() ->
			 * call_rcu(), before allowing backend driver module
			 * unload of target_backend_ops->owner to proceed.
			 */
			rcu_barrier();
			kfree(tb);
			return;
		}
	}
	mutex_unlock(&backend_mutex);
}
EXPORT_SYMBOL(target_backend_unregister);

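/*
 * core_get_backend() - look up a backend by name and pin its module
 *
 * Returns the matching struct target_backend with a reference held on
 * its owning module, or NULL if no backend of that name is registered
 * or the module is already being unloaded.
 */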
static struct target_backend *core_get_backend(const char *name)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (!strcmp(tb->ops->name, name))
			goto found;
	}
	mutex_unlock(&backend_mutex);
	return NULL;
found:
	if (tb->ops->owner && !try_module_get(tb->ops->owner))
		tb = NULL;
	mutex_unlock(&backend_mutex);
	return tb;
}

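/*
 * core_alloc_hba() - allocate a virtual HBA and attach it to a backend
 *
 * Looks up the backend named @plugin_name, invokes its ->attach_hba()
 * callback with @plugin_dep_id, and adds the new HBA to the global
 * hba_list under hba_lock. Returns the HBA on success or an ERR_PTR()
 * on failure, so callers must check the result with IS_ERR().
 */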
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
	struct se_hba *hba;
	int ret = 0;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	if (!hba) {
		pr_err("Unable to allocate struct se_hba\n");
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&hba->device_lock);
	mutex_init(&hba->hba_access_mutex);

	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;

	hba->backend = core_get_backend(plugin_name);
	if (!hba->backend) {
		ret = -EINVAL;
		goto out_free_hba;
	}

	ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
	if (ret < 0)
		goto out_module_put;

	spin_lock(&hba_lock);
	hba->hba_id = hba_id_counter++;
	list_add_tail(&hba->hba_node, &hba_list);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
			" Core\n", hba->hba_id);

	return hba;

out_module_put:
	module_put(hba->backend->ops->owner);
	hba->backend = NULL;
out_free_hba:
	kfree(hba);
	return ERR_PTR(ret);
}

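/*
 * core_delete_hba() - detach an HBA and free it
 *
 * Reverses core_alloc_hba(): calls the backend's ->detach_hba(),
 * unlinks the HBA from hba_list and drops the module reference taken
 * by core_get_backend(). The HBA must have no devices left attached
 * (hba->dev_count is expected to be zero).
 */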
int
core_delete_hba(struct se_hba *hba)
{
	WARN_ON(hba->dev_count);

	hba->backend->ops->detach_hba(hba);

	spin_lock(&hba_lock);
	list_del(&hba->hba_node);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
			" Core\n", hba->hba_id);

	module_put(hba->backend->ops->owner);

	hba->backend = NULL;
	kfree(hba);
	return 0;
}

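/*
 * target_sense_desc_format() - should sense data use descriptor format?
 *
 * Fixed-format sense data only carries a 32-bit INFORMATION field, so
 * descriptor format is needed once ->get_blocks() reports LBAs beyond
 * U32_MAX. A NULL @dev yields false.
 */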
bool target_sense_desc_format(struct se_device *dev)
{
	return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
}