/*
 * target_core_device.c
 *
 * Generic TCM device and LUN handling: per-command and per-TMR LUN lookup,
 * per-node ACL device entry (se_dev_entry) management, RTPI allocation, and
 * se_device allocation, configuration, and teardown.
 */

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;

struct se_device *g_lun0_dev;

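/*
 * Resolve se_cmd->orig_fe_lun to an active se_lun/se_device for a SCSI
 * command, taking a percpu lun_ref and accounting per-deve and per-device
 * command and byte statistics along the way.
 */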
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_dev_entry *deve;
	sense_reason_t ret = TCM_NO_SENSE;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    deve->lun_access_ro) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08llx\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (se_cmd->orig_fe_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08llx from %s\n",
				se_cmd->se_tfo->fabric_name,
				se_cmd->orig_fe_lun,
				nacl->initiatorname);
			return TCM_NON_EXISTENT_LUN;
		}

		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = se_sess->se_tpg->tpg_virt_lun0;
		if (!percpu_ref_tryget_live(&se_lun->lun_ref))
			return TCM_NON_EXISTENT_LUN;

		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}

	/*
	 * RCU reference protected by the percpu se_lun->lun_ref taken
	 * above.
	 */
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length,
				&se_cmd->se_dev->read_bytes);

	return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);

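/*
 * Resolve se_cmd->orig_fe_lun for a task management request and queue the
 * se_tmr_req on the backing device's dev_tmr_list. Unlike the command path,
 * there is no virtual LUN 0 fallback; an unknown LUN returns -ENODEV.
 */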
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
	if (deve) {
		se_lun = deve->se_lun;

		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
			se_lun = NULL;
			goto out_unlock;
		}

		se_cmd->se_lun = se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		se_cmd->lun_ref_active = true;
	}
out_unlock:
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08llx for %s\n",
			se_cmd->se_tfo->fabric_name,
			se_cmd->orig_fe_lun,
			nacl->initiatorname);
		return -ENODEV;
	}
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

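/*
 * Return true if the initiator's mapped LUN for this command is configured
 * read-only in its se_dev_entry.
 */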
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = deve && deve->lun_access_ro;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * Look up the se_dev_entry on a node ACL whose LUN matches the given
 * RELATIVE TARGET PORT IDENTIFIER. On success a pr_kref reference is taken
 * on the returned entry; the caller must drop it with kref_put() against
 * target_pr_kref_release().
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}

void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);
}

void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		deve->lun_access_ro = lun_access_ro;
	mutex_unlock(&nacl->lun_entry_mutex);
}

/*
 * Callers must hold rcu_read_lock() or nacl->lun_entry_mutex.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);

void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}

static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{
	struct se_dev_entry *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
		if (skip_new && tmp == new)
			continue;
		core_scsi3_ua_allocate(tmp, 0x3F,
				       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
	}
	rcu_read_unlock();
}

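/*
 * Create (or replace) the se_dev_entry mapping a node ACL's mapped_lun to a
 * se_lun. If an existing entry from a dynamic (demo-mode) ACL is found for
 * the same LUN, it is swapped for the new explicit entry under
 * nacl->lun_entry_mutex and freed via RCU once outstanding pr_kref holders
 * have completed.
 */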
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->ua_list);
	INIT_LIST_HEAD(&new->lun_link);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	new->lun_access_ro = lun_access_ro;
	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = orig->se_lun;

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		if (orig->se_lun_acl != NULL) {
			pr_warn_ratelimited("Detected existing explicit"
				" se_lun_acl->se_lun_group reference for %s"
				" mapped_lun: %llu, failing\n",
				nacl->initiatorname, mapped_lun);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}

		new->se_lun = lun;
		new->se_lun_acl = lun_acl;
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock(&lun->lun_deve_lock);
		list_del(&orig->lun_link);
		list_add_tail(&new->lun_link, &lun->lun_deve_list);
		spin_unlock(&lun->lun_deve_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		target_luns_data_has_changed(nacl, new, true);
		kfree_rcu(orig, rcu_head);
		return 0;
	}

	new->se_lun = lun;
	new->se_lun_acl = lun_acl;
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock(&lun->lun_deve_lock);
	list_add_tail(&new->lun_link, &lun->lun_deve_list);
	spin_unlock(&lun->lun_deve_lock);

	target_luns_data_has_changed(nacl, new, true);
	return 0;
}

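/*
 * Tear down a node ACL's se_dev_entry: unlink it from the LUN and the ACL,
 * release pending unit attentions and PR registrations, and free the entry
 * via RCU once in-flight pr_kref holders have dropped their references.
 * Called with nacl->lun_entry_mutex held.
 */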
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	/*
	 * rcu_dereference_raw() is safe here: the LUN's configfs symlink to
	 * the backing device keeps the se_device alive for the duration of
	 * this call.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lockdep_assert_held(&nacl->lun_entry_mutex);
	/*
	 * Remove orig from lun->lun_deve_list before clearing the rest of
	 * the entry below; the ALUA transition code walks this list and
	 * expects entries on it to be fully populated.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs that have
	 * not been explicitly converted to MappedLUNs, but the lun_link
	 * removal and the UA/PR cleanup below apply to those entries as well.
	 */
	spin_lock(&lun->lun_deve_lock);
	list_del(&orig->lun_link);
	spin_unlock(&lun->lun_deve_lock);

	/*
	 * Disable the struct se_dev_entry LUN ACL mapping.
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	orig->lun_access_ro = false;
	orig->creation_time = 0;
	orig->attach_count--;

	/*
	 * Before firing off the RCU callback, wait for any in-process
	 * SPEC_I_PT=1 or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
	target_luns_data_has_changed(nacl, NULL, false);
}

/*
 * core_clear_lun_from_tpg() - disable a LUN in every node ACL of the TPG
 * before the LUN itself is removed.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			if (lun != deve->se_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}

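/*
 * Assign a unique, non-zero RELATIVE TARGET PORT IDENTIFIER to a LUN being
 * exported from the given se_device.
 */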
int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
{
	struct se_lun *tmp;

	spin_lock(&dev->se_port_lock);
	if (dev->export_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return -ENOSPC;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
	 * struct se_device. Per the SPC-4 RELATIVE TARGET PORT IDENTIFIER
	 * field definition:
	 *
	 * Code		Description
	 * 0h		Reserved
	 * 1h		Relative port 1, historically known as port A
	 * 2h		Relative port 2, historically known as port B
	 * 3h to FFFFh	Relative port 3 through 65 535
	 *
	 * so a value of zero is skipped on counter wrap.
	 */
	lun->lun_rtpi = dev->dev_rpti_counter++;
	if (!lun->lun_rtpi)
		goto again;

	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
		/*
		 * Make sure the RELATIVE TARGET PORT IDENTIFIER is unique
		 * across existing exports, including after 16-bit wrap.
		 */
		if (lun->lun_rtpi == tmp->lun_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return 0;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Round max_sectors down to a multiple of the number of sectors per
	 * PAGE_SIZE so that I/O sizes stay page aligned.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}

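/*
 * Activate a LUN for a device within a target portal group and, in demo
 * mode, add it to any dynamically generated node ACLs.
 */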
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun, false, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);

	/*
	 * Update LUN maps for dynamically added Initiators when
	 * generate_node_acls is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}

/*
 * core_dev_del_lun() - deactivate a LUN and remove it from its TPG.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->fabric_name);

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->fabric_name);
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;

	/*
	 * rcu_dereference_raw() is safe here: the LUN's configfs symlink to
	 * the backing device keeps the se_device alive across this call.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	if (!nacl)
		return -EINVAL;

	if (lun->lun_access_ro)
		lun_access_ro = true;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access_ro, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		lun_access_ro ? "RO" : "RW",
		nacl->initiatorname);

	/*
	 * Check to see if there are any existing persistent reservation
	 * APTPL pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}

int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %llu\n",
		tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		nacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->fabric_name,
		lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	int device_type = dev->transport->get_device_type(dev);

	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
		 wwn->vendor);
	pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
		 wwn->model);
	pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
		 wwn->revision);
	pr_debug(" Type: %s ", scsi_device_type(device_type));
}

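/*
 * Allocate an se_device from the HBA backend and initialize its locks,
 * lists, per-CPU submission queues, default attributes, and the internal
 * xcopy LUN. The device is not visible until target_configure_device().
 */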
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;
	int i;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
	if (!dev->queues) {
		dev->transport->free_device(dev);
		return NULL;
	}

	dev->queue_cnt = nr_cpu_ids;
	for (i = 0; i < dev->queue_cnt; i++) {
		struct se_device_queue *q;

		q = &dev->queues[i];
		INIT_LIST_HEAD(&q->state_list);
		spin_lock_init(&q->lock);

		init_llist_head(&q->sq.cmd_list);
		INIT_WORK(&q->sq.work, target_queued_submit_work);
	}

	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->transport_flags = dev->transport->transport_flags_default;
	dev->prot_length = sizeof(struct t10_pi_tuple);
	dev->hba_index = hba->hba_index;

	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);

	dev->t10_wwn.t10_dev = dev;
	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	dev->t10_wwn.company_id = 0x001405;

	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.unmap_zeroes_data =
		DA_UNMAP_ZEROES_DATA_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
	INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
	mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
	xcopy_lun->lun_tpg = &xcopy_pt_tpg;

	/* Preload the default INQUIRY const values */
	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
		sizeof(dev->t10_wwn.model));
	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
		sizeof(dev->t10_wwn.revision));

	return dev;
}

/*
 * Check if the underlying struct block_device supports discard and, if so,
 * derive the UNMAP attributes from its discard limits.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	int block_size = bdev_logical_block_size(bdev);

	if (!bdev_max_discard_sectors(bdev))
		return false;

	attrib->max_unmap_lba_count =
		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);

	/*
	 * Currently hardcoded to 1 in Linux/SCSI code.
	 */
	attrib->max_unmap_block_desc_count = 1;
	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
	attrib->unmap_granularity_alignment =
		bdev_discard_alignment(bdev) / block_size;
	return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from the block size advertised to the initiator to the 512 byte
 * units of the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	switch (dev->dev_attrib.block_size) {
	case 4096:
		return lb << 3;
	case 2048:
		return lb << 2;
	case 1024:
		return lb << 1;
	default:
		return lb;
	}
}
EXPORT_SYMBOL(target_to_linux_sector);

struct devices_idr_iter {
	struct config_item *prev_item;
	int (*fn)(struct se_device *dev, void *data);
	void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
	__must_hold(&device_mutex)
{
	struct devices_idr_iter *iter = data;
	struct se_device *dev = p;
	int ret;

	config_item_put(iter->prev_item);
	iter->prev_item = NULL;

	/*
	 * We add the device early to the idr, so it can be used
	 * by backend modules during configuration. We do not want
	 * to allow other callers to access partially setup devices,
	 * so we skip them here.
	 */
	if (!target_dev_configured(dev))
		return 0;

	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
	if (!iter->prev_item)
		return 0;
	mutex_unlock(&device_mutex);

	ret = iter->fn(dev, iter->data);

	mutex_lock(&device_mutex);
	return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices, and non-zero to break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	struct devices_idr_iter iter = { .fn = fn, .data = data };
	int ret;

	mutex_lock(&device_mutex);
	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);
	config_item_put(iter.prev_item);
	return ret;
}

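/*
 * Transition an se_device into the configured state: allocate its dev_index
 * in devices_idr, invoke the backend's configure_device() callback, set up
 * derived attributes and ALUA state, and mark the device DF_CONFIGURED.
 */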
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret, id;

	if (target_dev_configured(dev)) {
		pr_err("se_dev->se_dev_ptr already set for storage"
		       " object\n");
		return -EEXIST;
	}

	/*
	 * Add the device to the idr early so modules like tcmu can use it
	 * during their configuration.
	 */
	mutex_lock(&device_mutex);
	/*
	 * Use cyclic allocation to avoid quickly reusing the index of a
	 * recently removed device.
	 */
	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
	mutex_unlock(&device_mutex);
	if (id < 0) {
		ret = -ENOMEM;
		goto out;
	}
	dev->dev_index = id;

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out_free_index;

	if (dev->transport->configure_unmap &&
	    dev->transport->configure_unmap(dev)) {
		pr_debug("Discard support available, but disabled by default.\n");
	}

	/*
	 * Start with the hardware-reported block size and queue depth as the
	 * user-visible defaults.
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out_destroy_device;

	/*
	 * Setup the QUEUE_FULL handling work.
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_destroy_device:
	dev->transport->destroy_device(dev);
out_free_index:
	mutex_lock(&device_mutex);
	idr_remove(&devices_idr, dev->dev_index);
	mutex_unlock(&device_mutex);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

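/*
 * Release an se_device. If the device reached the configured state, tear
 * down the backend, drop it from devices_idr, and update the owning HBA's
 * device count before freeing ALUA, PR, and VPD state.
 */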
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (target_dev_configured(dev)) {
		dev->transport->destroy_device(dev);

		mutex_lock(&device_mutex);
		idr_remove(&devices_idr, dev->dev_index);
		mutex_unlock(&device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	kfree(dev->queues);
	dev->transport->free_device(dev);
}

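/*
 * Create the global rd_mcp backed "virt_lun0" device used as the fallback
 * LUN 0 for initiators that have no explicit MappedLUN 0.
 */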
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * REPORT LUNS is always emulated; everything else is handed to the
	 * passthrough exec_cmd handler.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * Emulate PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE here unless
	 * the backend handles persistent group reservations itself
	 * (TRANSPORT_FLAG_PASSTHROUGH_PGR).
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}

		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);