// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_tmr.c
 *
 * This file contains SPC-3 task management infrastructure
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"

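/*
 * Allocate a TMR descriptor for @se_cmd and mark the command as carrying a
 * task management function rather than a regular SCSI CDB.
 */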
int core_tmr_alloc_req(
	struct se_cmd *se_cmd,
	void *fabric_tmr_ptr,
	u8 function,
	gfp_t gfp_flags)
{
	struct se_tmr_req *tmr;

	tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
	if (!tmr) {
		pr_err("Unable to allocate struct se_tmr_req\n");
		return -ENOMEM;
	}

	se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
	se_cmd->se_tmr_req = tmr;
	tmr->task_cmd = se_cmd;
	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
	tmr->function = function;
	INIT_LIST_HEAD(&tmr->tmr_list);

	return 0;
}
EXPORT_SYMBOL(core_tmr_alloc_req);

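/*
 * Free a TMR descriptor previously set up by core_tmr_alloc_req(); called
 * from the owning se_cmd's release path.
 */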
void core_tmr_release_req(struct se_tmr_req *tmr)
{
	kfree(tmr);
}

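/*
 * Returns 0 when @list is NULL (plain LUN_RESET: every command is eligible)
 * or when @cmd's reservation key matches a registration on the
 * PREEMPT_AND_ABORT list; returns 1 when the command should be skipped.
 */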
static int target_check_cdb_and_preempt(struct list_head *list,
		struct se_cmd *cmd)
{
	struct t10_pr_registration *reg;

	if (!list)
		return 0;
	list_for_each_entry(reg, list, pr_reg_abort_list) {
		if (reg->pr_res_key == cmd->pr_res_key)
			return 0;
	}

	return 1;
}

static bool __target_check_io_state(struct se_cmd *se_cmd,
				    struct se_session *tmr_sess, bool tas)
{
	struct se_session *sess = se_cmd->se_sess;

	assert_spin_locked(&sess->sess_cmd_lock);
	WARN_ON_ONCE(!irqs_disabled());
	/*
	 * If command already reached CMD_T_COMPLETE state within
	 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
	 * this se_cmd has been passed to fabric driver and will
	 * not be aborted.
	 *
	 * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
	 * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
	 * long as se_cmd->cmd_kref is still active unless zero.
	 */
	spin_lock(&se_cmd->t_state_lock);
	if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
		pr_debug("Attempted to abort io tag: %llu already complete or"
			" fabric stop, skipping\n", se_cmd->tag);
		spin_unlock(&se_cmd->t_state_lock);
		return false;
	}
	se_cmd->transport_state |= CMD_T_ABORTED;

	if ((tmr_sess != se_cmd->se_sess) && tas)
		se_cmd->transport_state |= CMD_T_TAS;

	spin_unlock(&se_cmd->t_state_lock);

	return kref_get_unless_zero(&se_cmd->cmd_kref);
}

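/*
 * TMR ABORT_TASK: find the in-flight command on @se_sess whose tag matches
 * tmr->ref_task_tag, abort it if it has not already completed, and record
 * the outcome in tmr->response.
 */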
void core_tmr_abort_task(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct se_session *se_sess)
{
	LIST_HEAD(aborted_list);
	struct se_cmd *se_cmd, *next;
	unsigned long flags;
	bool rc;
	u64 ref_tag;
	int i;

	for (i = 0; i < dev->queue_cnt; i++) {
		flush_work(&dev->queues[i].sq.work);

		spin_lock_irqsave(&dev->queues[i].lock, flags);
		list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
					 state_list) {
			if (se_sess != se_cmd->se_sess)
				continue;

			/*
			 * skip task management functions, including
			 * tmr->task_cmd
			 */
			if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
				continue;

			ref_tag = se_cmd->tag;
			if (tmr->ref_task_tag != ref_tag)
				continue;

			pr_err("ABORT_TASK: Found referenced %s task_tag: %llu\n",
			       se_cmd->se_tfo->fabric_name, ref_tag);

			spin_lock(&se_sess->sess_cmd_lock);
			rc = __target_check_io_state(se_cmd, se_sess, 0);
			spin_unlock(&se_sess->sess_cmd_lock);
			if (!rc)
				continue;

			list_move_tail(&se_cmd->state_list, &aborted_list);
			se_cmd->state_active = false;
			spin_unlock_irqrestore(&dev->queues[i].lock, flags);

			if (dev->transport->tmr_notify)
				dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
							   &aborted_list);

			list_del_init(&se_cmd->state_list);
			target_put_cmd_and_wait(se_cmd);

			pr_err("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n",
			       ref_tag);
			tmr->response = TMR_FUNCTION_COMPLETE;
			atomic_long_inc(&dev->aborts_complete);
			return;
		}
		spin_unlock_irqrestore(&dev->queues[i].lock, flags);
	}

	if (dev->transport->tmr_notify)
		dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list);

	pr_err("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n",
	       tmr->ref_task_tag);
	tmr->response = TMR_TASK_DOES_NOT_EXIST;
	atomic_long_inc(&dev->aborts_no_task);
}

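/*
 * Release every TMR queued against @dev other than the received LUN_RESET
 * @tmr itself; each drained TMR's command is put and waited on.
 */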
static void core_tmr_drain_tmr_list(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct list_head *preempt_and_abort_list)
{
	LIST_HEAD(drain_tmr_list);
	struct se_session *sess;
	struct se_tmr_req *tmr_p, *tmr_pp;
	struct se_cmd *cmd;
	unsigned long flags;
	bool rc;
	/*
	 * Release all pending and outgoing TMRs aside from the received
	 * LUN_RESET tmr..
	 */
	spin_lock_irqsave(&dev->se_tmr_lock, flags);
	if (tmr)
		list_del_init(&tmr->tmr_list);
	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
		cmd = tmr_p->task_cmd;
		if (!cmd) {
			pr_err("Unable to locate struct se_cmd for TMR\n");
			continue;
		}
		/*
		 * If this function was called with a valid pr_res_key
		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action),
		 * skip non registration key matching TMRs.
		 */
		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
			continue;

		sess = cmd->se_sess;
		if (WARN_ON_ONCE(!sess))
			continue;

		spin_lock(&sess->sess_cmd_lock);
		rc = __target_check_io_state(cmd, sess, 0);
		spin_unlock(&sess->sess_cmd_lock);

		if (!rc) {
			pr_debug("LUN_RESET TMR: kref_get_unless_zero failed for tmr_p %p\n",
				 tmr_p);
			continue;
		}

		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
		tmr_p->tmr_dev = NULL;
	}
	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);

	list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
		list_del_init(&tmr_p->tmr_list);
		cmd = tmr_p->task_cmd;

		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
			 " Response: 0x%02x, t_state: %d\n",
			 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
			 tmr_p->function, tmr_p->response, cmd->t_state);

		target_put_cmd_and_wait(cmd);
	}
}

/**
 * core_tmr_drain_state_list() - abort SCSI commands associated with a device
 *
 * @dev:       Device for which to abort outstanding SCSI commands.
 * @prout_cmd: Pointer to the SCSI PREEMPT AND ABORT if this function is called
 *             to realize the PREEMPT AND ABORT functionality.
 * @tmr_sess:  Session through which the LUN RESET has been received.
 * @tas:       Task Aborted Status (TAS) bit from the SCSI control mode page.
 * @preempt_and_abort_list: For the PREEMPT AND ABORT functionality, a list
 *             with registrations that will be preempted.
 *
 * Caller must hold a reference to @dev.
 */
static void core_tmr_drain_state_list(
	struct se_device *dev,
	struct se_cmd *prout_cmd,
	struct se_session *tmr_sess,
	bool tas,
	struct list_head *preempt_and_abort_list)
{
	LIST_HEAD(drain_task_list);
	struct se_session *sess;
	struct se_cmd *cmd, *next;
	unsigned long flags;
	int rc, i;
	/*
	 * Complete outstanding commands with TASK_ABORTED SAM status.
	 *
	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
	 * for TMR LUN_RESET:
	 *
	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
	 * other than the one that caused the SCSI device condition is
	 * completed with TASK ABORTED status, if the TAS bit is set to one in
	 * the Control mode page (see SPC-4). "No" indicates that no status is
	 * returned for aborted commands.
	 *
	 * d) If the logical unit reset is caused by a particular I_T nexus
	 * (e.g., by a SCSI initiator port sending a LOGICAL UNIT RESET task
	 * management function), then "yes" indicates that status is returned
	 * before the logical unit enters the logical unit reset state for
	 * commands received on the I_T nexus that caused the SCSI device
	 * condition.
	 */
	for (i = 0; i < dev->queue_cnt; i++) {
		flush_work(&dev->queues[i].sq.work);

		spin_lock_irqsave(&dev->queues[i].lock, flags);
		list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
					 state_list) {
			/*
			 * For PREEMPT_AND_ABORT usage, only process commands
			 * with a matching reservation key.
			 */
			if (target_check_cdb_and_preempt(preempt_and_abort_list,
							 cmd))
				continue;

			/*
			 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
			 */
			if (prout_cmd == cmd)
				continue;

			sess = cmd->se_sess;
			if (WARN_ON_ONCE(!sess))
				continue;

			spin_lock(&sess->sess_cmd_lock);
			rc = __target_check_io_state(cmd, tmr_sess, tas);
			spin_unlock(&sess->sess_cmd_lock);
			if (!rc)
				continue;

			list_move_tail(&cmd->state_list, &drain_task_list);
			cmd->state_active = false;
		}
		spin_unlock_irqrestore(&dev->queues[i].lock, flags);
	}

	if (dev->transport->tmr_notify)
		dev->transport->tmr_notify(dev, preempt_and_abort_list ?
					   TMR_LUN_RESET_PRO : TMR_LUN_RESET,
					   &drain_task_list);

	while (!list_empty(&drain_task_list)) {
		cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
		list_del_init(&cmd->state_list);

		target_show_cmd("LUN_RESET: ", cmd);
		pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n",
			 cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
			 cmd->pr_res_key);

		target_put_cmd_and_wait(cmd);
	}
}

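/*
 * Handle LUN_RESET, or PROUT PREEMPT_AND_ABORT when @preempt_and_abort_list
 * is non-NULL: drain queued TMRs and in-flight commands for @dev. A plain
 * LUN_RESET also releases any legacy SPC-2 reservation held on the device.
 */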
int core_tmr_lun_reset(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct list_head *preempt_and_abort_list,
	struct se_cmd *prout_cmd)
{
	struct se_node_acl *tmr_nacl = NULL;
	struct se_portal_group *tmr_tpg = NULL;
	struct se_session *tmr_sess = NULL;
	bool tas;
	/*
	 * TASK_ABORTED status bit, this is configurable via ConfigFS
	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	tas = dev->dev_attrib.emulate_tas;
	/*
	 * Determine if this se_tmr is coming from a $FABRIC_MOD
	 * or struct se_device passthrough..
	 */
	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
		tmr_sess = tmr->task_cmd->se_sess;
		tmr_nacl = tmr_sess->se_node_acl;
		tmr_tpg = tmr_sess->se_tpg;
		if (tmr_nacl && tmr_tpg) {
			pr_debug("LUN_RESET: TMR caller fabric: %s"
				 " initiator port %s\n",
				 tmr_tpg->se_tpg_tfo->fabric_name,
				 tmr_nacl->initiatorname);
		}
	}
	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
		 (preempt_and_abort_list) ? "Preempt" : "TMR",
		 dev->transport->name, tas);

	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
	core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
				  preempt_and_abort_list);

	/*
	 * Clear any legacy SPC-2 reservation when called during
	 * LOGICAL UNIT RESET
	 */
	if (!preempt_and_abort_list &&
	    (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
		spin_lock(&dev->dev_reservation_lock);
		dev->reservation_holder = NULL;
		dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
		spin_unlock(&dev->dev_reservation_lock);
		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
	}

	atomic_long_inc(&dev->num_resets);

	pr_debug("LUN_RESET: %s for [%s] Complete\n",
		 (preempt_and_abort_list) ? "Preempt" : "TMR",
		 dev->transport->name);
	return 0;
}