/*
 * SPC-3 compliant Asymmetric Logical Unit Assignment (ALUA) emulation
 * for the generic target core.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int valid,
					int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
		struct se_lun *lun, int explicit, int offline);

static char *core_alua_dump_state(int state);

static void __target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;
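
/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 */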
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *map_mem;
	unsigned char *buf;
	u32 rd_len = 0, off;

	if (cmd->data_length < 4) {
		pr_warn("REPORT REFERRALS allocation length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	off = 4;
	spin_lock(&dev->t10_alua.lba_map_lock);
	if (list_empty(&dev->t10_alua.lba_map_list)) {
		spin_unlock(&dev->t10_alua.lba_map_lock);
		transport_kunmap_data_sg(cmd);

		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
			    lba_map_list) {
		int desc_num = off + 3;
		int pg_num;

		off += 4;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
		off += 8;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
		off += 8;
		rd_len += 20;
		pg_num = 0;
		list_for_each_entry(map_mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			int alua_state = map_mem->lba_map_mem_alua_state;
			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

			if (cmd->data_length > off)
				buf[off] = alua_state & 0x0f;
			off += 2;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id >> 8) & 0xff;
			off++;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id & 0xff);
			off++;
			rd_len += 4;
			pg_num++;
		}
		if (cmd->data_length > desc_num)
			buf[desc_num] = pg_num;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	/*
	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
	 */
	put_unaligned_be16(rd_len, &buf[2]);

	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
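
/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */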
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct se_lun *lun;
	unsigned char *buf;
	u32 rd_len = 0, off;
	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

	/*
	 * Skip over RESERVED area to first Target port group descriptor
	 * depending on the PARAMETER DATA FORMAT type..
	 */
	if (ext_hdr != 0)
		off = 8;
	else
		off = 4;

	if (cmd->data_length < off) {
		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
			" small for %s header\n", cmd->data_length,
			(ext_hdr) ? "extended" : "normal");
		return TCM_INVALID_CDB_FIELD;
	}
	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		/*
		 * Check if the Target port group and Target port descriptor list
		 * based on tg_pt_gp_members count will fit into the response payload.
		 * Otherwise, bump rd_len to let the initiator know we have exceeded
		 * the allocation length and the response is truncated.
		 */
		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
		     cmd->data_length) {
			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
			continue;
		}
		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
		if (tg_pt_gp->tg_pt_gp_pref)
			buf[off] = 0x80;
		/*
		 * Set the ASYMMETRIC ACCESS State
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
		/*
		 * Set supported ASYMMETRIC ACCESS State bits
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
		/*
		 * TARGET PORT GROUP
		 */
		put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
		off += 2;

		off++; /* Skip over Reserved */
		/*
		 * STATUS CODE
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
		/*
		 * Vendor Specific field
		 */
		buf[off++] = 0x00;
		/*
		 * TARGET PORT COUNT
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
		rd_len += 8;

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
				lun_tg_pt_gp_link) {
			/*
			 * Start Target Port descriptor format
			 *
			 * See spc4r17 section 6.2.7 Table 247
			 */
			off += 2; /* Skip over Obsolete */
			/*
			 * Set RELATIVE TARGET PORT IDENTIFIER
			 */
			put_unaligned_be16(lun->lun_rtpi, &buf[off]);
			off += 2;
			rd_len += 4;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
	 */
	put_unaligned_be32(rd_len, &buf[0]);

	/*
	 * Fill in the Extended header parameter data format if requested
	 */
	if (ext_hdr != 0) {
		buf[4] = 0x10;
		/*
		 * Set the implicit transition time (in seconds) for the application
		 * client to use as a base for its transition timeout value.
		 *
		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
		 * this CDB was received upon to determine this value individually
		 * for ALUA target port group.
		 */
		rcu_read_lock();
		tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
		if (tg_pt_gp)
			buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
		rcu_read_unlock();
	}
	transport_kunmap_data_sg(cmd);

	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rd_len + 4);
	return 0;
}
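
/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */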
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *l_lun = cmd->se_lun;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	unsigned char *buf;
	unsigned char *ptr;
	sense_reason_t rc = TCM_NO_SENSE;
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, valid_states;
	u16 tg_pt_id, rtpi;

	if (cmd->data_length < 4) {
		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_PARAMETER_LIST;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
	 * for the local tg_pt_gp.
	 */
	rcu_read_lock();
	l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
	if (!l_tg_pt_gp) {
		rcu_read_unlock();
		pr_err("Unable to access l_lun->tg_pt_gp\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}

	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
		rcu_read_unlock();
		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
				" while TPGS_EXPLICIT_ALUA is disabled\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}
	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
	rcu_read_unlock();

	ptr = &buf[4]; /* Skip over RESERVED area in header */

	while (len < cmd->data_length) {
		bool found = false;
		alua_access_state = (ptr[0] & 0x0f);
		/*
		 * Check the received ALUA access state, and determine if
		 * the state is a primary or secondary target port asymmetric
		 * access state.
		 */
		rc = core_alua_check_transition(alua_access_state, valid_states,
						&primary, 1);
		if (rc) {
			/*
			 * If the SET TARGET PORT GROUPS attempts to establish
			 * an invalid combination of target port asymmetric
			 * access states or attempts to establish an
			 * unsupported target port asymmetric access state,
			 * then the command shall be terminated with CHECK
			 * CONDITION status, with the sense key set to ILLEGAL
			 * REQUEST, and the additional sense code set to INVALID
			 * FIELD IN PARAMETER LIST.
			 */
			goto out;
		}

		/*
		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
		 * specifies a primary target port asymmetric access state,
		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
		 * a primary target port group for which the primary target
		 * port asymmetric access state shall be changed. If the
		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
		 * port asymmetric access state, then the TARGET PORT GROUP OR
		 * TARGET PORT field specifies the relative target port
		 * identifier (see 3.1.120) of the target port for which the
		 * secondary target port asymmetric access state shall be
		 * changed.
		 */
		if (primary) {
			tg_pt_id = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching target port group ID from
			 * the global tg_pt_gp list
			 */
			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
			list_for_each_entry(tg_pt_gp,
					&dev->t10_alua.tg_pt_gps_list,
					tg_pt_gp_list) {
				if (!tg_pt_gp->tg_pt_gp_valid_id)
					continue;

				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
					continue;

				atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);

				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

				if (!core_alua_do_port_transition(tg_pt_gp,
						dev, l_lun, nacl,
						alua_access_state, 1))
					found = true;

				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
				atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
				break;
			}
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		} else {
			struct se_lun *lun;

			/*
			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
			 * the Target Port in question for the incoming
			 * SET_TARGET_PORT_GROUPS op.
			 */
			rtpi = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching relative target port identifier
			 * for the struct se_device storage object.
			 */
			spin_lock(&dev->se_port_lock);
			list_for_each_entry(lun, &dev->dev_sep_list,
							lun_dev_link) {
				if (lun->lun_rtpi != rtpi)
					continue;

				// XXX: racy unlock
				spin_unlock(&dev->se_port_lock);

				if (!core_alua_set_tg_pt_secondary_state(
						lun, 1, 1))
					found = true;

				spin_lock(&dev->se_port_lock);
				break;
			}
			spin_unlock(&dev->se_port_lock);
		}

		if (!found) {
			rc = TCM_INVALID_PARAMETER_LIST;
			goto out;
		}

		ptr += 4;
		len += 4;
	}

out:
	transport_kunmap_data_sg(cmd);
	if (!rc)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return rc;
}

static inline void core_alua_state_nonoptimized(
	struct se_cmd *cmd,
	unsigned char *cdb,
	int nonop_delay_msecs)
{
	/*
	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
	 * later to determine if processing of this cmd needs to be
	 * temporarily delayed for the Active/NonOptimized primary access state.
	 */
	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
	cmd->alua_nonop_delay = nonop_delay_msecs;
}

static inline sense_reason_t core_alua_state_lba_dependent(
	struct se_cmd *cmd,
	u16 tg_pt_gp_id)
{
	struct se_device *dev = cmd->se_dev;
	u64 segment_size, segment_mult, sectors, lba;

	/* Only need to check for cdb actually containing LBAs */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
		return 0;

	spin_lock(&dev->t10_alua.lba_map_lock);
	segment_size = dev->t10_alua.lba_map_segment_size;
	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
	sectors = cmd->data_length / dev->dev_attrib.block_size;

	lba = cmd->t_task_lba;
	while (lba < cmd->t_task_lba + sectors) {
		struct t10_alua_lba_map *cur_map = NULL, *map;
		struct t10_alua_lba_map_member *map_mem;

		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
				    lba_map_list) {
			u64 start_lba, last_lba;
			u64 first_lba = map->lba_map_first_lba;

			if (segment_mult) {
				u64 tmp = lba;
				start_lba = do_div(tmp, segment_size * segment_mult);

				last_lba = first_lba + segment_size - 1;
				if (start_lba >= first_lba &&
				    start_lba <= last_lba) {
					lba += segment_size;
					cur_map = map;
					break;
				}
			} else {
				last_lba = map->lba_map_last_lba;
				if (lba >= first_lba && lba <= last_lba) {
					lba = last_lba + 1;
					cur_map = map;
					break;
				}
			}
		}
		if (!cur_map) {
			spin_unlock(&dev->t10_alua.lba_map_lock);
			return TCM_ALUA_TG_PT_UNAVAILABLE;
		}
		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
				    lba_map_mem_list) {
			if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id)
				continue;
			switch (map_mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_STANDBY:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				return TCM_ALUA_TG_PT_STANDBY;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				return TCM_ALUA_TG_PT_UNAVAILABLE;
			default:
				break;
			}
		}
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return 0;
}

static inline sense_reason_t core_alua_state_standby(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
	 * spc4r17 section 5.9.2.4.4
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case LOG_SELECT:
	case LOG_SENSE:
	case MODE_SELECT:
	case MODE_SENSE:
	case REPORT_LUNS:
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
	case READ_CAPACITY:
		return 0;
	case SERVICE_ACTION_IN_16:
		switch (cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			return 0;
		default:
			return TCM_ALUA_TG_PT_STANDBY;
		}
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_TG_PT_STANDBY;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_TG_PT_STANDBY;
		}
	case REQUEST_SENSE:
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		return TCM_ALUA_TG_PT_STANDBY;
	}

	return 0;
}

static inline sense_reason_t core_alua_state_unavailable(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
	 * spc4r17 section 5.9.2.4.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_TG_PT_UNAVAILABLE;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_TG_PT_UNAVAILABLE;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		return TCM_ALUA_TG_PT_UNAVAILABLE;
	}

	return 0;
}

static inline sense_reason_t core_alua_state_transition(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
	 * spc4r17 section 5.9.2.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			return TCM_ALUA_STATE_TRANSITION;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		return TCM_ALUA_STATE_TRANSITION;
	}

	return 0;
}
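
/*
 * Returns TCM_NO_SENSE when the command may proceed in the current ALUA
 * access state, or an appropriate sense_reason_t when the backing target
 * port group is not accessible for this command.
 */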
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_lun *lun = cmd->se_lun;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int out_alua_state, nonop_delay_msecs;
	u16 tg_pt_gp_id;
	sense_reason_t rc = TCM_NO_SENSE;

	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
		return 0;
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
		return 0;

	/*
	 * First, check for a struct se_lun specific secondary ALUA target port
	 * access state: OFFLINE
	 */
	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
		pr_debug("ALUA: Got secondary offline status for local"
				" target port\n");
		return TCM_ALUA_OFFLINE;
	}
	rcu_read_lock();
	tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
	if (!tg_pt_gp) {
		rcu_read_unlock();
		return 0;
	}

	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
	tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
	rcu_read_unlock();
	/*
	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
	 * statement so the compiler knows explicitly to check this case first.
	 * For the Optimized ALUA access state case, we want to process the
	 * incoming fabric cmd ASAP..
	 */
	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
		return 0;

	switch (out_alua_state) {
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		rc = core_alua_state_standby(cmd, cdb);
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		rc = core_alua_state_unavailable(cmd, cdb);
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		rc = core_alua_state_transition(cmd, cdb);
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id);
		break;
	/*
	 * OFFLINE is a secondary ALUA target port group access state, that is
	 * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
	 */
	case ALUA_ACCESS_STATE_OFFLINE:
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n",
				out_alua_state);
		rc = TCM_INVALID_CDB_FIELD;
	}

	if (rc && rc != TCM_INVALID_CDB_FIELD) {
		pr_debug("[%s]: ALUA TG Port not available, "
			"SenseKey: NOT_READY, ASC/rc: 0x04/%d\n",
			cmd->se_tfo->fabric_name, rc);
	}

	return rc;
}
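
/*
 * Check implicit and explicit ALUA state change request.
 */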
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
	/*
	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
	 * defined as primary target port asymmetric access states.
	 */
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		if (!(valid & ALUA_AO_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		if (!(valid & ALUA_AN_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		if (!(valid & ALUA_S_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		if (!(valid & ALUA_U_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		if (!(valid & ALUA_LBD_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
		 * asymmetric access state.
		 */
		if (!(valid & ALUA_O_SUP))
			goto not_supported;
		*primary = 0;
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		if (!(valid & ALUA_T_SUP) || explicit)
			/*
			 * Transitioning is set internally and by tcmu daemon,
			 * and cannot be selected through an STPG.
			 */
			goto not_supported;
		*primary = 0;
		break;
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n", state);
		return TCM_INVALID_PARAMETER_LIST;
	}

	return 0;

not_supported:
	pr_err("ALUA access state %s not supported",
	       core_alua_dump_state(state));
	return TCM_INVALID_PARAMETER_LIST;
}

static char *core_alua_dump_state(int state)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		return "Active/Optimized";
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return "Active/NonOptimized";
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		return "LBA Dependent";
	case ALUA_ACCESS_STATE_STANDBY:
		return "Standby";
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return "Unavailable";
	case ALUA_ACCESS_STATE_OFFLINE:
		return "Offline";
	case ALUA_ACCESS_STATE_TRANSITION:
		return "Transitioning";
	default:
		return "Unknown";
	}

	return NULL;
}

char *core_alua_dump_status(int status)
{
	switch (status) {
	case ALUA_STATUS_NONE:
		return "None";
	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
		return "Altered by Explicit STPG";
	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
		return "Altered by Implicit ALUA";
	default:
		return "Unknown";
	}

	return NULL;
}
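
/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths..
 */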
int core_alua_check_nonop_delay(
	struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
		return 0;
	/*
	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * via configfs with a value of zero
	 */
	if (!cmd->alua_nonop_delay)
		return 0;
	/*
	 * struct se_cmd->alua_nonop_delay gets set by a target port group
	 * defined interval in core_alua_state_nonoptimized()
	 */
	msleep_interruptible(cmd->alua_nonop_delay);
	return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);
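
/*
 * Write out an ALUA metadata buffer to the file at 'path', creating
 * and truncating it as needed.
 */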
static int core_alua_write_tpg_metadata(
	const char *path,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	loff_t pos = 0;
	int ret;

	if (IS_ERR(file)) {
		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
		return -ENODEV;
	}
	ret = kernel_write(file, md_buf, md_buf_len, &pos);
	if (ret < 0)
		pr_err("Error writing ALUA metadata file: %s\n", path);
	fput(file);
	return (ret < 0) ? -EIO : 0;
}
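
/*
 * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
 */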
static int core_alua_update_tpg_primary_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	unsigned char *md_buf;
	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
	char *path;
	int len, rc;

	lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		return -ENOMEM;
	}

	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
			"tg_pt_gp_id=%hu\n"
			"alua_access_state=0x%02x\n"
			"alua_access_status=0x%02x\n",
			tg_pt_gp->tg_pt_gp_id,
			tg_pt_gp->tg_pt_gp_alua_access_state,
			tg_pt_gp->tg_pt_gp_alua_access_status);

	rc = -ENOMEM;
	path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
			&wwn->unit_serial[0],
			config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
	if (path) {
		rc = core_alua_write_tpg_metadata(path, md_buf, len);
		kfree(path);
	}
	kfree(md_buf);
	return rc;
}
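
/*
 * Establish UNIT ATTENTION conditions for all LUNs associated with the
 * given target port group after an ALUA access state change.
 */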
static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_dev_entry *se_deve;
	struct se_lun *lun;
	struct se_lun_acl *lacl;

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
				lun_tg_pt_gp_link) {
		/*
		 * After an implicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition for the initiator port associated with every I_T
		 * nexus with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED.
		 *
		 * After an explicit target port asymmetric access state
		 * change, a device server shall establish a unit attention
		 * condition with the additional sense code set to ASYMMETRIC
		 * ACCESS STATE CHANGED for the initiator port associated with
		 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
		 */
		if (!percpu_ref_tryget_live(&lun->lun_ref))
			continue;
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock(&lun->lun_deve_lock);
		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
			lacl = se_deve->se_lun_acl;

			/*
			 * spc4r37 p.242:
			 * After an explicit target port asymmetric access
			 * state change, a device server shall establish a
			 * unit attention condition with the additional sense
			 * code set to ASYMMETRIC ACCESS STATE CHANGED for
			 * the initiator port associated with every I_T nexus
			 * other than the I_T nexus on which the SET TARGET
			 * PORT GROUPS command was received.
			 */
			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
			    (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_lun == lun))
				continue;

			/*
			 * se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
			 */
			if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
				continue;

			core_scsi3_ua_allocate(se_deve, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock(&lun->lun_deve_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		percpu_ref_put(&lun->lun_ref);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
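
/*
 * Perform the primary ALUA access state transition for a single target
 * port group, queueing unit attentions and writing metadata as configured.
 */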
static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	int new_state,
	int explicit)
{
	int prev_state;

	mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
	/* Nothing to be done here */
	if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
		return 0;
	}

	if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
		return -EAGAIN;
	}

	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
	tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
				ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
				ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	core_alua_queue_state_change_ua(tg_pt_gp);

	if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
		mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
		return 0;
	}

	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	tg_pt_gp->tg_pt_gp_alua_access_state = new_state;

	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(), this metadata will be written
	 * to struct file.
	 *
	 * Note that there is the case where we do not want to update the
	 * metadata when the saved metadata is being parsed in userspace
	 * when setting the existing port access state and access status.
	 *
	 * Also note that the failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		core_alua_update_tpg_primary_metadata(tg_pt_gp);
	}

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" from primary access state %s to %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id,
		core_alua_dump_state(prev_state),
		core_alua_dump_state(new_state));

	core_alua_queue_state_change_ua(tg_pt_gp);

	mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
	return 0;
}
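
/*
 * Drive an explicit or implicit ALUA access state transition, fanning out
 * to every matching target port group ID across all devices in the LU
 * group that l_dev belongs to.
 */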
int core_alua_do_port_transition(
	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
	struct se_device *l_dev,
	struct se_lun *l_lun,
	struct se_node_acl *l_nacl,
	int new_state,
	int explicit)
{
	struct se_device *dev;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int primary, valid_states, rc = 0;

	if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
		return -ENODEV;

	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
	if (core_alua_check_transition(new_state, valid_states, &primary,
				       explicit) != 0)
		return -EINVAL;

	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
	lu_gp = local_lu_gp_mem->lu_gp;
	atomic_inc(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
	/*
	 * For storage objects that are members of the 'default_lu_gp',
	 * we only do transition on the passed *l_tg_pt_gp, and not
	 * on all of the matching target port groups IDs in default_lu_gp.
	 */
	if (!lu_gp->lu_gp_id) {
		/*
		 * core_alua_do_transition_tg_pt() will always return
		 * success.
		 */
		l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
						   new_state, explicit);
		atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
		return rc;
	}
	/*
	 * For all other LU groups aside from 'default_lu_gp', walk all of
	 * the associated storage objects looking for a matching target port
	 * group ID from the local target port group.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
				lu_gp_mem_list) {

		dev = lu_gp_mem->lu_gp_mem_dev;
		atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
		spin_unlock(&lu_gp->lu_gp_lock);

		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		list_for_each_entry(tg_pt_gp,
				&dev->t10_alua.tg_pt_gps_list,
				tg_pt_gp_list) {

			if (!tg_pt_gp->tg_pt_gp_valid_id)
				continue;
			/*
			 * If the target behavior port asymmetric access state
			 * is changed for any target port group accessible via
			 * a logical unit within a LU group, the target port
			 * behavior group asymmetric access states for the same
			 * target port group accessible via other logical
			 * units in that LU group will also change.
			 */
			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
				continue;

			if (l_tg_pt_gp == tg_pt_gp) {
				tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
			} else {
				tg_pt_gp->tg_pt_gp_alua_lun = NULL;
				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
			}
			atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			/*
			 * core_alua_do_transition_tg_pt() will always return
			 * success.
			 */
			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
					new_state, explicit);

			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
			atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
			if (rc)
				break;
		}
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

		spin_lock(&lu_gp->lu_gp_lock);
		atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	if (!rc) {
		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
			 " Group IDs: %hu %s transition to primary state: %s\n",
			 config_item_name(&lu_gp->lu_gp_group.cg_item),
			 l_tg_pt_gp->tg_pt_gp_id,
			 (explicit) ? "explicit" : "implicit",
			 core_alua_dump_state(new_state));
	}

	atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
	return rc;
}
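
/*
 * Write out the secondary ALUA state and status for a single se_lun,
 * serialized by lun->lun_tg_pt_md_mutex.
 */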
static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
	struct se_portal_group *se_tpg = lun->lun_tpg;
	unsigned char *md_buf;
	char *path;
	int len, rc;

	mutex_lock(&lun->lun_tg_pt_md_mutex);

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		rc = -ENOMEM;
		goto out_unlock;
	}

	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
			"alua_tg_pt_status=0x%02x\n",
			atomic_read(&lun->lun_tg_pt_secondary_offline),
			lun->lun_tg_pt_secondary_stat);

	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
				db_root, se_tpg->se_tpg_tfo->fabric_name,
				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
				lun->unpacked_lun);
	} else {
		path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
				db_root, se_tpg->se_tpg_tfo->fabric_name,
				se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
				lun->unpacked_lun);
	}
	if (!path) {
		rc = -ENOMEM;
		goto out_free;
	}

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	kfree(path);
out_free:
	kfree(md_buf);
out_unlock:
	mutex_unlock(&lun->lun_tg_pt_md_mutex);
	return rc;
}
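
/*
 * Set or clear the secondary OFFLINE state for a single target port
 * (se_lun), independently of the primary target port group state.
 */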
static int core_alua_set_tg_pt_secondary_state(
	struct se_lun *lun,
	int explicit,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int trans_delay_msecs;

	rcu_read_lock();
	tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
	if (!tg_pt_gp) {
		rcu_read_unlock();
		pr_err("Unable to complete secondary state"
				" transition\n");
		return -EINVAL;
	}
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously secondary state for struct se_lun
	 */
	if (offline)
		atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
	else
		atomic_set(&lun->lun_tg_pt_secondary_offline, 0);

	lun->lun_tg_pt_secondary_stat = (explicit) ?
			ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
			ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	rcu_read_unlock();
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (lun->lun_tg_pt_secondary_write_md)
		core_alua_update_tpg_secondary_metadata(lun);

	return 0;
}

struct t10_alua_lba_map *
core_alua_allocate_lba_map(struct list_head *list,
			   u64 first_lba, u64 last_lba)
{
	struct t10_alua_lba_map *lba_map;

	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
	if (!lba_map) {
		pr_err("Unable to allocate struct t10_alua_lba_map\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
	lba_map->lba_map_first_lba = first_lba;
	lba_map->lba_map_last_lba = last_lba;

	list_add_tail(&lba_map->lba_map_list, list);
	return lba_map;
}

int
core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
			       int pg_id, int state)
{
	struct t10_alua_lba_map_member *lba_map_mem;

	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
			    lba_map_mem_list) {
		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
			return -EINVAL;
		}
	}

	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
	if (!lba_map_mem) {
		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
		return -ENOMEM;
	}
	lba_map_mem->lba_map_mem_alua_state = state;
	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;

	list_add_tail(&lba_map_mem->lba_map_mem_list,
		      &lba_map->lba_map_mem_list);
	return 0;
}

void
core_alua_free_lba_map(struct list_head *lba_list)
{
	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;

	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
				 lba_map_list) {
		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
					 &lba_map->lba_map_mem_list,
					 lba_map_mem_list) {
			list_del(&lba_map_mem->lba_map_mem_list);
			kmem_cache_free(t10_alua_lba_map_mem_cache,
					lba_map_mem);
		}
		list_del(&lba_map->lba_map_list);
		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
	}
}

void
core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
		      int segment_size, int segment_mult)
{
	struct list_head old_lba_map_list;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int activate = 0, supported;

	INIT_LIST_HEAD(&old_lba_map_list);
	spin_lock(&dev->t10_alua.lba_map_lock);
	dev->t10_alua.lba_map_segment_size = segment_size;
	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
	if (lba_map_list) {
		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
		activate = 1;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			    tg_pt_gp_list) {
		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
		if (activate)
			supported |= ALUA_LBD_SUP;
		else
			supported &= ~ALUA_LBD_SUP;
		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	core_alua_free_lba_map(&old_lba_map_list);
}

struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
	if (!lu_gp) {
		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
	spin_lock_init(&lu_gp->lu_gp_lock);
	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);

	if (def_group) {
		lu_gp->lu_gp_id = alua_lu_gps_counter++;
		lu_gp->lu_gp_valid_id = 1;
		alua_lu_gps_count++;
	}

	return lu_gp;
}

int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;
	/*
	 * The lu_gp->lu_gp_id may only be set once..
	 */
	if (lu_gp->lu_gp_valid_id) {
		pr_warn("ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&lu_gps_lock);
	if (alua_lu_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_lu_gps_count:"
				" 0x0000ffff reached\n");
		spin_unlock(&lu_gps_lock);
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -ENOSPC;
	}
again:
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
				alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			if (!lu_gp_id)
				goto again;

			pr_warn("ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&lu_gps_lock);
			return -EINVAL;
		}
	}

	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
	alua_lu_gps_count++;
	spin_unlock(&lu_gps_lock);

	return 0;
}

static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
	if (!lu_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);

	lu_gp_mem->lu_gp_mem_dev = dev;
	dev->dev_alua_lu_gp_mem = lu_gp_mem;

	return lu_gp_mem;
}

void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
	/*
	 * Once we have arrived here, the lu_gp has been removed from the
	 * configfs hierarchy; drop it from the global list so that no new
	 * references can be obtained by name.
	 */
	spin_lock(&lu_gps_lock);
	list_del(&lu_gp->lu_gp_node);
	alua_lu_gps_count--;
	spin_unlock(&lu_gps_lock);
	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
				&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}

void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return;

	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}

struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_item *ci;

	spin_lock(&lu_gps_lock);
	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
		if (!lu_gp->lu_gp_valid_id)
			continue;
		ci = &lu_gp->lu_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&lu_gp->lu_gp_ref_cnt);
			spin_unlock(&lu_gps_lock);
			return lu_gp;
		}
	}
	spin_unlock(&lu_gps_lock);

	return NULL;
}

void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&lu_gps_lock);
}
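
/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */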
void __core_alua_attach_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	lu_gp_mem->lu_gp = lu_gp;
	lu_gp_mem->lu_gp_assoc = 1;
	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
	lu_gp->lu_gp_members++;
	spin_unlock(&lu_gp->lu_gp_lock);
}
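
/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */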
void __core_alua_drop_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	list_del(&lu_gp_mem->lu_gp_mem_list);
	lu_gp_mem->lu_gp = NULL;
	lu_gp_mem->lu_gp_assoc = 0;
	lu_gp->lu_gp_members--;
	spin_unlock(&lu_gp->lu_gp_lock);
}
struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
		const char *name, int def_group)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
	if (!tg_pt_gp) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
		return NULL;
	}
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
	mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
	tg_pt_gp->tg_pt_gp_dev = dev;
	tg_pt_gp->tg_pt_gp_alua_access_state =
			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
	/*
	 * Set the default Active/NonOptimized Delay in milliseconds
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;

	/*
	 * Enable all supported states
	 */
	tg_pt_gp->tg_pt_gp_alua_supported_states =
	    ALUA_T_SUP | ALUA_O_SUP |
	    ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	if (def_group) {
		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		tg_pt_gp->tg_pt_gp_id =
				dev->t10_alua.alua_tg_pt_gps_counter++;
		tg_pt_gp->tg_pt_gp_valid_id = 1;
		dev->t10_alua.alua_tg_pt_gps_count++;
		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			      &dev->t10_alua.tg_pt_gps_list);
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	}

	return tg_pt_gp;
}

int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;
	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		return -ENOSPC;
	}
again:
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			dev->t10_alua.alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			if (!tg_pt_gp_id)
				goto again;

			pr_err("ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return -EINVAL;
		}
	}

	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			&dev->t10_alua.tg_pt_gps_list);
	dev->t10_alua.alua_tg_pt_gps_count++;
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return 0;
}

void core_alua_free_tg_pt_gp(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct se_lun *lun, *next;
	/*
	 * Once we have arrived here, the tg_pt_gp has been removed from the
	 * configfs hierarchy; drop it from the device list so that no new
	 * references can be obtained by ID or name.
	 */
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		list_del(&tg_pt_gp->tg_pt_gp_list);
		dev->t10_alua.alua_tg_pt_gps_count--;
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Allow a struct t10_alua_tg_pt_gp * referenced by
	 * core_alua_get_tg_pt_gp_by_name() in
	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
	 * to be released with core_alua_put_tg_pt_gp_from_name().
	 */
	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_tg_pt_gp from all associated
	 * struct se_lun.
	 */
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry_safe(lun, next,
			&tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
		list_del_init(&lun->lun_tg_pt_gp_link);
		tg_pt_gp->tg_pt_gp_members--;

		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		/*
		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
		 * assume we want to re-associate a given tg_pt_gp_mem with
		 * default_tg_pt_gp.
		 */
		spin_lock(&lun->lun_tg_pt_gp_lock);
		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
			__target_attach_tg_pt_gp(lun,
					dev->t10_alua.default_tg_pt_gp);
		} else
			rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
		spin_unlock(&lun->lun_tg_pt_gp_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	synchronize_rcu();
	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}

static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
		struct se_device *dev, const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *ci;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return tg_pt_gp;
		}
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return NULL;
}

static void core_alua_put_tg_pt_gp_from_name(
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}

static void __target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_dev_entry *se_deve;

	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp);
	list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
	tg_pt_gp->tg_pt_gp_members++;
	spin_lock(&lun->lun_deve_lock);
	list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
		core_scsi3_ua_allocate(se_deve, 0x3f,
				       ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
	spin_unlock(&lun->lun_deve_lock);
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

void target_attach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&lun->lun_tg_pt_gp_lock);
	__target_attach_tg_pt_gp(lun, tg_pt_gp);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	synchronize_rcu();
}

static void __target_detach_tg_pt_gp(struct se_lun *lun,
		struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_del_init(&lun->lun_tg_pt_gp_link);
	tg_pt_gp->tg_pt_gp_members--;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

void target_detach_tg_pt_gp(struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
				lockdep_is_held(&lun->lun_tg_pt_gp_lock));
	if (tg_pt_gp) {
		__target_detach_tg_pt_gp(lun, tg_pt_gp);
		rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
	}
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	synchronize_rcu();
}

static void target_swap_tg_pt_gp(struct se_lun *lun,
				 struct t10_alua_tg_pt_gp *old_tg_pt_gp,
				 struct t10_alua_tg_pt_gp *new_tg_pt_gp)
{
	assert_spin_locked(&lun->lun_tg_pt_gp_lock);

	if (old_tg_pt_gp)
		__target_detach_tg_pt_gp(lun, old_tg_pt_gp);
	__target_attach_tg_pt_gp(lun, new_tg_pt_gp);
}

ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
{
	struct config_item *tg_pt_ci;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	ssize_t len = 0;

	rcu_read_lock();
	tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
	if (tg_pt_gp) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(
				tg_pt_gp->tg_pt_gp_alua_access_state),
			core_alua_dump_status(
				tg_pt_gp->tg_pt_gp_alua_access_status),
			atomic_read(&lun->lun_tg_pt_secondary_offline) ?
			"Offline" : "None",
			core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
	}
	rcu_read_unlock();

	return len;
}

ssize_t core_alua_store_tg_pt_gp_info(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	struct se_portal_group *tpg = lun->lun_tpg;
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];
	int move = 0;

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		return -ENODEV;

	if (count > TG_PT_GROUP_NAME_BUF) {
		pr_err("ALUA Target Port Group alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA target port group alias besides "NULL" will reference an
	 * active ALUA target port group ID, while "NULL" will detach the
	 * current struct se_lun->lun_tg_pt_gp.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
		 * struct t10_alua_tg_pt_gp.  This reference is released with
		 * core_alua_put_tg_pt_gp_from_name() below.
		 */
		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
					strstrip(buf));
		if (!tg_pt_gp_new)
			return -ENODEV;
	}

	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
				lockdep_is_held(&lun->lun_tg_pt_gp_lock));
	if (tg_pt_gp) {
		/*
		 * Clearing an existing tg_pt_gp association, and replacing
		 * with the default_tg_pt_gp.
		 */
		if (!tg_pt_gp_new) {
			pr_debug("Target_Core_ConfigFS: Moving"
				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
				" alua/%s, ID: %hu back to"
				" default_tg_pt_gp\n",
				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
				tpg->se_tpg_tfo->tpg_get_tag(tpg),
				config_item_name(&lun->lun_group.cg_item),
				config_item_name(
					&tg_pt_gp->tg_pt_gp_group.cg_item),
				tg_pt_gp->tg_pt_gp_id);

			target_swap_tg_pt_gp(lun, tg_pt_gp,
					dev->t10_alua.default_tg_pt_gp);
			spin_unlock(&lun->lun_tg_pt_gp_lock);

			goto sync_rcu;
		}
		move = 1;
	}

	target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new);
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		config_item_name(&lun->lun_group.cg_item),
		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
		tg_pt_gp_new->tg_pt_gp_id);

	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
sync_rcu:
	synchronize_rcu();
	return count;
}

ssize_t core_alua_show_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
		return sprintf(page, "Implicit and Explicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
		return sprintf(page, "Implicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
		return sprintf(page, "Explicit\n");
	else
		return sprintf(page, "None\n");
}

ssize_t core_alua_store_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_access_type\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
		pr_err("Illegal value for alua_access_type:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	if (tmp == 3)
		tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
	else if (tmp == 2)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
	else if (tmp == 1)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
	else
		tg_pt_gp->tg_pt_gp_alua_access_type = 0;

	return count;
}

ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}

ssize_t core_alua_store_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract nonop_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_NONOP_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}

ssize_t core_alua_store_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract trans_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
		pr_err("Passed trans_delay_msecs: %lu, exceeds"
			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_TRANS_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}

ssize_t core_alua_store_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract implicit_trans_secs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
		pr_err("Passed implicit_trans_secs: %lu, exceeds"
			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
			ALUA_MAX_IMPLICIT_TRANS_SECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;

	return count;
}

ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}

ssize_t core_alua_store_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract preferred ALUA value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_pref = (int)tmp;

	return count;
}

ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_tg_pt_secondary_offline));
}

ssize_t core_alua_store_offline_bit(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
	unsigned long tmp;
	int ret;

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
	    (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		return -ENODEV;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_offline value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
				tmp);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
	if (ret < 0)
		return -EINVAL;

	return count;
}

ssize_t core_alua_show_secondary_status(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
}

ssize_t core_alua_store_secondary_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_status\n");
		return ret;
	}
	if ((tmp != ALUA_STATUS_NONE) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
				tmp);
		return -EINVAL;
	}
	lun->lun_tg_pt_secondary_stat = (int)tmp;

	return count;
}

ssize_t core_alua_show_secondary_write_metadata(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
}

ssize_t core_alua_store_secondary_write_metadata(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_write_md\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_write_md:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	lun->lun_tg_pt_secondary_write_md = (int)tmp;

	return count;
}

int core_setup_alua(struct se_device *dev)
{
	if (!(dev->transport_flags &
	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		struct t10_alua_lu_gp_member *lu_gp_mem;

		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem))
			return PTR_ERR(lu_gp_mem);

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		pr_debug("%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			dev->transport->name);
	}

	return 0;
}