0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
0017
0018 #include <linux/module.h>
0019 #include <linux/align.h>
0020 #include <linux/kernel.h>
0021 #include <linux/errno.h>
0022 #include <linux/jiffies.h>
0023 #include <linux/slab.h>
0024 #include <linux/types.h>
0025 #include <linux/string.h>
0026 #include <linux/fs.h>
0027 #include <linux/init.h>
0028 #include <linux/proc_fs.h>
0029 #include <linux/vmalloc.h>
0030 #include <linux/moduleparam.h>
0031 #include <linux/scatterlist.h>
0032 #include <linux/blkdev.h>
0033 #include <linux/crc-t10dif.h>
0034 #include <linux/spinlock.h>
0035 #include <linux/interrupt.h>
0036 #include <linux/atomic.h>
0037 #include <linux/hrtimer.h>
0038 #include <linux/uuid.h>
0039 #include <linux/t10-pi.h>
0040 #include <linux/msdos_partition.h>
0041 #include <linux/random.h>
0042 #include <linux/xarray.h>
0043 #include <linux/prefetch.h>
0044
0045 #include <net/checksum.h>
0046
0047 #include <asm/unaligned.h>
0048
0049 #include <scsi/scsi.h>
0050 #include <scsi/scsi_cmnd.h>
0051 #include <scsi/scsi_device.h>
0052 #include <scsi/scsi_host.h>
0053 #include <scsi/scsicam.h>
0054 #include <scsi/scsi_eh.h>
0055 #include <scsi/scsi_tcq.h>
0056 #include <scsi/scsi_dbg.h>
0057
0058 #include "sd.h"
0059 #include "scsi_logging.h"
0060
0061
0062 #define SDEBUG_VERSION "0191"
0063 static const char *sdebug_version_date = "20210520";
0064
0065 #define MY_NAME "scsi_debug"
0066
0067
0068 #define NO_ADDITIONAL_SENSE 0x0
0069 #define LOGICAL_UNIT_NOT_READY 0x4
0070 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
0071 #define UNRECOVERED_READ_ERR 0x11
0072 #define PARAMETER_LIST_LENGTH_ERR 0x1a
0073 #define INVALID_OPCODE 0x20
0074 #define LBA_OUT_OF_RANGE 0x21
0075 #define INVALID_FIELD_IN_CDB 0x24
0076 #define INVALID_FIELD_IN_PARAM_LIST 0x26
0077 #define WRITE_PROTECTED 0x27
0078 #define UA_RESET_ASC 0x29
0079 #define UA_CHANGED_ASC 0x2a
0080 #define TARGET_CHANGED_ASC 0x3f
0081 #define LUNS_CHANGED_ASCQ 0x0e
0082 #define INSUFF_RES_ASC 0x55
0083 #define INSUFF_RES_ASCQ 0x3
0084 #define POWER_ON_RESET_ASCQ 0x0
0085 #define POWER_ON_OCCURRED_ASCQ 0x1
0086 #define BUS_RESET_ASCQ 0x2
0087 #define MODE_CHANGED_ASCQ 0x1
0088 #define CAPACITY_CHANGED_ASCQ 0x9
0089 #define SAVING_PARAMS_UNSUP 0x39
0090 #define TRANSPORT_PROBLEM 0x4b
0091 #define THRESHOLD_EXCEEDED 0x5d
0092 #define LOW_POWER_COND_ON 0x5e
0093 #define MISCOMPARE_VERIFY_ASC 0x1d
0094 #define MICROCODE_CHANGED_ASCQ 0x1
0095 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
0096 #define WRITE_ERROR_ASC 0xc
0097 #define UNALIGNED_WRITE_ASCQ 0x4
0098 #define WRITE_BOUNDARY_ASCQ 0x5
0099 #define READ_INVDATA_ASCQ 0x6
0100 #define READ_BOUNDARY_ASCQ 0x7
0101 #define ATTEMPT_ACCESS_GAP 0x9
0102 #define INSUFF_ZONE_ASCQ 0xe
0103
0104
0105 #define ACK_NAK_TO 0x3
0106
0107
0108 #define DEF_NUM_HOST 1
0109 #define DEF_NUM_TGTS 1
0110 #define DEF_MAX_LUNS 1
0111
0112
0113
0114 #define DEF_ATO 1
0115 #define DEF_CDB_LEN 10
0116 #define DEF_JDELAY 1
0117 #define DEF_DEV_SIZE_PRE_INIT 0
0118 #define DEF_DEV_SIZE_MB 8
0119 #define DEF_ZBC_DEV_SIZE_MB 128
0120 #define DEF_DIF 0
0121 #define DEF_DIX 0
0122 #define DEF_PER_HOST_STORE false
0123 #define DEF_D_SENSE 0
0124 #define DEF_EVERY_NTH 0
0125 #define DEF_FAKE_RW 0
0126 #define DEF_GUARD 0
0127 #define DEF_HOST_LOCK 0
0128 #define DEF_LBPU 0
0129 #define DEF_LBPWS 0
0130 #define DEF_LBPWS10 0
0131 #define DEF_LBPRZ 1
0132 #define DEF_LOWEST_ALIGNED 0
0133 #define DEF_NDELAY 0
0134 #define DEF_NO_LUN_0 0
0135 #define DEF_NUM_PARTS 0
0136 #define DEF_OPTS 0
0137 #define DEF_OPT_BLKS 1024
0138 #define DEF_PHYSBLK_EXP 0
0139 #define DEF_OPT_XFERLEN_EXP 0
0140 #define DEF_PTYPE TYPE_DISK
0141 #define DEF_RANDOM false
0142 #define DEF_REMOVABLE false
0143 #define DEF_SCSI_LEVEL 7
0144 #define DEF_SECTOR_SIZE 512
0145 #define DEF_UNMAP_ALIGNMENT 0
0146 #define DEF_UNMAP_GRANULARITY 1
0147 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
0148 #define DEF_UNMAP_MAX_DESC 256
0149 #define DEF_VIRTUAL_GB 0
0150 #define DEF_VPD_USE_HOSTNO 1
0151 #define DEF_WRITESAME_LENGTH 0xFFFF
0152 #define DEF_STRICT 0
0153 #define DEF_STATISTICS false
0154 #define DEF_SUBMIT_QUEUES 1
0155 #define DEF_TUR_MS_TO_READY 0
0156 #define DEF_UUID_CTL 0
0157 #define JDELAY_OVERRIDDEN -9999
0158
0159
0160 #define DEF_ZBC_ZONE_SIZE_MB 128
0161 #define DEF_ZBC_MAX_OPEN_ZONES 8
0162 #define DEF_ZBC_NR_CONV_ZONES 1
0163
0164 #define SDEBUG_LUN_0_VAL 0
0165
0166
0167 #define SDEBUG_OPT_NOISE 1
0168 #define SDEBUG_OPT_MEDIUM_ERR 2
0169 #define SDEBUG_OPT_TIMEOUT 4
0170 #define SDEBUG_OPT_RECOVERED_ERR 8
0171 #define SDEBUG_OPT_TRANSPORT_ERR 16
0172 #define SDEBUG_OPT_DIF_ERR 32
0173 #define SDEBUG_OPT_DIX_ERR 64
0174 #define SDEBUG_OPT_MAC_TIMEOUT 128
0175 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
0176 #define SDEBUG_OPT_Q_NOISE 0x200
0177 #define SDEBUG_OPT_ALL_TSF 0x400
0178 #define SDEBUG_OPT_RARE_TSF 0x800
0179 #define SDEBUG_OPT_N_WCE 0x1000
0180 #define SDEBUG_OPT_RESET_NOISE 0x2000
0181 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
0182 #define SDEBUG_OPT_HOST_BUSY 0x8000
0183 #define SDEBUG_OPT_CMD_ABORT 0x10000
0184 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
0185 SDEBUG_OPT_RESET_NOISE)
0186 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
0187 SDEBUG_OPT_TRANSPORT_ERR | \
0188 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
0189 SDEBUG_OPT_SHORT_TRANSFER | \
0190 SDEBUG_OPT_HOST_BUSY | \
0191 SDEBUG_OPT_CMD_ABORT)
0192 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
0193 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
0194
0195
0196
0197
0198
0199 #define SDEBUG_UA_POR 0
0200 #define SDEBUG_UA_POOCCUR 1
0201 #define SDEBUG_UA_BUS_RESET 2
0202 #define SDEBUG_UA_MODE_CHANGED 3
0203 #define SDEBUG_UA_CAPACITY_CHANGED 4
0204 #define SDEBUG_UA_LUNS_CHANGED 5
0205 #define SDEBUG_UA_MICROCODE_CHANGED 6
0206 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
0207 #define SDEBUG_NUM_UAS 8
0208
0209
0210
0211 #define OPT_MEDIUM_ERR_ADDR 0x1234
0212 #define OPT_MEDIUM_ERR_NUM 10
0213
0214
0215
0216
0217
0218
0219
0220
0221 #define SDEBUG_CANQUEUE_WORDS 3
0222 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
0223 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
0224
0225
0226 #define F_D_IN 1
0227 #define F_D_OUT 2
0228 #define F_D_OUT_MAYBE 4
0229 #define F_D_UNKN 8
0230 #define F_RL_WLUN_OK 0x10
0231 #define F_SKIP_UA 0x20
0232 #define F_DELAY_OVERR 0x40
0233 #define F_SA_LOW 0x80
0234 #define F_SA_HIGH 0x100
0235 #define F_INV_OP 0x200
0236 #define F_FAKE_RW 0x400
0237 #define F_M_ACCESS 0x800
0238 #define F_SSU_DELAY 0x1000
0239 #define F_SYNC_DELAY 0x2000
0240
0241
0242 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
0243 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
0244 #define FF_SA (F_SA_HIGH | F_SA_LOW)
0245 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
0246
0247 #define SDEBUG_MAX_PARTS 4
0248
0249 #define SDEBUG_MAX_CMD_LEN 32
0250
0251 #define SDEB_XA_NOT_IN_USE XA_MARK_1
0252
0253
/* Zone types (ZBC/ZBC-2 zone descriptor ZONE TYPE field values). */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,	/* conventional (randomly writable) */
	ZBC_ZTYPE_SWR	= 0x2,	/* sequential write required */
	ZBC_ZTYPE_SWP	= 0x3,	/* sequential write preferred */
	/* 0x4 is reserved */
	ZBC_ZTYPE_GAP	= 0x5,	/* gap zone (ZBC-2) */
};
0261
0262
/* Zone conditions (ZBC zone descriptor ZONE CONDITION field values). */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
0273
/* Per-zone emulated state for a ZBC device. */
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;	/* zone uses a non-sequential-write resource */
	unsigned int z_size;		/* zone size, in logical blocks */
	sector_t z_start;		/* first LBA of the zone */
	sector_t z_wp;			/* current write pointer LBA */
};
0282
/*
 * State of one emulated logical unit (one LU per channel/target/lun tuple),
 * linked on its owning sdebug_host_info::dev_info_list.
 */
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;			/* designator when uuid_ctl is set — see sdebug_uuid_ctl */
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];	/* pending unit attentions, bits SDEBUG_UA_* */
	atomic_t num_in_q;		/* commands queued on this LU */
	atomic_t stopped;		/* TUR-related stopped state (see tur_ms_to_ready) */
	bool used;

	/* The following fields are only meaningful for ZBC (zoned) devices. */
	enum blk_zoned_model zmodel;
	unsigned int zcap;		/* zone capacity, in logical blocks */
	unsigned int zsize;		/* zone size, in logical blocks */
	unsigned int zsize_shift;	/* log2(zsize); LBA >> zsize_shift == zone index */
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_seq_zones;
	unsigned int nr_imp_open;	/* zones currently implicitly open */
	unsigned int nr_exp_open;	/* zones currently explicitly open */
	unsigned int nr_closed;
	unsigned int max_open;		/* cap on concurrently open zones */
	ktime_t create_ts;		/* device creation time, used for TUR delay */
	struct sdeb_zone_state *zstate;	/* array of nr_zones entries */
};
0310
/* One emulated SCSI host; linked on the global sdebug_host_list. */
struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;		/* sdeb_store_info (per_store_arr) index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;	/* this host's sdebug_dev_info LUs */
};
0318
0319
/* One backing ramdisk store (shared by all hosts, or per host). */
struct sdeb_store_info {
	rwlock_t macc_lck;		/* media access lock for this store */
	u8 *storep;			/* user data */
	struct t10_pi_tuple *dif_storep;	/* protection information, NULL if dif/dix off */
	void *map_storep;		/* provisioning (LBP) bitmap */
};
0326
0327 #define to_sdebug_host(d) \
0328 container_of(d, struct sdebug_host_info, dev)
0329
/* How a deferred command completion will be delivered. */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
0332
/*
 * Deferred-completion bookkeeping for one queued command: either an
 * hrtimer (ndelay), a workqueue item (jdelay), or mq polling completes it.
 */
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	ktime_t cmpl_ts;	/* time this command should be completed (poll mode) */
	int sqa_idx;		/* index of sdebug_queue array */
	int qc_idx;		/* index of queued command array within a queue */
	int hc_idx;		/* hostwide tag index, if used */
	int issuing_cpu;
	bool init_hrt;		/* hrt has been initialized */
	bool init_wq;		/* ew has been initialized */
	bool init_poll;
	bool aborted;		/* command aborted while deferred */
	enum sdeb_defer_type defer_t;
};
0347
struct sdebug_queued_cmd {
	/*
	 * In this driver, holding qc_lock of the owning sdebug_queue suffices
	 * to read/write these two pointers safely; presumably the in_use_bm
	 * bit guards slot allocation — verify against the queuing code.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;	/* the associated (active) SCSI command */
};
0355
/* One submission queue's worth of command slots (one per submit queue). */
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];	/* bit set => qc_arr slot busy */
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};
0362
/* Driver-wide counters, updated atomically from command submit/complete paths. */
static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* completions on a different CPU than issue */
static atomic_t sdebug_a_tsf;		/* TASK SET FULLs issued (see SDEBUG_OPT_*_TSF) */
static atomic_t sdeb_inject_pending;	/* set when an every_nth error injection is armed */
static atomic_t sdeb_mq_poll_count;	/* completions via the mq_poll path */
0369
/*
 * Describes one supported CDB opcode (or one service action of an opcode).
 * Used both to dispatch incoming commands and to answer
 * REPORT SUPPORTED OPERATION CODES (resp_rsup_opcodes).
 */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is the only entry for the opcode;
				 * else number of service-action variants in arrp;
				 * 0xff marks the array terminator */
	u8 opcode;		/* CDB byte 0 */
	u16 sa;			/* service action, valid when FF_SA set in flags */
	u32 flags;		/* OR-ed set of F_* flags above */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);	/* response handler */
	const struct opcode_info_t *arrp;	/* num_attached attached service-action entries */
	u8 len_mask[16];	/* len_mask[0] is the CDB length (6, 10, 12, 16 or 32);
				 * remaining bytes mask the CDB bits a client may set */
};
0381
0382
/*
 * Index into opcode_info_arr for each command category; opcode_ind_arr
 * below maps a CDB opcode byte to one of these indexes.
 */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 byte variants */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 byte variants */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 byte variants */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 byte variants */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* e.g. READ CAPACITY(16) */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* e.g. WRITE SCATTERED(16) */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32) and WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 byte variants */
	SDEB_I_RELEASE = 19,		/* 6, 10 byte variants */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 byte ATA pass-through */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 byte variants */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 byte variants */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 byte variants */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
0418
0419
/* Map each possible CDB opcode byte (0x0..0xff) to a SDEB_I_* index. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
0464
0465
0466
0467
0468
0469
0470
0471 #define SDEG_RES_IMMED_MASK 0x40000000
0472
0473 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
0474 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
0475 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
0476 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
0477 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
0478 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
0479 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
0480 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
0481 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
0482 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
0483 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
0484 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
0485 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
0486 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
0487 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
0488 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
0489 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
0490 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
0491 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
0492 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
0493 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
0494 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
0495 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
0496 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
0497 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
0498 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
0499 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
0500 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
0501 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
0502
0503 static int sdebug_do_add_host(bool mk_new_store);
0504 static int sdebug_add_host_helper(int per_host_idx);
0505 static void sdebug_do_remove_host(bool the_end);
0506 static int sdebug_add_store(void);
0507 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
0508 static void sdebug_erase_all_stores(bool apart_from_first);
0509
0510
0511
0512
0513
0514
/* MODE SENSE(6) — attached variant of the MODE SENSE(10) primary entry. */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
0519
/* MODE SELECT(6) — attached variant of the MODE SELECT(10) primary entry. */
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
0524
/* READ(10), READ(6), READ(12) — attached to the READ(16) primary entry. */
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,	/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,	/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
0535
/* WRITE(10), WRITE(6), WRITE(12) — attached to the WRITE(16) primary entry. */
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
0547
/* VERIFY(10) — attached to the VERIFY(16) primary entry. */
static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};
0553
/* SERVICE ACTION IN(16), sa 0x12: GET LBA STATUS — attached to READ CAPACITY(16). */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};
0559
/* Variable-length (opcode 0x7f) service actions attached to the READ(32) entry. */
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
0568
/* MAINTENANCE IN (0xa3) service actions attached to REPORT TARGET PORT GROUPS. */
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
0577
/* WRITE SAME(16) — attached to the WRITE SAME(10) primary entry. */
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
0583
/* RESERVE(6) — attached to the RESERVE(10) primary entry. */
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
0588
/* RELEASE(6) — attached to the RELEASE(10) primary entry. */
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
0593
/* SYNCHRONIZE CACHE(16) — attached to the SYNCHRONIZE CACHE(10) primary entry. */
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
0599
/* PRE-FETCH(16) — attached to the PRE-FETCH(10) primary entry. */
static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};
0605
/* ZONE OUT (0x94) service actions attached to the OPEN ZONE primary entry. */
static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};
0617
/* ZONE IN (0x95) service actions attached to the REPORT ZONES primary entry. */
static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	/* sa 0x6 accepted but has no handler (pfp NULL) — presumably reported
	 * as supported without a response function; verify against dispatch. */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
};
0623
0624
0625
0626
0627
0628 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
0629
0630 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
0631 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0632 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
0633 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0634 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
0635 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
0636 0, 0} },
0637 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
0638 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0639 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,
0640 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0641
0642 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,
0643 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
0644 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
0645 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,
0646 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
0647 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
0648 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
0649 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
0650 0, 0, 0} },
0651 {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
0652 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
0653 0, 0} },
0654 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO,
0655 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
0656 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
0657
0658 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
0659 resp_write_dt0, write_iarr,
0660 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0661 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
0662 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,
0663 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0664 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
0665 resp_readcap16, sa_in_16_iarr,
0666 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0667 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
0668 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
0669 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
0670 0xff, 0xff, 0xff, 0xff, 0xc7} },
0671 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
0672 resp_report_tgtpgs,
0673 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
0674 0xff, 0, 0xc7, 0, 0, 0, 0} },
0675
0676 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
0677 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0678 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
0679 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,
0680 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0681 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
0682 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
0683 resp_read_dt0, vl_iarr,
0684 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
0685 0xff, 0xff} },
0686 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
0687 NULL, reserve_iarr,
0688 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0689 0} },
0690 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
0691 NULL, release_iarr,
0692 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0693 0} },
0694
0695 {0, 0x1e, 0, 0, NULL, NULL,
0696 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0697 {0, 0x1, 0, 0, resp_start_stop, NULL,
0698 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0699 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
0700 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0701 {0, 0x1d, F_D_OUT, 0, NULL, NULL,
0702 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0703 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL,
0704 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
0705
0706 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
0707 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
0708 0, 0, 0, 0} },
0709 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
0710 resp_write_same_10, write_same_iarr,
0711 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
0712 0, 0, 0, 0, 0} },
0713 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
0714 resp_sync_cache, sync_cache_iarr,
0715 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0716 0, 0, 0, 0} },
0717 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
0718 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
0719 0, 0xff, 0x3f, 0xc7} },
0720 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
0721 resp_pre_fetch, pre_fetch_iarr,
0722 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0723 0, 0, 0, 0} },
0724
0725
0726 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
0727 resp_open_zone, zone_out_iarr,
0728 {16, 0x3 , 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0729 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
0730 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
0731 resp_report_zones, zone_in_iarr,
0732 {16, 0x0 , 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0733 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
0734
0735 {0xff, 0, 0, 0, NULL, NULL,
0736 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
0737 };
0738
/*
 * Backing variables for the driver's module parameters, initialized from
 * the DEF_* defaults above.
 */
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;	/* non-zero: descriptor-format sense */
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;	/* bit mask of SDEBUG_OPT_* */
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;	/* write protect (when true) */
0796
/* Zoned block device model requested via the zbc= module parameter string. */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;

/* LUN address methods (SAM-5 logical unit addressing). */
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;	/* module-param view of the above */
0806
/* Size of the backing store and the capacity reported to the midlayer. */
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* Fictitious disk geometry, computed at init. */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

/* All emulated hosts, protected by sdebug_host_list_lock. */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* xarray of sdeb_store_info backing stores, indexed by si_idx. */
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> -1 */
static int sdeb_most_recent_idx = -1;	/* invalid index ==> -1 */
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

/* Provisioning-map size and reset/statistics counters. */
static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static int poll_queues;				/* iouring iopoll queues */
static struct sdebug_queue *sdebug_q_arr;	/* ptr to array of submit queues */

/* Media access locks for the (at most two) default stores. */
static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
0853
/* Pseudo bus/driver pair so emulated hosts appear in the device model. */
static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

/* Pre-built scsi_cmnd result values (host byte | SAM status). */
static const int check_condition_result =
	SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
0871
0872
0873
0874
0875
0876
0877 static inline bool scsi_debug_lbp(void)
0878 {
0879 return 0 == sdebug_fake_rw &&
0880 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
0881 }
0882
/*
 * Map an LBA to its byte address within a backing store's user data.
 * The LBA is wrapped modulo sdebug_store_sectors, so a small store can
 * back a larger advertised capacity (the "virtual_gb" case).
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	/* do_div() divides lba in place and evaluates to the remainder,
	 * so this assignment leaves lba = lba % sdebug_store_sectors. */
	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		/* should not happen; fall back to the first (index 0) store */
		WARN_ON_ONCE(true);
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}
0895
/*
 * Map a sector to its protection-information tuple in the store.
 * sector_div() divides in place and returns the remainder, so the sector
 * is wrapped modulo sdebug_store_sectors just like in lba2fake_store().
 */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
0903
0904 static void sdebug_max_tgts_luns(void)
0905 {
0906 struct sdebug_host_info *sdbg_host;
0907 struct Scsi_Host *hpnt;
0908
0909 spin_lock(&sdebug_host_list_lock);
0910 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
0911 hpnt = sdbg_host->shost;
0912 if ((hpnt->this_id >= 0) &&
0913 (sdebug_num_tgts > hpnt->this_id))
0914 hpnt->max_id = sdebug_num_tgts + 1;
0915 else
0916 hpnt->max_id = sdebug_num_tgts;
0917
0918 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
0919 }
0920 spin_unlock(&sdebug_host_list_lock);
0921 }
0922
0923 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
0924
0925
/*
 * Build an ILLEGAL REQUEST sense with a sense-key-specific (SKS) field
 * pointer: c_d selects INVALID FIELD IN CDB vs IN PARAMETER LIST, and
 * in_byte/in_bit locate the offending field (in_bit < 0 means "no bit").
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];	/* sense-key-specific bytes (field pointer format) */
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;			/* SKSV: field pointer is valid */
	if (c_d)
		sks[0] |= 0x40;		/* C/D: error is in the CDB */
	if (in_bit >= 0) {
		sks[0] |= 0x8;		/* BPV: bit pointer is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		/* descriptor format: append an SKS descriptor (type 2, len 6)
		 * and grow the additional sense length in byte 7 */
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		/* fixed format: SKS field lives at bytes 15..17 */
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
0965
0966 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
0967 {
0968 if (!scp->sense_buffer) {
0969 sdev_printk(KERN_ERR, scp->device,
0970 "%s: sense_buffer is NULL\n", __func__);
0971 return;
0972 }
0973 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
0974
0975 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
0976
0977 if (sdebug_verbose)
0978 sdev_printk(KERN_INFO, scp->device,
0979 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
0980 my_name, key, asc, asq);
0981 }
0982
/* Report ILLEGAL REQUEST with INVALID COMMAND OPERATION CODE (asc 0x20). */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
0987
0988 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
0989 void __user *arg)
0990 {
0991 if (sdebug_verbose) {
0992 if (0x1261 == cmd)
0993 sdev_printk(KERN_INFO, dev,
0994 "%s: BLKFLSBUF [0x1261]\n", __func__);
0995 else if (0x5331 == cmd)
0996 sdev_printk(KERN_INFO, dev,
0997 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
0998 __func__);
0999 else
1000 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1001 __func__, cmd);
1002 }
1003 return -EINVAL;
1004
1005 }
1006
1007 static void config_cdb_len(struct scsi_device *sdev)
1008 {
1009 switch (sdebug_cdb_len) {
1010 case 6:
1011 sdev->use_10_for_rw = false;
1012 sdev->use_16_for_rw = false;
1013 sdev->use_10_for_ms = false;
1014 break;
1015 case 10:
1016 sdev->use_10_for_rw = true;
1017 sdev->use_16_for_rw = false;
1018 sdev->use_10_for_ms = false;
1019 break;
1020 case 12:
1021 sdev->use_10_for_rw = true;
1022 sdev->use_16_for_rw = false;
1023 sdev->use_10_for_ms = true;
1024 break;
1025 case 16:
1026 sdev->use_10_for_rw = false;
1027 sdev->use_16_for_rw = true;
1028 sdev->use_10_for_ms = true;
1029 break;
1030 case 32:
1031 sdev->use_10_for_rw = false;
1032 sdev->use_16_for_rw = true;
1033 sdev->use_10_for_ms = true;
1034 break;
1035 default:
1036 pr_warn("unexpected cdb_len=%d, force to 10\n",
1037 sdebug_cdb_len);
1038 sdev->use_10_for_rw = true;
1039 sdev->use_16_for_rw = false;
1040 sdev->use_10_for_ms = false;
1041 sdebug_cdb_len = 10;
1042 break;
1043 }
1044 }
1045
1046 static void all_config_cdb_len(void)
1047 {
1048 struct sdebug_host_info *sdbg_host;
1049 struct Scsi_Host *shost;
1050 struct scsi_device *sdev;
1051
1052 spin_lock(&sdebug_host_list_lock);
1053 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1054 shost = sdbg_host->shost;
1055 shost_for_each_device(sdev, shost) {
1056 config_cdb_len(sdev);
1057 }
1058 }
1059 spin_unlock(&sdebug_host_list_lock);
1060 }
1061
1062 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1063 {
1064 struct sdebug_host_info *sdhp;
1065 struct sdebug_dev_info *dp;
1066
1067 spin_lock(&sdebug_host_list_lock);
1068 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1069 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1070 if ((devip->sdbg_host == dp->sdbg_host) &&
1071 (devip->target == dp->target))
1072 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1073 }
1074 }
1075 spin_unlock(&sdebug_host_list_lock);
1076 }
1077
/*
 * If a unit attention (UA) is pending on @devip, build the matching
 * sense data in @scp, clear that UA bit and return CHECK CONDITION;
 * otherwise return 0.  Only the lowest-numbered pending UA is
 * reported per call.
 */
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_POOCCUR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_OCCURRED_ASCQ);
			if (sdebug_verbose)
				cp = "power on occurred";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * A "reported luns data has changed" UA applies to
			 * every LU of the target.  Clear the bit on sibling
			 * LUs first so the UA is reported only once per
			 * target (scsi_level >= 6 presumably corresponds to
			 * SPC-4 behavior — confirm against the level table).
			 */
			if (sdebug_scsi_level >= 6)
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
1163
1164
1165 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1166 int arr_len)
1167 {
1168 int act_len;
1169 struct scsi_data_buffer *sdb = &scp->sdb;
1170
1171 if (!sdb->length)
1172 return 0;
1173 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1174 return DID_ERROR << 16;
1175
1176 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1177 arr, arr_len);
1178 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1179
1180 return 0;
1181 }
1182
1183
1184
1185
1186
1187
/*
 * Partial variant of fill_from_dev_buffer(): copy @arr into the
 * command's data-in buffer starting @off_dst bytes into it.  The
 * residual is only ever reduced (min with the current value) so that
 * several partial fills compose correctly.  Returns 0, or DID_ERROR
 * in the host byte on a wrong data direction.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;	/* offset beyond the buffer: nothing to do */
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	/* bytes of the buffer beyond what this fill reached */
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
	return 0;
}
1209
1210
1211
1212
1213 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1214 int arr_len)
1215 {
1216 if (!scsi_bufflen(scp))
1217 return 0;
1218 if (scp->sc_data_direction != DMA_TO_DEVICE)
1219 return -1;
1220
1221 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1222 }
1223
1224
/*
 * Standard INQUIRY strings (writable via module parameters).
 * NOTE(review): vendor/product ids should be space-padded to their
 * full 8/16 character field widths — confirm trailing-space padding
 * was not collapsed by a copy/paste.
 */
static char sdebug_inq_vendor_id[9] = "Linux ";
static char sdebug_inq_product_id[17] = "scsi_debug ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;

/* NAA-3 identifier prefixes used to synthesize designators */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1232
1233
/*
 * Build the designation descriptor list for the Device Identification
 * VPD page (83h) into @arr; returns the number of bytes written.
 * A negative @dev_id_num (the wlun case) suppresses the logical-unit
 * designators.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identification */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID, LU association */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
1320
/* Canned payload for the Software Interface Identification VPD page (84h) */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1326
1327
1328 static int inquiry_vpd_84(unsigned char *arr)
1329 {
1330 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1331 return sizeof(vpd84_data);
1332 }
1333
1334
/*
 * Append one network-address descriptor for the Management Network
 * Addresses VPD page: a 4-byte header (association/service type,
 * reserved, 2-byte length) followed by the NUL-terminated URL padded
 * with zeros to a multiple of four bytes.  Returns the new offset.
 */
static int sdebug_append_net_desc(unsigned char *arr, int num,
				  int assoc_service, const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;		/* include the NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4-byte multiple */
	arr[num++] = assoc_service;
	arr[num++] = 0x0;		/* reserved */
	arr[num++] = 0x0;		/* address length (msb) */
	arr[num++] = plen;		/* address length (lsb) */
	memcpy(arr + num, url, olen);
	memset(arr + num + olen, 0, plen - olen);
	return num + plen;
}

/*
 * Build the Management Network Addresses VPD page (85h): two URL
 * descriptors (service types 0x1 and 0x4).  Returns the number of
 * bytes written.  (The two previously duplicated stanzas now share
 * sdebug_append_net_desc().)
 */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	num = sdebug_append_net_desc(arr, num, 0x1,
				     "https://www.kernel.org/config");
	num = sdebug_append_net_desc(arr, num, 0x4,
				     "http://www.kernel.org/log");
	return num;
}
1368
1369
/*
 * Build the SCSI Ports VPD page (88h): two relative target ports
 * (1 and 2), each carrying an NAA-3 target-port designator derived
 * from @target_dev_id.  Returns the number of bytes written.
 */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	/* relative port 1 (primary) */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length of target port descriptors */
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* relative port 2 (secondary) */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length of target port descriptors */
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
1410
1411
/*
 * Canned payload for the ATA Information VPD page (89h): SAT-style
 * vendor/product strings followed by a 512-byte IDENTIFY DEVICE image.
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1455
1456
1457 static int inquiry_vpd_89(unsigned char *arr)
1458 {
1459 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1460 return sizeof(vpd89_data);
1461 }
1462
1463
/* Template for the Block Limits VPD page (B0h); fields are overwritten
 * by inquiry_vpd_b0() from the current module parameters. */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1470
1471
1472 static int inquiry_vpd_b0(unsigned char *arr)
1473 {
1474 unsigned int gran;
1475
1476 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1477
1478
1479 if (sdebug_opt_xferlen_exp != 0 &&
1480 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1481 gran = 1 << sdebug_opt_xferlen_exp;
1482 else
1483 gran = 1 << sdebug_physblk_exp;
1484 put_unaligned_be16(gran, arr + 2);
1485
1486
1487 if (sdebug_store_sectors > 0x400)
1488 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1489
1490
1491 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1492
1493 if (sdebug_lbpu) {
1494
1495 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1496
1497
1498 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1499 }
1500
1501
1502 if (sdebug_unmap_alignment) {
1503 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1504 arr[28] |= 0x80;
1505 }
1506
1507
1508 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1509
1510
1511 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1512
1513 return 0x3c;
1514
1515 return sizeof(vpdb0_data);
1516 }
1517
1518
/* Block Device Characteristics VPD page (B1h) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;	/* medium rotation rate (msb) */
	arr[1] = 1;	/* rate 0x0001: non-rotating medium (SSD) */
	arr[2] = 0;	/* product type: not indicated */
	arr[3] = 5;	/* nominal form factor: less than 1.8 inch */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* ZONED field: host-aware */

	return 0x3c;
}
1531
1532
1533 static int inquiry_vpd_b2(unsigned char *arr)
1534 {
1535 memset(arr, 0, 0x4);
1536 arr[0] = 0;
1537 if (sdebug_lbpu)
1538 arr[1] = 1 << 7;
1539 if (sdebug_lbpws)
1540 arr[1] |= 1 << 6;
1541 if (sdebug_lbpws10)
1542 arr[1] |= 1 << 5;
1543 if (sdebug_lbprz && scsi_debug_lbp())
1544 arr[1] |= (sdebug_lbprz & 0x7) << 2;
1545
1546
1547
1548 return 0x4;
1549 }
1550
1551
/* Zoned Block Device Characteristics VPD page (B6h) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1;	/* URSWRZ: unrestricted reads of write-pointer zones */
	/*
	 * Optimal number of open/non-sequentially-written zones: report
	 * "not reported" (all ones) for both fields.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	/* Maximum number of open zones: limited only for host-managed */
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);	/* not reported */
	if (devip->zcap < devip->zsize) {
		/* zone capacity smaller than zone size: report zone starts */
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}
1576
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/*
 * INQUIRY handler: returns either the standard inquiry data or, when
 * the EVPD bit is set, one of the supported Vital Product Data pages
 * (built by the inquiry_vpd_*() helpers above).  A set CMDDT bit is
 * rejected as an invalid field.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;	/* ask mid-level to retry */
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/* sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* software interface ident. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			arr[5] = 0x7;	/* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];        /* sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /* sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /* sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Provisioning */
			arr[1] = cmd[2];        /* sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];        /* sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			  min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use version descriptor 36..43 for driver version date */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1743
1744
/* Informational Exceptions Control mode page [0x1c]; also consulted by
 * resp_requests() to synthesize a THRESHOLD EXCEEDED test condition. */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
1747
/*
 * REQUEST SENSE handler.  Builds sense data in the format selected by
 * the DESC bit (descriptor format when set, fixed otherwise):
 * NOT READY while the unit is stopped, a THRESHOLD EXCEEDED test
 * pattern when the IEC mode page has MRIE==6 and the test bit set,
 * otherwise "no sense".
 */
static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);	/* DESC bit: descriptor format */
	u32 alloc_len = cmd[4];
	u32 len = 18;	/* fixed format length */
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;	/* descriptor, current */
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			/* ascq 0x1: becoming ready; 0x2: init cmd required */
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;	/* fixed, current */
			arr[2] = NOT_READY;
			arr[7] = 0xa;	/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE==6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
}
1801
/*
 * START STOP UNIT handler.  stopped_state values: 0 started, 1 stopped,
 * 2 "initially stopped", becoming ready sdeb_tur_ms_to_ready ms after
 * device creation.  A non-zero POWER CONDITION field is rejected.
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {	/* only POWER CONDITION 0 (START_VALID) allowed */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear => stop */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow START while becoming ready */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 );
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	/* IMMED bit (cmd[1] & 0x1) also completes without waiting */
	if (!changing || (cmd[1] & 0x1))
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
1844
1845 static sector_t get_sdebug_capacity(void)
1846 {
1847 static const unsigned int gibibyte = 1073741824;
1848
1849 if (sdebug_virtual_gb > 0)
1850 return (sector_t)sdebug_virtual_gb *
1851 (gibibyte / sdebug_sector_size);
1852 else
1853 return sdebug_store_sectors;
1854 }
1855
1856 #define SDEBUG_READCAP_ARR_SZ 8
#define SDEBUG_READCAP_ARR_SZ 8
/*
 * READ CAPACITY(10): report the last LBA (capped at 0xffffffff when it
 * does not fit in 32 bits, telling the initiator to use
 * READ CAPACITY(16)) and the logical block size.
 */
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;	/* last LBA */
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
1874
#define SDEBUG_READCAP16_ARR_SZ 32
/*
 * READ CAPACITY(16): last LBA, block size, physical-block exponent,
 * lowest aligned LBA, plus the LBPME/LBPRZ provisioning bits and the
 * protection type/enable byte when DIF is configured.
 */
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	u32 alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/*
		 * From sbc4r07, the LBPRZ field is "defined", i.e. one bit;
		 * only bit 0 of sdebug_lbprz is significant here.
		 */
		if (sdebug_lbprz & 1)	/* unmapped blocks read as zero */
			arr[14] |= 0x40; /* LBPRZ */
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
1912
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

/*
 * REPORT TARGET PORT GROUPS (MAINTENANCE IN).  Fabricates two port
 * groups (active/optimized and active/non-optimized) each with one
 * relative target port.  The reported state depends on
 * sdebug_vpd_use_hostno.
 */
static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;	/* ask mid-level to retry */
	/*
	 * Two port groups, each with one port; group ids derived from
	 * host number and channel the same way as in resp_inquiry().
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_no
	 * when vpd_use_hostno is off.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* Target port group: unavailable */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* RETURN DATA LENGTH excludes its own 4 bytes */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either:
	 *   the allocation length from the CDB, or
	 *   the number of bytes of data (n) actually built.
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
1991
/*
 * REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN).  REPORTING
 * OPTIONS 0 lists all commands (including attached service actions);
 * options 1..3 report on the single command given by REQUESTED
 * OPERATION CODE (and SERVICE ACTION), including its CDB usage mask.
 * RCTD adds command timeout descriptors.
 */
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);		/* return cmd timeout descs */
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* cap working buffer */
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;	/* descriptor size w/ and w/o CTDP */
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* CTDP */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* SERVACTV */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* emit the attached service-action variants too */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode requires a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* sa given but opcode has none */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported per standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* search attached opcodes for a match */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached sa variants for a match */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* emit the CDB usage data (length + mask) */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;	/* command timeouts descriptor */
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2142
2143 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2144 struct sdebug_dev_info *devip)
2145 {
2146 bool repd;
2147 u32 alloc_len, len;
2148 u8 arr[16];
2149 u8 *cmd = scp->cmnd;
2150
2151 memset(arr, 0, sizeof(arr));
2152 repd = !!(cmd[2] & 0x80);
2153 alloc_len = get_unaligned_be32(cmd + 6);
2154 if (alloc_len < 4) {
2155 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2156 return check_condition_result;
2157 }
2158 arr[0] = 0xc8;
2159 arr[1] = 0x1;
2160 if (repd) {
2161 arr[3] = 0xc;
2162 len = 16;
2163 } else
2164 len = 4;
2165
2166 len = (len < alloc_len) ? len : alloc_len;
2167 return fill_from_dev_buffer(scp, arr, len);
2168 }
2169
2170
2171
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery mode page (0x1), 12 bytes */
	const unsigned char def_pg[] = {
		0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff
	};

	memcpy(p, def_pg, sizeof(def_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(&p[2], 0, sizeof(def_pg) - 2);
	return sizeof(def_pg);
}
2182
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect mode page (0x2), 16 bytes */
	const unsigned char def_pg[] = {
		0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0
	};

	memcpy(p, def_pg, sizeof(def_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(&p[2], 0, sizeof(def_pg) - 2);
	return sizeof(def_pg);
}
2193
2194 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2195 {
2196 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2197 0, 0, 0, 0, 0, 0, 0, 0,
2198 0, 0, 0, 0, 0x40, 0, 0, 0};
2199
2200 memcpy(p, format_pg, sizeof(format_pg));
2201 put_unaligned_be16(sdebug_sectors_per, p + 10);
2202 put_unaligned_be16(sdebug_sector_size, p + 12);
2203 if (sdebug_removable)
2204 p[20] |= 0x20;
2205 if (1 == pcontrol)
2206 memset(p + 2, 0, sizeof(format_pg) - 2);
2207 return sizeof(format_pg);
2208 }
2209
/*
 * Caching mode page (0x8) current values.  Shared mutable state: read by
 * resp_caching_pg() and overwritten by MODE SELECT in resp_mode_select().
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2213
2214 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2215 {
2216 unsigned char ch_caching_pg[] = { 0x4, 0, 0, 0, 0, 0,
2217 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2218 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2219 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2220
2221 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2222 caching_pg[2] &= ~0x4;
2223 memcpy(p, caching_pg, sizeof(caching_pg));
2224 if (1 == pcontrol)
2225 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2226 else if (2 == pcontrol)
2227 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2228 return sizeof(caching_pg);
2229 }
2230
/*
 * Control mode page (0xa) current values.  Shared mutable state: updated by
 * resp_ctrl_m_pg() (D_SENSE/ATO bits) and by MODE SELECT in resp_mode_select().
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2233
2234 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2235 {
2236 unsigned char ch_ctrl_m_pg[] = { 0x6, 0, 0, 0, 0, 0,
2237 0, 0, 0, 0};
2238 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2239 0, 0, 0x2, 0x4b};
2240
2241 if (sdebug_dsense)
2242 ctrl_m_pg[2] |= 0x4;
2243 else
2244 ctrl_m_pg[2] &= ~0x4;
2245
2246 if (sdebug_ato)
2247 ctrl_m_pg[5] |= 0x80;
2248
2249 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2250 if (1 == pcontrol)
2251 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2252 else if (2 == pcontrol)
2253 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2254 return sizeof(ctrl_m_pg);
2255 }
2256
2257
2258 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2259 {
2260 unsigned char ch_iec_m_pg[] = { 0x4, 0xf, 0, 0, 0, 0,
2261 0, 0, 0x0, 0x0};
2262 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2263 0, 0, 0x0, 0x0};
2264
2265 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2266 if (1 == pcontrol)
2267 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2268 else if (2 == pcontrol)
2269 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2270 return sizeof(iec_m_pg);
2271 }
2272
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS protocol-specific short-format mode page (0x19), 8 bytes */
	const unsigned char sf_pg[] = {
		0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
	};

	memcpy(p, sf_pg, sizeof(sf_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(&p[2], 0, sizeof(sf_pg) - 2);
	return sizeof(sf_pg);
}
2283
2284
2285 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2286 int target_dev_id)
2287 {
2288 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2289 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2290 0, 0, 0, 0, 0, 0, 0, 0,
2291 0, 0, 0, 0, 0, 0, 0, 0,
2292 0x2, 0, 0, 0, 0, 0, 0, 0,
2293 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2294 0, 0, 0, 0, 0, 0, 0, 0,
2295 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2296 0, 0, 0, 0, 0, 0, 0, 0,
2297 0, 0, 0, 0, 0, 0, 0, 0,
2298 0x3, 0, 0, 0, 0, 0, 0, 0,
2299 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2300 0, 0, 0, 0, 0, 0, 0, 0,
2301 };
2302 int port_a, port_b;
2303
2304 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2305 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2306 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2307 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2308 port_a = target_dev_id + 1;
2309 port_b = port_a + 1;
2310 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2311 put_unaligned_be32(port_a, p + 20);
2312 put_unaligned_be32(port_b, p + 48 + 20);
2313 if (1 == pcontrol)
2314 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2315 return sizeof(sas_pcd_m_pg);
2316 }
2317
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS shared port mode subpage (0x59, subpage 2), 16 bytes */
	const unsigned char sha_pg[] = {
		0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0
	};

	memcpy(p, sha_pg, sizeof(sha_pg));
	if (pcontrol == 1)	/* changeable values: none reported */
		memset(p + 4, 0, sizeof(sha_pg) - 4);
	return sizeof(sha_pg);
}
2329
2330 #define SDEBUG_MAX_MSENSE_SZ 256
2331
/*
 * Respond to MODE SENSE(6) and MODE SENSE(10).  Builds the mode parameter
 * header, an optional block descriptor, and the requested mode page(s) in
 * arr[], then returns min(alloc_len, offset) bytes to the initiator.
 */
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	u32 alloc_len, offset, len;
	int target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* DBD: disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6; /* 0=current 1=changeable 2=default 3=saved */
	pcode = cmd[2] & 0x3f;		/* page code */
	subpcode = cmd[3];		/* subpage code */
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10); /* long-LBA descriptor (10-byte only) */
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* saved values are not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;

	/* device-specific parameter: DPOFUA for disk/zbc, plus WP if enabled */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;		/* 4-byte header for MODE SENSE(6) */
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* LONGLBA */
		arr[7] = bd_len;
		offset = 8;		/* 8-byte header for MODE SENSE(10) */
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	/* append the short or long block descriptor, if requested */
	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	/* subpages are only implemented for the SAS page (0x19) */
	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {

		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write Error Recovery page */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:       /* Format Device page (disk only) */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page (disk and zbc) */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control page */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* SAS protocol-specific page(s) */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Control page */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* return all pages (and optionally all subpages) */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	/* fill in MODE DATA LENGTH (excludes the length field itself) */
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
}
2496
2497 #define SDEBUG_MAX_MSELECT_SZ 512
2498
/*
 * Respond to MODE SELECT(6)/(10).  Fetches the parameter list from the
 * initiator and, for the supported pages (caching 0x8, control 0xa,
 * IEC 0x1c), copies the new values into the shared current-values arrays,
 * then raises a MODE PARAMETERS CHANGED unit attention.
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* PF: page format, must be set */
	sp = cmd[1] & 0x1;	/* SP: save pages, not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	/* MODE DATA LENGTH is reserved (zero) in a MODE SELECT list */
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);	/* offset of first mode page */
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;	/* page code */
	ps = !!(arr[off] & 0x80);	/* PS must be clear in the list */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SPF: long (subpage) format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		(arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	/* only accept a page whose length matches the current page's */
	switch (mpage) {
	case 0x8:	/* Caching mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:	/* Control mode page; also updates wp/dsense knobs */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:	/* Informational Exceptions Control mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2580
static int resp_temp_l_pg(unsigned char *arr)
{	/* Temperature log page (0xd) payload: two parameters */
	const unsigned char temp_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* current temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* reference temperature */
	};

	memcpy(arr, temp_pg, sizeof(temp_pg));
	return sizeof(temp_pg);
}
2590
2591 static int resp_ie_l_pg(unsigned char *arr)
2592 {
2593 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2594 };
2595
2596 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2597 if (iec_m_pg[2] & 0x4) {
2598 arr[4] = THRESHOLD_EXCEEDED;
2599 arr[5] = 0xff;
2600 }
2601 return sizeof(ie_l_pg);
2602 }
2603
static int resp_env_rep_l_spg(unsigned char *arr)
{	/* Environmental reporting log subpage payload, two 12-byte reports */
	const unsigned char env_pg[] = {
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,
	};

	memcpy(arr, env_pg, sizeof(env_pg));
	return sizeof(env_pg);
}
2615
2616 #define SDEBUG_MAX_LSENSE_SZ 512
2617
/*
 * Respond to LOG SENSE.  Supported pages: 0x0 (supported pages), 0xd
 * (temperature), 0x2f (informational exceptions); subpage 0xff lists
 * page/subpage pairs and 0xd/0x1 returns environmental reporting.
 */
static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;	/* PPC: parameter pointer control, unsupported */
	sp = cmd[1] & 0x1;	/* SP: save parameters, unsupported */
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* temperature */
			arr[n++] = 0x2f;	/* informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		/* list supported page/subpage pairs */
		arr[0] |= 0x40;
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;		/* 0,ff page */
			arr[n++] = 0xff;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* d,0 page */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* d,1 page */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* d,ff page */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* 2f,0 page */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* 2f,ff page */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;
			arr[n++] = 0xd;
			arr[n++] = 0x1;
			arr[n++] = 0xd;
			arr[n++] = 0xff;
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;
			arr[n++] = 0x2f;
			arr[n++] = 0xff;
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (subpcode > 0) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		if (pcode == 0xd && subpcode == 1)
			arr[3] = resp_env_rep_l_spg(arr + 4);
		else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	/*
	 * NOTE(review): the final clamp uses SDEBUG_MAX_INQ_ARR_SZ rather
	 * than SDEBUG_MAX_LSENSE_SZ — verify this is intentional.
	 */
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
2717
/* A device is zoned (ZBC/ZAC emulation) iff zones were configured for it. */
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
	return devip->nr_zones != 0;
}
2722
/* Return the zone state entry that covers lba. */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	/* simple case: full-capacity zones, or lba in the conventional range */
	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * zcap < zsize: each zsize-sized chunk beyond the conventional zones
	 * appears to correspond to two zstate entries (zone + trailing gap),
	 * hence the 2*zno mapping; pick whichever of the pair covers lba.
	 * NOTE(review): assumption inferred from the arithmetic — confirm
	 * against the zone-initialization code elsewhere in this file.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
2744
/* True for conventional (randomly writable) zones. */
static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_CNV;
}
2749
/* True for gap zones (no read/write access). */
static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_GAP;
}
2754
2755 static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
2756 {
2757 return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
2758 }
2759
2760 static void zbc_close_zone(struct sdebug_dev_info *devip,
2761 struct sdeb_zone_state *zsp)
2762 {
2763 enum sdebug_z_cond zc;
2764
2765 if (!zbc_zone_is_seq(zsp))
2766 return;
2767
2768 zc = zsp->z_cond;
2769 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2770 return;
2771
2772 if (zc == ZC2_IMPLICIT_OPEN)
2773 devip->nr_imp_open--;
2774 else
2775 devip->nr_exp_open--;
2776
2777 if (zsp->z_wp == zsp->z_start) {
2778 zsp->z_cond = ZC1_EMPTY;
2779 } else {
2780 zsp->z_cond = ZC4_CLOSED;
2781 devip->nr_closed++;
2782 }
2783 }
2784
2785 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2786 {
2787 struct sdeb_zone_state *zsp = &devip->zstate[0];
2788 unsigned int i;
2789
2790 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2791 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2792 zbc_close_zone(devip, zsp);
2793 return;
2794 }
2795 }
2796 }
2797
/* Open a sequential zone, implicitly or explicitly, enforcing max_open. */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;		/* only sequential zones can be opened */

	zc = zsp->z_cond;
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;		/* already open in the requested mode */

	/* close first where needed: implicit->explicit, or make room under max_open */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
2828
2829 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
2830 struct sdeb_zone_state *zsp)
2831 {
2832 switch (zsp->z_cond) {
2833 case ZC2_IMPLICIT_OPEN:
2834 devip->nr_imp_open--;
2835 break;
2836 case ZC3_EXPLICIT_OPEN:
2837 devip->nr_exp_open--;
2838 break;
2839 default:
2840 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
2841 zsp->z_start, zsp->z_cond);
2842 break;
2843 }
2844 zsp->z_cond = ZC5_FULL;
2845 }
2846
/*
 * Advance the write pointer(s) after a write of num sectors at lba.
 * Sequential-write-required zones are known to be write-pointer aligned
 * and single-zone (checked earlier), so a simple increment suffices;
 * other sequential zones may be written anywhere, possibly spanning
 * zones, so walk the covered zones one at a time.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;		/* conventional/gap zones have no write pointer */

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;	/* out-of-order write */

		end = lba + num;
		if (end >= zend) {
			/* write reaches the end of this zone */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			/* entirely below the write pointer: wp unchanged */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			zsp++;		/* continue into the next zone */
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
2888
/*
 * Validate a read or write of [lba, lba + num) against the zone model.
 * Returns 0 when the access is allowed, otherwise sets sense data and
 * returns check_condition_result.  A successful write to an EMPTY or
 * CLOSED sequential zone implicitly opens it.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* host-aware devices allow any read */
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* reads must not cross a zone-type boundary */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* writes may never touch a gap zone */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* a write starting in a conventional zone must stay conventional */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* sequential-write-required: single zone only */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* full zones cannot be written */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* write must start exactly at the write pointer */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* implicitly open the zone, unless explicit opens exhaust max_open */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
2965
/*
 * Common access validation for medium-access commands: range within
 * capacity, transfer length within the store, write protection, and
 * (for zoned devices) the zone-model rules.  Returns 0 or a check
 * condition with sense data already set.
 */
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length may not exceed the (possibly wrapped) store */
	if (num > sdebug_store_sectors) {

		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
2992
2993
2994
2995
2996
2997
2998
2999 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3000 bool bug_if_fake_rw)
3001 {
3002 if (sdebug_fake_rw) {
3003 BUG_ON(bug_if_fake_rw);
3004 return NULL;
3005 }
3006 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3007 }
3008
3009
/*
 * Copy data between the command's scatter-gather list and the backing
 * store.  The store is circular: accesses past sdebug_store_sectors wrap
 * to its start.  Returns the number of bytes copied, 0 when there is
 * nothing to transfer (or no store), or -1 on a direction mismatch.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;	/* noted by cache-sync handling */
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	block = do_div(lba, sdebug_store_sectors);	/* lba mod store size */
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors; /* wrapped sectors */

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;	/* short transfer: report bytes moved so far */

	if (rest) {
		/* second leg of a wrapped access, from the store start */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
3051
3052
3053 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3054 {
3055 struct scsi_data_buffer *sdb = &scp->sdb;
3056
3057 if (!sdb->length)
3058 return 0;
3059 if (scp->sc_data_direction != DMA_TO_DEVICE)
3060 return -1;
3061 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3062 num * sdebug_sector_size, 0, true);
3063 }
3064
3065
3066
3067
3068 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3069 const u8 *arr, bool compare_only)
3070 {
3071 bool res;
3072 u64 block, rest = 0;
3073 u32 store_blks = sdebug_store_sectors;
3074 u32 lb_size = sdebug_sector_size;
3075 u8 *fsp = sip->storep;
3076
3077 block = do_div(lba, store_blks);
3078 if (block + num > store_blks)
3079 rest = block + num - store_blks;
3080
3081 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3082 if (!res)
3083 return res;
3084 if (rest)
3085 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3086 rest * lb_size);
3087 if (!res)
3088 return res;
3089 if (compare_only)
3090 return true;
3091 arr += num * lb_size;
3092 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3093 if (rest)
3094 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3095 return res;
3096 }
3097
3098 static __be16 dif_compute_csum(const void *buf, int len)
3099 {
3100 __be16 csum;
3101
3102 if (sdebug_guard)
3103 csum = (__force __be16)ip_compute_csum(buf, len);
3104 else
3105 csum = cpu_to_be16(crc_t10dif(buf, len));
3106
3107 return csum;
3108 }
3109
/*
 * Verify one sector's protection tuple against its data.  Returns 0 on
 * success, 0x01 on a guard tag mismatch, 0x03 on a reference tag mismatch
 * (type 1 checks against the sector number, type 2 against ei_lba).
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
3136
/*
 * Copy protection information tuples between the command's protection
 * scatter list and the store's dif_storep, wrapping at the end of the
 * store.  read=true copies store -> sgl, otherwise sgl -> store.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* total PI bytes still to move */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		/* 'rest' is the part that wraps past the end of dif_storep */
		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped part continues at the store start */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3182
/*
 * Verify protection information for a read of 'sectors' sectors starting
 * at start_sec, then copy the PI tuples to the initiator.  Returns 0 on
 * success or the dif_verify() error code of the first failing sector.
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		/* app tag 0xffff escapes checking */
		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Only verify when the RDPROTECT field (cmd[1] bits 7..5)
		 * is non-zero; on the first failure stop checking but still
		 * return the PI to the initiator below.
		 */
		if (scp->cmnd[1] >> 5) {
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
3222
3223 static inline void
3224 sdeb_read_lock(struct sdeb_store_info *sip)
3225 {
3226 if (sdebug_no_rwlock) {
3227 if (sip)
3228 __acquire(&sip->macc_lck);
3229 else
3230 __acquire(&sdeb_fake_rw_lck);
3231 } else {
3232 if (sip)
3233 read_lock(&sip->macc_lck);
3234 else
3235 read_lock(&sdeb_fake_rw_lck);
3236 }
3237 }
3238
3239 static inline void
3240 sdeb_read_unlock(struct sdeb_store_info *sip)
3241 {
3242 if (sdebug_no_rwlock) {
3243 if (sip)
3244 __release(&sip->macc_lck);
3245 else
3246 __release(&sdeb_fake_rw_lck);
3247 } else {
3248 if (sip)
3249 read_unlock(&sip->macc_lck);
3250 else
3251 read_unlock(&sdeb_fake_rw_lck);
3252 }
3253 }
3254
3255 static inline void
3256 sdeb_write_lock(struct sdeb_store_info *sip)
3257 {
3258 if (sdebug_no_rwlock) {
3259 if (sip)
3260 __acquire(&sip->macc_lck);
3261 else
3262 __acquire(&sdeb_fake_rw_lck);
3263 } else {
3264 if (sip)
3265 write_lock(&sip->macc_lck);
3266 else
3267 write_lock(&sdeb_fake_rw_lck);
3268 }
3269 }
3270
3271 static inline void
3272 sdeb_write_unlock(struct sdeb_store_info *sip)
3273 {
3274 if (sdebug_no_rwlock) {
3275 if (sip)
3276 __release(&sip->macc_lck);
3277 else
3278 __release(&sdeb_fake_rw_lck);
3279 } else {
3280 if (sip)
3281 write_unlock(&sip->macc_lck);
3282 else
3283 write_unlock(&sdeb_fake_rw_lck);
3284 }
3285 }
3286
/*
 * Service READ(6/10/12/16), XDWRITEREAD(10) and (by default) the variable
 * length READ variant: decode the cdb, apply protection-information checks
 * and optional error injection, then copy data out of the fake store.
 * Returns 0 on success, a check condition, or a DID_* host byte result.
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* number of logical blocks to transfer */
	u32 ei_lba;	/* expected initial LBA, from cdb for long variants */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
			(u32)(cmd[1] & 0x1f) << 16;
		/* READ(6): transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	default:	/* presumably the variable length READ(32) — field offsets match */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* With type 2 protection, a non-zero RDPROTECT is rejected */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* One-shot short-transfer injection: halve the block count */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Simulated medium error if the range overlaps the configured window */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	/* DIX + T10 DIF: verify protection information attached to the cmd */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* guard tag mismatch */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* reference tag mismatch */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	sdeb_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* One-shot DIF/DIX error injection after a successful transfer */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3429
/*
 * Walk the command's protection and data scatter-gather lists in lock-step,
 * verifying each sector's protection tuple (unless WRPROTECT field == 3),
 * then copy the protection info into the store's PI area.
 * Returns 0 on success or a non-zero code (e.g. from dif_verify()) on
 * verification failure.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;	/* offset within current protection page */
	int dpage_offset;	/* offset within current data page */
	struct sg_mapping_iter diter;	/* data sg list iterator */
	struct sg_mapping_iter piter;	/* protection sg list iterator */

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
		       scsi_prot_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;	/* data sg list ran out early */
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			/* cmnd[1] >> 5 is the WRPROTECT field; 3 skips checking */
			if (SCpnt->cmnd[1] >> 5 != 3) {
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* All tuples verified: mirror them into the store's PI area */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3501
3502 static unsigned long lba_to_map_index(sector_t lba)
3503 {
3504 if (sdebug_unmap_alignment)
3505 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3506 sector_div(lba, sdebug_unmap_granularity);
3507 return lba;
3508 }
3509
3510 static sector_t map_index_to_lba(unsigned long index)
3511 {
3512 sector_t lba = index * sdebug_unmap_granularity;
3513
3514 if (sdebug_unmap_alignment)
3515 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3516 return lba;
3517 }
3518
3519 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3520 unsigned int *num)
3521 {
3522 sector_t end;
3523 unsigned int mapped;
3524 unsigned long index;
3525 unsigned long next;
3526
3527 index = lba_to_map_index(lba);
3528 mapped = test_bit(index, sip->map_storep);
3529
3530 if (mapped)
3531 next = find_next_zero_bit(sip->map_storep, map_size, index);
3532 else
3533 next = find_next_bit(sip->map_storep, map_size, index);
3534
3535 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3536 *num = end - lba;
3537 return mapped;
3538 }
3539
3540 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3541 unsigned int len)
3542 {
3543 sector_t end = lba + len;
3544
3545 while (lba < end) {
3546 unsigned long index = lba_to_map_index(lba);
3547
3548 if (index < map_size)
3549 set_bit(index, sip->map_storep);
3550
3551 lba = map_index_to_lba(index + 1);
3552 }
3553 }
3554
/*
 * Mark [lba, lba+len) as unmapped in the provisioning bitmap.  Only chunks
 * that start on a granularity boundary and lie entirely inside the range
 * are cleared; partial chunks at either edge stay mapped.  Unmapped data
 * is overwritten with 0 or 0xff depending on sdebug_lbprz, and any
 * per-sector protection info for the chunk is invalidated with 0xff.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* Clear only whole chunks fully contained in the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {
				/* lbprz & 1 -> read zeroes; else 0xff fill */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3583
/*
 * Service WRITE(6/10/12/16), XDWRITEREAD(10, opcode 0x53) and (by default)
 * the variable length WRITE variant: decode the cdb, apply protection
 * checks and optional error injection, then copy data into the fake store.
 * The store's write lock is held across the PI verify and the data copy.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* number of logical blocks to transfer */
	u32 ei_lba;	/* expected initial LBA, from cdb for long variants */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
			(u32)(cmd[1] & 0x1f) << 16;
		/* WRITE(6): transfer length of 0 means 256 blocks */
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* presumably the variable length WRITE(32) — field offsets match */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* With type 2 protection, a non-zero WRPROTECT is rejected */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	sdeb_write_lock(sip);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	/* DIX + T10 DIF: verify protection information attached to the cmd */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* guard tag mismatch */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* reference tag mismatch */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	sdeb_write_unlock(sip);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* One-shot DIF/DIX error injection after a successful transfer */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3715
3716
3717
3718
3719
/*
 * Service WRITE SCATTERED (16 or 32 byte cdb): the data-out buffer starts
 * with a header plus num_lrd LBA range descriptors (lrd), occupying the
 * first lbdof blocks, followed by the data for each descriptor in order.
 * Each descriptor's range is checked and written under the store's write
 * lock; error injection mirrors the plain write path.
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* kernel copy of header + descriptors */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* bytes per LBA range descriptor */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {	/* 32 byte cdb variant */
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {				/* 16 byte cdb variant */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			    wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;	/* degenerate case, not an error */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	/* header + all descriptors must fit inside the lbdof area */
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	sdeb_write_lock(sip);
	sg_off = lbdof_blen;	/* data for first descriptor follows the lbdof area */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		/* total blocks across descriptors must not exceed bt_len */
		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
					"%s: %s: sum of blocks > data provided\n",
					my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* One-shot DIF/DIX error injection */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
3881
/*
 * Common worker for WRITE SAME(10/16).  With unmap set (and LBP enabled)
 * the range is deallocated; otherwise one logical block is obtained (all
 * zeroes when ndob — "no data-out buffer" — is set, else fetched from the
 * initiator) and replicated across the range.  ei_lba is accepted for
 * signature symmetry with the other resp_* writers but is not used here.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	u8 *fs1p;	/* first sector written, source for replication */
	u8 *fsp;	/* base of the fake store */

	sdeb_write_lock(sip);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	/* do_div() returns the remainder: store may be smaller than capacity */
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		sdeb_write_unlock(sip);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	sdeb_write_unlock(sip);

	return 0;
}
3943
3944 static int resp_write_same_10(struct scsi_cmnd *scp,
3945 struct sdebug_dev_info *devip)
3946 {
3947 u8 *cmd = scp->cmnd;
3948 u32 lba;
3949 u16 num;
3950 u32 ei_lba = 0;
3951 bool unmap = false;
3952
3953 if (cmd[1] & 0x8) {
3954 if (sdebug_lbpws10 == 0) {
3955 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3956 return check_condition_result;
3957 } else
3958 unmap = true;
3959 }
3960 lba = get_unaligned_be32(cmd + 2);
3961 num = get_unaligned_be16(cmd + 7);
3962 if (num > sdebug_write_same_length) {
3963 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3964 return check_condition_result;
3965 }
3966 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3967 }
3968
3969 static int resp_write_same_16(struct scsi_cmnd *scp,
3970 struct sdebug_dev_info *devip)
3971 {
3972 u8 *cmd = scp->cmnd;
3973 u64 lba;
3974 u32 num;
3975 u32 ei_lba = 0;
3976 bool unmap = false;
3977 bool ndob = false;
3978
3979 if (cmd[1] & 0x8) {
3980 if (sdebug_lbpws == 0) {
3981 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3982 return check_condition_result;
3983 } else
3984 unmap = true;
3985 }
3986 if (cmd[1] & 0x1)
3987 ndob = true;
3988 lba = get_unaligned_be64(cmd + 2);
3989 num = get_unaligned_be32(cmd + 10);
3990 if (num > sdebug_write_same_length) {
3991 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3992 return check_condition_result;
3993 }
3994 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3995 }
3996
3997
3998
3999
/*
 * WRITE BUFFER: no microcode is actually stored; instead raise the unit
 * attention(s) a real device would report after a microcode download,
 * scoped per the mode field (this LU only, or all LUs on the same target).
 */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
4045
/*
 * COMPARE AND WRITE: fetch 2*num blocks from the initiator (the compare
 * half followed by the write half).  If the compare half matches the
 * current store contents the write half replaces them; otherwise a
 * MISCOMPARE check condition is returned.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;	/* holds compare data then write data */
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;	/* blocks in data-out: 2 * num */
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* single byte field, so max 255 blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	/* With type 2 protection, a non-zero WRPROTECT is rejected */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* false: mismatch does NOT stop at first difference; see worker */
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	sdeb_write_unlock(sip);
	kfree(arr);
	return retval;
}
4106
/* One descriptor in an UNMAP parameter list: big-endian LBA + block count. */
struct unmap_block_desc {
	__be64 lba;
	__be32 blocks;
	__be32 __reserved;
};
4112
4113 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4114 {
4115 unsigned char *buf;
4116 struct unmap_block_desc *desc;
4117 struct sdeb_store_info *sip = devip2sip(devip, true);
4118 unsigned int i, payload_len, descriptors;
4119 int ret;
4120
4121 if (!scsi_debug_lbp())
4122 return 0;
4123 payload_len = get_unaligned_be16(scp->cmnd + 7);
4124 BUG_ON(scsi_bufflen(scp) != payload_len);
4125
4126 descriptors = (payload_len - 8) / 16;
4127 if (descriptors > sdebug_unmap_max_desc) {
4128 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4129 return check_condition_result;
4130 }
4131
4132 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4133 if (!buf) {
4134 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4135 INSUFF_RES_ASCQ);
4136 return check_condition_result;
4137 }
4138
4139 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4140
4141 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4142 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4143
4144 desc = (void *)&buf[8];
4145
4146 sdeb_write_lock(sip);
4147
4148 for (i = 0 ; i < descriptors ; i++) {
4149 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4150 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4151
4152 ret = check_device_access_params(scp, lba, num, true);
4153 if (ret)
4154 goto out;
4155
4156 unmap_region(sip, lba, num);
4157 }
4158
4159 ret = 0;
4160
4161 out:
4162 sdeb_write_unlock(sip);
4163 kfree(buf);
4164
4165 return ret;
4166 }
4167
4168 #define SDEBUG_GET_LBA_STATUS_LEN 32
4169
/*
 * GET LBA STATUS: report whether the given LBA is mapped and how many
 * consecutive blocks share that state.  Returns a single 16-byte LBA
 * status descriptor after the 8-byte header.
 */
static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 alloc_len, mapped, num;
	int ret;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	if (alloc_len < 24)	/* too small to hold even one descriptor */
		return 0;

	ret = check_device_access_params(scp, lba, 1, false);
	if (ret)
		return ret;

	if (scsi_debug_lbp()) {
		struct sdeb_store_info *sip = devip2sip(devip, true);

		mapped = map_state(sip, lba, &num);
	} else {
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;	/* prov status: 0 -> mapped, 1 -> deallocated */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
4211
4212 static int resp_sync_cache(struct scsi_cmnd *scp,
4213 struct sdebug_dev_info *devip)
4214 {
4215 int res = 0;
4216 u64 lba;
4217 u32 num_blocks;
4218 u8 *cmd = scp->cmnd;
4219
4220 if (cmd[0] == SYNCHRONIZE_CACHE) {
4221 lba = get_unaligned_be32(cmd + 2);
4222 num_blocks = get_unaligned_be16(cmd + 7);
4223 } else {
4224 lba = get_unaligned_be64(cmd + 2);
4225 num_blocks = get_unaligned_be32(cmd + 10);
4226 }
4227 if (lba + num_blocks > sdebug_capacity) {
4228 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4229 return check_condition_result;
4230 }
4231 if (!write_since_sync || (cmd[1] & 0x2))
4232 res = SDEG_RES_IMMED_MASK;
4233 else
4234 write_since_sync = false;
4235 return res;
4236 }
4237
4238
4239
4240
4241
4242
4243
4244
/*
 * PRE-FETCH(10/16): the "store" is RAM, so the best we can do is prefetch
 * the backing memory into the CPU cache.  Always reports CONDITION MET,
 * optionally combined with the immediate-return mask when IMMED is set.
 */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;	/* rest: blocks that wrap past store end */
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* 16 byte cdb */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)	/* no backing store (fake_rw), nothing to prefetch */
		goto fini;
	/* Store may be smaller than capacity, so the range can wrap around */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the addressed range into the CPU cache */
	sdeb_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	sdeb_read_unlock(sip);
fini:
	if (cmd[1] & 0x2)	/* IMMED bit */
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
4286
4287 #define RL_BUCKET_ELEMS 8
4288
4289
4290
4291
4292
4293
4294
4295
4296
/*
 * REPORT LUNS: build the LUN list in fixed-size buckets of RL_BUCKET_ELEMS
 * entries and stream each bucket to the command's buffer.  The first
 * bucket's slot 0 doubles as the 8-byte response header, which works
 * because the header and a struct scsi_lun are both 8 bytes.
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count */
	unsigned int wlun_cnt;	/* well-known LUN count (0 or 1) */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;	/* running offset into response */
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* 0x10, 0x11, 0x12: defined by SPC but */
	case 0x11:	/* not supported here, treated the same */
	case 0x12:	/* as any other invalid value */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;	/* LUN 0 suppressed by module parameter */

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* LUN list length, header excluded */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loop over buckets; first slot of bucket 0 holds the header */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;	/* header consumed slot 0 */
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)	/* bucket not full: last one */
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well-known LUN to the last bucket */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4389
/*
 * VERIFY(10/16): compare initiator-supplied data with the store.  With
 * BYTCHK=1 all vnum blocks are sent; with BYTCHK=3 a single block is sent
 * and compared against every block in the range; BYTCHK=0 claims success
 * without any transfer.  A mismatch yields a MISCOMPARE check condition.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;	/* comparison buffer, vnum blocks */
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	a_num = is_bytchk3 ? 1 : vnum;	/* blocks actually transferred */
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		/* replicate the single received block across the buffer */
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}
4465
4466 #define RZONES_DESC_HD 64
4467
4468
4469 static int resp_report_zones(struct scsi_cmnd *scp,
4470 struct sdebug_dev_info *devip)
4471 {
4472 unsigned int rep_max_zones, nrz = 0;
4473 int ret = 0;
4474 u32 alloc_len, rep_opts, rep_len;
4475 bool partial;
4476 u64 lba, zs_lba;
4477 u8 *arr = NULL, *desc;
4478 u8 *cmd = scp->cmnd;
4479 struct sdeb_zone_state *zsp = NULL;
4480 struct sdeb_store_info *sip = devip2sip(devip, false);
4481
4482 if (!sdebug_dev_is_zoned(devip)) {
4483 mk_sense_invalid_opcode(scp);
4484 return check_condition_result;
4485 }
4486 zs_lba = get_unaligned_be64(cmd + 2);
4487 alloc_len = get_unaligned_be32(cmd + 10);
4488 if (alloc_len == 0)
4489 return 0;
4490 rep_opts = cmd[14] & 0x3f;
4491 partial = cmd[14] & 0x80;
4492
4493 if (zs_lba >= sdebug_capacity) {
4494 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4495 return check_condition_result;
4496 }
4497
4498 rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4499
4500 arr = kzalloc(alloc_len, GFP_ATOMIC);
4501 if (!arr) {
4502 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4503 INSUFF_RES_ASCQ);
4504 return check_condition_result;
4505 }
4506
4507 sdeb_read_lock(sip);
4508
4509 desc = arr + 64;
4510 for (lba = zs_lba; lba < sdebug_capacity;
4511 lba = zsp->z_start + zsp->z_size) {
4512 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4513 break;
4514 zsp = zbc_zone(devip, lba);
4515 switch (rep_opts) {
4516 case 0x00:
4517
4518 break;
4519 case 0x01:
4520
4521 if (zsp->z_cond != ZC1_EMPTY)
4522 continue;
4523 break;
4524 case 0x02:
4525
4526 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4527 continue;
4528 break;
4529 case 0x03:
4530
4531 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4532 continue;
4533 break;
4534 case 0x04:
4535
4536 if (zsp->z_cond != ZC4_CLOSED)
4537 continue;
4538 break;
4539 case 0x05:
4540
4541 if (zsp->z_cond != ZC5_FULL)
4542 continue;
4543 break;
4544 case 0x06:
4545 case 0x07:
4546 case 0x10:
4547
4548
4549
4550
4551 continue;
4552 case 0x11:
4553
4554 if (!zsp->z_non_seq_resource)
4555 continue;
4556 break;
4557 case 0x3e:
4558
4559 if (zbc_zone_is_gap(zsp))
4560 continue;
4561 break;
4562 case 0x3f:
4563
4564 if (zbc_zone_is_seq(zsp))
4565 continue;
4566 break;
4567 default:
4568 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4569 INVALID_FIELD_IN_CDB, 0);
4570 ret = check_condition_result;
4571 goto fini;
4572 }
4573
4574 if (nrz < rep_max_zones) {
4575
4576 desc[0] = zsp->z_type;
4577 desc[1] = zsp->z_cond << 4;
4578 if (zsp->z_non_seq_resource)
4579 desc[1] |= 1 << 1;
4580 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4581 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4582 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4583 desc += 64;
4584 }
4585
4586 if (partial && nrz >= rep_max_zones)
4587 break;
4588
4589 nrz++;
4590 }
4591
4592
4593
4594 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4595
4596 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4597
4598 if (devip->zcap < devip->zsize)
4599 put_unaligned_be64(devip->zsize, arr + 16);
4600
4601 rep_len = (unsigned long)desc - (unsigned long)arr;
4602 ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
4603
4604 fini:
4605 sdeb_read_unlock(sip);
4606 kfree(arr);
4607 return ret;
4608 }
4609
4610
4611 static void zbc_open_all(struct sdebug_dev_info *devip)
4612 {
4613 struct sdeb_zone_state *zsp = &devip->zstate[0];
4614 unsigned int i;
4615
4616 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4617 if (zsp->z_cond == ZC4_CLOSED)
4618 zbc_open_zone(devip, &devip->zstate[i], true);
4619 }
4620 }
4621
/*
 * Respond to the ZBC OPEN ZONE command: explicitly open the zone whose
 * start LBA is in the CDB, or all closed zones when the ALL bit is set.
 * Returns 0 or check_condition_result with sense data set on @scp.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;	/* ALL bit */
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		/* Command only valid on the zoned device models */
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		/* The CDB LBA must be the first LBA of a zone */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		/* Conventional zones cannot be opened */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	/* Already explicitly open, or full: nothing to do */
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	/* Enforce the maximum number of explicitly open zones */
	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4689
4690 static void zbc_close_all(struct sdebug_dev_info *devip)
4691 {
4692 unsigned int i;
4693
4694 for (i = 0; i < devip->nr_zones; i++)
4695 zbc_close_zone(devip, &devip->zstate[i]);
4696 }
4697
4698 static int resp_close_zone(struct scsi_cmnd *scp,
4699 struct sdebug_dev_info *devip)
4700 {
4701 int res = 0;
4702 u64 z_id;
4703 u8 *cmd = scp->cmnd;
4704 struct sdeb_zone_state *zsp;
4705 bool all = cmd[14] & 0x01;
4706 struct sdeb_store_info *sip = devip2sip(devip, false);
4707
4708 if (!sdebug_dev_is_zoned(devip)) {
4709 mk_sense_invalid_opcode(scp);
4710 return check_condition_result;
4711 }
4712
4713 sdeb_write_lock(sip);
4714
4715 if (all) {
4716 zbc_close_all(devip);
4717 goto fini;
4718 }
4719
4720
4721 z_id = get_unaligned_be64(cmd + 2);
4722 if (z_id >= sdebug_capacity) {
4723 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4724 res = check_condition_result;
4725 goto fini;
4726 }
4727
4728 zsp = zbc_zone(devip, z_id);
4729 if (z_id != zsp->z_start) {
4730 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4731 res = check_condition_result;
4732 goto fini;
4733 }
4734 if (zbc_zone_is_conv(zsp)) {
4735 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4736 res = check_condition_result;
4737 goto fini;
4738 }
4739
4740 zbc_close_zone(devip, zsp);
4741 fini:
4742 sdeb_write_unlock(sip);
4743 return res;
4744 }
4745
4746 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4747 struct sdeb_zone_state *zsp, bool empty)
4748 {
4749 enum sdebug_z_cond zc = zsp->z_cond;
4750
4751 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4752 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4753 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4754 zbc_close_zone(devip, zsp);
4755 if (zsp->z_cond == ZC4_CLOSED)
4756 devip->nr_closed--;
4757 zsp->z_wp = zsp->z_start + zsp->z_size;
4758 zsp->z_cond = ZC5_FULL;
4759 }
4760 }
4761
4762 static void zbc_finish_all(struct sdebug_dev_info *devip)
4763 {
4764 unsigned int i;
4765
4766 for (i = 0; i < devip->nr_zones; i++)
4767 zbc_finish_zone(devip, &devip->zstate[i], false);
4768 }
4769
/*
 * Respond to the ZBC FINISH ZONE command: transition the zone whose start
 * LBA is in the CDB (or all applicable zones when the ALL bit is set) to
 * the FULL condition.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;	/* ALL bit */
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!sdebug_dev_is_zoned(devip)) {
		/* Only valid on the zoned device models */
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	sdeb_write_lock(sip);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		/* The CDB LBA must be the first LBA of a zone */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		/* Conventional zones have no write pointer to finish */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	/* empty=true: an EMPTY zone may also be finished */
	zbc_finish_zone(devip, zsp, true);
fini:
	sdeb_write_unlock(sip);
	return res;
}
4817
/*
 * Reset the write pointer of @zsp: close it if open, zero the written part
 * of the backing store, and return the zone to the EMPTY condition.
 * No-op for zones without a write pointer (conventional/gap zones).
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;
	struct sdeb_store_info *sip = devip2sip(devip, false);

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	/*
	 * Zero the data between the zone start and the write pointer.
	 * NOTE(review): assumes sip and sip->storep are non-NULL here -
	 * confirm against devip2sip() behavior when no store is attached.
	 */
	if (zsp->z_wp > zsp->z_start)
		memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
		       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
4842
4843 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4844 {
4845 unsigned int i;
4846
4847 for (i = 0; i < devip->nr_zones; i++)
4848 zbc_rwp_zone(devip, &devip->zstate[i]);
4849 }
4850
4851 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4852 {
4853 struct sdeb_zone_state *zsp;
4854 int res = 0;
4855 u64 z_id;
4856 u8 *cmd = scp->cmnd;
4857 bool all = cmd[14] & 0x01;
4858 struct sdeb_store_info *sip = devip2sip(devip, false);
4859
4860 if (!sdebug_dev_is_zoned(devip)) {
4861 mk_sense_invalid_opcode(scp);
4862 return check_condition_result;
4863 }
4864
4865 sdeb_write_lock(sip);
4866
4867 if (all) {
4868 zbc_rwp_all(devip);
4869 goto fini;
4870 }
4871
4872 z_id = get_unaligned_be64(cmd + 2);
4873 if (z_id >= sdebug_capacity) {
4874 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4875 res = check_condition_result;
4876 goto fini;
4877 }
4878
4879 zsp = zbc_zone(devip, z_id);
4880 if (z_id != zsp->z_start) {
4881 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4882 res = check_condition_result;
4883 goto fini;
4884 }
4885 if (zbc_zone_is_conv(zsp)) {
4886 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4887 res = check_condition_result;
4888 goto fini;
4889 }
4890
4891 zbc_rwp_zone(devip, zsp);
4892 fini:
4893 sdeb_write_unlock(sip);
4894 return res;
4895 }
4896
4897 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4898 {
4899 u16 hwq;
4900 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4901
4902 hwq = blk_mq_unique_tag_to_hwq(tag);
4903
4904 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4905 if (WARN_ON_ONCE(hwq >= submit_queues))
4906 hwq = 0;
4907
4908 return sdebug_q_arr + hwq;
4909 }
4910
4911 static u32 get_tag(struct scsi_cmnd *cmnd)
4912 {
4913 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4914 }
4915
4916
/*
 * Deferred (hrtimer or workqueue) completion of a queued command: release
 * the queue slot, maintain the retired-queue bookkeeping and invoke
 * scsi_done() unless the command was aborted in the meantime.
 */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		/* Completion ran on a different CPU than submission */
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		/* Slot was cleared (e.g. by an abort) before we got here */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	/* Non-zero retired_max_queue means max_queue was recently reduced */
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		/*
		 * Shrink (or clear) retired_max_queue down to the highest
		 * slot still in use above the new max_queue.
		 */
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scsi_done(scp); /* callback to mid level */
}
4989
4990
4991 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4992 {
4993 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4994 hrt);
4995 sdebug_q_cmd_complete(sd_dp);
4996 return HRTIMER_NORESTART;
4997 }
4998
4999
5000 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5001 {
5002 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5003 ew.work);
5004 sdebug_q_cmd_complete(sd_dp);
5005 }
5006
5007 static bool got_shared_uuid;
5008 static uuid_t shared_uuid;
5009
5010 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5011 {
5012 struct sdeb_zone_state *zsp;
5013 sector_t capacity = get_sdebug_capacity();
5014 sector_t conv_capacity;
5015 sector_t zstart = 0;
5016 unsigned int i;
5017
5018
5019
5020
5021
5022
5023
5024 if (!sdeb_zbc_zone_size_mb) {
5025 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5026 >> ilog2(sdebug_sector_size);
5027 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5028 devip->zsize >>= 1;
5029 if (devip->zsize < 2) {
5030 pr_err("Device capacity too small\n");
5031 return -EINVAL;
5032 }
5033 } else {
5034 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5035 pr_err("Zone size is not a power of 2\n");
5036 return -EINVAL;
5037 }
5038 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5039 >> ilog2(sdebug_sector_size);
5040 if (devip->zsize >= capacity) {
5041 pr_err("Zone size too large for device capacity\n");
5042 return -EINVAL;
5043 }
5044 }
5045
5046 devip->zsize_shift = ilog2(devip->zsize);
5047 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5048
5049 if (sdeb_zbc_zone_cap_mb == 0) {
5050 devip->zcap = devip->zsize;
5051 } else {
5052 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5053 ilog2(sdebug_sector_size);
5054 if (devip->zcap > devip->zsize) {
5055 pr_err("Zone capacity too large\n");
5056 return -EINVAL;
5057 }
5058 }
5059
5060 conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5061 if (conv_capacity >= capacity) {
5062 pr_err("Number of conventional zones too large\n");
5063 return -EINVAL;
5064 }
5065 devip->nr_conv_zones = sdeb_zbc_nr_conv;
5066 devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5067 devip->zsize_shift;
5068 devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5069
5070
5071 if (devip->zcap < devip->zsize)
5072 devip->nr_zones += devip->nr_seq_zones;
5073
5074 if (devip->zmodel == BLK_ZONED_HM) {
5075
5076 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5077 devip->max_open = (devip->nr_zones - 1) / 2;
5078 else
5079 devip->max_open = sdeb_zbc_max_open;
5080 }
5081
5082 devip->zstate = kcalloc(devip->nr_zones,
5083 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5084 if (!devip->zstate)
5085 return -ENOMEM;
5086
5087 for (i = 0; i < devip->nr_zones; i++) {
5088 zsp = &devip->zstate[i];
5089
5090 zsp->z_start = zstart;
5091
5092 if (i < devip->nr_conv_zones) {
5093 zsp->z_type = ZBC_ZTYPE_CNV;
5094 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5095 zsp->z_wp = (sector_t)-1;
5096 zsp->z_size =
5097 min_t(u64, devip->zsize, capacity - zstart);
5098 } else if ((zstart & (devip->zsize - 1)) == 0) {
5099 if (devip->zmodel == BLK_ZONED_HM)
5100 zsp->z_type = ZBC_ZTYPE_SWR;
5101 else
5102 zsp->z_type = ZBC_ZTYPE_SWP;
5103 zsp->z_cond = ZC1_EMPTY;
5104 zsp->z_wp = zsp->z_start;
5105 zsp->z_size =
5106 min_t(u64, devip->zcap, capacity - zstart);
5107 } else {
5108 zsp->z_type = ZBC_ZTYPE_GAP;
5109 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5110 zsp->z_wp = (sector_t)-1;
5111 zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5112 capacity - zstart);
5113 }
5114
5115 WARN_ON_ONCE((int)zsp->z_size <= 0);
5116 zstart += zsp->z_size;
5117 }
5118
5119 return 0;
5120 }
5121
5122 static struct sdebug_dev_info *sdebug_device_create(
5123 struct sdebug_host_info *sdbg_host, gfp_t flags)
5124 {
5125 struct sdebug_dev_info *devip;
5126
5127 devip = kzalloc(sizeof(*devip), flags);
5128 if (devip) {
5129 if (sdebug_uuid_ctl == 1)
5130 uuid_gen(&devip->lu_name);
5131 else if (sdebug_uuid_ctl == 2) {
5132 if (got_shared_uuid)
5133 devip->lu_name = shared_uuid;
5134 else {
5135 uuid_gen(&shared_uuid);
5136 got_shared_uuid = true;
5137 devip->lu_name = shared_uuid;
5138 }
5139 }
5140 devip->sdbg_host = sdbg_host;
5141 if (sdeb_zbc_in_use) {
5142 devip->zmodel = sdeb_zbc_model;
5143 if (sdebug_device_create_zones(devip)) {
5144 kfree(devip);
5145 return NULL;
5146 }
5147 } else {
5148 devip->zmodel = BLK_ZONED_NONE;
5149 }
5150 devip->sdbg_host = sdbg_host;
5151 devip->create_ts = ktime_get_boottime();
5152 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
5153 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
5154 }
5155 return devip;
5156 }
5157
5158 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5159 {
5160 struct sdebug_host_info *sdbg_host;
5161 struct sdebug_dev_info *open_devip = NULL;
5162 struct sdebug_dev_info *devip;
5163
5164 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
5165 if (!sdbg_host) {
5166 pr_err("Host info NULL\n");
5167 return NULL;
5168 }
5169
5170 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5171 if ((devip->used) && (devip->channel == sdev->channel) &&
5172 (devip->target == sdev->id) &&
5173 (devip->lun == sdev->lun))
5174 return devip;
5175 else {
5176 if ((!devip->used) && (!open_devip))
5177 open_devip = devip;
5178 }
5179 }
5180 if (!open_devip) {
5181 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5182 if (!open_devip) {
5183 pr_err("out of memory at line %d\n", __LINE__);
5184 return NULL;
5185 }
5186 }
5187
5188 open_devip->channel = sdev->channel;
5189 open_devip->target = sdev->id;
5190 open_devip->lun = sdev->lun;
5191 open_devip->sdbg_host = sdbg_host;
5192 atomic_set(&open_devip->num_in_q, 0);
5193 set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5194 open_devip->used = true;
5195 return open_devip;
5196 }
5197
5198 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5199 {
5200 if (sdebug_verbose)
5201 pr_info("slave_alloc <%u %u %u %llu>\n",
5202 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5203 return 0;
5204 }
5205
5206 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5207 {
5208 struct sdebug_dev_info *devip =
5209 (struct sdebug_dev_info *)sdp->hostdata;
5210
5211 if (sdebug_verbose)
5212 pr_info("slave_configure <%u %u %u %llu>\n",
5213 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5214 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5215 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5216 if (devip == NULL) {
5217 devip = find_build_dev_info(sdp);
5218 if (devip == NULL)
5219 return 1;
5220 }
5221 sdp->hostdata = devip;
5222 if (sdebug_no_uld)
5223 sdp->no_uld_attach = 1;
5224 config_cdb_len(sdp);
5225 return 0;
5226 }
5227
5228 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5229 {
5230 struct sdebug_dev_info *devip =
5231 (struct sdebug_dev_info *)sdp->hostdata;
5232
5233 if (sdebug_verbose)
5234 pr_info("slave_destroy <%u %u %u %llu>\n",
5235 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5236 if (devip) {
5237
5238 devip->used = false;
5239 sdp->hostdata = NULL;
5240 }
5241 }
5242
5243 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5244 enum sdeb_defer_type defer_t)
5245 {
5246 if (!sd_dp)
5247 return;
5248 if (defer_t == SDEB_DEFER_HRT)
5249 hrtimer_cancel(&sd_dp->hrt);
5250 else if (defer_t == SDEB_DEFER_WQ)
5251 cancel_work_sync(&sd_dp->ew.work);
5252 }
5253
5254
5255
/*
 * Find @cmnd in the per-queue command arrays, cancel its deferred
 * completion and release its slot.
 * Returns true if the command was found and stopped, false otherwise.
 */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		/* Also scan slots above max_queue if it was recently lowered */
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
					cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* Snapshot and clear the defer type under the lock */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = READ_ONCE(sd_dp->defer_t);
					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* Drop the spinlock before cancelling the
				 * timer/work, which may wait for completion.
				 */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
5299
5300
/* Stop every queued command on every submission queue (e.g. host reset). */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				/* Snapshot and clear the defer type under the lock */
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = READ_ONCE(sd_dp->defer_t);
					WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
				} else
					l_defer_t = SDEB_DEFER_NONE;
				/* Drop the lock across the cancel, then
				 * re-take it to continue scanning this queue.
				 */
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
5338
5339
5340 static void free_all_queued(void)
5341 {
5342 int j, k;
5343 struct sdebug_queue *sqp;
5344 struct sdebug_queued_cmd *sqcp;
5345
5346 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5347 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5348 sqcp = &sqp->qc_arr[k];
5349 kfree(sqcp->sd_dp);
5350 sqcp->sd_dp = NULL;
5351 }
5352 }
5353 }
5354
5355 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5356 {
5357 bool ok;
5358
5359 ++num_aborts;
5360 if (SCpnt) {
5361 ok = stop_queued_cmnd(SCpnt);
5362 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5363 sdev_printk(KERN_INFO, SCpnt->device,
5364 "%s: command%s found\n", __func__,
5365 ok ? "" : " not");
5366 }
5367 return SUCCESS;
5368 }
5369
5370 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5371 {
5372 ++num_dev_resets;
5373 if (SCpnt && SCpnt->device) {
5374 struct scsi_device *sdp = SCpnt->device;
5375 struct sdebug_dev_info *devip =
5376 (struct sdebug_dev_info *)sdp->hostdata;
5377
5378 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5379 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5380 if (devip)
5381 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5382 }
5383 return SUCCESS;
5384 }
5385
5386 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5387 {
5388 struct sdebug_host_info *sdbg_host;
5389 struct sdebug_dev_info *devip;
5390 struct scsi_device *sdp;
5391 struct Scsi_Host *hp;
5392 int k = 0;
5393
5394 ++num_target_resets;
5395 if (!SCpnt)
5396 goto lie;
5397 sdp = SCpnt->device;
5398 if (!sdp)
5399 goto lie;
5400 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5401 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5402 hp = sdp->host;
5403 if (!hp)
5404 goto lie;
5405 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5406 if (sdbg_host) {
5407 list_for_each_entry(devip,
5408 &sdbg_host->dev_info_list,
5409 dev_list)
5410 if (devip->target == sdp->id) {
5411 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5412 ++k;
5413 }
5414 }
5415 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5416 sdev_printk(KERN_INFO, sdp,
5417 "%s: %d device(s) found in target\n", __func__, k);
5418 lie:
5419 return SUCCESS;
5420 }
5421
5422 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5423 {
5424 struct sdebug_host_info *sdbg_host;
5425 struct sdebug_dev_info *devip;
5426 struct scsi_device *sdp;
5427 struct Scsi_Host *hp;
5428 int k = 0;
5429
5430 ++num_bus_resets;
5431 if (!(SCpnt && SCpnt->device))
5432 goto lie;
5433 sdp = SCpnt->device;
5434 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5435 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5436 hp = sdp->host;
5437 if (hp) {
5438 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5439 if (sdbg_host) {
5440 list_for_each_entry(devip,
5441 &sdbg_host->dev_info_list,
5442 dev_list) {
5443 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5444 ++k;
5445 }
5446 }
5447 }
5448 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5449 sdev_printk(KERN_INFO, sdp,
5450 "%s: %d device(s) found in host\n", __func__, k);
5451 lie:
5452 return SUCCESS;
5453 }
5454
5455 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5456 {
5457 struct sdebug_host_info *sdbg_host;
5458 struct sdebug_dev_info *devip;
5459 int k = 0;
5460
5461 ++num_host_resets;
5462 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5463 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5464 spin_lock(&sdebug_host_list_lock);
5465 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5466 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5467 dev_list) {
5468 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5469 ++k;
5470 }
5471 }
5472 spin_unlock(&sdebug_host_list_lock);
5473 stop_all_queued();
5474 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5475 sdev_printk(KERN_INFO, SCpnt->device,
5476 "%s: %d device(s) found\n", __func__, k);
5477 return SUCCESS;
5478 }
5479
/*
 * Write an MSDOS partition table with sdebug_num_parts primary partitions
 * into the first sector of the ramdisk image at @ramp.
 * @store_size is the store size in bytes; stores under 1 MiB are left
 * unpartitioned.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* Reserve the first track for the partition table itself */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	/* Align each partition start down to a cylinder boundary */
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel ends fill loop */

	/* MBR boot signature */
	ramp[510] = 0x55;
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;
		/* CHS address of the first sector of the partition */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;
		/* CHS address of the last sector of the partition */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
		/* LBA start and length, little-endian on disk */
		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* Linux partition type */
	}
}
5532
5533 static void block_unblock_all_queues(bool block)
5534 {
5535 int j;
5536 struct sdebug_queue *sqp;
5537
5538 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5539 atomic_set(&sqp->blocked, (int)block);
5540 }
5541
5542
5543
5544
5545 static void tweak_cmnd_count(void)
5546 {
5547 int count, modulo;
5548
5549 modulo = abs(sdebug_every_nth);
5550 if (modulo < 2)
5551 return;
5552 block_unblock_all_queues(true);
5553 count = atomic_read(&sdebug_cmnd_count);
5554 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5555 block_unblock_all_queues(false);
5556 }
5557
5558 static void clear_queue_stats(void)
5559 {
5560 atomic_set(&sdebug_cmnd_count, 0);
5561 atomic_set(&sdebug_completions, 0);
5562 atomic_set(&sdebug_miss_cpus, 0);
5563 atomic_set(&sdebug_a_tsf, 0);
5564 }
5565
5566 static bool inject_on_this_cmd(void)
5567 {
5568 if (sdebug_every_nth == 0)
5569 return false;
5570 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5571 }
5572
5573 #define INCLUSIVE_TIMING_MAX_NS 1000000
5574
5575
5576
5577
5578
5579
/*
 * Schedule the response to @cmnd. With a zero delay (delta_jiff == 0, or
 * the command handler setting an immediate-response flag) the command is
 * completed in the invoking thread; otherwise completion is deferred to an
 * hrtimer, to a workqueue, or left for the block layer's poll handler when
 * the request carries REQ_POLLED.
 *
 * @devip:       target device; NULL is answered with DID_NO_CONNECT
 * @scsi_result: preset result, used when the handler leaves result == 0
 * @pfp:         per-opcode handler that builds the response; may be NULL
 * @delta_jiff:  delay in jiffies (0 -> respond in this thread)
 * @ndelay:      delay in nanoseconds, used when @delta_jiff is not positive
 *
 * Returns 0, or SCSI_MLQUEUE_HOST_BUSY when the queue is blocked or no
 * deferred-completion storage could be allocated.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	bool new_sd_dp;
	bool inject = false;
	bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
	int k, num_in_q, qdepth;
	unsigned long iflags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			/* device queue is full: answer TASK SET FULL */
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		/*
		 * Rare TASK SET FULL injection: trigger when the queue is
		 * one below full and every_nth commands have gone by.
		 */
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = true;
			scsi_result = device_qfull_result;
		}
	}

	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		/* no free command slot on this submission queue */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
				    __func__, sdebug_max_queue);
		goto respond_in_thread;
	}
	/* claim slot k and link the command to it */
	set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);

	if (!sd_dp) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (!sd_dp) {
			/* undo the slot claim made above */
			atomic_dec(&devip->num_in_q);
			clear_bit(k, sqp->in_use_bm);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
		new_sd_dp = true;
	} else {
		new_sd_dp = false;
	}

	/* Set the hostwide tag */
	if (sdebug_host_max_queue)
		sd_dp->hc_idx = get_tag(cmnd);

	if (polled)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_* response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* handler requested an immediate response */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = prandom_u32_max((u32)ns);
			} else if (sdebug_random) {
				/*
				 * ns too large for prandom_u32_max(): scale
				 * down by 2**12, randomize, scale back up.
				 */
				ns >>= 12;
				if (ns < U32_MAX)
					ns = prandom_u32_max((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay; cast below assumes it fits in 32 bits */
			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* already late: release the slot and
					 * complete in this thread */
					spin_lock_irqsave(&sqp->qc_lock, iflags);
					sqcp->a_cmnd = NULL;
					atomic_dec(&devip->num_in_q);
					clear_bit(k, sqp->in_use_bm);
					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
					if (new_sd_dp)
						kfree(sd_dp);
					/* call scsi_done() from this thread */
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (polled) {
			/* completion left to the mq_poll handler */
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			spin_lock_irqsave(&sqp->qc_lock, iflags);
			if (!sd_dp->init_poll) {
				sd_dp->init_poll = true;
				sqcp->sd_dp = sd_dp;
				sd_dp->sqa_idx = sqp - sdebug_q_arr;
				sd_dp->qc_idx = k;
			}
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		} else {
			if (!sd_dp->init_hrt) {
				sd_dp->init_hrt = true;
				sqcp->sd_dp = sd_dp;
				hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
					     HRTIMER_MODE_REL_PINNED);
				sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
				sd_dp->sqa_idx = sqp - sdebug_q_arr;
				sd_dp->qc_idx = k;
			}
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			/* schedule the invocation of scsi_done() for later */
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
	} else {	/* jdelay < 0: defer via workqueue (or poll) */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending)))
			sd_dp->aborted = true;
		if (polled) {
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			spin_lock_irqsave(&sqp->qc_lock, iflags);
			if (!sd_dp->init_poll) {
				sd_dp->init_poll = true;
				sqcp->sd_dp = sd_dp;
				sd_dp->sqa_idx = sqp - sdebug_q_arr;
				sd_dp->qc_idx = k;
			}
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		} else {
			if (!sd_dp->init_wq) {
				sd_dp->init_wq = true;
				sqcp->sd_dp = sd_dp;
				sd_dp->sqa_idx = sqp - sdebug_q_arr;
				sd_dp->qc_idx = k;
				INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
			}
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (unlikely(sd_dp->aborted)) {
			/* injected command abort: let the block layer time
			 * the request out */
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
				    scsi_cmd_to_rq(cmnd)->tag);
			blk_abort_request(scsi_cmd_to_rq(cmnd));
			atomic_set(&sdeb_inject_pending, 0);
			sd_dp->aborted = false;
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* respond to caller in the invoking thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;	/* ignore the immediate flag here */
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
5804
5805
5806
5807
5808
5809
5810
/*
 * Module parameters. Each sdebug_* variable is exposed under
 * /sys/module/scsi_debug/parameters/ with the (usually shorter) name given
 * here. Parameters with S_IRUGO only are fixed once the module is loaded;
 * those with S_IRUGO | S_IWUSR (or 0644) can also be changed afterwards.
 * Human-readable descriptions follow in the MODULE_PARM_DESC() block below.
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5881
/* Module identity reported by modinfo and sysfs. */
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

/* One-line help text per parameter, shown by 'modinfo scsi_debug'. */
MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(host_max_queue,
		 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5953
5954 #define SDEBUG_INFO_LEN 256
5955 static char sdebug_info[SDEBUG_INFO_LEN];
5956
5957 static const char *scsi_debug_info(struct Scsi_Host *shp)
5958 {
5959 int k;
5960
5961 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5962 my_name, SDEBUG_VERSION, sdebug_version_date);
5963 if (k >= (SDEBUG_INFO_LEN - 1))
5964 return sdebug_info;
5965 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5966 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5967 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5968 "statistics", (int)sdebug_statistics);
5969 return sdebug_info;
5970 }
5971
5972
5973 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5974 int length)
5975 {
5976 char arr[16];
5977 int opts;
5978 int minLen = length > 15 ? 15 : length;
5979
5980 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5981 return -EACCES;
5982 memcpy(arr, buffer, minLen);
5983 arr[minLen] = '\0';
5984 if (1 != sscanf(arr, "%d", &opts))
5985 return -EINVAL;
5986 sdebug_opts = opts;
5987 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5988 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5989 if (sdebug_every_nth != 0)
5990 tweak_cmnd_count();
5991 return length;
5992 }
5993
5994
5995
5996
/*
 * Produce the read side of /proc/scsi/scsi_debug/<host_no>: version,
 * configuration, counters, per-queue busy bits, the simulated host list
 * and the per_store xarray contents.
 */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		/* only report a queue's bitmap when some slot is in use */
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;	/* store carries the "not in use" mark */
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? " not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
6068
6069 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6070 {
6071 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6072 }
6073
6074
6075
6076 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
6077 size_t count)
6078 {
6079 int jdelay, res;
6080
6081 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
6082 res = count;
6083 if (sdebug_jdelay != jdelay) {
6084 int j, k;
6085 struct sdebug_queue *sqp;
6086
6087 block_unblock_all_queues(true);
6088 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6089 ++j, ++sqp) {
6090 k = find_first_bit(sqp->in_use_bm,
6091 sdebug_max_queue);
6092 if (k != sdebug_max_queue) {
6093 res = -EBUSY;
6094 break;
6095 }
6096 }
6097 if (res > 0) {
6098 sdebug_jdelay = jdelay;
6099 sdebug_ndelay = 0;
6100 }
6101 block_unblock_all_queues(false);
6102 }
6103 return res;
6104 }
6105 return -EINVAL;
6106 }
6107 static DRIVER_ATTR_RW(delay);
6108
6109 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6110 {
6111 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6112 }
6113
6114
6115 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
6116 size_t count)
6117 {
6118 int ndelay, res;
6119
6120 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
6121 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
6122 res = count;
6123 if (sdebug_ndelay != ndelay) {
6124 int j, k;
6125 struct sdebug_queue *sqp;
6126
6127 block_unblock_all_queues(true);
6128 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6129 ++j, ++sqp) {
6130 k = find_first_bit(sqp->in_use_bm,
6131 sdebug_max_queue);
6132 if (k != sdebug_max_queue) {
6133 res = -EBUSY;
6134 break;
6135 }
6136 }
6137 if (res > 0) {
6138 sdebug_ndelay = ndelay;
6139 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
6140 : DEF_JDELAY;
6141 }
6142 block_unblock_all_queues(false);
6143 }
6144 return res;
6145 }
6146 return -EINVAL;
6147 }
6148 static DRIVER_ATTR_RW(ndelay);
6149
6150 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6151 {
6152 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6153 }
6154
6155 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
6156 size_t count)
6157 {
6158 int opts;
6159 char work[20];
6160
6161 if (sscanf(buf, "%10s", work) == 1) {
6162 if (strncasecmp(work, "0x", 2) == 0) {
6163 if (kstrtoint(work + 2, 16, &opts) == 0)
6164 goto opts_done;
6165 } else {
6166 if (kstrtoint(work, 10, &opts) == 0)
6167 goto opts_done;
6168 }
6169 }
6170 return -EINVAL;
6171 opts_done:
6172 sdebug_opts = opts;
6173 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6174 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6175 tweak_cmnd_count();
6176 return count;
6177 }
6178 static DRIVER_ATTR_RW(opts);
6179
6180 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6181 {
6182 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6183 }
6184 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6185 size_t count)
6186 {
6187 int n;
6188
6189
6190 if (sdebug_ptype == TYPE_ZBC)
6191 return -EINVAL;
6192
6193 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6194 if (n == TYPE_ZBC)
6195 return -EINVAL;
6196 sdebug_ptype = n;
6197 return count;
6198 }
6199 return -EINVAL;
6200 }
6201 static DRIVER_ATTR_RW(ptype);
6202
6203 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6204 {
6205 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6206 }
6207 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6208 size_t count)
6209 {
6210 int n;
6211
6212 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6213 sdebug_dsense = n;
6214 return count;
6215 }
6216 return -EINVAL;
6217 }
6218 static DRIVER_ATTR_RW(dsense);
6219
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}

/*
 * Toggle fake_rw. Clearing it (n == 0) means real reads/writes are wanted,
 * so a backing store is needed: the first store is reused if one exists,
 * else a new one is allocated, and every host is pointed at it. Setting it
 * releases the backing stores via sdebug_erase_all_stores().
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		n = (n > 0);	/* normalize both values to 0 or 1 */
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning, nothing to do */

		if (want_store) {
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* reuse the first store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use that store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {
			/* NOTE(review): 'true' presumably means "apart from
			 * the first store" — confirm against the callee */
			sdebug_erase_all_stores(true);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
6267
6268 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6269 {
6270 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6271 }
6272 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6273 size_t count)
6274 {
6275 int n;
6276
6277 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6278 sdebug_no_lun_0 = n;
6279 return count;
6280 }
6281 return -EINVAL;
6282 }
6283 static DRIVER_ATTR_RW(no_lun_0);
6284
6285 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6286 {
6287 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6288 }
6289 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6290 size_t count)
6291 {
6292 int n;
6293
6294 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6295 sdebug_num_tgts = n;
6296 sdebug_max_tgts_luns();
6297 return count;
6298 }
6299 return -EINVAL;
6300 }
6301 static DRIVER_ATTR_RW(num_tgts);
6302
6303 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6304 {
6305 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6306 }
6307 static DRIVER_ATTR_RO(dev_size_mb);
6308
6309 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6310 {
6311 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6312 }
6313
6314 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6315 size_t count)
6316 {
6317 bool v;
6318
6319 if (kstrtobool(buf, &v))
6320 return -EINVAL;
6321
6322 sdebug_per_host_store = v;
6323 return count;
6324 }
6325 static DRIVER_ATTR_RW(per_host_store);
6326
6327 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6328 {
6329 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6330 }
6331 static DRIVER_ATTR_RO(num_parts);
6332
6333 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6334 {
6335 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6336 }
6337 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6338 size_t count)
6339 {
6340 int nth;
6341 char work[20];
6342
6343 if (sscanf(buf, "%10s", work) == 1) {
6344 if (strncasecmp(work, "0x", 2) == 0) {
6345 if (kstrtoint(work + 2, 16, &nth) == 0)
6346 goto every_nth_done;
6347 } else {
6348 if (kstrtoint(work, 10, &nth) == 0)
6349 goto every_nth_done;
6350 }
6351 }
6352 return -EINVAL;
6353
6354 every_nth_done:
6355 sdebug_every_nth = nth;
6356 if (nth && !sdebug_statistics) {
6357 pr_info("every_nth needs statistics=1, set it\n");
6358 sdebug_statistics = true;
6359 }
6360 tweak_cmnd_count();
6361 return count;
6362 }
6363 static DRIVER_ATTR_RW(every_nth);
6364
6365 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6366 {
6367 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6368 }
6369 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6370 size_t count)
6371 {
6372 int n;
6373 bool changed;
6374
6375 if (kstrtoint(buf, 0, &n))
6376 return -EINVAL;
6377 if (n >= 0) {
6378 if (n > (int)SAM_LUN_AM_FLAT) {
6379 pr_warn("only LUN address methods 0 and 1 are supported\n");
6380 return -EINVAL;
6381 }
6382 changed = ((int)sdebug_lun_am != n);
6383 sdebug_lun_am = n;
6384 if (changed && sdebug_scsi_level >= 5) {
6385 struct sdebug_host_info *sdhp;
6386 struct sdebug_dev_info *dp;
6387
6388 spin_lock(&sdebug_host_list_lock);
6389 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6390 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6391 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6392 }
6393 }
6394 spin_unlock(&sdebug_host_list_lock);
6395 }
6396 return count;
6397 }
6398 return -EINVAL;
6399 }
6400 static DRIVER_ATTR_RW(lun_format);
6401
6402 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6403 {
6404 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6405 }
6406 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6407 size_t count)
6408 {
6409 int n;
6410 bool changed;
6411
6412 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6413 if (n > 256) {
6414 pr_warn("max_luns can be no more than 256\n");
6415 return -EINVAL;
6416 }
6417 changed = (sdebug_max_luns != n);
6418 sdebug_max_luns = n;
6419 sdebug_max_tgts_luns();
6420 if (changed && (sdebug_scsi_level >= 5)) {
6421 struct sdebug_host_info *sdhp;
6422 struct sdebug_dev_info *dp;
6423
6424 spin_lock(&sdebug_host_list_lock);
6425 list_for_each_entry(sdhp, &sdebug_host_list,
6426 host_list) {
6427 list_for_each_entry(dp, &sdhp->dev_info_list,
6428 dev_list) {
6429 set_bit(SDEBUG_UA_LUNS_CHANGED,
6430 dp->uas_bm);
6431 }
6432 }
6433 spin_unlock(&sdebug_host_list_lock);
6434 }
6435 return count;
6436 }
6437 return -EINVAL;
6438 }
6439 static DRIVER_ATTR_RW(max_luns);
6440
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}

/*
 * Change max_queue (1..SDEBUG_CANQUEUE); rejected when host_max_queue is
 * active. Commands already occupying slots above the new limit cannot be
 * moved, so retired_max_queue records the highest such slot (plus one)
 * until they drain; 0 means nothing is outstanding above the limit.
 */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		/* k = highest in-use slot index across all submit queues */
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			/* no bit set: find_last_bit() returned the size */
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			/* busy slot(s) above the new limit: retire them */
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
6477
6478 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6479 {
6480 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6481 }
6482
6483 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6484 {
6485 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6486 }
6487
6488 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6489 {
6490 bool v;
6491
6492 if (kstrtobool(buf, &v))
6493 return -EINVAL;
6494
6495 sdebug_no_rwlock = v;
6496 return count;
6497 }
6498 static DRIVER_ATTR_RW(no_rwlock);
6499
6500
6501
6502
6503
/* host_max_queue is read-only: only settable as a module parameter */
static DRIVER_ATTR_RO(host_max_queue);
6505
6506 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6507 {
6508 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6509 }
6510 static DRIVER_ATTR_RO(no_uld);
6511
6512 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6513 {
6514 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6515 }
6516 static DRIVER_ATTR_RO(scsi_level);
6517
6518 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6519 {
6520 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6521 }
6522 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6523 size_t count)
6524 {
6525 int n;
6526 bool changed;
6527
6528
6529 if (sdeb_zbc_in_use)
6530 return -ENOTSUPP;
6531
6532 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6533 changed = (sdebug_virtual_gb != n);
6534 sdebug_virtual_gb = n;
6535 sdebug_capacity = get_sdebug_capacity();
6536 if (changed) {
6537 struct sdebug_host_info *sdhp;
6538 struct sdebug_dev_info *dp;
6539
6540 spin_lock(&sdebug_host_list_lock);
6541 list_for_each_entry(sdhp, &sdebug_host_list,
6542 host_list) {
6543 list_for_each_entry(dp, &sdhp->dev_info_list,
6544 dev_list) {
6545 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6546 dp->uas_bm);
6547 }
6548 }
6549 spin_unlock(&sdebug_host_list_lock);
6550 }
6551 return count;
6552 }
6553 return -EINVAL;
6554 }
6555 static DRIVER_ATTR_RW(virtual_gb);
6556
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* despite the attribute name, reading reports the host count */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

/*
 * Add (positive value) or remove (negative value) that many simulated
 * hosts. When per-host stores are wanted, a backing store that carries
 * the not-in-use mark is recycled before a new one is allocated.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* take the first store marked not-in-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
6600
6601 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6602 {
6603 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6604 }
6605 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6606 size_t count)
6607 {
6608 int n;
6609
6610 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6611 sdebug_vpd_use_hostno = n;
6612 return count;
6613 }
6614 return -EINVAL;
6615 }
6616 static DRIVER_ATTR_RW(vpd_use_hostno);
6617
6618 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6619 {
6620 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6621 }
6622 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6623 size_t count)
6624 {
6625 int n;
6626
6627 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6628 if (n > 0)
6629 sdebug_statistics = true;
6630 else {
6631 clear_queue_stats();
6632 sdebug_statistics = false;
6633 }
6634 return count;
6635 }
6636 return -EINVAL;
6637 }
6638 static DRIVER_ATTR_RW(statistics);
6639
/* Read-only sysfs 'sector_size' attribute: emulated logical block size. */
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);
6645
/* Read-only sysfs 'submit_queues' attribute: number of submission queues. */
static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);
6651
/* Read-only sysfs 'dix' attribute: DIX (data integrity extensions) mode. */
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);
6657
/* Read-only sysfs 'dif' attribute: T10 DIF protection type (0..3). */
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);
6663
/* Read-only sysfs 'guard' attribute: guard tag type (0 or 1). */
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);
6669
/* Read-only sysfs 'ato' attribute: application tag ownership (0 or 1). */
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
6675
/* Read-only sysfs 'map' attribute: bitmap list of provisioned (mapped)
 * blocks of the first store (index 0 only).
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	/* without logical block provisioning everything is mapped */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		/* PAGE_SIZE - 1 leaves room for the trailing newline below */
		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
6697
6698 static ssize_t random_show(struct device_driver *ddp, char *buf)
6699 {
6700 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6701 }
6702
6703 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6704 size_t count)
6705 {
6706 bool v;
6707
6708 if (kstrtobool(buf, &v))
6709 return -EINVAL;
6710
6711 sdebug_random = v;
6712 return count;
6713 }
6714 static DRIVER_ATTR_RW(random);
6715
6716 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6717 {
6718 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6719 }
6720 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6721 size_t count)
6722 {
6723 int n;
6724
6725 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6726 sdebug_removable = (n > 0);
6727 return count;
6728 }
6729 return -EINVAL;
6730 }
6731 static DRIVER_ATTR_RW(removable);
6732
6733 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6734 {
6735 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6736 }
6737
6738 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6739 size_t count)
6740 {
6741 int n;
6742
6743 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6744 sdebug_host_lock = (n > 0);
6745 return count;
6746 }
6747 return -EINVAL;
6748 }
6749 static DRIVER_ATTR_RW(host_lock);
6750
6751 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6752 {
6753 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6754 }
6755 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6756 size_t count)
6757 {
6758 int n;
6759
6760 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6761 sdebug_strict = (n > 0);
6762 return count;
6763 }
6764 return -EINVAL;
6765 }
6766 static DRIVER_ATTR_RW(strict);
6767
/* Read-only sysfs 'uuid_ctl' attribute: whether LU names use UUIDs. */
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);
6773
/* sysfs 'cdb_len' attribute: preferred CDB length, propagated to all
 * configured devices on write.
 */
static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	/* NOTE(review): no range validation here; presumably
	 * all_config_cdb_len() copes with out-of-range values — confirm.
	 */
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);
6791
/* Three parallel spellings accepted for the ZBC model; all three tables
 * are indexed by the BLK_ZONED_* enum so a match in any table yields the
 * same model value.
 */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
6809
6810 static int sdeb_zbc_model_str(const char *cp)
6811 {
6812 int res = sysfs_match_string(zbc_model_strs_a, cp);
6813
6814 if (res < 0) {
6815 res = sysfs_match_string(zbc_model_strs_b, cp);
6816 if (res < 0) {
6817 res = sysfs_match_string(zbc_model_strs_c, cp);
6818 if (res < 0)
6819 return -EINVAL;
6820 }
6821 }
6822 return res;
6823 }
6824
/* Read-only sysfs 'zbc' attribute: report the ZBC model as a string. */
static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);
6831
/* Read-only sysfs 'tur_ms_to_ready' attribute: milliseconds before a
 * TEST UNIT READY reports ready after (re)start.
 */
static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
6837
6838
6839
6840
6841
6842
6843
6844
/* All driver-level sysfs attributes exposed under the scsi_debug driver
 * directory; registered via the groups created by ATTRIBUTE_GROUPS().
 */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
6887
/* Root device acting as parent for all emulated adapter devices. */
static struct device *pseudo_primary;
6889
/*
 * Module initialization: validate module parameters, size the emulated
 * store and fake geometry, optionally create the first store, register
 * the pseudo root device / bus / driver, then add the requested hosts.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the goto chain at the bottom.
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* only power-of-two sector sizes from 512 to 4096 are supported */
	switch (sdebug_sector_size) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	/* more than 256 LUNs forces the flat addressing method */
	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/* one sdebug_queue (with its spinlock) per submission queue */
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * Resolve the ZBC model: ptype=TYPE_ZBC implies host-managed,
	 * otherwise parse the zbc module parameter string (if given).
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;	/* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1 GB ram disk */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		/* clamp unmap parameters to sane ranges */
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < hosts_to_add; k++) {
		/* the first host re-uses the store created above */
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
7138
/*
 * Module teardown: quiesce queued commands, remove all hosts, then
 * unregister driver/bus/root device and release the backing stores
 * (the reverse order of scsi_debug_init()).
 */
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	kfree(sdebug_q_arr);
}
7155
/* NOTE(review): device_initcall() (not module_init) — presumably so a
 * built-in scsi_debug initializes at device-init time; confirm intent.
 */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
7158
/* Device-core release callback for a pseudo adapter: frees the embedding
 * sdebug_host_info once the last reference is dropped.
 */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(to_sdebug_host(dev));
}
7166
7167
/*
 * Free one backing store and remove it from per_store_ap. If sip is NULL
 * it is looked up by idx; idx < 0 is a no-op so callers may pass a failed
 * allocation index directly.
 */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}
7185
7186
/*
 * Erase every store in per_store_ap, optionally sparing the first one
 * encountered (apart_from_first). The flag doubles as "first iteration
 * pending": it is cleared on the first pass, so if it is still set after
 * the loop the array was empty and only the bookkeeping is reset.
 */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;	/* skip the first store */
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
7201
7202
7203
7204
7205
/*
 * Allocate a new backing store (ramdisk plus optional DIF and LBP map
 * areas), insert it into per_store_ap and return its index, or a
 * negative errno. On partial failure the already-allocated pieces are
 * released via sdebug_erase_store().
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	/* reserve an index while holding the xarray lock (IRQ-safe) */
	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF area is separate from the data ramdisk */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}

	/* logical block provisioning: bitmap of mapped blocks */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
7284
7285 static int sdebug_add_host_helper(int per_host_idx)
7286 {
7287 int k, devs_per_host, idx;
7288 int error = -ENOMEM;
7289 struct sdebug_host_info *sdbg_host;
7290 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7291
7292 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7293 if (!sdbg_host)
7294 return -ENOMEM;
7295 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7296 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7297 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7298 sdbg_host->si_idx = idx;
7299
7300 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7301
7302 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7303 for (k = 0; k < devs_per_host; k++) {
7304 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7305 if (!sdbg_devinfo)
7306 goto clean;
7307 }
7308
7309 spin_lock(&sdebug_host_list_lock);
7310 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7311 spin_unlock(&sdebug_host_list_lock);
7312
7313 sdbg_host->dev.bus = &pseudo_lld_bus;
7314 sdbg_host->dev.parent = pseudo_primary;
7315 sdbg_host->dev.release = &sdebug_release_adapter;
7316 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7317
7318 error = device_register(&sdbg_host->dev);
7319 if (error)
7320 goto clean;
7321
7322 ++sdebug_num_hosts;
7323 return 0;
7324
7325 clean:
7326 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7327 dev_list) {
7328 list_del(&sdbg_devinfo->dev_list);
7329 kfree(sdbg_devinfo->zstate);
7330 kfree(sdbg_devinfo);
7331 }
7332 kfree(sdbg_host);
7333 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7334 return error;
7335 }
7336
7337 static int sdebug_do_add_host(bool mk_new_store)
7338 {
7339 int ph_idx = sdeb_most_recent_idx;
7340
7341 if (mk_new_store) {
7342 ph_idx = sdebug_add_store();
7343 if (ph_idx < 0)
7344 return ph_idx;
7345 }
7346 return sdebug_add_host_helper(ph_idx);
7347 }
7348
/*
 * Remove the most recently added host. Unless this is final teardown
 * (the_end), its store is marked NOT_IN_USE for later re-use — but only
 * when no other remaining host shares the same store index.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		/* take the last (most recently added) host */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* is any other host still using this store? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7388
/*
 * .change_queue_depth handler: clamp the requested depth to
 * [1, SDEBUG_CANQUEUE] and apply it. Queues are blocked around the
 * change. Returns the resulting queue depth or -ENODEV.
 */
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth > SDEBUG_CANQUEUE) {
		qdepth = SDEBUG_CANQUEUE;
		pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
			qdepth, SDEBUG_CANQUEUE);
	}
	if (qdepth < 1)
		qdepth = 1;
	if (qdepth != sdev->queue_depth)
		scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
7419
7420 static bool fake_timeout(struct scsi_cmnd *scp)
7421 {
7422 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7423 if (sdebug_every_nth < -1)
7424 sdebug_every_nth = -1;
7425 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7426 return true;
7427 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7428 scsi_medium_access_command(scp))
7429 return true;
7430 }
7431 return false;
7432 }
7433
7434
/*
 * Respond NOT READY for a stopped device. In stopped state 2 the device
 * is "in the process of becoming ready": once tur_ms_to_ready has
 * elapsed since creation it transitions to ready (returns 0); otherwise
 * a TEST UNIT READY additionally reports the remaining time (in ms) in
 * the sense information field. Returns 0 or check_condition_result.
 */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* remaining time until ready, clamped to the full window */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved by T10, convert to ms */
			do_div(diff_ns, 1000000);
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
7476
/*
 * .map_queues handler: split the submit queues between the DEFAULT and
 * POLL hardware context types (poll_queues go to HCTX_TYPE_POLL, the
 * remainder to HCTX_TYPE_DEFAULT) and assign contiguous queue offsets.
 */
static int sdebug_map_queues(struct Scsi_Host *shost)
{
	int i, qoff;

	if (shost->nr_hw_queues == 1)
		return 0;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = submit_queues - poll_queues;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = poll_queues;

		if (!map->nr_queues) {
			/* there must always be at least one default queue */
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		map->queue_offset = qoff;
		blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}

	return 0;

}
7508
/*
 * .mq_poll handler: scan the in-use bitmap of the given submit queue and
 * complete every SDEB_DEFER_POLL command whose completion time has
 * passed. The queue lock is dropped around each scsi_done() call and
 * re-acquired afterwards. Returns the number of completed commands.
 */
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	bool first;
	bool retiring = false;
	int num_entries = 0;
	unsigned int qc_idx = 0;
	unsigned long iflags;
	ktime_t kt_from_boot = ktime_get_boottime();
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	sqp = sdebug_q_arr + queue_num;

	spin_lock_irqsave(&sqp->qc_lock, iflags);

	qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
	if (qc_idx >= sdebug_max_queue)
		goto unlock;

	for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
		if (first) {
			first = false;
			/* re-check: the bit may have been cleared since the
			 * initial find_first_bit() (lock was dropped below) */
			if (!test_bit(qc_idx, sqp->in_use_bm))
				continue;
		} else {
			qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
		}
		if (qc_idx >= sdebug_max_queue)
			break;

		sqcp = &sqp->qc_arr[qc_idx];
		sd_dp = sqcp->sd_dp;
		if (unlikely(!sd_dp))
			continue;
		scp = sqcp->a_cmnd;
		if (unlikely(scp == NULL)) {
			pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
			       queue_num, qc_idx, __func__);
			break;
		}
		if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
			if (kt_from_boot < sd_dp->cmpl_ts)
				continue;	/* not yet due */

		} else		/* ignoring non REQ_POLLED requests */
			continue;
		devip = (struct sdebug_dev_info *)scp->device->hostdata;
		if (likely(devip))
			atomic_dec(&devip->num_in_q);
		else
			pr_err("devip=NULL from %s\n", __func__);
		if (unlikely(atomic_read(&retired_max_queue) > 0))
			retiring = true;

		sqcp->a_cmnd = NULL;
		if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
			pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
			       sqp, queue_num, qc_idx, __func__);
			break;
		}
		if (unlikely(retiring)) {	/* user has reduced max_queue */
			int k, retval;

			retval = atomic_read(&retired_max_queue);
			if (qc_idx >= retval) {
				pr_err("index %d too large\n", retval);
				break;
			}
			k = find_last_bit(sqp->in_use_bm, retval);
			if ((k < sdebug_max_queue) || (k == retval))
				atomic_set(&retired_max_queue, 0);
			else
				atomic_set(&retired_max_queue, k + 1);
		}
		WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
		/* drop the lock while completing the command upward */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		scsi_done(scp); /* callback to mid level */
		num_entries++;
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
			break;	/* if no more then exit without retaking spinlock */
	}

unlock:
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);

	if (num_entries > 0)
		atomic_add(num_entries, &sdeb_mq_poll_count);
	return num_entries;
}
7602
/*
 * .queuecommand handler: decode the CDB, locate the matching entry in
 * opcode_info_arr (resolving service actions for multiplexed opcodes),
 * apply unit attentions / not-ready / strict-CDB checks and error
 * injection, then hand the chosen response function and delay to
 * schedule_resp(). Returns 0 or SCSI_MLQUEUE_HOST_BUSY.
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* distinguish by service action */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {	/* no match found */
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* locate leftmost offending bit */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);	/* pending unit attention */
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;	/* skip the response function */
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds that ignore delay settings */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start/Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
7769
/*
 * Host template for the scsi_debug pseudo adapter.  Note that
 * can_queue, cmd_per_lun and dma_boundary are overwritten from the
 * module parameters in sdebug_driver_probe() before each host is
 * allocated.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info = scsi_debug_show_info,
	.write_info = scsi_debug_write_info,
	.proc_name = sdebug_proc_name,
	.name = "SCSI DEBUG",
	.info = scsi_debug_info,
	.slave_alloc = scsi_debug_slave_alloc,
	.slave_configure = scsi_debug_slave_configure,
	.slave_destroy = scsi_debug_slave_destroy,
	.ioctl = scsi_debug_ioctl,
	.queuecommand = scsi_debug_queuecommand,
	.change_queue_depth = sdebug_change_qdepth,
	.map_queues = sdebug_map_queues,
	.mq_poll = sdebug_blk_mq_poll,
	.eh_abort_handler = scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue = SDEBUG_CANQUEUE,	/* default; overridden in probe */
	.this_id = 7,
	.sg_tablesize = SG_MAX_SEGMENTS,
	.cmd_per_lun = DEF_CMD_PER_LUN,	/* default; overridden in probe */
	.max_sectors = -1U,		/* no limit */
	.max_segment_size = -1U,	/* no limit */
	.module = THIS_MODULE,
	.track_queue_depth = 1,
};
7798
7799 static int sdebug_driver_probe(struct device *dev)
7800 {
7801 int error = 0;
7802 struct sdebug_host_info *sdbg_host;
7803 struct Scsi_Host *hpnt;
7804 int hprot;
7805
7806 sdbg_host = to_sdebug_host(dev);
7807
7808 sdebug_driver_template.can_queue = sdebug_max_queue;
7809 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7810 if (!sdebug_clustering)
7811 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7812
7813 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7814 if (NULL == hpnt) {
7815 pr_err("scsi_host_alloc failed\n");
7816 error = -ENODEV;
7817 return error;
7818 }
7819 if (submit_queues > nr_cpu_ids) {
7820 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7821 my_name, submit_queues, nr_cpu_ids);
7822 submit_queues = nr_cpu_ids;
7823 }
7824
7825
7826
7827
7828 hpnt->nr_hw_queues = submit_queues;
7829 if (sdebug_host_max_queue)
7830 hpnt->host_tagset = 1;
7831
7832
7833 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7834 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7835 my_name, poll_queues, hpnt->nr_hw_queues);
7836 poll_queues = 0;
7837 }
7838
7839
7840
7841
7842
7843
7844 if (poll_queues >= submit_queues) {
7845 if (submit_queues < 3)
7846 pr_warn("%s: trim poll_queues to 1\n", my_name);
7847 else
7848 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7849 my_name, submit_queues - 1);
7850 poll_queues = 1;
7851 }
7852 if (poll_queues)
7853 hpnt->nr_maps = 3;
7854
7855 sdbg_host->shost = hpnt;
7856 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7857 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7858 hpnt->max_id = sdebug_num_tgts + 1;
7859 else
7860 hpnt->max_id = sdebug_num_tgts;
7861
7862 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7863
7864 hprot = 0;
7865
7866 switch (sdebug_dif) {
7867
7868 case T10_PI_TYPE1_PROTECTION:
7869 hprot = SHOST_DIF_TYPE1_PROTECTION;
7870 if (sdebug_dix)
7871 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7872 break;
7873
7874 case T10_PI_TYPE2_PROTECTION:
7875 hprot = SHOST_DIF_TYPE2_PROTECTION;
7876 if (sdebug_dix)
7877 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7878 break;
7879
7880 case T10_PI_TYPE3_PROTECTION:
7881 hprot = SHOST_DIF_TYPE3_PROTECTION;
7882 if (sdebug_dix)
7883 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7884 break;
7885
7886 default:
7887 if (sdebug_dix)
7888 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7889 break;
7890 }
7891
7892 scsi_host_set_prot(hpnt, hprot);
7893
7894 if (have_dif_prot || sdebug_dix)
7895 pr_info("host protection%s%s%s%s%s%s%s\n",
7896 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7897 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7898 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7899 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7900 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7901 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7902 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7903
7904 if (sdebug_guard == 1)
7905 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7906 else
7907 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7908
7909 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7910 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7911 if (sdebug_every_nth)
7912 sdebug_statistics = true;
7913 error = scsi_add_host(hpnt, &sdbg_host->dev);
7914 if (error) {
7915 pr_err("scsi_add_host failed\n");
7916 error = -ENODEV;
7917 scsi_host_put(hpnt);
7918 } else {
7919 scsi_scan_host(hpnt);
7920 }
7921
7922 return error;
7923 }
7924
7925 static void sdebug_driver_remove(struct device *dev)
7926 {
7927 struct sdebug_host_info *sdbg_host;
7928 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7929
7930 sdbg_host = to_sdebug_host(dev);
7931
7932 scsi_remove_host(sdbg_host->shost);
7933
7934 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7935 dev_list) {
7936 list_del(&sdbg_devinfo->dev_list);
7937 kfree(sdbg_devinfo->zstate);
7938 kfree(sdbg_devinfo);
7939 }
7940
7941 scsi_host_put(sdbg_host->shost);
7942 }
7943
/*
 * Bus match callback: every device on the pseudo bus matches every
 * driver (return 1 == match), so sdebug_driver_probe() runs for each
 * added adapter.
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
7949
/*
 * The "pseudo" bus that scsi_debug adapters are registered on.  All
 * devices match (pseudo_lld_bus_match), so adding a device triggers
 * sdebug_driver_probe() and removing one triggers
 * sdebug_driver_remove().
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};