// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2020 Douglas Gilbert
 *
 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */
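/*
 * Illustrative invocation (the parameter names are real module options of
 * this driver; the values are arbitrary):
 *
 *	modprobe scsi_debug dev_size_mb=256 num_tgts=1 max_luns=2
 *
 * presents one pseudo host with one target carrying two logical units
 * backed by a 256 MB ram store.
 */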
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0190"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20200710";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
#define UNALIGNED_WRITE_ASCQ 0x4
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
#define INSUFF_ZONE_ASCQ 0xe

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
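/* Example (illustrative): "modprobe scsi_debug add_host=2 num_tgts=2
 * max_luns=2" would instead yield 2 hosts x 2 targets x 2 LUNs = 8
 * devices. */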
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1	/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_HOST_LOCK 0
#define DEF_LBPWS10 0
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0	/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_TUR_MS_TO_READY 0
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB 128
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1

#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)

/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */
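/* Example (illustrative): after "modprobe scsi_debug opts=2" a read that
 * covers LBA 0x1234, e.g.
 *	dd if=/dev/sdX of=/dev/null bs=512 skip=4660 count=1
 * is answered with a MEDIUM ERROR, and the following reads of that area
 * keep failing for OPT_MEDIUM_ERR_NUM consecutive sectors. */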
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS  3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN  255
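/* Worked example: on a 64-bit build BITS_PER_LONG is 64, so
 * SDEBUG_CANQUEUE is 3 * 64 = 192 commands per submit queue. */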
/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN			1	/* Data-in command (e.g. READ) */
#define F_D_OUT			2	/* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_RL_WLUN_OK		0x10	/* allowed with REPORT LUNS W-LUN */
#define F_SKIP_UA		0x20	/* bypass UAs (e.g. INQUIRY command) */
#define F_DELAY_OVERR		0x40	/* for commands like INQUIRY */
#define F_SA_LOW		0x80	/* SA is in cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* SA is in cdb bytes 8 and 9 */
#define F_INV_OP		0x200	/* invalid opcode (not supported) */
#define F_FAKE_RW		0x400	/* bypass resp_*() when fake_rw set */
#define F_M_ACCESS		0x800	/* media access, reacts to SSU state */
#define F_SSU_DELAY		0x1000	/* SSU command delay (long-ish) */
#define F_SYNC_DELAY		0x2000	/* SYNCHRONIZE CACHE delay */

/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32

#define SDEB_XA_NOT_IN_USE XA_MARK_1
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZONE_TYPE_CNV	= 0x1,
	ZBC_ZONE_TYPE_SWR	= 0x2,
	ZBC_ZONE_TYPE_SWP	= 0x3,
};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
};

struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;
};
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;	/* 1: by SSU, 2: device start */

	/* For ZBC devices */
	enum blk_zoned_model zmodel;
	unsigned int zsize_shift;
	unsigned int nr_zones;
	unsigned int nr_conv_zones;
	unsigned int nr_imp_open;
	unsigned int nr_exp_open;
	unsigned int nr_closed;
	unsigned int max_open;
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;
};

struct sdebug_host_info {
	struct list_head host_list;
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep;	/* protection info */
	void *map_storep;	/* provisioning map */
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int hc_idx;	/* hostwide tag index */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};
static atomic_t sdebug_cmnd_count;	/* number of incoming commands */
static atomic_t sdebug_completions;	/* count of deferred completions */
static atomic_t sdebug_miss_cpus;	/* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;		/* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
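/* Example lookups into the table above: opcode_ind_arr[0x12] is
 * SDEB_I_INQUIRY and opcode_ind_arr[0x28] is SDEB_I_READ (READ(10));
 * a zero entry routes a cdb to the SDEB_I_INVALID_OPCODE handling. */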
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK .
 */
#define SDEG_RES_IMMED_MASK 0x40000000
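/*
 * Illustrative sketch (not a routine from this driver): a response
 * function that honours an IMMED bit could complete early by OR-ing
 * the flag into its result, e.g.
 *
 *	if (immed)
 *		return res | SDEG_RES_IMMED_MASK;
 */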
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
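/* Worked example: MODE SENSE(10) (0x5a) is the opcode_info_arr[] entry at
 * SDEB_I_MODE_SENSE and carries msense_iarr below as its attached array,
 * so the less common MODE SENSE(6) (0x1a) is found by scanning that
 * attached array. */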
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};

static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};

static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};

static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* REPORT ZONES */
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
		0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(verify_iarr), 0x8f, 0,
	    F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,	/* VERIFY(16) */
	    verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, F_D_OUT, 0, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */
	{ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
	    resp_pre_fetch, pre_fetch_iarr,
	    {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* PRE-FETCH (10) */

/* 30 */
	{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
	    resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
		{16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
	{ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
	    resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
		{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
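/* Example of the len_mask encoding used above: in the INQUIRY entry
 * {6, 0xe3, 0xff, ...}, len_mask[0] = 6 is the cdb length and 0xe3 is
 * the mask of bits that may legitimately be set in cdb byte 1; in strict
 * mode, other bits set there draw INVALID FIELD IN CDB. */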
static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
			  SAM_LUN_AM_FLAT = 0x1,
			  SAM_LUN_AM_LOGICAL_UNIT = 0x2,
			  SAM_LUN_AM_EXTENDED = 0x3};
static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
static int sdeb_first_idx = -1;		/* invalid index ==> none created */
static int sdeb_most_recent_idx = -1;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;
/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;

static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);

static rwlock_t *ramdisk_lck_a[2];

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;

static const int condition_met_result = SAM_STAT_CONDITION_MET;
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{
	struct sdeb_store_info *lsip = sip;

	lba = do_div(lba, sdebug_store_sectors);
	if (!sip || !sip->storep) {
		lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
	}
	return lsip->storep + lba * sdebug_sector_size;
}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}

enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;	/* SKSV (sense-key specific valid) */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the cdb */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV (bit pointer valid) */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	scsi_set_resid(scp, scsi_bufflen(scp) - act_len);

	return 0;
}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	unsigned int act_len, n;
	struct scsi_data_buffer *sdb = &scp->sdb;
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (scp->sc_data_direction != DMA_FROM_DEVICE)
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len,
		 scsi_get_resid(scp));
	n = scsi_bufflen(scp) - (off_dst + act_len);
	scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
	return 0;
}
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num,
					   arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80;	/* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */

	return sizeof(vpdb0_data);
}
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */
	if (devip->zmodel == BLK_ZONED_HA)
		arr[4] = 1 << 4;	/* zoned field = 01b */

	return 0x3c;
}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}
/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	return 0x3c;
}
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
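/* A standard INQUIRY response here is SDEBUG_LONG_INQ_SZ (96) bytes while
 * VPD pages may need up to SDEBUG_MAX_INQ_ARR_SZ (584) bytes. Illustrative
 * probe from userspace with sg3_utils:
 *	sg_inq --page=0x83 /dev/sg0
 */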
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	/* protocol specific lu */
			arr[10] = 0x82;	/* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			arr[1] = cmd[2];	/*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
					   min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
				   min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
/* See resp_iec_m_pg() for how this data is manipulated */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];	/* assume >= 18 bytes */
	bool dsense = !!(cmd[1] & 1);
	int alloc_len = cmd[4];
	int len = 18;
	int stopped_state = atomic_read(&devip->stopped);

	memset(arr, 0, sizeof(arr));
	if (stopped_state > 0) {	/* some "pollable" data [spc6r02: 5.12.2] */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = NOT_READY;
			arr[2] = LOGICAL_UNIT_NOT_READY;
			arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = NOT_READY;
			arr[7] = 0xa;			/* 18 byte sense buffer */
			arr[12] = LOGICAL_UNIT_NOT_READY;
			arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
		}
	} else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		/* Information exceptions control mode page: TEST=1, MRIE=6 */
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* Failure prediction(false) */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* Failure prediction(false) */
		}
	} else {	/* nothing to report */
		if (dsense) {
			len = 8;
			memset(arr, 0, len);
			arr[0] = 0x72;
		} else {
			memset(arr, 0, len);
			arr[0] = 0x70;
			arr[7] = 0xa;
		}
	}
	return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
}
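/*
 * Illustrative note: the two branches above build the same sense data in
 * the two SPC formats. Descriptor format (dsense, response code 0x72)
 * packs sense key, ASC and ASCQ into bytes 1-3 of an 8 byte header,
 * while fixed format (0x70) puts the sense key in byte 2 and ASC/ASCQ in
 * bytes 12-13 of an 18 byte buffer (additional length 0xa in byte 7).
 */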
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
static sector_t get_sdebug_capacity(void)
{
	static const unsigned int gibibyte = 1073741824;

	if (sdebug_virtual_gb > 0)
		return (sector_t)sdebug_virtual_gb *
			(gibibyte / sdebug_sector_size);
	else
		return sdebug_store_sectors;
}
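/*
 * Worked example (illustrative): with sdebug_virtual_gb=4 and the default
 * sdebug_sector_size of 512, the reported capacity is
 * 4 * (1073741824 / 512) = 8388608 sectors, regardless of how much
 * backing store was actually allocated.
 */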
#define SDEBUG_READCAP_ARR_SZ 8
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
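/*
 * Illustrative note: READ CAPACITY(10) returns the LBA of the *last*
 * block, hence the "capac - 1" above; a capacity of 0x100000 sectors is
 * reported as 0x000fffff. Capacities of 0xffffffff sectors or more are
 * clamped to 0xffffffff so the initiator falls back to READ CAPACITY(16).
 */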
#define SDEBUG_READCAP16_ARR_SZ 32
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
			    min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1;		/* relative port A */
	port_b = 0x2;		/* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F;	/* claim: all states are supported */
	} else {
		arr[n++] = 0x0;		/* Active/Optimized path */
		arr[n++] = 0x01;	/* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;		/* Reserved */
	arr[n++] = 0;		/* Status code */
	arr[n++] = 0;		/* Vendor unique */
	arr[n++] = 0x1;		/* One port per group */
	arr[n++] = 0;		/* Reserved */
	arr[n++] = 0;		/* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;		/* Port unavailable */
	arr[n++] = 0x08;	/* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;		/* Reserved */
	arr[n++] = 0;		/* Status code */
	arr[n++] = 0;		/* Vendor unique */
	arr[n++] = 0x1;		/* One port per group */
	arr[n++] = 0;		/* Reserved */
	arr[n++] = 0;		/* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min_t(int, alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
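/*
 * Illustrative note: with RCTD=0 each "all commands" descriptor above is
 * bump=8 bytes (opcode, service action, flags and CDB length); with
 * RCTD=1 bump=20, because a 12 byte command timeouts descriptor (whose
 * remaining length, 0xa, is written at offset+8) follows each entry.
 */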
static int resp_rsup_tmfs(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	bool repd;
	u32 alloc_len, len;
	u8 arr[16];
	u8 *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	repd = !!(cmd[2] & 0x80);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
	arr[1] = 0x1;		/* ITNRS */
	if (repd) {
		arr[3] = 0x8;
		len = 16;
	} else
		len = 4;

	len = (len < alloc_len) ? len : alloc_len;
	return fill_from_dev_buffer(scp, arr, len);
}
/* <<Following mode page info copied from ST318451LW>> */

static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{	/* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	put_unaligned_be16(sdebug_sectors_per, p + 10);
	put_unaligned_be16(sdebug_sector_size, p + 12);
	if (sdebug_removable)
		p[20] |= 0x20; /* should agree with INQUIRY */
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
}
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};

static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{	/* Caching page for mode_sense */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
					0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};

static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Control mode page for mode_sense */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				       0, 0, 0x2, 0x4b};

	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}

#define SDEBUG_MAX_MSENSE_SZ 256
static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = (devip->zmodel != BLK_ZONED_NONE);
	if ((is_disk || is_zbc) && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk || is_zbc) {
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
		if (sdebug_wp)
			dev_spec |= 0x80;
	} else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}
	bad_pcode = false;

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:	/* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk || is_zbc) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			} else if (is_zbc) {
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						  target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
#define SDEBUG_MAX_MSELECT_SZ 512

static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;
	sp = cmd[1] & 0x1;
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:	/* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:	/* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:	/* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
static int resp_temp_l_pg(unsigned char *arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
				     };

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
static int resp_ie_l_pg(unsigned char *arr)
{
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
				   };

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		arr[4] = THRESHOLD_EXCEEDED;
		arr[5] = 0xff;
	}
	return sizeof(ie_l_pg);
}
#define SDEBUG_MAX_LSENSE_SZ 512

static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode, alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;
	sp = cmd[1] & 0x1;
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
		    min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
}
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
	return devip->nr_zones != 0;
}
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	return &devip->zstate[lba >> devip->zsize_shift];
}
static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZONE_TYPE_CNV;
}
static void zbc_close_zone(struct sdebug_dev_info *devip,
			   struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
		return;

	if (zc == ZC2_IMPLICIT_OPEN)
		devip->nr_imp_open--;
	else
		devip->nr_exp_open--;

	if (zsp->z_wp == zsp->z_start) {
		zsp->z_cond = ZC1_EMPTY;
	} else {
		zsp->z_cond = ZC4_CLOSED;
		devip->nr_closed++;
	}
}
static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
			zbc_close_zone(devip, zsp);
			return;
		}
	}
}
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
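/*
 * Illustrative summary of the zone condition transitions driven by
 * zbc_open_zone() and zbc_close_zone() above (conventional zones are
 * ignored by both):
 *
 *	EMPTY/CLOSED  --implicit open (write)--> IMPLICIT OPEN
 *	EMPTY/CLOSED  --explicit open---------> EXPLICIT OPEN
 *	IMPLICIT OPEN --explicit open---------> EXPLICIT OPEN (closed first)
 *	OPEN          --close, wp == start----> EMPTY
 *	OPEN          --close, wp > start-----> CLOSED (nr_closed++)
 *
 * When max_open would be exceeded, one implicitly open zone is closed
 * first via zbc_close_imp_open_zone().
 */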
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (zbc_zone_is_conv(zsp))
		return;

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;
		return;
	}

	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			n = num;
			zsp->z_wp = end;
		} else {
			n = num;
		}
		if (zsp->z_wp >= zend)
			zsp->z_cond = ZC5_FULL;

		num -= n;
		lba += n;
		if (num) {
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		if (devip->zmodel == BLK_ZONED_HA)
			return 0;
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp_end != zsp &&
		    zbc_zone_is_conv(zsp) &&
		    !zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;

	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	if (write && unlikely(sdebug_wp)) {
		mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
		return check_condition_result;
	}
	if (sdebug_dev_is_zoned(devip))
		return check_zbc_access_params(scp, lba, num, write);

	return 0;
}
/*
 * Note: if BUG_ON() fires it usually indicates a problem with the parser
 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
 * that access any of the "stores" in struct sdeb_store_info should call this
 * function with bug_if_fake_rw set to true.
 */
static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
						bool bug_if_fake_rw)
{
	if (sdebug_fake_rw) {
		BUG_ON(bug_if_fake_rw);	/* See note above */
		return NULL;
	}
	return xa_load(per_store_ap, devip->sdbg_host->si_idx);
}
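/*
 * Illustrative usage: callers cope with the NULL return (fake_rw set, no
 * backing store) by falling back to the global fake-rw lock, e.g.:
 *
 *	struct sdeb_store_info *sip = devip2sip(devip, true);
 *	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
 *
 * which is the pattern resp_read_dt0() below follows.
 */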
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
/* Returns number of bytes copied or -1 if error. */
static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
{
	struct scsi_data_buffer *sdb = &scp->sdb;

	if (!sdb->length)
		return 0;
	if (scp->sc_data_direction != DMA_TO_DEVICE)
		return -1;
	return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
			      num * sdebug_sector_size, 0, true);
}
/* If sip->storep+lba compares equal to arr(num), then copy top half of
 * arr into sip->storep+lba and return true. If comparison fails then
 * return false. */
static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
			      const u8 *arr, bool compare_only)
{
	bool res;
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;
	u8 *fsp = sip->storep;

	block = do_div(lba, store_blks);
	if (block + num > store_blks)
		rest = block + num - store_blks;

	res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (!res)
		return res;
	if (rest)
		res = memcmp(fsp, arr + ((num - rest) * lb_size),
			     rest * lb_size) == 0;
	if (!res)
		return res;
	if (compare_only)
		return true;
	arr += num * lb_size;
	memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
	return res;
}
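/*
 * Illustrative note: the caller hands comp_write_worker() a buffer of
 * 2 * num blocks: the first num blocks hold the verify data that is
 * memcmp()'d against the store, and the "top half" (arr + num * lb_size)
 * holds the data written back on a successful compare. See the
 * dnum = 2 * num allocation in resp_comp_write() below.
 */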
static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (sdebug_guard)
		csum = (__force __be16)ip_compute_csum(buf, len);
	else
		csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
				 ei_lba);
		if (ret)
			return ret;
	}

	dif_copy_prot(scp, start_sec, sectors, true);

	return 0;
}
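/*
 * Illustrative note: an application tag of 0xffff is the T10 PI "escape"
 * value, so the loop above skips verification for such tuples instead of
 * failing the read.
 */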
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock(macc_lckp);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock(macc_lckp);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	read_unlock(macc_lckp);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
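/*
 * Illustrative note on the READ(6) decode above: the 21 bit LBA is packed
 * big-endian across CDB bytes 1-3, so a CDB of 08 01 02 03 00 00 yields
 * lba = 0x010203 and, because byte 4 is 0, num = 256 blocks.
 */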
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;
	char b[128];

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0; i < len; i += 16) {
		b[0] = '\0';
		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i+j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i+j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i+j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);

	return 0;

out:
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}

static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
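/*
 * Worked example (illustrative): with sdebug_unmap_granularity=4 and
 * sdebug_unmap_alignment=1, lba_to_map_index() shifts LBAs by 3 before
 * dividing, so LBA 0 -> index 0, LBAs 1-4 -> index 1, LBAs 5-8 ->
 * index 2. map_index_to_lba() inverts this: index 1 maps back to LBA 1,
 * the first LBA of that granule.
 */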
static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, sip->map_storep);

	if (mapped)
		next = find_next_zero_bit(sip->map_storep, map_size, index);
	else
		next = find_next_bit(sip->map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}
*sip
, sector_t lba
,
3369 sector_t end
= lba
+ len
;
3372 unsigned long index
= lba_to_map_index(lba
);
3374 if (index
< map_size
)
3375 set_bit(index
, sip
->map_storep
);
3377 lba
= map_index_to_lba(index
+ 1);
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;
	u32 ei_lba;
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *cmd = scp->cmnd;

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	write_lock(macc_lckp);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock(macc_lckp);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	write_unlock(macc_lckp);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *lrdp = NULL;
	u8 *up;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			    wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;	/* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock(macc_lckp);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock(macc_lckp);
err_out:
	kfree(lrdp);
	return ret;
}
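/*
 * Illustrative note: for WRITE SCATTERED the data-out buffer starts with
 * lbdof logical blocks holding the parameter list header plus the 32 byte
 * LBA range descriptors, followed by the write data itself; hence sg_off
 * is initialised to lbdof_blen above and advanced by num * lb_size after
 * each descriptor is serviced.
 */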
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fs1p;
	u8 *fsp;

	write_lock(macc_lckp);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		write_unlock(macc_lckp);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		write_unlock(&sip->macc_lck);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	write_unlock(macc_lckp);

	return 0;
}
static int resp_write_same_10(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u32 lba;
	u16 num;
	u32 ei_lba = 0;
	bool unmap = false;

	if (cmd[1] & 0x8) {
		if (sdebug_lbpws10 == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	lba = get_unaligned_be32(cmd + 2);
	num = get_unaligned_be16(cmd + 7);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
}
static int resp_write_same_16(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba = 0;
	bool unmap = false;
	bool ndob = false;

	if (cmd[1] & 0x8) {	/* UNMAP */
		if (sdebug_lbpws == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
		ndob = true;
	lba = get_unaligned_be64(cmd + 2);
	num = get_unaligned_be32(cmd + 10);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
}
/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; for the future. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	write_unlock(macc_lckp);
	kfree(arr);
	return retval;
}
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};

static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	unsigned int i, payload_len, descriptors;
	int ret;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say it's done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	write_lock(macc_lckp);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto out;

		unmap_region(sip, lba, num);
	}

	ret = 0;

out:
	write_unlock(macc_lckp);
	kfree(buf);

	return ret;
}
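
/*
 * Usage sketch (assumes sg3_utils; only reachable when the module is
 * loaded with logical block provisioning enabled, e.g. lbpu=1):
 *   sg_unmap --lba=0x2000 --num=256 /dev/sg3
 */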
#define SDEBUG_GET_LBA_STATUS_LEN 32

static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 alloc_len, mapped, num;
	int ret;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(scp, lba, 1, false);
	if (ret)
		return ret;

	if (scsi_debug_lbp()) {
		struct sdeb_store_info *sip = devip2sip(devip, true);

		mapped = map_state(sip, lba, &num);
	} else {
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
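
/*
 * Usage sketch (assumes sg3_utils): after the UNMAP above,
 *   sg_get_lba_status --lba=0x2000 /dev/sg3
 * should report the range as deallocated (prov_stat=1).
 */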
static int resp_sync_cache(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u32 num_blocks;
	u8 *cmd = scp->cmnd;

	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		num_blocks = get_unaligned_be16(cmd + 7);
	} else {				/* SYNCHRONIZE_CACHE(16) */
		lba = get_unaligned_be64(cmd + 2);
		num_blocks = get_unaligned_be32(cmd + 10);
	}
	if (lba + num_blocks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!write_since_sync || (cmd[1] & 0x2))
		res = SDEG_RES_IMMED_MASK;
	else		/* delay if write_since_sync and IMMED clear */
		write_since_sync = false;
	return res;
}
/*
 * Assuming the LBA+num_blocks is not out-of-range, this function will return
 * CONDITION MET if the specified blocks will/have fitted in the cache, and
 * a GOOD status otherwise. Model a disk with a big cache and yield
 * CONDITION MET. Actually tries to bring the range in main memory into the
 * cache associated with the CPU(s).
 */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;

	/* Try to bring the PRE-FETCH range into CPU's cache */
	read_lock(macc_lckp);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	read_unlock(macc_lckp);
fini:
	if (cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	return res | condition_met_result;
}
#define RL_BUCKET_ELEMS 8

/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	rwlock_t *macc_lckp = &sip->macc_lck;

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	read_lock(macc_lckp);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	read_unlock(macc_lckp);
	kfree(arr);
	return ret;
}
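
/*
 * Usage sketch (assumes sg3_utils): a medium verification with BYTCHK=0,
 * which this driver simply accepts, is
 *   sg_verify --lba=0x100 --count=8 /dev/sg3
 * The byte-compare paths (BYTCHK=1 or 3) need a data-out buffer; see the
 * sg_verify man page for the relevant options.
 */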
#define RZONES_DESC_HD 64

/* Report zones depending on start LBA and reporting options */
static int resp_report_zones(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	unsigned int i, max_zones, rep_max_zones, nrz = 0;
	int ret = 0;
	u32 alloc_len, rep_opts, rep_len;
	bool partial;
	u64 lba, zs_lba;
	u8 *arr = NULL, *desc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	zs_lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);
	rep_opts = cmd[14] & 0x3f;
	partial = cmd[14] & 0x80;

	if (zs_lba >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}

	max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
			    max_zones);

	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	read_lock(macc_lckp);

	desc = arr + 64;
	for (i = 0; i < max_zones; i++) {
		lba = zs_lba + devip->zsize * i;
		if (lba > sdebug_capacity)
			break;
		zsp = zbc_zone(devip, lba);
		switch (rep_opts) {
		case 0x00:
			/* All zones */
			break;
		case 0x01:
			/* Empty zones */
			if (zsp->z_cond != ZC1_EMPTY)
				continue;
			break;
		case 0x02:
			/* Implicit open zones */
			if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
				continue;
			break;
		case 0x03:
			/* Explicit open zones */
			if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
				continue;
			break;
		case 0x04:
			/* Closed zones */
			if (zsp->z_cond != ZC4_CLOSED)
				continue;
			break;
		case 0x05:
			/* Full zones */
			if (zsp->z_cond != ZC5_FULL)
				continue;
			break;
		case 0x06:
		case 0x07:
		case 0x10:
			/*
			 * Read-only, offline, reset WP recommended are
			 * not emulated: no zones to report;
			 */
			continue;
		case 0x11:
			/* non-seq-resource set */
			if (!zsp->z_non_seq_resource)
				continue;
			break;
		case 0x3f:
			/* Not write pointer (conventional) zones */
			if (!zbc_zone_is_conv(zsp))
				continue;
			break;
		default:
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			ret = check_condition_result;
			goto fini;
		}

		if (nrz < rep_max_zones) {
			/* Fill zone descriptor */
			desc[0] = zsp->z_type;
			desc[1] = zsp->z_cond << 4;
			if (zsp->z_non_seq_resource)
				desc[1] |= 1 << 1;
			put_unaligned_be64((u64)zsp->z_size, desc + 8);
			put_unaligned_be64((u64)zsp->z_start, desc + 16);
			put_unaligned_be64((u64)zsp->z_wp, desc + 24);
			desc += 64;
		}

		if (partial && nrz >= rep_max_zones)
			break;

		nrz++;
	}

	/* Report header */
	put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
	put_unaligned_be64(sdebug_capacity - 1, arr + 8);

	rep_len = (unsigned long)desc - (unsigned long)arr;
	ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));

fini:
	read_unlock(macc_lckp);
	kfree(arr);
	return ret;
}
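
/*
 * Usage sketch: with a zoned model configured, e.g.
 *   modprobe scsi_debug zbc=managed zone_size_mb=16
 * the descriptors built above can be fetched with sg3_utils' sg_rep_zones
 * on the /dev/sg node, or summarized by the block layer via
 *   cat /sys/block/<disk>/queue/zoned
 */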
/* Logic transplanted from tcmu-runner, file_zbc.c */
static void zbc_open_all(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp = &devip->zstate[0];
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++, zsp++) {
		if (zsp->z_cond == ZC4_CLOSED)
			zbc_open_zone(devip, &devip->zstate[i], true);
	}
}
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	enum sdebug_z_cond zc;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		/* Check if all closed zones can be open */
		if (devip->max_open &&
		    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			res = check_condition_result;
			goto fini;
		}
		/* Open all closed zones */
		zbc_open_all(devip);
		goto fini;
	}

	/* Open the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zc = zsp->z_cond;
	if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
		goto fini;

	if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
		mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
				INSUFF_ZONE_ASCQ);
		res = check_condition_result;
		goto fini;
	}

	zbc_open_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
static void zbc_close_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_close_zone(devip, &devip->zstate[i]);
}
static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	struct sdeb_zone_state *zsp;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_close_all(devip);
		goto fini;
	}

	/* Close specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_close_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}
static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{
	enum sdebug_z_cond zc = zsp->z_cond;

	if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
	    zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
		if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
			zbc_close_zone(devip, zsp);
		if (zsp->z_cond == ZC4_CLOSED)
			devip->nr_closed--;
		zsp->z_wp = zsp->z_start + zsp->z_size;
		zsp->z_cond = ZC5_FULL;
	}
}
static void zbc_finish_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_finish_zone(devip, &devip->zstate[i], false);
}
static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_finish_all(devip);
		goto fini;
	}

	/* Finish the specified zone */
	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_finish_zone(devip, zsp, true);
fini:
	write_unlock(macc_lckp);
	return res;
}
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{
	enum sdebug_z_cond zc;

	if (zbc_zone_is_conv(zsp))
		return;

	zc = zsp->z_cond;
	if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
		zbc_close_zone(devip, zsp);

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;

	zsp->z_non_seq_resource = false;
	zsp->z_wp = zsp->z_start;
	zsp->z_cond = ZC1_EMPTY;
}
static void zbc_rwp_all(struct sdebug_dev_info *devip)
{
	unsigned int i;

	for (i = 0; i < devip->nr_zones; i++)
		zbc_rwp_zone(devip, &devip->zstate[i]);
}
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	int res = 0;
	u64 z_id;
	u8 *cmd = scp->cmnd;
	bool all = cmd[14] & 0x01;
	struct sdeb_store_info *sip = devip2sip(devip, false);
	rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;

	if (!sdebug_dev_is_zoned(devip)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}

	write_lock(macc_lckp);

	if (all) {
		zbc_rwp_all(devip);
		goto fini;
	}

	z_id = get_unaligned_be64(cmd + 2);
	if (z_id >= sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		res = check_condition_result;
		goto fini;
	}

	zsp = zbc_zone(devip, z_id);
	if (z_id != zsp->z_start) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}
	if (zbc_zone_is_conv(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		res = check_condition_result;
		goto fini;
	}

	zbc_rwp_zone(devip, zsp);
fini:
	write_unlock(macc_lckp);
	return res;
}
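
/*
 * Usage sketch (hedged; exact option spellings vary between sg3_utils
 * versions, check the local man pages): the four ZBC OUT handlers above
 * can be driven with tools such as sg_zone (open/close/finish) and
 * sg_reset_wp, each taking a zone start LBA, plus an "all" variant that
 * lands in the zbc_*_all() helpers.
 */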
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
	u16 hwq;
	u32 tag = blk_mq_unique_tag(cmnd->request);

	hwq = blk_mq_unique_tag_to_hwq(tag);

	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
	if (WARN_ON_ONCE(hwq >= submit_queues))
		hwq = 0;

	return sdebug_q_arr + hwq;
}
static u32 get_tag(struct scsi_cmnd *cmnd)
{
	return blk_mq_unique_tag(cmnd->request);
}
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
/* When high resolution timer goes off this function is called. */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
						  hrt);
	sdebug_q_cmd_complete(sd_dp);
	return HRTIMER_NORESTART;
}
/* When work queue schedules work, it calls this function. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}

static bool got_shared_uuid;
static uuid_t shared_uuid;
static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
	struct sdeb_zone_state *zsp;
	sector_t capacity = get_sdebug_capacity();
	sector_t zstart = 0;
	unsigned int i;

	/*
	 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
	 * a zone size allowing for at least 4 zones on the device. Otherwise,
	 * use the specified zone size checking that at least 2 zones can be
	 * created for the device.
	 */
	if (!sdeb_zbc_zone_size_mb) {
		devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
			>> ilog2(sdebug_sector_size);
		while (capacity < devip->zsize << 2 && devip->zsize >= 2)
			devip->zsize >>= 1;
		if (devip->zsize < 2) {
			pr_err("Device capacity too small\n");
			return -EINVAL;
		}
	} else {
		if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
			pr_err("Zone size is not a power of 2\n");
			return -EINVAL;
		}
		devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
			>> ilog2(sdebug_sector_size);
		if (devip->zsize >= capacity) {
			pr_err("Zone size too large for device capacity\n");
			return -EINVAL;
		}
	}

	devip->zsize_shift = ilog2(devip->zsize);
	devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;

	if (sdeb_zbc_nr_conv >= devip->nr_zones) {
		pr_err("Number of conventional zones too large\n");
		return -EINVAL;
	}
	devip->nr_conv_zones = sdeb_zbc_nr_conv;

	if (devip->zmodel == BLK_ZONED_HM) {
		/* zbc_max_open_zones can be 0, meaning "not reported" */
		if (sdeb_zbc_max_open >= devip->nr_zones - 1)
			devip->max_open = (devip->nr_zones - 1) / 2;
		else
			devip->max_open = sdeb_zbc_max_open;
	}

	devip->zstate = kcalloc(devip->nr_zones,
				sizeof(struct sdeb_zone_state), GFP_KERNEL);
	if (!devip->zstate)
		return -ENOMEM;

	for (i = 0; i < devip->nr_zones; i++) {
		zsp = &devip->zstate[i];

		zsp->z_start = zstart;

		if (i < devip->nr_conv_zones) {
			zsp->z_type = ZBC_ZONE_TYPE_CNV;
			zsp->z_cond = ZBC_NOT_WRITE_POINTER;
			zsp->z_wp = (sector_t)-1;
		} else {
			if (devip->zmodel == BLK_ZONED_HM)
				zsp->z_type = ZBC_ZONE_TYPE_SWR;
			else
				zsp->z_type = ZBC_ZONE_TYPE_SWP;
			zsp->z_cond = ZC1_EMPTY;
			zsp->z_wp = zsp->z_start;
		}

		if (zsp->z_start + devip->zsize < capacity)
			zsp->z_size = devip->zsize;
		else
			zsp->z_size = capacity - zsp->z_start;

		zstart += zsp->z_size;
	}

	return 0;
}
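
/*
 * Worked example (not from the original source): with 512 byte logical
 * blocks and dev_size_mb=128, capacity is 262144 sectors. Loading with
 * zone_size_mb=16 gives devip->zsize = 32768 sectors, zsize_shift = 15 and
 * nr_zones = (262144 + 32767) >> 15 = 8, with every zone full sized since
 * the capacity divides evenly.
 */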
static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		if (sdebug_uuid_ctl == 1)
			uuid_gen(&devip->lu_name);
		else if (sdebug_uuid_ctl == 2) {
			if (got_shared_uuid)
				devip->lu_name = shared_uuid;
			else {
				uuid_gen(&shared_uuid);
				got_shared_uuid = true;
				devip->lu_name = shared_uuid;
			}
		}
		devip->sdbg_host = sdbg_host;
		if (sdeb_zbc_in_use) {
			devip->zmodel = sdeb_zbc_model;
			if (sdebug_device_create_zones(devip)) {
				kfree(devip);
				return NULL;
			}
		} else {
			devip->zmodel = BLK_ZONED_NONE;
		}
		devip->sdbg_host = sdbg_host;
		devip->create_ts = ktime_get_boottime();
		atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}
static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *open_devip = NULL;
	struct sdebug_dev_info *devip;

	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
	if (!sdbg_host) {
		pr_err("Host info NULL\n");
		return NULL;
	}

	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if ((devip->used) && (devip->channel == sdev->channel) &&
		    (devip->target == sdev->id) &&
		    (devip->lun == sdev->lun))
			return devip;
		else {
			if ((!devip->used) && (!open_devip))
				open_devip = devip;
		}
	}
	if (!open_devip) { /* try and make a new one */
		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
		if (!open_devip) {
			pr_err("out of memory at line %d\n", __LINE__);
			return NULL;
		}
	}

	open_devip->channel = sdev->channel;
	open_devip->target = sdev->id;
	open_devip->lun = sdev->lun;
	open_devip->sdbg_host = sdbg_host;
	atomic_set(&open_devip->num_in_q, 0);
	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
	open_devip->used = true;
	return open_devip;
}
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	return 0;
}
static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_configure <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	sdp->hostdata = devip;
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	config_cdb_len(sdp);
	return 0;
}
static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_destroy <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (devip) {
		/* make this slot available for re-use */
		devip->used = false;
		sdp->hostdata = NULL;
	}
}
static void stop_qc_helper(struct sdebug_defer *sd_dp,
			   enum sdeb_defer_type defer_t)
{
	if (!sd_dp)
		return;
	if (defer_t == SDEB_DEFER_HRT)
		hrtimer_cancel(&sd_dp->hrt);
	else if (defer_t == SDEB_DEFER_WQ)
		cancel_work_sync(&sd_dp->ew.work);
}
/* If @cmnd found deletes its timer or work queue and returns true; else
   returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
						cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
/* Free queued command memory on heap */
static void free_all_queued(void)
{
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			sqcp = &sqp->qc_arr[k];
			kfree(sqcp->sd_dp);
			sqcp->sd_dp = NULL;
		}
	}
}
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
	bool ok;

	++num_aborts;
	ok = stop_queued_cmnd(SCpnt);
	if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: command%s found\n", __func__,
			    ok ? "" : " not");
	return SUCCESS;
}
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
	++num_dev_resets;
	if (SCpnt && SCpnt->device) {
		struct scsi_device *sdp = SCpnt->device;
		struct sdebug_dev_info *devip =
				(struct sdebug_dev_info *)sdp->hostdata;

		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
		if (devip)
			set_bit(SDEBUG_UA_POR, devip->uas_bm);
	}
	return SUCCESS;
}
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	struct scsi_device *sdp;
	struct Scsi_Host *hp;
	int k = 0;

	++num_target_resets;
	if (!SCpnt)
		goto lie;
	sdp = SCpnt->device;
	if (!sdp)
		goto lie;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
	hp = sdp->host;
	if (!hp)
		goto lie;
	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
	if (sdbg_host) {
		list_for_each_entry(devip,
				    &sdbg_host->dev_info_list,
				    dev_list)
			if (devip->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
				++k;
			}
	}
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in target\n", __func__, k);
lie:
	return SUCCESS;
}
static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	struct scsi_device *sdp;
	struct Scsi_Host *hp;
	int k = 0;

	++num_bus_resets;
	if (!(SCpnt && SCpnt->device))
		goto lie;
	sdp = SCpnt->device;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
	hp = sdp->host;
	if (hp) {
		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
		if (sdbg_host) {
			list_for_each_entry(devip,
					    &sdbg_host->dev_info_list,
					    dev_list) {
				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
				++k;
			}
		}
	}
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in host\n", __func__, k);
lie:
	return SUCCESS;
}
static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	int k = 0;

	++num_host_resets;
	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(devip, &sdbg_host->dev_info_list,
				    dev_list) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			++k;
		}
	}
	spin_unlock(&sdebug_host_list_lock);
	stop_all_queued();
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: %d device(s) found\n", __func__, k);
	return SUCCESS;
}
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	for (k = 1; k < sdebug_num_parts; ++k) {
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
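
/*
 * Usage sketch: partitions are only built when the ram store is at least
 * 1 MiB and num_parts is positive, e.g.
 *   modprobe scsi_debug dev_size_mb=64 num_parts=2
 * which yields a disk whose MBR carries two type 0x83 entries.
 */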
static void block_unblock_all_queues(bool block)
{
	int j;
	struct sdebug_queue *sqp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
		atomic_set(&sqp->blocked, (int)block);
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
 * commands will be processed normally before triggers occur.
 */
static void tweak_cmnd_count(void)
{
	int count, modulo;

	modulo = abs(sdebug_every_nth);
	if (modulo < 2)
		return;
	block_unblock_all_queues(true);
	count = atomic_read(&sdebug_cmnd_count);
	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
	block_unblock_all_queues(false);
}
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
static bool inject_on_this_cmd(void)
{
	if (sdebug_every_nth == 0)
		return false;
	return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
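
/*
 * Example (illustrative): with every_nth=100, inject_on_this_cmd() is true
 * once per hundred commands, so an option such as opts=4 ("timeout" per
 * the parameter descriptions below) drops roughly one command in a
 * hundred.
 */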
#define INCLUSIVE_TIMING_MAX_NS 1000000		/* 1 millisecond */

/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	bool new_sd_dp;
	bool inject = false;
	int k, num_in_q, qdepth;
	unsigned long iflags;
	u64 ns_from_boot = 0;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;

	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = true;
			scsi_result = device_qfull_result;
		}
	}

	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ? "status: TASK SET FULL" :
						   "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (!sd_dp) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (!sd_dp) {
			atomic_dec(&devip->num_in_q);
			clear_bit(k, sqp->in_use_bm);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
		new_sd_dp = true;
	} else {
		new_sd_dp = false;
	}

	/* Set the hostwide tag */
	if (sdebug_host_max_queue)
		sd_dp->hc_idx = get_tag(cmnd);

	if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = prandom_u32_max((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = prandom_u32_max((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					spin_lock_irqsave(&sqp->qc_lock, iflags);
					sqcp->a_cmnd = NULL;
					atomic_dec(&devip->num_in_q);
					clear_bit(k, sqp->in_use_bm);
					spin_unlock_irqrestore(&sqp->qc_lock, iflags);
					if (new_sd_dp)
						kfree(sd_dp);
					/* call scsi_done() from this thread */
					cmnd->scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (!sd_dp->init_hrt) {
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		/* schedule the invocation of scsi_done() for a later time */
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (!sd_dp->init_wq) {
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending)))
			sd_dp->aborted = true;
		schedule_work(&sd_dp->ew.work);
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
				    cmnd->request->tag);
			blk_abort_request(cmnd->request);
			atomic_set(&sdeb_inject_pending, 0);
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
		     scsi_result == device_qfull_result))
		sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""),
			    "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
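
/*
 * Usage note (summarizing the deferral policy above): delay=0 answers in
 * the caller's thread, delay>0 schedules the hrtimer path with jiffy
 * granularity, ndelay>0 does the same in nanoseconds, and a negative
 * delay (e.g. "modprobe scsi_debug delay=-1") takes the work queue path.
 */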
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5635 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5636 MODULE_DESCRIPTION("SCSI debug adapter driver");
5637 MODULE_LICENSE("GPL");
5638 MODULE_VERSION(SDEBUG_VERSION
);
5640 MODULE_PARM_DESC(add_host
, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5641 MODULE_PARM_DESC(ato
, "application tag ownership: 0=disk 1=host (def=1)");
5642 MODULE_PARM_DESC(cdb_len
, "suggest CDB lengths to drivers (def=10)");
5643 MODULE_PARM_DESC(clustering
, "when set enables larger transfers (def=0)");
5644 MODULE_PARM_DESC(delay
, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5645 MODULE_PARM_DESC(dev_size_mb
, "size in MiB of ram shared by devs(def=8)");
5646 MODULE_PARM_DESC(dif
, "data integrity field type: 0-3 (def=0)");
5647 MODULE_PARM_DESC(dix
, "data integrity extensions mask (def=0)");
5648 MODULE_PARM_DESC(dsense
, "use descriptor sense format(def=0 -> fixed)");
5649 MODULE_PARM_DESC(every_nth
, "timeout every nth command(def=0)");
5650 MODULE_PARM_DESC(fake_rw
, "fake reads/writes instead of copying (def=0)");
5651 MODULE_PARM_DESC(guard
, "protection checksum: 0=crc, 1=ip (def=0)");
5652 MODULE_PARM_DESC(host_lock
, "host_lock is ignored (def=0)");
5653 MODULE_PARM_DESC(host_max_queue
,
5654 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5655 MODULE_PARM_DESC(inq_product
, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5656 MODULE_PARM_DESC(inq_rev
, "SCSI INQUIRY revision string (def=\""
5657 SDEBUG_VERSION
"\")");
5658 MODULE_PARM_DESC(inq_vendor
, "SCSI INQUIRY vendor string (def=\"Linux\")");
5659 MODULE_PARM_DESC(lbprz
,
5660 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5661 MODULE_PARM_DESC(lbpu
, "enable LBP, support UNMAP command (def=0)");
5662 MODULE_PARM_DESC(lbpws
, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5663 MODULE_PARM_DESC(lbpws10
, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5664 MODULE_PARM_DESC(lowest_aligned
, "lowest aligned lba (def=0)");
5665 MODULE_PARM_DESC(max_luns
, "number of LUNs per target to simulate(def=1)");
5666 MODULE_PARM_DESC(lun_format
, "LUN format: 0->peripheral (def); 1 --> flat address method");
5667 MODULE_PARM_DESC(max_queue
, "max number of queued commands (1 to max(def))");
5668 MODULE_PARM_DESC(medium_error_count
, "count of sectors to return follow on MEDIUM error");
5669 MODULE_PARM_DESC(medium_error_start
, "starting sector number to return MEDIUM error");
5670 MODULE_PARM_DESC(ndelay
, "response delay in nanoseconds (def=0 -> ignore)");
5671 MODULE_PARM_DESC(no_lun_0
, "no LU number 0 (def=0 -> have lun 0)");
5672 MODULE_PARM_DESC(no_uld
, "stop ULD (e.g. sd driver) attaching (def=0))");
5673 MODULE_PARM_DESC(num_parts
, "number of partitions(def=0)");
5674 MODULE_PARM_DESC(num_tgts
, "number of targets per host to simulate(def=1)");
5675 MODULE_PARM_DESC(opt_blks
, "optimal transfer length in blocks (def=1024)");
5676 MODULE_PARM_DESC(opt_xferlen_exp
, "optimal transfer length granularity exponent (def=physblk_exp)");
5677 MODULE_PARM_DESC(opts
, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5678 MODULE_PARM_DESC(per_host_store
, "If set, next positive add_host will get new store (def=0)");
5679 MODULE_PARM_DESC(physblk_exp
, "physical block exponent (def=0)");
5680 MODULE_PARM_DESC(ptype
, "SCSI peripheral type(def=0[disk])");
5681 MODULE_PARM_DESC(random
, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5682 MODULE_PARM_DESC(removable
, "claim to have removable media (def=0)");
5683 MODULE_PARM_DESC(scsi_level
, "SCSI level to simulate(def=7[SPC-5])");
5684 MODULE_PARM_DESC(sector_size
, "logical block size in bytes (def=512)");
5685 MODULE_PARM_DESC(statistics
, "collect statistics on commands, queues (def=0)");
5686 MODULE_PARM_DESC(strict
, "stricter checks: reserved field in cdb (def=0)");
5687 MODULE_PARM_DESC(submit_queues
, "support for block multi-queue (def=1)");
5688 MODULE_PARM_DESC(tur_ms_to_ready
, "TEST UNIT READY millisecs before initial good status (def=0)");
5689 MODULE_PARM_DESC(unmap_alignment
, "lowest aligned thin provisioning lba (def=0)");
5690 MODULE_PARM_DESC(unmap_granularity
, "thin provisioning granularity in blocks (def=1)");
5691 MODULE_PARM_DESC(unmap_max_blocks
, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5692 MODULE_PARM_DESC(unmap_max_desc
, "max # of ranges that can be unmapped in one cmd (def=256)");
5693 MODULE_PARM_DESC(uuid_ctl
,
5694 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5695 MODULE_PARM_DESC(virtual_gb
, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5696 MODULE_PARM_DESC(vpd_use_hostno
, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5697 MODULE_PARM_DESC(wp
, "Write Protect (def=0)");
5698 MODULE_PARM_DESC(write_same_length
, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5699 MODULE_PARM_DESC(zbc
, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5700 MODULE_PARM_DESC(zone_max_open
, "Maximum number of open zones; [0] for no limit (def=auto)");
5701 MODULE_PARM_DESC(zone_nr_conv
, "Number of conventional zones (def=1)");
5702 MODULE_PARM_DESC(zone_size_mb
, "Zone size in MiB (def=auto)");
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];

static const char *scsi_debug_info(struct Scsi_Host *shp)
{
	int k;

	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
		      my_name, SDEBUG_VERSION, sdebug_version_date);
	if (k >= (SDEBUG_INFO_LEN - 1))
		return sdebug_info;
	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
		  "statistics", (int)sdebug_statistics);
	return sdebug_info;
}
/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
				 int length)
{
	char arr[16];
	int opts;
	int minLen = length > 15 ? 15 : length;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	memcpy(arr, buffer, minLen);
	arr[minLen] = '\0';
	if (1 != sscanf(arr, "%d", &opts))
		return -EINVAL;
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	if (sdebug_every_nth != 0)
		tweak_cmnd_count();
	return length;
}
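/*
 * Example of the above (the host_id of 0 is hypothetical), run as root:
 *
 *   echo 1 > /proc/scsi/scsi_debug/0
 *
 * Note that only a decimal value is accepted here (sscanf "%d"), unlike
 * the opts sysfs attribute further below which also takes hex.
 */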
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;
	struct sdebug_host_info *sdhp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? " not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);

static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
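/*
 * Illustrative interplay of the two delay attributes, per the code above
 * (run from /sys/bus/pseudo/drivers/scsi_debug, module already loaded):
 *
 *   echo 5000 > ndelay   # ~5 microsecond responses; delay then reads back
 *                        # as the JDELAY_OVERRIDDEN sentinel
 *   echo 0 > ndelay      # hand control back to the jiffies-based delay
 *
 * Either store returns -EBUSY if any submit queue still has commands in
 * flight.
 */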
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}

static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{
	int opts;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &opts) == 0)
				goto opts_done;
		} else {
			if (kstrtoint(work, 10, &opts) == 0)
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(opts);
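/*
 * Example stores accepted by opts_store() above (attribute lives under
 * /sys/bus/pseudo/drivers/scsi_debug); these two lines are equivalent:
 *
 *   echo 0x3 > opts
 *   echo 3 > opts
 *
 * i.e. noise (0x1) plus medium_err (0x2), per the opts module parameter
 * description.
 */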
static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int n;

	/* Cannot change from or to TYPE_ZBC with sysfs */
	if (sdebug_ptype == TYPE_ZBC)
		return -EINVAL;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n == TYPE_ZBC)
			return -EINVAL;
		sdebug_ptype = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ptype);

static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_dsense = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(dsense);

static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
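/*
 * fake_rw transition semantics sketched by the code above:
 *
 *   echo 0 > fake_rw    # 1 -> 0: set up (or re-use) a ram store and point
 *                       #         every host at the same store
 *   echo 1 > fake_rw    # 0 -> 1: shrink, erasing all stores but the first
 *
 * Writing the value the attribute already holds is a no-op.
 */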
static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(no_lun_0);

static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(num_tgts);

static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
}

static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_per_host_store = v;
	return count;
}
static DRIVER_ATTR_RW(per_host_store);

static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &nth) == 0)
				goto every_nth_done;
		} else {
			if (kstrtoint(work, 10, &nth) == 0)
				goto every_nth_done;
		}
	}
	return -EINVAL;

every_nth_done:
	sdebug_every_nth = nth;
	if (nth && !sdebug_statistics) {
		pr_info("every_nth needs statistics=1, set it\n");
		sdebug_statistics = true;
	}
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(every_nth);
static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
}
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE) &&
	    (sdebug_host_max_queue == 0)) {
		block_unblock_all_queues(true);
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);

static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	/* absolute number of hosts currently active is what is shown */
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}

static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
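/*
 * Example of driving add_host_store() above (values hypothetical):
 *
 *   echo 3 > add_host    # add three hosts, re-using not_in_use stores
 *                        # when per_host_store is set and fake_rw is 0
 *   echo -2 > add_host   # remove the two most recently added hosts
 */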
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);

static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;

	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
		if (n > 0)
			sdebug_statistics = true;
		else {
			clear_queue_stats();
			sdebug_statistics = false;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(statistics);
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
static ssize_t random_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
}

static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	bool v;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdebug_random = v;
	return count;
}
static DRIVER_ATTR_RW(random);

static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);

static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);
static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);

static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};

static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};

static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};

static int sdeb_zbc_model_str(const char *cp)
{
	int res = sysfs_match_string(zbc_model_strs_a, cp);

	if (res < 0) {
		res = sysfs_match_string(zbc_model_strs_b, cp);
		if (res < 0) {
			res = sysfs_match_string(zbc_model_strs_c, cp);
			if (res < 0)
				return -EINVAL;
		}
	}
	return res;
}

static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 zbc_model_strs_a[sdeb_zbc_model]);
}
static DRIVER_ATTR_RO(zbc);
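/*
 * Per sdeb_zbc_model_str() above, any one string from each column selects
 * the same model at module load time, e.g. (illustrative):
 *
 *   modprobe scsi_debug zbc=host-managed
 *   modprobe scsi_debug zbc=managed
 *   modprobe scsi_debug zbc=2
 */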
static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
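/*
 * So, once loaded, roughly these two views of the same knob exist
 * (illustrative):
 *
 *   cat /sys/bus/pseudo/drivers/scsi_debug/every_nth     # this array
 *   cat /sys/module/scsi_debug/parameters/every_nth      # plain parameter
 *
 * with only the former running every_nth_store() and its side effects
 * (e.g. forcing statistics on).
 */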
static struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;

	ramdisk_lck_a[0] = &atomic_rw;
	ramdisk_lck_a[1] = &atomic_rw2;
	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;
	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		} else {
			sdebug_lun_am = SAM_LUN_AM_FLAT;
		}
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0) {
			ret = k;
			goto free_q_arr;
		}
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_q_arr;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0) {
			ret = idx;
			goto free_q_arr;
		}
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_num_hosts;

	stop_all_queued();
	for (; k; k--)
		sdebug_do_remove_host(true);
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	sdebug_erase_all_stores(false);
	xa_destroy(per_store_ap);
	kfree(sdebug_q_arr);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
/* idx must be valid, if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
	if (idx < 0)
		return;
	if (!sip) {
		if (xa_empty(per_store_ap))
			return;
		sip = xa_load(per_store_ap, idx);
		if (!sip)
			return;
	}
	vfree(sip->map_storep);
	vfree(sip->dif_storep);
	vfree(sip->storep);
	xa_erase(per_store_ap, idx);
	kfree(sip);
}
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{
	int res;
	u32 n_idx;
	unsigned long iflags;
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	struct sdeb_store_info *sip = NULL;
	struct xa_limit xal = { .max = 1 << 16, .min = 0 };

	sip = kzalloc(sizeof(*sip), GFP_KERNEL);
	if (!sip)
		return -ENOMEM;

	xa_lock_irqsave(per_store_ap, iflags);
	res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
	if (unlikely(res < 0)) {
		xa_unlock_irqrestore(per_store_ap, iflags);
		kfree(sip);
		pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
		return res;
	}
	sdeb_most_recent_idx = n_idx;
	if (sdeb_first_idx < 0)
		sdeb_first_idx = n_idx;
	xa_unlock_irqrestore(per_store_ap, iflags);

	res = -ENOMEM;
	sip->storep = vzalloc(sz);
	if (!sip->storep) {
		pr_err("user data oom\n");
		goto err;
	}
	if (sdebug_num_parts > 0)
		sdebug_build_parts(sip->storep, sz);

	/* DIF/DIX: what T10 calls Protection Information (PI) */
	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		sip->dif_storep = vmalloc(dif_size);

		pr_info("dif_storep %u bytes @ %pK\n", dif_size,
			sip->dif_storep);

		if (!sip->dif_storep) {
			pr_err("DIX oom\n");
			goto err;
		}
		memset(sip->dif_storep, 0xff, dif_size);
	}
	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		sip->map_storep = vmalloc(array_size(sizeof(long),
						     BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (!sip->map_storep) {
			pr_err("LBP map oom\n");
			goto err;
		}

		bitmap_zero(sip->map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(sip, 0, 2);
	}

	rwlock_init(&sip->macc_lck);
	return (int)n_idx;
err:
	sdebug_erase_store((int)n_idx, sip);
	pr_warn("%s: failed, errno=%d\n", __func__, -res);
	return res;
}
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error)
		goto clean;

	++sdebug_num_hosts;
	return 0;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
static int sdebug_do_add_host(bool mk_new_store)
{
	int ph_idx = sdeb_most_recent_idx;

	if (mk_new_store) {
		ph_idx = sdebug_add_store();
		if (ph_idx < 0)
			return ph_idx;
	}
	return sdebug_add_host_helper(ph_idx);
}
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return -ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			scsi_set_sense_information(scp->sense_buffer,
						   SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
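/*
 * Example of exercising resp_not_ready() above (the tur_ms_to_ready value
 * is hypothetical): loading with
 *
 *   modprobe scsi_debug tur_ms_to_ready=2000
 *
 * makes TEST UNIT READY fail with NOT READY (ASC 0x4, ASCQ 0x1, "in
 * process of becoming ready") for roughly the first 2 seconds after
 * device creation, with the remaining milliseconds reported in the
 * sense-data INFORMATION field.
 */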
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.max_segment_size =	-1U,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		return -ENODEV;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
static int sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		pr_err("Unable to locate host info\n");
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};