/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2018 Douglas Gilbert
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * For documentation see http://sg.danny.cz/sg/sdebug26.html
 */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0188"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20190125";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_HOST_LOCK 0
#define DEF_LBPWS10 0
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_CMD_ABORT		0x10000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY | \
				  SDEBUG_OPT_CMD_ABORT)
/* When "every_nth" > 0 then modulo "every_nth" commands:
 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 * - a RECOVERED_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_RECOVERED_ERR is set.
 * - a TRANSPORT_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 * - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
 *   CMD_ABORT.
 *
 * When "every_nth" < 0 then after "- every_nth" commands the selected
 * error will be injected. The error will be injected on every subsequent
 * command until some other action occurs; for example, the user writing
 * a new value (other than -1 or 1) to every_nth:
 *      echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 */
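/* Illustrative usage (the values are only an example): loading with
 *      modprobe scsi_debug every_nth=100 opts=4
 * makes every 100th command simulate a timeout, since 4 is
 * SDEBUG_OPT_TIMEOUT in the bit mask values above. */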
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */

/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 * or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255
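/* Example (0:0:0:0 is just a sample <h:c:t:l> tuple):
 *      echo 64 > /sys/class/scsi_device/0:0:0:0/device/queue_depth */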
#define F_D_IN 1
#define F_D_OUT 2
#define F_D_OUT_MAYBE 4		/* WRITE SAME, NDOB bit */
#define F_RL_WLUN_OK 0x10
#define F_SKIP_UA 0x20
#define F_DELAY_OVERR 0x40
#define F_SA_LOW 0x80		/* cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH 0x100		/* as used by variable length cdbs */
#define F_INV_OP 0x200
#define F_FAKE_RW 0x400
#define F_M_ACCESS 0x800	/* media access */
#define F_SSU_DELAY 0x1000
#define F_SYNC_DELAY 0x2000

#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t stopped;
};

struct sdebug_host_info {
	struct list_head host_list;
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};

struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
	unsigned int inj_cmd_abort:1;
};
struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	atomic_t blocked;	/* to temporarily stop more being queued */
};
static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
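/* Reading a len_mask entry: for the INQUIRY row further below,
 * {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, ...} says the cdb is 6 bytes long and
 * 0xe3 is the set of bits allowed to be non-zero in cdb[1]; with the
 * "strict" parameter set, bits outside a byte's mask can be reported as an
 * invalid field in the cdb. */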
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE =	0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	    0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
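/* The first byte of an incoming cdb indexes this table; for example
 * opcode_ind_arr[0x28] is SDEB_I_READ, so READ(10) shares a handler with
 * the other READ variants. */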
/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK 0x40000000
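/* For example, resp_start_stop() below returns SDEG_RES_IMMED_MASK when
 * the IMMED bit is set (or the power state is unchanged) so the command
 * can be completed without the configured delay. */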
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,			/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
		0xff, 0, 0xc7, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
	     0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr, /* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
	    NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },		/* XDWRITEREAD(10) */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
	    resp_sync_cache, sync_cache_iarr,
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* SYNC_CACHE (10) */
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */

	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */
/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static unsigned char *fake_storep;	/* ramdisk storage */
static struct t10_pi_tuple *dif_storep;	/* protection info */
static void *map_storep;		/* provisioning map */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;

static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
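/* scsi_debug_lbp() gates the thin provisioning behaviour elsewhere in this
 * file, e.g. the LBPME bit in resp_readcap16() and the contents of the
 * 0xb2 (logical block provisioning) VPD page. */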
static void *lba2fake_store(unsigned long long lba)
{
	lba = do_div(lba, sdebug_store_sectors);

	return fake_storep + lba * sdebug_sector_size;
}
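/* Note that do_div() reduces lba modulo sdebug_store_sectors, so when the
 * advertised capacity (e.g. via the virtual_gb parameter) exceeds the
 * backing ramdisk, accesses wrap around within the smaller store. */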
static struct t10_pi_tuple *dif_store(sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV bit set */
	if (c_d)
		sks[0] |= 0x40;	/* C/D bit: field is in the cdb */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV bit: in_bit is valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;	/* sense-key specific descriptor */
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
			    "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
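/* Example flow: a bus reset sets SDEBUG_UA_BUS_RESET in each device's
 * uas_bm; the next command on each device then gets CHECK CONDITION with
 * UNIT ATTENTION sense from make_ua() and the bit is cleared, giving the
 * one-shot UA semantics described above. */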
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	int act_len, n;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
	sdb->resid = min(sdb->resid, n);
	return 0;
}
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num,
					   arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */

	return sizeof(vpdb0_data);
}
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;			/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;	/* LBPU */
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;	/* LBPWS */
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;	/* LBPWS10 */
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];        /*sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];        /*sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
					   min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa;	/* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
				   min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};

static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{
	unsigned char *sbuff;
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
	bool dsense;
	int len = 18;

	memset(arr, 0, sizeof(arr));
	dsense = !!(cmd[1] & 1);
	sbuff = scp->sense_buffer;
	if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
		if (dsense) {
			arr[0] = 0x72;
			arr[1] = 0x0;		/* NO_SENSE in sense_key */
			arr[2] = THRESHOLD_EXCEEDED;
			arr[3] = 0xff;		/* TEST set and MRIE==6 */
			len = 8;
		} else {
			arr[0] = 0x70;
			arr[2] = 0x0;		/* NO_SENSE in sense_key */
			arr[7] = 0xa;		/* 18 byte sense buffer */
			arr[12] = THRESHOLD_EXCEEDED;
			arr[13] = 0xff;		/* TEST set and MRIE==6 */
		}
	} else {
		memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
		if (arr[0] >= 0x70 && dsense == sdebug_dsense)
			;	/* have sense and formats match */
		else if (arr[0] <= 0x70) {
			if (dsense) {
				memset(arr, 0, 8);
				arr[0] = 0x72;
				len = 8;
			} else {
				memset(arr, 0, 18);
				arr[0] = 0x70;
				arr[7] = 0xa;
			}
		} else if (dsense) {
			memset(arr, 0, 8);
			arr[0] = 0x72;
			arr[1] = sbuff[2];     /* sense key */
			arr[2] = sbuff[12];    /* asc */
			arr[3] = sbuff[13];    /* ascq */
			len = 8;
		} else {
			memset(arr, 0, 18);
			arr[0] = 0x70;
			arr[2] = sbuff[1];     /* sense key */
			arr[7] = 0xa;
			arr[12] = sbuff[2];    /* asc */
			arr[13] = sbuff[3];    /* ascq */
		}
	}
	mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
	return fill_from_dev_buffer(scp, arr, len);
}
static int resp_start_stop(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, stop;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	stop = !(cmd[4] & 1);
	changing = atomic_read(&devip->stopped) == !stop;
	atomic_xchg(&devip->stopped, stop);
	if (!changing || cmd[1] & 0x1)	/* state unchanged or IMMED set */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
static sector_t get_sdebug_capacity(void)
{
	static const unsigned int gibibyte = 1073741824;

	if (sdebug_virtual_gb > 0)
		return (sector_t)sdebug_virtual_gb *
			(gibibyte / sdebug_sector_size);
	else
		return sdebug_store_sectors;
}
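/* Worked example: virtual_gb=1 with the default 512 byte sector size
 * reports 1073741824 / 512 = 2097152 sectors, independent of the actual
 * dev_size_mb sized store. */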
#define SDEBUG_READCAP_ARR_SZ 8
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;

	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		put_unaligned_be32(capac, arr + 0);
	} else
		put_unaligned_be32(0xffffffff, arr + 0);
	put_unaligned_be16(sdebug_sector_size, arr + 6);
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
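/* Returning 0xffffffff is the standard cue (per SBC) for the initiator to
 * retry with READ CAPACITY(16), handled by resp_readcap16() below. */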
#define SDEBUG_READCAP16_ARR_SZ 32
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	int alloc_len;

	alloc_len = get_unaligned_be32(cmd + 10);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
	put_unaligned_be32(sdebug_sector_size, arr + 8);
	arr[13] = sdebug_physblk_exp & 0xf;
	arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_lbp()) {
		arr[14] |= 0x80; /* LBPME */
		/* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
		 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
		 * in the wider field maps to 0 in this field.
		 */
		if (sdebug_lbprz & 1)	/* precisely what the draft requires */
			arr[14] |= 0x40;
	}

	arr[15] = sdebug_lowest_aligned & 0xff;

	if (have_dif_prot) {
		arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int n, ret, alen, rlen;
	int port_group_a, port_group_b, port_a, port_b;

	alen = get_unaligned_be32(cmd + 6);
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1;	/* relative port A */
	port_b = 0x2;	/* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;	/* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Status code */
	arr[n++] = 0;	/* Vendor unique */
	arr[n++] = 0x1;	/* One port per group */
	arr[n++] = 0;	/* Reserved */
	arr[n++] = 0;	/* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
				   min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
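/*
 * Added sketch (assuming the SPC-4 REPORT TARGET PORT GROUPS layout) of
 * each 12 byte descriptor built above:
 *   byte 0: asymmetric access state (plus PREF bit)
 *   byte 1: supported states bit mask
 *   bytes 2-3: target port group identifier
 *   byte 5: status code, byte 7: target port count (0x1 here)
 *   bytes 10-11: relative target port identifier
 */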
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	bool rctd;
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0:	/* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;
			offset += bump;
		}
		break;
	case 1:	/* one command: opcode only */
	case 2:	/* one command: opcode plus service action */
	case 3:	/* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);	/* point at requested sa */
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;
			else if (0 == (FF_SA & oip->flags)) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
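/*
 * Added usage note (hedged): the single-command variants above are what
 * "sg_opcodes --opcode=0x28 /dev/sgX" from sg3_utils should trigger,
 * while a plain "sg_opcodes /dev/sgX" requests the all-commands form
 * (reporting_opts == 0); the underlying command is MAINTENANCE IN with
 * service action 0xc (REPORT SUPPORTED OPERATION CODES).
 */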
static int resp_rsup_tmfs(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	bool repd;
	u32 alloc_len, len;
	u8 arr[16];
	u8 *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	repd = !!(cmd[2] & 0x80);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	arr[0] = 0xc8;		/* ATS | ATSS | LURS */
	arr[1] = 0x1;		/* ITNRS */
	if (repd) {
		arr[3] = 0xc;
		len = 16;
	} else
		len = 4;

	len = (len < alloc_len) ? len : alloc_len;
	return fill_from_dev_buffer(scp, arr, len);
}
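/*
 * Added note: arr[0] and arr[1] above are the task management functions
 * this pseudo device claims to support (abort task, abort task set,
 * logical unit reset, I_T nexus reset), matching the ATS/ATSS/LURS/ITNRS
 * comments; REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS is MAINTENANCE IN
 * with service action 0xd.
 */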
/* <<Following mode page info copied from ST318451LW>> */

static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
					5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
					 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{	/* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	put_unaligned_be16(sdebug_sectors_per, p + 10);
	put_unaligned_be16(sdebug_sector_size, p + 12);
	if (sdebug_removable)
		p[20] |= 0x20; /* should agree with INQUIRY */
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
}
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};

static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{	/* Caching page for mode_sense */
	unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};

	if (SDEBUG_OPT_N_WCE & sdebug_opts)
		caching_pg[2] &= ~0x4;	/* set WCE=0 (default WCE=1) */
	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
	else if (2 == pcontrol)
		memcpy(p, d_caching_pg, sizeof(d_caching_pg));
	return sizeof(caching_pg);
}
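/*
 * Added example: loading the driver with the SDEBUG_OPT_N_WCE bit set in
 * the "opts" module parameter should clear WCE in the caching page, so
 * the sd driver reports the (pseudo) disk cache as "write through"
 * instead of the default "write back".
 */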
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};

static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Control mode page for mode_sense */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
					0, 0, 0, 0};
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				       0, 0, 0x2, 0x4b};

	if (sdebug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (sdebug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
	return sizeof(ctrl_m_pg);
}
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				      0, 0, 0x0, 0x0};

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
}
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
}
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
}
#define SDEBUG_MAX_MSENSE_SZ 256

static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int pcontrol, pcode, subpcode, bd_len;
	unsigned char dev_spec;
	int alloc_len, offset, len, target_dev_id;
	int target = scp->device->id;
	unsigned char *ap;
	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
	unsigned char *cmd = scp->cmnd;
	bool dbd, llbaa, msense_6, is_disk, bad_pcode;

	dbd = !!(cmd[1] & 0x8);		/* disable block descriptors */
	pcontrol = (cmd[2] & 0xc0) >> 6;
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3];
	msense_6 = (MODE_SENSE == cmd[0]);
	llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
	is_disk = (sdebug_ptype == TYPE_DISK);
	if (is_disk && !dbd)
		bd_len = llbaa ? 16 : 8;
	else
		bd_len = 0;
	alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
	if (0x3 == pcontrol) {  /* Saving values not supported */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
		return check_condition_result;
	}
	target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
			(devip->target * 1000) - 3;
	/* for disks set DPOFUA bit and clear write protect (WP) bit */
	if (is_disk)
		dev_spec = 0x10;	/* =0x90 if WP=1 implies read-only */
	else
		dev_spec = 0x0;
	if (msense_6) {
		arr[2] = dev_spec;
		arr[3] = bd_len;
		offset = 4;
	} else {
		arr[3] = dev_spec;
		if (16 == bd_len)
			arr[4] = 0x1;	/* set LONGLBA bit */
		arr[7] = bd_len;	/* assume 255 or less */
		offset = 8;
	}
	ap = arr + offset;
	if ((bd_len > 0) && (!sdebug_capacity))
		sdebug_capacity = get_sdebug_capacity();

	if (8 == bd_len) {
		if (sdebug_capacity > 0xfffffffe)
			put_unaligned_be32(0xffffffff, ap + 0);
		else
			put_unaligned_be32(sdebug_capacity, ap + 0);
		put_unaligned_be16(sdebug_sector_size, ap + 6);
		offset += bd_len;
		ap = arr + offset;
	} else if (16 == bd_len) {
		put_unaligned_be64((u64)sdebug_capacity, ap + 0);
		put_unaligned_be32(sdebug_sector_size, ap + 12);
		offset += bd_len;
		ap = arr + offset;
	}

	if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
		/* TODO: Control Extension page */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	bad_pcode = false;

	switch (pcode) {
	case 0x1:	/* Read-Write error recovery page, direct access */
		len = resp_err_recov_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x2:	/* Disconnect-Reconnect page, all devices */
		len = resp_disconnect_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3:	/* Format device page, direct access */
		if (is_disk) {
			len = resp_format_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0x8:	/* Caching page, direct access */
		if (is_disk) {
			len = resp_caching_pg(ap, pcontrol, target);
			offset += len;
		} else
			bad_pcode = true;
		break;
	case 0xa:	/* Control Mode page, all devices */
		len = resp_ctrl_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x19:	/* if spc==1 then sas phy, control+discover */
		if ((subpcode > 0x2) && (subpcode < 0xff)) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		len = 0;
		if ((0x0 == subpcode) || (0xff == subpcode))
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
		if ((0x1 == subpcode) || (0xff == subpcode))
			len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
						  target_dev_id);
		if ((0x2 == subpcode) || (0xff == subpcode))
			len += resp_sas_sha_m_spg(ap + len, pcontrol);
		offset += len;
		break;
	case 0x1c:	/* Informational Exceptions Mode page, all devices */
		len = resp_iec_m_pg(ap, pcontrol, target);
		offset += len;
		break;
	case 0x3f:	/* Read all Mode pages */
		if ((0 == subpcode) || (0xff == subpcode)) {
			len = resp_err_recov_pg(ap, pcontrol, target);
			len += resp_disconnect_pg(ap + len, pcontrol, target);
			if (is_disk) {
				len += resp_format_pg(ap + len, pcontrol,
						      target);
				len += resp_caching_pg(ap + len, pcontrol,
						       target);
			}
			len += resp_ctrl_m_pg(ap + len, pcontrol, target);
			len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
			if (0xff == subpcode) {
				len += resp_sas_pcd_m_spg(ap + len, pcontrol,
						target, target_dev_id);
				len += resp_sas_sha_m_spg(ap + len, pcontrol);
			}
			len += resp_iec_m_pg(ap + len, pcontrol, target);
			offset += len;
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
			return check_condition_result;
		}
		break;
	default:
		bad_pcode = true;
		break;
	}
	if (bad_pcode) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
		return check_condition_result;
	}
	if (msense_6)
		arr[0] = offset - 1;
	else
		put_unaligned_be16((offset - 2), arr + 0);
	return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
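/*
 * Worked example (added): a MODE SENSE(10) for the caching page, e.g.
 * "sg_modes --page=8 /dev/sdX" from sg3_utils, arrives here with
 * pcode=0x8, subpcode=0 and pcontrol=0; for a disk the response built
 * above is an 8 byte header, an 8 or 16 byte block descriptor (unless
 * DBD was set), then the 20 byte caching page itself.
 */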
#define SDEBUG_MAX_MSELECT_SZ 512

static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;
	sp = cmd[1] & 0x1;
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	if (md_len > 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	off = bd_len + (mselect6 ? 4 : 8);
	mpage = arr[off] & 0x3f;
	ps = !!(arr[off] & 0x80);
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:	/* Caching Mode page */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:	/* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:	/* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
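/*
 * Added note: only pages 0x8, 0xa and 0x1c are writable above; for
 * instance "sdparm --clear WCE /dev/sdX" should land in the caching page
 * branch and raise the MODE PARAMETERS CHANGED unit attention on this
 * device via the SDEBUG_UA_MODE_CHANGED bit.
 */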
static int resp_temp_l_pg(unsigned char *arr)
{
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
static int resp_ie_l_pg(unsigned char *arr)
{
	unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
		};

	memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
	if (iec_m_pg[2] & 0x4) {	/* TEST bit set */
		arr[4] = THRESHOLD_EXCEEDED;
		arr[5] = 0xff;
	}
	return sizeof(ie_l_pg);
}
#define SDEBUG_MAX_LSENSE_SZ 512

static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode, alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;
	sp = cmd[1] & 0x1;
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
	return fill_from_dev_buffer(scp, arr,
				    min(len, SDEBUG_MAX_INQ_ARR_SZ));
}
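/*
 * Added usage sketch: "sg_logs --page=0x2f /dev/sgX" should return the
 * informational exceptions page built by resp_ie_l_pg() above; if the
 * TEST bit has been set in the IE mode page (0x1c), the asc field reads
 * THRESHOLD_EXCEEDED, which can be used to simulate a SMART style
 * warning from the pseudo device.
 */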
static int check_device_access_params(struct scsi_cmnd *scp,
				      unsigned long long lba, unsigned int num)
{
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	return 0;
}
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
			    u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	struct scsi_data_buffer *sdb;
	enum dma_data_direction dir;

	if (do_write) {
		sdb = scsi_out(scmd);
		dir = DMA_TO_DEVICE;
		write_since_sync = true;
	} else {
		sdb = scsi_in(scmd);
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
		return -1;

	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fake_storep + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fake_storep, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
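/*
 * Added note: do_device_access() treats the ram store as circular when
 * the (virtual) capacity exceeds the real store: "block" is lba modulo
 * sdebug_store_sectors, and a transfer that crosses the end of the store
 * wraps back to offset 0 via the second sg_copy_buffer() call above.
 */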
/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
 * arr into lba2fake_store(lba,num) and return true. If comparison fails then
 * return false. */
static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
{
	bool res;
	u64 block, rest = 0;
	u32 store_blks = sdebug_store_sectors;
	u32 lb_size = sdebug_sector_size;

	block = do_div(lba, store_blks);
	if (block + num > store_blks)
		rest = block + num - store_blks;

	res = !memcmp(fake_storep + (block * lb_size), arr,
		      (num - rest) * lb_size);
	if (!res)
		return res;
	if (rest)
		res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
			      rest * lb_size);
	if (!res)
		return res;
	arr += num * lb_size;
	memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
	if (rest)
		memcpy(fake_storep, arr + ((num - rest) * lb_size),
		       rest * lb_size);
	return res;
}
static __be16 dif_compute_csum(const void *buf, int len)
{
	__be16 csum;

	if (sdebug_guard)
		csum = (__force __be16)ip_compute_csum(buf, len);
	else
		csum = cpu_to_be16(crc_t10dif(buf, len));

	return csum;
}
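/*
 * Added note: the guard tag format is selected by the "guard" module
 * parameter: 0 (the default) uses the T10 CRC that the DIF standard
 * requires, while 1 uses the IP checksum, which is cheaper to compute
 * but only verifiable by initiators using the same convention.
 */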
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
		       scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min(miter.length, resid);
		void *start = dif_store(sector);
		size_t rest = 0;

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	unsigned int i;
	struct t10_pi_tuple *sdt;
	sector_t sector;

	for (i = 0; i < sectors; i++, ei_lba++) {
		int ret;

		sector = start_sec + i;
		sdt = dif_store(sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
		if (ret) {
			dif_errors++;
			return ret;
		}
	}

	dif_copy_prot(SCpnt, start_sec, sectors, true);
	dix_reads++;

	return 0;
}
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct sdebug_queued_cmd *sqcp;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	if (unlikely(sdebug_any_injecting_opt)) {
		sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_short)
				num /= 2;
		}
	} else
		sqcp = NULL;

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start +
			      sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	read_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_read(scp, lba, num, ei_lba);

		if (prot_ret) {
			read_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, false);
	read_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	scsi_in(scp)->resid = scsi_bufflen(scp) - ret;

	if (unlikely(sqcp)) {
		if (sqcp->inj_recovered) {
			mk_sense_buffer(scp, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			return check_condition_result;
		} else if (sqcp->inj_transport) {
			mk_sense_buffer(scp, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			return check_condition_result;
		} else if (sqcp->inj_dif) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			return illegal_condition_result;
		} else if (sqcp->inj_dix) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			return illegal_condition_result;
		}
	}
	return 0;
}
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i+j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i+j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i+j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;
	struct sg_mapping_iter piter;

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			ret = dif_verify(sdt, daddr, sector, ei_lba);
			if (ret) {
				dump_sector(daddr, sdebug_sector_size);
				goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
static unsigned long lba_to_map_index(sector_t lba)
{
	if (sdebug_unmap_alignment)
		lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
	sector_div(lba, sdebug_unmap_granularity);
	return lba;
}

static sector_t map_index_to_lba(unsigned long index)
{
	sector_t lba = index * sdebug_unmap_granularity;

	if (sdebug_unmap_alignment)
		lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
	return lba;
}
static unsigned int map_state(sector_t lba, unsigned int *num)
{
	sector_t end;
	unsigned int mapped;
	unsigned long index;
	unsigned long next;

	index = lba_to_map_index(lba);
	mapped = test_bit(index, map_storep);

	if (mapped)
		next = find_next_zero_bit(map_storep, map_size, index);
	else
		next = find_next_bit(map_storep, map_size, index);

	end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
	*num = end - lba;
	return mapped;
}
static void map_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (index < map_size)
			set_bit(index, map_storep);

		lba = map_index_to_lba(index + 1);
	}
}
static void unmap_region(sector_t lba, unsigned int len)
{
	sector_t end = lba + len;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fake_storep +
				       lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			if (dif_storep) {
				memset(dif_storep + lba, 0xff,
				       sizeof(*dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
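/*
 * Added note: the memset above implements the two LBPRZ flavours: with
 * lbprz=1 unmapped blocks read back as zeroes, with lbprz=2 they read
 * back as 0xff bytes (the "provisioning initialization pattern" case);
 * lbprz=0 skips the memset, so stale data remains visible.
 */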
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba;
	unsigned long iflags;
	int ret;
	bool check_prot;

	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10) */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	/* inline check_device_access_params() */
	if (unlikely(lba + num > sdebug_capacity)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (unlikely(num > sdebug_store_sectors)) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		int prot_ret = prot_verify_write(scp, lba, num, ei_lba);

		if (prot_ret) {
			write_unlock_irqrestore(&atomic_rw, iflags);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	ret = do_device_access(scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	if (unlikely(sdebug_any_injecting_opt)) {
		struct sdebug_queued_cmd *sqcp =
			(struct sdebug_queued_cmd *)scp->host_scribble;

		if (sqcp) {
			if (sqcp->inj_recovered) {
				mk_sense_buffer(scp, RECOVERED_ERROR,
						THRESHOLD_EXCEEDED, 0);
				return check_condition_result;
			} else if (sqcp->inj_dif) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return illegal_condition_result;
			} else if (sqcp->inj_dix) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
		}
	}
	return 0;
}
/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;
	u8 *up;
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	unsigned long iflags;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;	/* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	write_lock_irqsave(&atomic_rw, iflags);
	sg_off = lbdof_blen;
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(scp, sg_off, lba, num, true);
		if (unlikely(scsi_debug_lbp()))
			map_region(lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		if (unlikely(sdebug_any_injecting_opt)) {
			struct sdebug_queued_cmd *sqcp =
				(struct sdebug_queued_cmd *)scp->host_scribble;

			if (sqcp) {
				if (sqcp->inj_recovered) {
					mk_sense_buffer(scp, RECOVERED_ERROR,
							THRESHOLD_EXCEEDED, 0);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dif) {
					/* Logical block guard check failed */
					mk_sense_buffer(scp, ABORTED_COMMAND,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				} else if (sqcp->inj_dix) {
					mk_sense_buffer(scp, ILLEGAL_REQUEST,
							0x10, 1);
					ret = illegal_condition_result;
					goto err_out_unlock;
				}
			}
		}
		sg_off += num_by;
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	write_unlock_irqrestore(&atomic_rw, iflags);
err_out:
	kfree(lrdp);
	return ret;
}
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	int ret;
	unsigned long iflags;
	unsigned long long i;
	u32 lb_size = sdebug_sector_size;
	u64 block, lbaa;
	u8 *fs1p;

	ret = check_device_access_params(scp, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_lbp()) {
		unmap_region(lba, num);
		goto out;
	}
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fs1p = fake_storep + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fake_storep + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
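/*
 * Added design note: only the first logical block is fetched from the
 * data-out buffer (or zeroed when NDOB is set); the remaining num-1
 * blocks are replicated inside the ram store with memmove(), so a large
 * WRITE SAME costs one host transfer plus a store-local copy loop.
 */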
static int resp_write_same_10(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u32 lba;
	u16 num;
	u32 ei_lba = 0;
	bool unmap = false;

	if (cmd[1] & 0x8) {
		if (sdebug_lbpws10 == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	lba = get_unaligned_be32(cmd + 2);
	num = get_unaligned_be16(cmd + 7);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, false);
}
static int resp_write_same_16(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	u32 ei_lba = 0;
	bool unmap = false;
	bool ndob = false;

	if (cmd[1] & 0x8) {	/* UNMAP */
		if (sdebug_lbpws == 0) {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
			return check_condition_result;
		} else
			unmap = true;
	}
	if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
		ndob = true;
	lba = get_unaligned_be64(cmd + 2);
	num = get_unaligned_be32(cmd + 10);
	if (num > sdebug_write_same_length) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
		return check_condition_result;
	}
	return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
}
/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; for future. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *dp;
	u8 mode;

	mode = cmd[1] & 0x1f;
	switch (mode) {
	case 0x4:	/* download microcode (MC) and activate (ACT) */
		/* set UAs on this device only */
		set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
		set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
		break;
	case 0x5:	/* download MC, save and ACT */
		set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
		break;
	case 0x6:	/* download MC with offsets and ACT */
		/* set UAs on most devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
				if (devip != dp)
					set_bit(SDEBUG_UA_MICROCODE_CHANGED,
						dp->uas_bm);
			}
		break;
	case 0x7:	/* download MC with offsets, save, and ACT */
		/* set UA on all devices (LUs) in this target */
		list_for_each_entry(dp,
				    &devip->sdbg_host->dev_info_list,
				    dev_list)
			if (dp->target == sdp->id)
				set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
					dp->uas_bm);
		break;
	default:
		/* do nothing for this command for other mode values */
		break;
	}
	return 0;
}
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	u8 *fake_storep_hold;
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	unsigned long iflags;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");

	/* inline check_device_access_params() */
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		/* needs work to find which cdb byte 'num' comes from */
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	write_lock_irqsave(&atomic_rw, iflags);

	/* trick do_device_access() to fetch both compare and write buffers
	 * from data-in into arr. Safe (atomic) since write_lock held. */
	fake_storep_hold = fake_storep;
	fake_storep = arr;
	ret = do_device_access(scp, 0, 0, dnum, true);
	fake_storep = fake_storep_hold;
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	if (!comp_write_worker(lba, num, arr)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(lba, num);
cleanup:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(arr);
	return retval;
}
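/*
 * Added note: COMPARE AND WRITE transfers 2*num blocks of data-out: the
 * first half is the expected ("compare") image, the second half the new
 * data. A mismatch returns MISCOMPARE with MISCOMPARE_VERIFY_ASC, the
 * status that atomic test-and-set style users (e.g. clustered lock
 * managers) rely on to detect a lost race.
 */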
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};

static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	unsigned int i, payload_len, descriptors;
	int ret;
	unsigned long iflags;

	if (!scsi_debug_lbp())
		return 0;	/* fib and say it's done */
	payload_len = get_unaligned_be16(scp->cmnd + 7);
	BUG_ON(scsi_bufflen(scp) != payload_len);

	descriptors = (payload_len - 8) / 16;
	if (descriptors > sdebug_unmap_max_desc) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
		return check_condition_result;
	}

	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	write_lock_irqsave(&atomic_rw, iflags);

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(scp, lba, num);
		if (ret)
			goto out;

		unmap_region(lba, num);
	}

	ret = 0;

out:
	write_unlock_irqrestore(&atomic_rw, iflags);
	kfree(buf);

	return ret;
}
#define SDEBUG_GET_LBA_STATUS_LEN 32

static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 alloc_len, mapped, num;
	u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
	int ret;

	lba = get_unaligned_be64(cmd + 2);
	alloc_len = get_unaligned_be32(cmd + 10);

	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(scp, lba, 1);
	if (ret)
		return ret;

	if (scsi_debug_lbp())
		mapped = map_state(lba, &num);
	else {
		mapped = 1;
		/* following just in case virtual_gb changed */
		sdebug_capacity = get_sdebug_capacity();
		if (sdebug_capacity - lba <= 0xffffffff)
			num = sdebug_capacity - lba;
		else
			num = 0xffffffff;
	}

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(20, arr);		/* Parameter Data Length */
	put_unaligned_be64(lba, arr + 8);	/* LBA */
	put_unaligned_be32(num, arr + 16);	/* Number of blocks */
	arr[20] = !mapped;		/* prov_stat=0: mapped; 1: dealloc */

	return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
static int resp_sync_cache(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u32 num_blocks;
	u8 *cmd = scp->cmnd;

	if (cmd[0] == SYNCHRONIZE_CACHE) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		num_blocks = get_unaligned_be16(cmd + 7);
	} else {				/* SYNCHRONIZE_CACHE(16) */
		lba = get_unaligned_be64(cmd + 2);
		num_blocks = get_unaligned_be32(cmd + 10);
	}
	if (lba + num_blocks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!write_since_sync || cmd[1] & 0x2)
		res = SDEG_RES_IMMED_MASK;
	else		/* delay if write_since_sync and IMMED clear */
		write_since_sync = false;
	return res;
}
#define RL_BUCKET_ELEMS 8

/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsidiary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
		}
		if (j < RL_BUCKET_ELEMS)
			break;
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int j;
	unsigned char *kaddr, *buf;
	unsigned int offset;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	struct sg_mapping_iter miter;

	/* better not to use temporary buffer. */
	buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
			SG_MITER_ATOMIC | SG_MITER_TO_SG);

	while (sg_miter_next(&miter)) {
		kaddr = miter.addr;
		for (j = 0; j < miter.length; j++)
			*(kaddr + j) ^= *(buf + offset + j);

		offset += miter.length;
	}
	sg_miter_stop(&miter);
	kfree(buf);

	return 0;
}
static int resp_xdwriteread_10(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u64 lba;
	u32 num;
	int errsts;

	if (!scsi_bidi_cmnd(scp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	errsts = resp_read_dt0(scp, devip);
	if (errsts)
		return errsts;
	if (!(cmd[1] & 0x4)) {		/* DISABLE_WRITE is not set */
		errsts = resp_write_dt0(scp, devip);
		if (errsts)
			return errsts;
	}
	lba = get_unaligned_be32(cmd + 2);
	num = get_unaligned_be16(cmd + 7);
	return resp_xdwriteread(scp, lba, num, devip);
}
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
	u32 tag = blk_mq_unique_tag(cmnd->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
	if (WARN_ON_ONCE(hwq >= submit_queues))
		hwq = 0;
	return sdebug_q_arr + hwq;
}
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
	bool aborted = sd_dp->aborted;
	int qc_idx;
	int retiring = 0;
	unsigned long iflags;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_cmnd *scp;
	struct sdebug_dev_info *devip;

	sd_dp->defer_t = SDEB_DEFER_NONE;
	if (unlikely(aborted))
		sd_dp->aborted = false;
	qc_idx = sd_dp->qc_idx;
	sqp = sdebug_q_arr + sd_dp->sqa_idx;
	if (sdebug_statistics) {
		atomic_inc(&sdebug_completions);
		if (raw_smp_processor_id() != sd_dp->issuing_cpu)
			atomic_inc(&sdebug_miss_cpus);
	}
	if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
		pr_err("wild qc_idx=%d\n", qc_idx);
		return;
	}
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	sqcp = &sqp->qc_arr[qc_idx];
	scp = sqcp->a_cmnd;
	if (unlikely(scp == NULL)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
		       sd_dp->sqa_idx, qc_idx);
		return;
	}
	devip = (struct sdebug_dev_info *)scp->device->hostdata;
	if (likely(devip))
		atomic_dec(&devip->num_in_q);
	else
		pr_err("devip=NULL\n");
	if (unlikely(atomic_read(&retired_max_queue) > 0))
		retiring = 1;

	sqcp->a_cmnd = NULL;
	if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		pr_err("Unexpected completion\n");
		return;
	}

	if (unlikely(retiring)) {	/* user has reduced max_queue */
		int k, retval;

		retval = atomic_read(&retired_max_queue);
		if (qc_idx >= retval) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			pr_err("index %d too large\n", retval);
			return;
		}
		k = find_last_bit(sqp->in_use_bm, retval);
		if ((k < sdebug_max_queue) || (k == retval))
			atomic_set(&retired_max_queue, 0);
		else
			atomic_set(&retired_max_queue, k + 1);
	}
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp); /* callback to mid level */
}
/* When high resolution timer goes off this function is called. */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
	struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
						  hrt);
	sdebug_q_cmd_complete(sd_dp);
	return HRTIMER_NORESTART;
}

/* When work queue schedules work, it calls this function. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{
	struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
						  ew.work);
	sdebug_q_cmd_complete(sd_dp);
}
static bool got_shared_uuid;
static uuid_t shared_uuid;

static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		if (sdebug_uuid_ctl == 1)
			uuid_gen(&devip->lu_name);
		else if (sdebug_uuid_ctl == 2) {
			if (got_shared_uuid)
				devip->lu_name = shared_uuid;
			else {
				uuid_gen(&shared_uuid);
				got_shared_uuid = true;
				devip->lu_name = shared_uuid;
			}
		}
		devip->sdbg_host = sdbg_host;
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}
static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *open_devip = NULL;
	struct sdebug_dev_info *devip;

	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
	if (!sdbg_host) {
		pr_err("Host info NULL\n");
		return NULL;
	}
	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if ((devip->used) && (devip->channel == sdev->channel) &&
		    (devip->target == sdev->id) &&
		    (devip->lun == sdev->lun))
			return devip;
		else {
			if ((!devip->used) && (!open_devip))
				open_devip = devip;
		}
	}
	if (!open_devip) { /* try and make a new one */
		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
		if (!open_devip) {
			pr_err("out of memory at line %d\n", __LINE__);
			return NULL;
		}
	}

	open_devip->channel = sdev->channel;
	open_devip->target = sdev->id;
	open_devip->lun = sdev->lun;
	open_devip->sdbg_host = sdbg_host;
	atomic_set(&open_devip->num_in_q, 0);
	set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
	open_devip->used = true;
	return open_devip;
}
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (sdebug_verbose)
		pr_info("slave_alloc <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}

static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_configure <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
	if (devip == NULL) {
		devip = find_build_dev_info(sdp);
		if (devip == NULL)
			return 1;  /* no resources, will be marked offline */
	}
	sdp->hostdata = devip;
	blk_queue_max_segment_size(sdp->request_queue, -1U);
	if (sdebug_no_uld)
		sdp->no_uld_attach = 1;
	config_cdb_len(sdp);
	return 0;
}

static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;

	if (sdebug_verbose)
		pr_info("slave_destroy <%u %u %u %llu>\n",
			sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (devip) {
		/* make this slot available for re-use */
		devip->used = false;
		sdp->hostdata = NULL;
	}
}
static void stop_qc_helper(struct sdebug_defer *sd_dp,
			   enum sdeb_defer_type defer_t)
{
	if (!sd_dp)
		return;
	if (defer_t == SDEB_DEFER_HRT)
		hrtimer_cancel(&sd_dp->hrt);
	else if (defer_t == SDEB_DEFER_WQ)
		cancel_work_sync(&sd_dp->ew.work);
}
/* If @cmnd found deletes its timer or work queue and returns true; else
   returns false */
static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int j, k, qmax, r_qmax;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		qmax = sdebug_max_queue;
		r_qmax = atomic_read(&retired_max_queue);
		if (r_qmax > qmax)
			qmax = r_qmax;
		for (k = 0; k < qmax; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (cmnd != sqcp->a_cmnd)
					continue;
				/* found */
				devip = (struct sdebug_dev_info *)
					cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				return true;
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
	return false;
}
/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int j, k;
	enum sdeb_defer_type l_defer_t;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_dev_info *devip;
	struct sdebug_defer *sd_dp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		spin_lock_irqsave(&sqp->qc_lock, iflags);
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			if (test_bit(k, sqp->in_use_bm)) {
				sqcp = &sqp->qc_arr[k];
				if (sqcp->a_cmnd == NULL)
					continue;
				devip = (struct sdebug_dev_info *)
					sqcp->a_cmnd->device->hostdata;
				if (devip)
					atomic_dec(&devip->num_in_q);
				sqcp->a_cmnd = NULL;
				sd_dp = sqcp->sd_dp;
				if (sd_dp) {
					l_defer_t = sd_dp->defer_t;
					sd_dp->defer_t = SDEB_DEFER_NONE;
				} else
					l_defer_t = SDEB_DEFER_NONE;
				spin_unlock_irqrestore(&sqp->qc_lock, iflags);
				stop_qc_helper(sd_dp, l_defer_t);
				clear_bit(k, sqp->in_use_bm);
				spin_lock_irqsave(&sqp->qc_lock, iflags);
			}
		}
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	}
}
/* Free queued command memory on heap */
static void free_all_queued(void)
{
	int j, k;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
			sqcp = &sqp->qc_arr[k];
			kfree(sqcp->sd_dp);
			sqcp->sd_dp = NULL;
		}
	}
}
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
	bool ok;

	++num_aborts;
	ok = stop_queued_cmnd(SCpnt);
	if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: command%s found\n", __func__,
			    ok ? "" : " not");
	return SUCCESS;
}
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
	++num_dev_resets;
	if (SCpnt && SCpnt->device) {
		struct scsi_device *sdp = SCpnt->device;
		struct sdebug_dev_info *devip =
				(struct sdebug_dev_info *)sdp->hostdata;

		if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
		if (devip)
			set_bit(SDEBUG_UA_POR, devip->uas_bm);
	}
	return SUCCESS;
}
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	struct scsi_device *sdp;
	struct Scsi_Host *hp;
	int k = 0;

	++num_target_resets;
	if (!SCpnt)
		goto lie;
	sdp = SCpnt->device;
	if (!sdp)
		goto lie;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
	hp = sdp->host;
	if (!hp)
		goto lie;
	sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
	if (sdbg_host) {
		list_for_each_entry(devip,
				    &sdbg_host->dev_info_list,
				    dev_list)
			if (devip->target == sdp->id) {
				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
				++k;
			}
	}
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in target\n", __func__, k);
lie:
	return SUCCESS;
}
static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	struct scsi_device *sdp;
	struct Scsi_Host *hp;
	int k = 0;

	++num_bus_resets;
	if (!(SCpnt && SCpnt->device))
		goto lie;
	sdp = SCpnt->device;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
	hp = sdp->host;
	if (hp) {
		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
		if (sdbg_host) {
			list_for_each_entry(devip,
					    &sdbg_host->dev_info_list,
					    dev_list) {
				set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
				++k;
			}
		}
	}
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, sdp,
			    "%s: %d device(s) found in host\n", __func__, k);
lie:
	return SUCCESS;
}
static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	int k = 0;

	++num_host_resets;
	if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(devip, &sdbg_host->dev_info_list,
				    dev_list) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			++k;
		}
	}
	spin_unlock(&sdebug_host_list_lock);
	stop_all_queued();
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: %d device(s) found\n", __func__, k);
	return SUCCESS;
}
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	for (k = 1; k < sdebug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
static void block_unblock_all_queues(bool block)
{
	int j;
	struct sdebug_queue *sqp;

	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
		atomic_set(&sqp->blocked, (int)block);
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
 * commands will be processed normally before triggers occur.
 */
static void tweak_cmnd_count(void)
{
	int count, modulo;

	modulo = abs(sdebug_every_nth);
	if (modulo < 2)
		return;
	block_unblock_all_queues(true);
	count = atomic_read(&sdebug_cmnd_count);
	atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
	block_unblock_all_queues(false);
}
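
/*
 * Worked example (illustrative): with every_nth=100 and a current
 * sdebug_cmnd_count of 250, the count is rounded down to 200; commands
 * 201..299 are then processed normally and the trigger fires again when
 * the count reaches 300 (i.e. when count % 100 == 0).
 */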
static void clear_queue_stats(void)
{
	atomic_set(&sdebug_cmnd_count, 0);
	atomic_set(&sdebug_completions, 0);
	atomic_set(&sdebug_miss_cpus, 0);
	atomic_set(&sdebug_a_tsf, 0);
}
static void setup_inject(struct sdebug_queue *sqp,
			 struct sdebug_queued_cmd *sqcp)
{
	if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) {
		if (sdebug_every_nth > 0)
			sqcp->inj_recovered = sqcp->inj_transport
				= sqcp->inj_dif
				= sqcp->inj_dix = sqcp->inj_short
				= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
		return;
	}
	sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
	sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
	sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
	sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
	sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
	sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
	sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
}
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	unsigned long iflags;
	int k, num_in_q, qdepth, inject;
	struct sdebug_queue *sqp;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;

	/* schedule the response at a later time if resources permit */
	sqp = get_queue(cmnd);
	spin_lock_irqsave(&sqp->qc_lock, iflags);
	if (unlikely(atomic_read(&sqp->blocked))) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	num_in_q = atomic_read(&devip->num_in_q);
	qdepth = cmnd->device->queue_depth;
	inject = 0;
	if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
		if (scsi_result) {
			spin_unlock_irqrestore(&sqp->qc_lock, iflags);
			goto respond_in_thread;
		} else
			scsi_result = device_qfull_result;
	} else if (unlikely(sdebug_every_nth &&
			    (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
			    (scsi_result == 0))) {
		if ((num_in_q == (qdepth - 1)) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			inject = 1;
			scsi_result = device_qfull_result;
		}
	}

	k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
	if (unlikely(k >= sdebug_max_queue)) {
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		if (scsi_result)
			goto respond_in_thread;
		else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
			scsi_result = device_qfull_result;
		if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
			sdev_printk(KERN_INFO, sdp,
				    "%s: max_queue=%d exceeded, %s\n",
				    __func__, sdebug_max_queue,
				    (scsi_result ? "status: TASK SET FULL" :
						   "report: host busy"));
		if (scsi_result)
			goto respond_in_thread;
		else
			return SCSI_MLQUEUE_HOST_BUSY;
	}
	__set_bit(k, sqp->in_use_bm);
	atomic_inc(&devip->num_in_q);
	sqcp = &sqp->qc_arr[k];
	sqcp->a_cmnd = cmnd;
	cmnd->host_scribble = (unsigned char *)sqcp;
	sd_dp = sqcp->sd_dp;
	spin_unlock_irqrestore(&sqp->qc_lock, iflags);
	if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
		setup_inject(sqp, sqcp);
	if (sd_dp == NULL) {
		sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
		if (sd_dp == NULL)
			return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/*
		 * This is the F_DELAY_OVERR case. No delay.
		 */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
		} else
			kt = ndelay;
		if (!sd_dp->init_hrt) {
			sd_dp->init_hrt = true;
			sqcp->sd_dp = sd_dp;
			hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_HRT;
		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {	/* jdelay < 0, use work queue */
		if (!sd_dp->init_wq) {
			sd_dp->init_wq = true;
			sqcp->sd_dp = sd_dp;
			sd_dp->sqa_idx = sqp - sdebug_q_arr;
			sd_dp->qc_idx = k;
			INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		sd_dp->defer_t = SDEB_DEFER_WQ;
		if (unlikely(sqcp->inj_cmd_abort))
			sd_dp->aborted = true;
		schedule_work(&sd_dp->ew.work);
		if (unlikely(sqcp->inj_cmd_abort)) {
			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
				    cmnd->request->tag);
			blk_abort_request(cmnd->request);
		}
	}
	if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
		     (scsi_result == device_qfull_result)))
		sdev_printk(KERN_INFO, sdp,
			    "%s: num_in_q=%d +1, %s%s\n", __func__,
			    num_in_q, (inject ? "<inject> " : ""),
			    "status: TASK SET FULL");
	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	cmnd->scsi_done(cmnd);
	return 0;
}
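
/*
 * Response-path summary for schedule_resp() (illustrative recap of the
 * code above): a NULL devip or delta_jiff == 0 answers synchronously in
 * the submitting thread; delta_jiff > 0 or ndelay > 0 arms a pinned
 * hrtimer; a negative delta_jiff defers the completion to a work queue.
 */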
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);

MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
		 SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz,
		 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
MODULE_PARM_DESC(uuid_ctl,
		 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];

static const char *scsi_debug_info(struct Scsi_Host *shp)
{
	int k;

	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
		      my_name, SDEBUG_VERSION, sdebug_version_date);
	if (k >= (SDEBUG_INFO_LEN - 1))
		return sdebug_info;
	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
		  "statistics", (int)sdebug_statistics);
	return sdebug_info;
}
/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
				 int length)
{
	char arr[16];
	int opts;
	int minLen = length > 15 ? 15 : length;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	memcpy(arr, buffer, minLen);
	arr[minLen] = '\0';
	if (1 != sscanf(arr, "%d", &opts))
		return -EINVAL;
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	if (sdebug_every_nth != 0)
		tweak_cmnd_count();
	return length;
}
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	int f, j, l;
	struct sdebug_queue *sqp;

	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf));

	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		seq_printf(m, "  queue %d:\n", j);
		f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (f != sdebug_max_queue) {
			l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}
	return 0;
}
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
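
/*
 * Illustrative use of the attribute above: set a 5 jiffy response delay
 * with "echo 5 > /sys/bus/pseudo/drivers/scsi_debug/delay". The write
 * fails with EBUSY while any commands are still queued.
 */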
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			int j, k;
			struct sdebug_queue *sqp;

			block_unblock_all_queues(true);
			for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
			     ++j, ++sqp) {
				k = find_first_bit(sqp->in_use_bm,
						   sdebug_max_queue);
				if (k != sdebug_max_queue) {
					res = -EBUSY;	/* queued commands */
					break;
				}
			}
			if (res > 0) {
				sdebug_ndelay = ndelay;
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}

static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{
	int opts;
	char work[20];

	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &opts) == 0)
				goto opts_done;
		} else {
			if (kstrtoint(work, 10, &opts) == 0)
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	sdebug_opts = opts;
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(opts);
static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_ptype = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ptype);

static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_dsense = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(dsense);
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw != n) {
			if ((0 == n) && (NULL == fake_storep)) {
				unsigned long sz =
					(unsigned long)sdebug_dev_size_mb *
					1048576;

				fake_storep = vzalloc(sz);
				if (NULL == fake_storep) {
					pr_err("out of memory, 9\n");
					return -ENOMEM;
				}
			}
			sdebug_fake_rw = n;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(no_lun_0);

static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(num_tgts);
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);

static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;

	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
		sdebug_every_nth = nth;
		if (nth && !sdebug_statistics) {
			pr_info("every_nth needs statistics=1, set it\n");
			sdebug_statistics = true;
		}
		tweak_cmnd_count();
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(every_nth);
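
/*
 * Illustrative use: time out every 100th command by pairing every_nth
 * with the timeout bit (4) in opts:
 *
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *   echo 4 > /sys/bus/pseudo/drivers/scsi_debug/opts
 */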
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int j, n, k, a;
	struct sdebug_queue *sqp;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
	    (n <= SDEBUG_CANQUEUE)) {
		block_unblock_all_queues(true);
		k = 0;
		for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
		     ++j, ++sqp) {
			a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
			if (a > k)
				k = a;
		}
		sdebug_max_queue = n;
		if (k == SDEBUG_CANQUEUE)
			atomic_set(&retired_max_queue, 0);
		else if (k >= n)
			atomic_set(&retired_max_queue, k + 1);
		else
			atomic_set(&retired_max_queue, 0);
		block_unblock_all_queues(false);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_queue);
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			spin_lock(&sdebug_host_list_lock);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			spin_unlock(&sdebug_host_list_lock);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
}

static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void);

static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			sdebug_add_adapter();
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_remove_adapter();
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);
static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;

	if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
		if (n > 0)
			sdebug_statistics = true;
		else {
			clear_queue_stats();
			sdebug_statistics = false;
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(statistics);
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  (int)map_size, map_storep);
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_removable = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(removable);

static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_host_lock = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(host_lock);

static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		sdebug_strict = (n > 0);
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(strict);

static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
}
static DRIVER_ATTR_RO(uuid_ctl);

static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int ret, n;

	ret = kstrtoint(buf, 0, &n);
	if (ret)
		return ret;
	sdebug_cdb_len = n;
	all_config_cdb_len();
	return count;
}
static DRIVER_ATTR_RW(cdb_len);
/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: sdebug_add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
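
/*
 * Illustrative use of this directory (unlike /sys/module/scsi_debug/
 * parameters, writes here also run the store() side effects):
 *
 *   echo 0x1 > /sys/bus/pseudo/drivers/scsi_debug/opts
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 */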
static struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	atomic_set(&retired_max_queue, 0);

	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;
	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}
	if (sdebug_max_luns > 256) {
		pr_warn("max_luns can be no more than 256, use default\n");
		sdebug_max_luns = DEF_MAX_LUNS;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}
	sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
			       GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}

	if (sdebug_fake_rw == 0) {
		fake_storep = vzalloc(sz);
		if (NULL == fake_storep) {
			pr_err("out of memory, 1\n");
			ret = -ENOMEM;
			goto free_q_arr;
		}
		if (sdebug_num_parts > 0)
			sdebug_build_parts(fake_storep, sz);
	}

	if (sdebug_dix) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
		dif_storep = vmalloc(dif_size);

		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);

		if (dif_storep == NULL) {
			pr_err("out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(dif_storep, 0xff, dif_size);
	}

	/* Logical Block Provisioning */
	if (scsi_debug_lbp()) {
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			ret = -EINVAL;
			goto free_vm;
		}

		map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
		map_storep = vmalloc(array_size(sizeof(long),
						BITS_TO_LONGS(map_size)));

		pr_info("%lu provisioning blocks\n", map_size);

		if (map_storep == NULL) {
			pr_err("out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		bitmap_zero(map_storep, map_size);

		/* Map first 1KB for partition table */
		if (sdebug_num_parts)
			map_region(0, 2);
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	host_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			pr_err("sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_add_host);

	return 0;

bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}
static void __exit scsi_debug_exit(void)
{
	int k = sdebug_add_host;

	stop_all_queued();
	for (; k; k--)
		sdebug_remove_adapter();
	free_all_queued();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	root_device_unregister(pseudo_primary);

	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);
	kfree(sdebug_q_arr);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}

static int sdebug_add_adapter(void)
{
	int k, devs_per_host;
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (sdbg_host == NULL) {
		pr_err("out of memory at line %d\n", __LINE__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo) {
			pr_err("out of memory at line %d\n", __LINE__);
			error = -ENOMEM;
			goto clean;
		}
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);

	error = device_register(&sdbg_host->dev);

	if (error)
		goto clean;

	++sdebug_add_host;
	return error;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	kfree(sdbg_host);
	return error;
}
static void sdebug_remove_adapter(void)
{
	struct sdebug_host_info *sdbg_host = NULL;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		list_del(&sdbg_host->host_list);
	}
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--sdebug_add_host;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
	int num_in_q = 0;
	struct sdebug_dev_info *devip;

	block_unblock_all_queues(true);
	devip = (struct sdebug_dev_info *)sdev->hostdata;
	if (NULL == devip) {
		block_unblock_all_queues(false);
		return	-ENODEV;
	}
	num_in_q = atomic_read(&devip->num_in_q);

	if (qdepth < 1)
		qdepth = 1;
	/* allow to exceed max host qc_arr elements for testing */
	if (qdepth > SDEBUG_CANQUEUE + 10)
		qdepth = SDEBUG_CANQUEUE + 10;
	scsi_change_queue_depth(sdev, qdepth);

	if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
		sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
			    __func__, qdepth, num_in_q);
	}
	block_unblock_all_queues(false);
	return sdev->queue_depth;
}
static bool fake_timeout(struct scsi_cmnd *scp)
{
	if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
		if (sdebug_every_nth < -1)
			sdebug_every_nth = -1;
		if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
			return true; /* ignore command causing timeout */
		else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
			 scsi_medium_access_command(scp))
			return true; /* time out reads and writes */
	}
	return false;
}

static bool fake_host_busy(struct scsi_cmnd *scp)
{
	return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
		(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;

	scsi_set_resid(scp, 0);
	if (sdebug_statistics)
		atomic_inc(&sdebug_cmnd_count);
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scp->request), b);
	}
	if (fake_host_busy(scp))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}
	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
				    "%s\n", my_name, "initializing command "
				    "required");
		errsts = check_condition_result;
		goto fini;
	}
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;    /* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
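
/*
 * Dispatch summary for scsi_debug_queuecommand() (illustrative recap):
 * the CDB opcode indexes opcode_ind_arr[] to find an opcode_info_t
 * entry, service-action/strict-mask/unit-attention checks run, and the
 * chosen resp_* handler is invoked through schedule_resp() with a delay
 * selected by the F_DELAY_OVERR and F_LONG_DELAY flags.
 */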
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
};
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = sdebug_max_queue;
	if (sdebug_clustering)
		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/* Decide whether to tell scsi subsystem that we want mq */
	/* Following should give the same answer for each host */
	if (shost_use_blk_mq(hpnt))
		hpnt->nr_hw_queues = submit_queues;

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
static int sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		pr_err("Unable to locate host info\n");
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};