2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * Copyright (C) 2001 - 2016 Douglas Gilbert
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * For documentation see http://sg.danny.cz/sg/sdebug26.html
21 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
23 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/errno.h>
27 #include <linux/jiffies.h>
28 #include <linux/slab.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/genhd.h>
33 #include <linux/init.h>
34 #include <linux/proc_fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/moduleparam.h>
37 #include <linux/scatterlist.h>
38 #include <linux/blkdev.h>
39 #include <linux/crc-t10dif.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/atomic.h>
43 #include <linux/hrtimer.h>
44 #include <linux/uuid.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
60 #include "scsi_logging.h"
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "1.86"
64 static const char *sdebug_version_date
= "20160430";
66 #define MY_NAME "scsi_debug"
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define UA_RESET_ASC 0x29
79 #define UA_CHANGED_ASC 0x2a
80 #define TARGET_CHANGED_ASC 0x3f
81 #define LUNS_CHANGED_ASCQ 0x0e
82 #define INSUFF_RES_ASC 0x55
83 #define INSUFF_RES_ASCQ 0x3
84 #define POWER_ON_RESET_ASCQ 0x0
85 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
86 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
87 #define CAPACITY_CHANGED_ASCQ 0x9
88 #define SAVING_PARAMS_UNSUP 0x39
89 #define TRANSPORT_PROBLEM 0x4b
90 #define THRESHOLD_EXCEEDED 0x5d
91 #define LOW_POWER_COND_ON 0x5e
92 #define MISCOMPARE_VERIFY_ASC 0x1d
93 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
94 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 /* Additional Sense Code Qualifier (ASCQ) */
97 #define ACK_NAK_TO 0x3
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST 1
101 #define DEF_NUM_TGTS 1
102 #define DEF_MAX_LUNS 1
103 /* With these defaults, this driver will make 1 host with 1 target
104 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
107 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB 8
111 #define DEF_D_SENSE 0
112 #define DEF_EVERY_NTH 0
113 #define DEF_FAKE_RW 0
115 #define DEF_HOST_LOCK 0
118 #define DEF_LBPWS10 0
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0 0
123 #define DEF_NUM_PARTS 0
125 #define DEF_OPT_BLKS 1024
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE TYPE_DISK
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB 0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
139 #define DEF_STATISTICS false
140 #define DEF_SUBMIT_QUEUES 1
141 #define DEF_UUID_CTL 0
142 #define JDELAY_OVERRIDDEN -9999
144 #define SDEBUG_LUN_0_VAL 0
146 /* bit mask values for sdebug_opts */
147 #define SDEBUG_OPT_NOISE 1
148 #define SDEBUG_OPT_MEDIUM_ERR 2
149 #define SDEBUG_OPT_TIMEOUT 4
150 #define SDEBUG_OPT_RECOVERED_ERR 8
151 #define SDEBUG_OPT_TRANSPORT_ERR 16
152 #define SDEBUG_OPT_DIF_ERR 32
153 #define SDEBUG_OPT_DIX_ERR 64
154 #define SDEBUG_OPT_MAC_TIMEOUT 128
155 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
156 #define SDEBUG_OPT_Q_NOISE 0x200
157 #define SDEBUG_OPT_ALL_TSF 0x400
158 #define SDEBUG_OPT_RARE_TSF 0x800
159 #define SDEBUG_OPT_N_WCE 0x1000
160 #define SDEBUG_OPT_RESET_NOISE 0x2000
161 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
162 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
163 SDEBUG_OPT_RESET_NOISE)
164 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
165 SDEBUG_OPT_TRANSPORT_ERR | \
166 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
167 SDEBUG_OPT_SHORT_TRANSFER)
168 /* When "every_nth" > 0 then modulo "every_nth" commands:
169 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
170 * - a RECOVERED_ERROR is simulated on successful read and write
171 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
172 * - a TRANSPORT_ERROR is simulated on successful read and write
173 * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
175 * When "every_nth" < 0 then after "- every_nth" commands:
176 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
177 * - a RECOVERED_ERROR is simulated on successful read and write
178 * commands if SDEBUG_OPT_RECOVERED_ERR is set.
179 * - a TRANSPORT_ERROR is simulated on successful read and write
180 * commands if _DEBUG_OPT_TRANSPORT_ERR is set.
181 * This will continue on every subsequent command until some other action
182 * occurs (e.g. the user * writing a new value (other than -1 or 1) to
183 * every_nth via sysfs).
186 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
187 * priority order. In the subset implemented here lower numbers have higher
188 * priority. The UA numbers should be a sequence starting from 0 with
189 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
190 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
191 #define SDEBUG_UA_BUS_RESET 1
192 #define SDEBUG_UA_MODE_CHANGED 2
193 #define SDEBUG_UA_CAPACITY_CHANGED 3
194 #define SDEBUG_UA_LUNS_CHANGED 4
195 #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
196 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
197 #define SDEBUG_NUM_UAS 7
199 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
200 * sector on read commands: */
201 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
202 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
204 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
205 * or "peripheral device" addressing (value 0) */
206 #define SAM2_LUN_ADDRESS_METHOD 0
208 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
209 * (for response) per submit queue at one time. Can be reduced by max_queue
210 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
211 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
212 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
213 * but cannot exceed SDEBUG_CANQUEUE .
215 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
216 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
217 #define DEF_CMD_PER_LUN 255
221 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
223 #define F_RL_WLUN_OK 0x10
224 #define F_SKIP_UA 0x20
225 #define F_DELAY_OVERR 0x40
226 #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
227 #define F_SA_HIGH 0x100 /* as used by variable length cdbs */
228 #define F_INV_OP 0x200
229 #define F_FAKE_RW 0x400
230 #define F_M_ACCESS 0x800 /* media access */
232 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
233 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
234 #define FF_SA (F_SA_HIGH | F_SA_LOW)
236 #define SDEBUG_MAX_PARTS 4
238 #define SDEBUG_MAX_CMD_LEN 32
241 struct sdebug_dev_info
{
242 struct list_head dev_list
;
243 unsigned int channel
;
247 struct sdebug_host_info
*sdbg_host
;
248 unsigned long uas_bm
[1];
254 struct sdebug_host_info
{
255 struct list_head host_list
;
256 struct Scsi_Host
*shost
;
258 struct list_head dev_info_list
;
261 #define to_sdebug_host(d) \
262 container_of(d, struct sdebug_host_info, dev)
264 struct sdebug_defer
{
266 struct execute_work ew
;
267 int sqa_idx
; /* index of sdebug_queue array */
268 int qc_idx
; /* index of sdebug_queued_cmd array within sqa_idx */
272 struct sdebug_queued_cmd
{
273 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
274 * instance indicates this slot is in use.
276 struct sdebug_defer
*sd_dp
;
277 struct scsi_cmnd
*a_cmnd
;
278 unsigned int inj_recovered
:1;
279 unsigned int inj_transport
:1;
280 unsigned int inj_dif
:1;
281 unsigned int inj_dix
:1;
282 unsigned int inj_short
:1;
285 struct sdebug_queue
{
286 struct sdebug_queued_cmd qc_arr
[SDEBUG_CANQUEUE
];
287 unsigned long in_use_bm
[SDEBUG_CANQUEUE_WORDS
];
289 atomic_t blocked
; /* to temporarily stop more being queued */
292 static atomic_t sdebug_cmnd_count
; /* number of incoming commands */
293 static atomic_t sdebug_completions
; /* count of deferred completions */
294 static atomic_t sdebug_miss_cpus
; /* submission + completion cpus differ */
295 static atomic_t sdebug_a_tsf
; /* 'almost task set full' counter */
297 struct opcode_info_t
{
298 u8 num_attached
; /* 0 if this is it (i.e. a leaf); use 0xff */
299 /* for terminating element */
300 u8 opcode
; /* if num_attached > 0, preferred */
301 u16 sa
; /* service action */
302 u32 flags
; /* OR-ed set of SDEB_F_* */
303 int (*pfp
)(struct scsi_cmnd
*, struct sdebug_dev_info
*);
304 const struct opcode_info_t
*arrp
; /* num_attached elements or NULL */
305 u8 len_mask
[16]; /* len=len_mask[0], then mask for cdb[1]... */
306 /* ignore cdb bytes after position 15 */
309 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
310 enum sdeb_opcode_index
{
311 SDEB_I_INVALID_OPCODE
= 0,
313 SDEB_I_REPORT_LUNS
= 2,
314 SDEB_I_REQUEST_SENSE
= 3,
315 SDEB_I_TEST_UNIT_READY
= 4,
316 SDEB_I_MODE_SENSE
= 5, /* 6, 10 */
317 SDEB_I_MODE_SELECT
= 6, /* 6, 10 */
318 SDEB_I_LOG_SENSE
= 7,
319 SDEB_I_READ_CAPACITY
= 8, /* 10; 16 is in SA_IN(16) */
320 SDEB_I_READ
= 9, /* 6, 10, 12, 16 */
321 SDEB_I_WRITE
= 10, /* 6, 10, 12, 16 */
322 SDEB_I_START_STOP
= 11,
323 SDEB_I_SERV_ACT_IN
= 12, /* 12, 16 */
324 SDEB_I_SERV_ACT_OUT
= 13, /* 12, 16 */
325 SDEB_I_MAINT_IN
= 14,
326 SDEB_I_MAINT_OUT
= 15,
327 SDEB_I_VERIFY
= 16, /* 10 only */
328 SDEB_I_VARIABLE_LEN
= 17,
329 SDEB_I_RESERVE
= 18, /* 6, 10 */
330 SDEB_I_RELEASE
= 19, /* 6, 10 */
331 SDEB_I_ALLOW_REMOVAL
= 20, /* PREVENT ALLOW MEDIUM REMOVAL */
332 SDEB_I_REZERO_UNIT
= 21, /* REWIND in SSC */
333 SDEB_I_ATA_PT
= 22, /* 12, 16 */
334 SDEB_I_SEND_DIAG
= 23,
336 SDEB_I_XDWRITEREAD
= 25, /* 10 only */
337 SDEB_I_WRITE_BUFFER
= 26,
338 SDEB_I_WRITE_SAME
= 27, /* 10, 16 */
339 SDEB_I_SYNC_CACHE
= 28, /* 10 only */
340 SDEB_I_COMP_WRITE
= 29,
341 SDEB_I_LAST_ELEMENT
= 30, /* keep this last */
345 static const unsigned char opcode_ind_arr
[256] = {
346 /* 0x0; 0x0->0x1f: 6 byte cdbs */
347 SDEB_I_TEST_UNIT_READY
, SDEB_I_REZERO_UNIT
, 0, SDEB_I_REQUEST_SENSE
,
349 SDEB_I_READ
, 0, SDEB_I_WRITE
, 0, 0, 0, 0, 0,
350 0, 0, SDEB_I_INQUIRY
, 0, 0, SDEB_I_MODE_SELECT
, SDEB_I_RESERVE
,
352 0, 0, SDEB_I_MODE_SENSE
, SDEB_I_START_STOP
, 0, SDEB_I_SEND_DIAG
,
353 SDEB_I_ALLOW_REMOVAL
, 0,
354 /* 0x20; 0x20->0x3f: 10 byte cdbs */
355 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY
, 0, 0,
356 SDEB_I_READ
, 0, SDEB_I_WRITE
, 0, 0, 0, 0, SDEB_I_VERIFY
,
357 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE
, 0, 0,
358 0, 0, 0, SDEB_I_WRITE_BUFFER
, 0, 0, 0, 0,
359 /* 0x40; 0x40->0x5f: 10 byte cdbs */
360 0, SDEB_I_WRITE_SAME
, SDEB_I_UNMAP
, 0, 0, 0, 0, 0,
361 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE
, 0, 0,
362 0, 0, 0, SDEB_I_XDWRITEREAD
, 0, SDEB_I_MODE_SELECT
, SDEB_I_RESERVE
,
364 0, 0, SDEB_I_MODE_SENSE
, 0, 0, 0, 0, 0,
365 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
366 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
367 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
368 0, SDEB_I_VARIABLE_LEN
,
369 /* 0x80; 0x80->0x9f: 16 byte cdbs */
370 0, 0, 0, 0, 0, SDEB_I_ATA_PT
, 0, 0,
371 SDEB_I_READ
, SDEB_I_COMP_WRITE
, SDEB_I_WRITE
, 0, 0, 0, 0, 0,
372 0, 0, 0, SDEB_I_WRITE_SAME
, 0, 0, 0, 0,
373 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN
, SDEB_I_SERV_ACT_OUT
,
374 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
375 SDEB_I_REPORT_LUNS
, SDEB_I_ATA_PT
, 0, SDEB_I_MAINT_IN
,
376 SDEB_I_MAINT_OUT
, 0, 0, 0,
377 SDEB_I_READ
, SDEB_I_SERV_ACT_OUT
, SDEB_I_WRITE
, SDEB_I_SERV_ACT_IN
,
379 0, 0, 0, 0, 0, 0, 0, 0,
380 0, 0, 0, 0, 0, 0, 0, 0,
381 /* 0xc0; 0xc0->0xff: vendor specific */
382 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
383 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
384 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
385 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
388 static int resp_inquiry(struct scsi_cmnd
*, struct sdebug_dev_info
*);
389 static int resp_report_luns(struct scsi_cmnd
*, struct sdebug_dev_info
*);
390 static int resp_requests(struct scsi_cmnd
*, struct sdebug_dev_info
*);
391 static int resp_mode_sense(struct scsi_cmnd
*, struct sdebug_dev_info
*);
392 static int resp_mode_select(struct scsi_cmnd
*, struct sdebug_dev_info
*);
393 static int resp_log_sense(struct scsi_cmnd
*, struct sdebug_dev_info
*);
394 static int resp_readcap(struct scsi_cmnd
*, struct sdebug_dev_info
*);
395 static int resp_read_dt0(struct scsi_cmnd
*, struct sdebug_dev_info
*);
396 static int resp_write_dt0(struct scsi_cmnd
*, struct sdebug_dev_info
*);
397 static int resp_start_stop(struct scsi_cmnd
*, struct sdebug_dev_info
*);
398 static int resp_readcap16(struct scsi_cmnd
*, struct sdebug_dev_info
*);
399 static int resp_get_lba_status(struct scsi_cmnd
*, struct sdebug_dev_info
*);
400 static int resp_report_tgtpgs(struct scsi_cmnd
*, struct sdebug_dev_info
*);
401 static int resp_unmap(struct scsi_cmnd
*, struct sdebug_dev_info
*);
402 static int resp_rsup_opcodes(struct scsi_cmnd
*, struct sdebug_dev_info
*);
403 static int resp_rsup_tmfs(struct scsi_cmnd
*, struct sdebug_dev_info
*);
404 static int resp_write_same_10(struct scsi_cmnd
*, struct sdebug_dev_info
*);
405 static int resp_write_same_16(struct scsi_cmnd
*, struct sdebug_dev_info
*);
406 static int resp_xdwriteread_10(struct scsi_cmnd
*, struct sdebug_dev_info
*);
407 static int resp_comp_write(struct scsi_cmnd
*, struct sdebug_dev_info
*);
408 static int resp_write_buffer(struct scsi_cmnd
*, struct sdebug_dev_info
*);
410 static const struct opcode_info_t msense_iarr
[1] = {
411 {0, 0x1a, 0, F_D_IN
, NULL
, NULL
,
412 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
415 static const struct opcode_info_t mselect_iarr
[1] = {
416 {0, 0x15, 0, F_D_OUT
, NULL
, NULL
,
417 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
420 static const struct opcode_info_t read_iarr
[3] = {
421 {0, 0x28, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, NULL
,/* READ(10) */
422 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
424 {0, 0x8, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, NULL
, /* READ(6) */
425 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
426 {0, 0xa8, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, NULL
,/* READ(12) */
427 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
431 static const struct opcode_info_t write_iarr
[3] = {
432 {0, 0x2a, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, NULL
, /* 10 */
433 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
435 {0, 0xa, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, NULL
, /* 6 */
436 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
437 {0, 0xaa, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, NULL
, /* 12 */
438 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
442 static const struct opcode_info_t sa_in_iarr
[1] = {
443 {0, 0x9e, 0x12, F_SA_LOW
| F_D_IN
, resp_get_lba_status
, NULL
,
444 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
445 0xff, 0xff, 0xff, 0, 0xc7} },
448 static const struct opcode_info_t vl_iarr
[1] = { /* VARIABLE LENGTH */
449 {0, 0x7f, 0xb, F_SA_HIGH
| F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
,
450 NULL
, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
451 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
454 static const struct opcode_info_t maint_in_iarr
[2] = {
455 {0, 0xa3, 0xc, F_SA_LOW
| F_D_IN
, resp_rsup_opcodes
, NULL
,
456 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
458 {0, 0xa3, 0xd, F_SA_LOW
| F_D_IN
, resp_rsup_tmfs
, NULL
,
459 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
463 static const struct opcode_info_t write_same_iarr
[1] = {
464 {0, 0x93, 0, F_D_OUT_MAYBE
| FF_DIRECT_IO
, resp_write_same_16
, NULL
,
465 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
466 0xff, 0xff, 0xff, 0x1f, 0xc7} },
469 static const struct opcode_info_t reserve_iarr
[1] = {
470 {0, 0x16, 0, F_D_OUT
, NULL
, NULL
, /* RESERVE(6) */
471 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
474 static const struct opcode_info_t release_iarr
[1] = {
475 {0, 0x17, 0, F_D_OUT
, NULL
, NULL
, /* RELEASE(6) */
476 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
480 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
481 * plus the terminating elements for logic that scans this table such as
482 * REPORT SUPPORTED OPERATION CODES. */
483 static const struct opcode_info_t opcode_info_arr
[SDEB_I_LAST_ELEMENT
+ 1] = {
485 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
,
486 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
487 {0, 0x12, 0, FF_RESPOND
| F_D_IN
, resp_inquiry
, NULL
,
488 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
489 {0, 0xa0, 0, FF_RESPOND
| F_D_IN
, resp_report_luns
, NULL
,
490 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
492 {0, 0x3, 0, FF_RESPOND
| F_D_IN
, resp_requests
, NULL
,
493 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
494 {0, 0x0, 0, F_M_ACCESS
| F_RL_WLUN_OK
, NULL
, NULL
,/* TEST UNIT READY */
495 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
496 {1, 0x5a, 0, F_D_IN
, resp_mode_sense
, msense_iarr
,
497 {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
499 {1, 0x55, 0, F_D_OUT
, resp_mode_select
, mselect_iarr
,
500 {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
501 {0, 0x4d, 0, F_D_IN
, resp_log_sense
, NULL
,
502 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
504 {0, 0x25, 0, F_D_IN
, resp_readcap
, NULL
,
505 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
507 {3, 0x88, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, read_iarr
,
508 {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
509 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
511 {3, 0x8a, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, write_iarr
,
512 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
513 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
514 {0, 0x1b, 0, 0, resp_start_stop
, NULL
, /* START STOP UNIT */
515 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
516 {1, 0x9e, 0x10, F_SA_LOW
| F_D_IN
, resp_readcap16
, sa_in_iarr
,
517 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
518 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
519 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* SA OUT */
520 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
521 {2, 0xa3, 0xa, F_SA_LOW
| F_D_IN
, resp_report_tgtpgs
, maint_in_iarr
,
522 {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
524 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* MAINT OUT */
525 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
526 {0, 0x2f, 0, F_D_OUT_MAYBE
| FF_DIRECT_IO
, NULL
, NULL
, /* VERIFY(10) */
527 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
529 {1, 0x7f, 0x9, F_SA_HIGH
| F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
,
530 vl_iarr
, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
531 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
532 {1, 0x56, 0, F_D_OUT
, NULL
, reserve_iarr
, /* RESERVE(10) */
533 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
535 {1, 0x57, 0, F_D_OUT
, NULL
, release_iarr
, /* RELEASE(10) */
536 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
539 {0, 0x1e, 0, 0, NULL
, NULL
, /* ALLOW REMOVAL */
540 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
541 {0, 0x1, 0, 0, resp_start_stop
, NULL
, /* REWIND ?? */
542 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
543 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* ATA_PT */
544 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
545 {0, 0x1d, F_D_OUT
, 0, NULL
, NULL
, /* SEND DIAGNOSTIC */
546 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
547 {0, 0x42, 0, F_D_OUT
| FF_DIRECT_IO
, resp_unmap
, NULL
, /* UNMAP */
548 {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
549 {0, 0x53, 0, F_D_IN
| F_D_OUT
| FF_DIRECT_IO
, resp_xdwriteread_10
,
550 NULL
, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
552 {0, 0x3b, 0, F_D_OUT_MAYBE
, resp_write_buffer
, NULL
,
553 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
554 0, 0, 0, 0} }, /* WRITE_BUFFER */
555 {1, 0x41, 0, F_D_OUT_MAYBE
| FF_DIRECT_IO
, resp_write_same_10
,
556 write_same_iarr
, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
557 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
558 {0, 0x35, 0, F_DELAY_OVERR
| FF_DIRECT_IO
, NULL
, NULL
, /* SYNC_CACHE */
559 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
561 {0, 0x89, 0, F_D_OUT
| FF_DIRECT_IO
, resp_comp_write
, NULL
,
562 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
563 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
566 {0xff, 0, 0, 0, NULL
, NULL
, /* terminating element */
567 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
570 static int sdebug_add_host
= DEF_NUM_HOST
;
571 static int sdebug_ato
= DEF_ATO
;
572 static int sdebug_jdelay
= DEF_JDELAY
; /* if > 0 then unit is jiffies */
573 static int sdebug_dev_size_mb
= DEF_DEV_SIZE_MB
;
574 static int sdebug_dif
= DEF_DIF
;
575 static int sdebug_dix
= DEF_DIX
;
576 static int sdebug_dsense
= DEF_D_SENSE
;
577 static int sdebug_every_nth
= DEF_EVERY_NTH
;
578 static int sdebug_fake_rw
= DEF_FAKE_RW
;
579 static unsigned int sdebug_guard
= DEF_GUARD
;
580 static int sdebug_lowest_aligned
= DEF_LOWEST_ALIGNED
;
581 static int sdebug_max_luns
= DEF_MAX_LUNS
;
582 static int sdebug_max_queue
= SDEBUG_CANQUEUE
; /* per submit queue */
583 static atomic_t retired_max_queue
; /* if > 0 then was prior max_queue */
584 static int sdebug_ndelay
= DEF_NDELAY
; /* if > 0 then unit is nanoseconds */
585 static int sdebug_no_lun_0
= DEF_NO_LUN_0
;
586 static int sdebug_no_uld
;
587 static int sdebug_num_parts
= DEF_NUM_PARTS
;
588 static int sdebug_num_tgts
= DEF_NUM_TGTS
; /* targets per host */
589 static int sdebug_opt_blks
= DEF_OPT_BLKS
;
590 static int sdebug_opts
= DEF_OPTS
;
591 static int sdebug_physblk_exp
= DEF_PHYSBLK_EXP
;
592 static int sdebug_ptype
= DEF_PTYPE
; /* SCSI peripheral device type */
593 static int sdebug_scsi_level
= DEF_SCSI_LEVEL
;
594 static int sdebug_sector_size
= DEF_SECTOR_SIZE
;
595 static int sdebug_virtual_gb
= DEF_VIRTUAL_GB
;
596 static int sdebug_vpd_use_hostno
= DEF_VPD_USE_HOSTNO
;
597 static unsigned int sdebug_lbpu
= DEF_LBPU
;
598 static unsigned int sdebug_lbpws
= DEF_LBPWS
;
599 static unsigned int sdebug_lbpws10
= DEF_LBPWS10
;
600 static unsigned int sdebug_lbprz
= DEF_LBPRZ
;
601 static unsigned int sdebug_unmap_alignment
= DEF_UNMAP_ALIGNMENT
;
602 static unsigned int sdebug_unmap_granularity
= DEF_UNMAP_GRANULARITY
;
603 static unsigned int sdebug_unmap_max_blocks
= DEF_UNMAP_MAX_BLOCKS
;
604 static unsigned int sdebug_unmap_max_desc
= DEF_UNMAP_MAX_DESC
;
605 static unsigned int sdebug_write_same_length
= DEF_WRITESAME_LENGTH
;
606 static int sdebug_uuid_ctl
= DEF_UUID_CTL
;
607 static bool sdebug_removable
= DEF_REMOVABLE
;
608 static bool sdebug_clustering
;
609 static bool sdebug_host_lock
= DEF_HOST_LOCK
;
610 static bool sdebug_strict
= DEF_STRICT
;
611 static bool sdebug_any_injecting_opt
;
612 static bool sdebug_verbose
;
613 static bool have_dif_prot
;
614 static bool sdebug_statistics
= DEF_STATISTICS
;
615 static bool sdebug_mq_active
;
617 static unsigned int sdebug_store_sectors
;
618 static sector_t sdebug_capacity
; /* in sectors */
620 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
621 may still need them */
622 static int sdebug_heads
; /* heads per disk */
623 static int sdebug_cylinders_per
; /* cylinders per surface */
624 static int sdebug_sectors_per
; /* sectors per cylinder */
626 static LIST_HEAD(sdebug_host_list
);
627 static DEFINE_SPINLOCK(sdebug_host_list_lock
);
629 static unsigned char *fake_storep
; /* ramdisk storage */
630 static struct sd_dif_tuple
*dif_storep
; /* protection info */
631 static void *map_storep
; /* provisioning map */
633 static unsigned long map_size
;
634 static int num_aborts
;
635 static int num_dev_resets
;
636 static int num_target_resets
;
637 static int num_bus_resets
;
638 static int num_host_resets
;
639 static int dix_writes
;
640 static int dix_reads
;
641 static int dif_errors
;
643 static int submit_queues
= DEF_SUBMIT_QUEUES
; /* > 1 for multi-queue (mq) */
644 static struct sdebug_queue
*sdebug_q_arr
; /* ptr to array of submit queues */
646 static DEFINE_RWLOCK(atomic_rw
);
648 static char sdebug_proc_name
[] = MY_NAME
;
649 static const char *my_name
= MY_NAME
;
651 static struct bus_type pseudo_lld_bus
;
653 static struct device_driver sdebug_driverfs_driver
= {
654 .name
= sdebug_proc_name
,
655 .bus
= &pseudo_lld_bus
,
658 static const int check_condition_result
=
659 (DRIVER_SENSE
<< 24) | SAM_STAT_CHECK_CONDITION
;
661 static const int illegal_condition_result
=
662 (DRIVER_SENSE
<< 24) | (DID_ABORT
<< 16) | SAM_STAT_CHECK_CONDITION
;
664 static const int device_qfull_result
=
665 (DID_OK
<< 16) | (COMMAND_COMPLETE
<< 8) | SAM_STAT_TASK_SET_FULL
;
668 /* Only do the extra work involved in logical block provisioning if one or
669 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
670 * real reads and writes (i.e. not skipping them for speed).
672 static inline bool scsi_debug_lbp(void)
674 return 0 == sdebug_fake_rw
&&
675 (sdebug_lbpu
|| sdebug_lbpws
|| sdebug_lbpws10
);
678 static void *fake_store(unsigned long long lba
)
680 lba
= do_div(lba
, sdebug_store_sectors
);
682 return fake_storep
+ lba
* sdebug_sector_size
;
685 static struct sd_dif_tuple
*dif_store(sector_t sector
)
687 sector
= sector_div(sector
, sdebug_store_sectors
);
689 return dif_storep
+ sector
;
692 static void sdebug_max_tgts_luns(void)
694 struct sdebug_host_info
*sdbg_host
;
695 struct Scsi_Host
*hpnt
;
697 spin_lock(&sdebug_host_list_lock
);
698 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
699 hpnt
= sdbg_host
->shost
;
700 if ((hpnt
->this_id
>= 0) &&
701 (sdebug_num_tgts
> hpnt
->this_id
))
702 hpnt
->max_id
= sdebug_num_tgts
+ 1;
704 hpnt
->max_id
= sdebug_num_tgts
;
705 /* sdebug_max_luns; */
706 hpnt
->max_lun
= SCSI_W_LUN_REPORT_LUNS
+ 1;
708 spin_unlock(&sdebug_host_list_lock
);
711 enum sdeb_cmd_data
{SDEB_IN_DATA
= 0, SDEB_IN_CDB
= 1};
713 /* Set in_bit to -1 to indicate no bit position of invalid field */
714 static void mk_sense_invalid_fld(struct scsi_cmnd
*scp
,
715 enum sdeb_cmd_data c_d
,
716 int in_byte
, int in_bit
)
718 unsigned char *sbuff
;
722 sbuff
= scp
->sense_buffer
;
724 sdev_printk(KERN_ERR
, scp
->device
,
725 "%s: sense_buffer is NULL\n", __func__
);
728 asc
= c_d
? INVALID_FIELD_IN_CDB
: INVALID_FIELD_IN_PARAM_LIST
;
729 memset(sbuff
, 0, SCSI_SENSE_BUFFERSIZE
);
730 scsi_build_sense_buffer(sdebug_dsense
, sbuff
, ILLEGAL_REQUEST
, asc
, 0);
731 memset(sks
, 0, sizeof(sks
));
737 sks
[0] |= 0x7 & in_bit
;
739 put_unaligned_be16(in_byte
, sks
+ 1);
745 memcpy(sbuff
+ sl
+ 4, sks
, 3);
747 memcpy(sbuff
+ 15, sks
, 3);
749 sdev_printk(KERN_INFO
, scp
->device
, "%s: [sense_key,asc,ascq"
750 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
751 my_name
, asc
, c_d
? 'C' : 'D', in_byte
, in_bit
);
754 static void mk_sense_buffer(struct scsi_cmnd
*scp
, int key
, int asc
, int asq
)
756 unsigned char *sbuff
;
758 sbuff
= scp
->sense_buffer
;
760 sdev_printk(KERN_ERR
, scp
->device
,
761 "%s: sense_buffer is NULL\n", __func__
);
764 memset(sbuff
, 0, SCSI_SENSE_BUFFERSIZE
);
766 scsi_build_sense_buffer(sdebug_dsense
, sbuff
, key
, asc
, asq
);
769 sdev_printk(KERN_INFO
, scp
->device
,
770 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
771 my_name
, key
, asc
, asq
);
774 static void mk_sense_invalid_opcode(struct scsi_cmnd
*scp
)
776 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_OPCODE
, 0);
779 static int scsi_debug_ioctl(struct scsi_device
*dev
, int cmd
, void __user
*arg
)
781 if (sdebug_verbose
) {
783 sdev_printk(KERN_INFO
, dev
,
784 "%s: BLKFLSBUF [0x1261]\n", __func__
);
785 else if (0x5331 == cmd
)
786 sdev_printk(KERN_INFO
, dev
,
787 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
790 sdev_printk(KERN_INFO
, dev
, "%s: cmd=0x%x\n",
794 /* return -ENOTTY; // correct return but upsets fdisk */
797 static void clear_luns_changed_on_target(struct sdebug_dev_info
*devip
)
799 struct sdebug_host_info
*sdhp
;
800 struct sdebug_dev_info
*dp
;
802 spin_lock(&sdebug_host_list_lock
);
803 list_for_each_entry(sdhp
, &sdebug_host_list
, host_list
) {
804 list_for_each_entry(dp
, &sdhp
->dev_info_list
, dev_list
) {
805 if ((devip
->sdbg_host
== dp
->sdbg_host
) &&
806 (devip
->target
== dp
->target
))
807 clear_bit(SDEBUG_UA_LUNS_CHANGED
, dp
->uas_bm
);
810 spin_unlock(&sdebug_host_list_lock
);
813 static int make_ua(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
817 k
= find_first_bit(devip
->uas_bm
, SDEBUG_NUM_UAS
);
818 if (k
!= SDEBUG_NUM_UAS
) {
819 const char *cp
= NULL
;
823 mk_sense_buffer(scp
, UNIT_ATTENTION
, UA_RESET_ASC
,
824 POWER_ON_RESET_ASCQ
);
826 cp
= "power on reset";
828 case SDEBUG_UA_BUS_RESET
:
829 mk_sense_buffer(scp
, UNIT_ATTENTION
, UA_RESET_ASC
,
834 case SDEBUG_UA_MODE_CHANGED
:
835 mk_sense_buffer(scp
, UNIT_ATTENTION
, UA_CHANGED_ASC
,
838 cp
= "mode parameters changed";
840 case SDEBUG_UA_CAPACITY_CHANGED
:
841 mk_sense_buffer(scp
, UNIT_ATTENTION
, UA_CHANGED_ASC
,
842 CAPACITY_CHANGED_ASCQ
);
844 cp
= "capacity data changed";
846 case SDEBUG_UA_MICROCODE_CHANGED
:
847 mk_sense_buffer(scp
, UNIT_ATTENTION
,
849 MICROCODE_CHANGED_ASCQ
);
851 cp
= "microcode has been changed";
853 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
:
854 mk_sense_buffer(scp
, UNIT_ATTENTION
,
856 MICROCODE_CHANGED_WO_RESET_ASCQ
);
858 cp
= "microcode has been changed without reset";
860 case SDEBUG_UA_LUNS_CHANGED
:
862 * SPC-3 behavior is to report a UNIT ATTENTION with
863 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
864 * on the target, until a REPORT LUNS command is
865 * received. SPC-4 behavior is to report it only once.
866 * NOTE: sdebug_scsi_level does not use the same
867 * values as struct scsi_device->scsi_level.
869 if (sdebug_scsi_level
>= 6) /* SPC-4 and above */
870 clear_luns_changed_on_target(devip
);
871 mk_sense_buffer(scp
, UNIT_ATTENTION
,
875 cp
= "reported luns data has changed";
878 pr_warn("unexpected unit attention code=%d\n", k
);
883 clear_bit(k
, devip
->uas_bm
);
885 sdev_printk(KERN_INFO
, scp
->device
,
886 "%s reports: Unit attention: %s\n",
888 return check_condition_result
;
893 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
894 static int fill_from_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
898 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
902 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_FROM_DEVICE
))
903 return DID_ERROR
<< 16;
905 act_len
= sg_copy_from_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
907 sdb
->resid
= scsi_bufflen(scp
) - act_len
;
912 /* Returns number of bytes fetched into 'arr' or -1 if error. */
913 static int fetch_to_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
916 if (!scsi_bufflen(scp
))
918 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_TO_DEVICE
))
921 return scsi_sg_copy_to_buffer(scp
, arr
, arr_len
);
925 static const char * inq_vendor_id
= "Linux ";
926 static const char * inq_product_id
= "scsi_debug ";
927 static const char *inq_product_rev
= "0186"; /* version less '.' */
928 /* Use some locally assigned NAAs for SAS addresses. */
929 static const u64 naa3_comp_a
= 0x3222222000000000ULL
;
930 static const u64 naa3_comp_b
= 0x3333333000000000ULL
;
931 static const u64 naa3_comp_c
= 0x3111111000000000ULL
;
933 /* Device identification VPD page. Returns number of bytes placed in arr */
934 static int inquiry_vpd_83(unsigned char *arr
, int port_group_id
,
935 int target_dev_id
, int dev_id_num
,
936 const char *dev_id_str
, int dev_id_str_len
,
937 const uuid_be
*lu_name
)
942 port_a
= target_dev_id
+ 1;
943 /* T10 vendor identifier field format (faked) */
944 arr
[0] = 0x2; /* ASCII */
947 memcpy(&arr
[4], inq_vendor_id
, 8);
948 memcpy(&arr
[12], inq_product_id
, 16);
949 memcpy(&arr
[28], dev_id_str
, dev_id_str_len
);
950 num
= 8 + 16 + dev_id_str_len
;
953 if (dev_id_num
>= 0) {
954 if (sdebug_uuid_ctl
) {
955 /* Locally assigned UUID */
956 arr
[num
++] = 0x1; /* binary (not necessarily sas) */
957 arr
[num
++] = 0xa; /* PIV=0, lu, naa */
960 arr
[num
++] = 0x10; /* uuid type=1, locally assigned */
962 memcpy(arr
+ num
, lu_name
, 16);
965 /* NAA-3, Logical unit identifier (binary) */
966 arr
[num
++] = 0x1; /* binary (not necessarily sas) */
967 arr
[num
++] = 0x3; /* PIV=0, lu, naa */
970 put_unaligned_be64(naa3_comp_b
+ dev_id_num
, arr
+ num
);
973 /* Target relative port number */
974 arr
[num
++] = 0x61; /* proto=sas, binary */
975 arr
[num
++] = 0x94; /* PIV=1, target port, rel port */
976 arr
[num
++] = 0x0; /* reserved */
977 arr
[num
++] = 0x4; /* length */
978 arr
[num
++] = 0x0; /* reserved */
979 arr
[num
++] = 0x0; /* reserved */
981 arr
[num
++] = 0x1; /* relative port A */
983 /* NAA-3, Target port identifier */
984 arr
[num
++] = 0x61; /* proto=sas, binary */
985 arr
[num
++] = 0x93; /* piv=1, target port, naa */
988 put_unaligned_be64(naa3_comp_a
+ port_a
, arr
+ num
);
990 /* NAA-3, Target port group identifier */
991 arr
[num
++] = 0x61; /* proto=sas, binary */
992 arr
[num
++] = 0x95; /* piv=1, target port group id */
997 put_unaligned_be16(port_group_id
, arr
+ num
);
999 /* NAA-3, Target device identifier */
1000 arr
[num
++] = 0x61; /* proto=sas, binary */
1001 arr
[num
++] = 0xa3; /* piv=1, target device, naa */
1004 put_unaligned_be64(naa3_comp_a
+ target_dev_id
, arr
+ num
);
1006 /* SCSI name string: Target device identifier */
1007 arr
[num
++] = 0x63; /* proto=sas, UTF-8 */
1008 arr
[num
++] = 0xa8; /* piv=1, target device, SCSI name string */
1011 memcpy(arr
+ num
, "naa.32222220", 12);
1013 snprintf(b
, sizeof(b
), "%08X", target_dev_id
);
1014 memcpy(arr
+ num
, b
, 8);
1016 memset(arr
+ num
, 0, 4);
1021 static unsigned char vpd84_data
[] = {
1022 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1023 0x22,0x22,0x22,0x0,0xbb,0x1,
1024 0x22,0x22,0x22,0x0,0xbb,0x2,
1027 /* Software interface identification VPD page */
1028 static int inquiry_vpd_84(unsigned char *arr
)
1030 memcpy(arr
, vpd84_data
, sizeof(vpd84_data
));
1031 return sizeof(vpd84_data
);
1034 /* Management network addresses VPD page */
1035 static int inquiry_vpd_85(unsigned char *arr
)
1038 const char * na1
= "https://www.kernel.org/config";
1039 const char * na2
= "http://www.kernel.org/log";
1042 arr
[num
++] = 0x1; /* lu, storage config */
1043 arr
[num
++] = 0x0; /* reserved */
1048 plen
= ((plen
/ 4) + 1) * 4;
1049 arr
[num
++] = plen
; /* length, null termianted, padded */
1050 memcpy(arr
+ num
, na1
, olen
);
1051 memset(arr
+ num
+ olen
, 0, plen
- olen
);
1054 arr
[num
++] = 0x4; /* lu, logging */
1055 arr
[num
++] = 0x0; /* reserved */
1060 plen
= ((plen
/ 4) + 1) * 4;
1061 arr
[num
++] = plen
; /* length, null terminated, padded */
1062 memcpy(arr
+ num
, na2
, olen
);
1063 memset(arr
+ num
+ olen
, 0, plen
- olen
);
1069 /* SCSI ports VPD page */
1070 static int inquiry_vpd_88(unsigned char *arr
, int target_dev_id
)
1075 port_a
= target_dev_id
+ 1;
1076 port_b
= port_a
+ 1;
1077 arr
[num
++] = 0x0; /* reserved */
1078 arr
[num
++] = 0x0; /* reserved */
1080 arr
[num
++] = 0x1; /* relative port 1 (primary) */
1081 memset(arr
+ num
, 0, 6);
1084 arr
[num
++] = 12; /* length tp descriptor */
1085 /* naa-5 target port identifier (A) */
1086 arr
[num
++] = 0x61; /* proto=sas, binary */
1087 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
1088 arr
[num
++] = 0x0; /* reserved */
1089 arr
[num
++] = 0x8; /* length */
1090 put_unaligned_be64(naa3_comp_a
+ port_a
, arr
+ num
);
1092 arr
[num
++] = 0x0; /* reserved */
1093 arr
[num
++] = 0x0; /* reserved */
1095 arr
[num
++] = 0x2; /* relative port 2 (secondary) */
1096 memset(arr
+ num
, 0, 6);
1099 arr
[num
++] = 12; /* length tp descriptor */
1100 /* naa-5 target port identifier (B) */
1101 arr
[num
++] = 0x61; /* proto=sas, binary */
1102 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
1103 arr
[num
++] = 0x0; /* reserved */
1104 arr
[num
++] = 0x8; /* length */
1105 put_unaligned_be64(naa3_comp_a
+ port_b
, arr
+ num
);
1112 static unsigned char vpd89_data
[] = {
1113 /* from 4th byte */ 0,0,0,0,
1114 'l','i','n','u','x',' ',' ',' ',
1115 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1117 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1119 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1120 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1121 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1122 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1124 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1126 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1128 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1129 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1130 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1131 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1132 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1133 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1134 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1135 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1136 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1137 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1138 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1139 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1140 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1141 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1142 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1143 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1144 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1145 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1146 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1147 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1148 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1149 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1150 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1151 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1152 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1153 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1156 /* ATA Information VPD page */
1157 static int inquiry_vpd_89(unsigned char *arr
)
1159 memcpy(arr
, vpd89_data
, sizeof(vpd89_data
));
1160 return sizeof(vpd89_data
);
1164 static unsigned char vpdb0_data
[] = {
1165 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1166 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1167 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1168 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1171 /* Block limits VPD page (SBC-3) */
1172 static int inquiry_vpd_b0(unsigned char *arr
)
1176 memcpy(arr
, vpdb0_data
, sizeof(vpdb0_data
));
1178 /* Optimal transfer length granularity */
1179 gran
= 1 << sdebug_physblk_exp
;
1180 put_unaligned_be16(gran
, arr
+ 2);
1182 /* Maximum Transfer Length */
1183 if (sdebug_store_sectors
> 0x400)
1184 put_unaligned_be32(sdebug_store_sectors
, arr
+ 4);
1186 /* Optimal Transfer Length */
1187 put_unaligned_be32(sdebug_opt_blks
, &arr
[8]);
1190 /* Maximum Unmap LBA Count */
1191 put_unaligned_be32(sdebug_unmap_max_blocks
, &arr
[16]);
1193 /* Maximum Unmap Block Descriptor Count */
1194 put_unaligned_be32(sdebug_unmap_max_desc
, &arr
[20]);
1197 /* Unmap Granularity Alignment */
1198 if (sdebug_unmap_alignment
) {
1199 put_unaligned_be32(sdebug_unmap_alignment
, &arr
[28]);
1200 arr
[28] |= 0x80; /* UGAVALID */
1203 /* Optimal Unmap Granularity */
1204 put_unaligned_be32(sdebug_unmap_granularity
, &arr
[24]);
1206 /* Maximum WRITE SAME Length */
1207 put_unaligned_be64(sdebug_write_same_length
, &arr
[32]);
1209 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1211 return sizeof(vpdb0_data
);
1214 /* Block device characteristics VPD page (SBC-3) */
1215 static int inquiry_vpd_b1(unsigned char *arr
)
1217 memset(arr
, 0, 0x3c);
1219 arr
[1] = 1; /* non rotating medium (e.g. solid state) */
1221 arr
[3] = 5; /* less than 1.8" */
1226 /* Logical block provisioning VPD page (SBC-4) */
1227 static int inquiry_vpd_b2(unsigned char *arr
)
1229 memset(arr
, 0, 0x4);
1230 arr
[0] = 0; /* threshold exponent */
1237 if (sdebug_lbprz
&& scsi_debug_lbp())
1238 arr
[1] |= (sdebug_lbprz
& 0x7) << 2; /* sbc4r07 and later */
1239 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1240 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1241 /* threshold_percentage=0 */
1245 #define SDEBUG_LONG_INQ_SZ 96
1246 #define SDEBUG_MAX_INQ_ARR_SZ 584
1248 static int resp_inquiry(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
1250 unsigned char pq_pdt
;
1251 unsigned char * arr
;
1252 unsigned char *cmd
= scp
->cmnd
;
1253 int alloc_len
, n
, ret
;
1254 bool have_wlun
, is_disk
;
1256 alloc_len
= get_unaligned_be16(cmd
+ 3);
1257 arr
= kzalloc(SDEBUG_MAX_INQ_ARR_SZ
, GFP_ATOMIC
);
1259 return DID_REQUEUE
<< 16;
1260 is_disk
= (sdebug_ptype
== TYPE_DISK
);
1261 have_wlun
= scsi_is_wlun(scp
->device
->lun
);
1263 pq_pdt
= TYPE_WLUN
; /* present, wlun */
1264 else if (sdebug_no_lun_0
&& (devip
->lun
== SDEBUG_LUN_0_VAL
))
1265 pq_pdt
= 0x7f; /* not present, PQ=3, PDT=0x1f */
1267 pq_pdt
= (sdebug_ptype
& 0x1f);
1269 if (0x2 & cmd
[1]) { /* CMDDT bit set */
1270 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 1);
1272 return check_condition_result
;
1273 } else if (0x1 & cmd
[1]) { /* EVPD bit set */
1274 int lu_id_num
, port_group_id
, target_dev_id
, len
;
1276 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1278 port_group_id
= (((host_no
+ 1) & 0x7f) << 8) +
1279 (devip
->channel
& 0x7f);
1280 if (sdebug_vpd_use_hostno
== 0)
1282 lu_id_num
= have_wlun
? -1 : (((host_no
+ 1) * 2000) +
1283 (devip
->target
* 1000) + devip
->lun
);
1284 target_dev_id
= ((host_no
+ 1) * 2000) +
1285 (devip
->target
* 1000) - 3;
1286 len
= scnprintf(lu_id_str
, 6, "%d", lu_id_num
);
1287 if (0 == cmd
[2]) { /* supported vital product data pages */
1288 arr
[1] = cmd
[2]; /*sanity */
1290 arr
[n
++] = 0x0; /* this page */
1291 arr
[n
++] = 0x80; /* unit serial number */
1292 arr
[n
++] = 0x83; /* device identification */
1293 arr
[n
++] = 0x84; /* software interface ident. */
1294 arr
[n
++] = 0x85; /* management network addresses */
1295 arr
[n
++] = 0x86; /* extended inquiry */
1296 arr
[n
++] = 0x87; /* mode page policy */
1297 arr
[n
++] = 0x88; /* SCSI ports */
1298 if (is_disk
) { /* SBC only */
1299 arr
[n
++] = 0x89; /* ATA information */
1300 arr
[n
++] = 0xb0; /* Block limits */
1301 arr
[n
++] = 0xb1; /* Block characteristics */
1302 arr
[n
++] = 0xb2; /* Logical Block Prov */
1304 arr
[3] = n
- 4; /* number of supported VPD pages */
1305 } else if (0x80 == cmd
[2]) { /* unit serial number */
1306 arr
[1] = cmd
[2]; /*sanity */
1308 memcpy(&arr
[4], lu_id_str
, len
);
1309 } else if (0x83 == cmd
[2]) { /* device identification */
1310 arr
[1] = cmd
[2]; /*sanity */
1311 arr
[3] = inquiry_vpd_83(&arr
[4], port_group_id
,
1312 target_dev_id
, lu_id_num
,
1315 } else if (0x84 == cmd
[2]) { /* Software interface ident. */
1316 arr
[1] = cmd
[2]; /*sanity */
1317 arr
[3] = inquiry_vpd_84(&arr
[4]);
1318 } else if (0x85 == cmd
[2]) { /* Management network addresses */
1319 arr
[1] = cmd
[2]; /*sanity */
1320 arr
[3] = inquiry_vpd_85(&arr
[4]);
1321 } else if (0x86 == cmd
[2]) { /* extended inquiry */
1322 arr
[1] = cmd
[2]; /*sanity */
1323 arr
[3] = 0x3c; /* number of following entries */
1324 if (sdebug_dif
== SD_DIF_TYPE3_PROTECTION
)
1325 arr
[4] = 0x4; /* SPT: GRD_CHK:1 */
1326 else if (have_dif_prot
)
1327 arr
[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1329 arr
[4] = 0x0; /* no protection stuff */
1330 arr
[5] = 0x7; /* head of q, ordered + simple q's */
1331 } else if (0x87 == cmd
[2]) { /* mode page policy */
1332 arr
[1] = cmd
[2]; /*sanity */
1333 arr
[3] = 0x8; /* number of following entries */
1334 arr
[4] = 0x2; /* disconnect-reconnect mp */
1335 arr
[6] = 0x80; /* mlus, shared */
1336 arr
[8] = 0x18; /* protocol specific lu */
1337 arr
[10] = 0x82; /* mlus, per initiator port */
1338 } else if (0x88 == cmd
[2]) { /* SCSI Ports */
1339 arr
[1] = cmd
[2]; /*sanity */
1340 arr
[3] = inquiry_vpd_88(&arr
[4], target_dev_id
);
1341 } else if (is_disk
&& 0x89 == cmd
[2]) { /* ATA information */
1342 arr
[1] = cmd
[2]; /*sanity */
1343 n
= inquiry_vpd_89(&arr
[4]);
1344 put_unaligned_be16(n
, arr
+ 2);
1345 } else if (is_disk
&& 0xb0 == cmd
[2]) { /* Block limits */
1346 arr
[1] = cmd
[2]; /*sanity */
1347 arr
[3] = inquiry_vpd_b0(&arr
[4]);
1348 } else if (is_disk
&& 0xb1 == cmd
[2]) { /* Block char. */
1349 arr
[1] = cmd
[2]; /*sanity */
1350 arr
[3] = inquiry_vpd_b1(&arr
[4]);
1351 } else if (is_disk
&& 0xb2 == cmd
[2]) { /* LB Prov. */
1352 arr
[1] = cmd
[2]; /*sanity */
1353 arr
[3] = inquiry_vpd_b2(&arr
[4]);
1355 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, -1);
1357 return check_condition_result
;
1359 len
= min(get_unaligned_be16(arr
+ 2) + 4, alloc_len
);
1360 ret
= fill_from_dev_buffer(scp
, arr
,
1361 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
1365 /* drops through here for a standard inquiry */
1366 arr
[1] = sdebug_removable
? 0x80 : 0; /* Removable disk */
1367 arr
[2] = sdebug_scsi_level
;
1368 arr
[3] = 2; /* response_data_format==2 */
1369 arr
[4] = SDEBUG_LONG_INQ_SZ
- 5;
1370 arr
[5] = (int)have_dif_prot
; /* PROTECT bit */
1371 if (sdebug_vpd_use_hostno
== 0)
1372 arr
[5] = 0x10; /* claim: implicit TGPS */
1373 arr
[6] = 0x10; /* claim: MultiP */
1374 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1375 arr
[7] = 0xa; /* claim: LINKED + CMDQUE */
1376 memcpy(&arr
[8], inq_vendor_id
, 8);
1377 memcpy(&arr
[16], inq_product_id
, 16);
1378 memcpy(&arr
[32], inq_product_rev
, 4);
1379 /* version descriptors (2 bytes each) follow */
1380 put_unaligned_be16(0xc0, arr
+ 58); /* SAM-6 no version claimed */
1381 put_unaligned_be16(0x5c0, arr
+ 60); /* SPC-5 no version claimed */
1383 if (is_disk
) { /* SBC-4 no version claimed */
1384 put_unaligned_be16(0x600, arr
+ n
);
1386 } else if (sdebug_ptype
== TYPE_TAPE
) { /* SSC-4 rev 3 */
1387 put_unaligned_be16(0x525, arr
+ n
);
1390 put_unaligned_be16(0x2100, arr
+ n
); /* SPL-4 no version claimed */
1391 ret
= fill_from_dev_buffer(scp
, arr
,
1392 min(alloc_len
, SDEBUG_LONG_INQ_SZ
));
1397 static unsigned char iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1400 static int resp_requests(struct scsi_cmnd
* scp
,
1401 struct sdebug_dev_info
* devip
)
1403 unsigned char * sbuff
;
1404 unsigned char *cmd
= scp
->cmnd
;
1405 unsigned char arr
[SCSI_SENSE_BUFFERSIZE
];
1409 memset(arr
, 0, sizeof(arr
));
1410 dsense
= !!(cmd
[1] & 1);
1411 sbuff
= scp
->sense_buffer
;
1412 if ((iec_m_pg
[2] & 0x4) && (6 == (iec_m_pg
[3] & 0xf))) {
1415 arr
[1] = 0x0; /* NO_SENSE in sense_key */
1416 arr
[2] = THRESHOLD_EXCEEDED
;
1417 arr
[3] = 0xff; /* TEST set and MRIE==6 */
1421 arr
[2] = 0x0; /* NO_SENSE in sense_key */
1422 arr
[7] = 0xa; /* 18 byte sense buffer */
1423 arr
[12] = THRESHOLD_EXCEEDED
;
1424 arr
[13] = 0xff; /* TEST set and MRIE==6 */
1427 memcpy(arr
, sbuff
, SCSI_SENSE_BUFFERSIZE
);
1428 if (arr
[0] >= 0x70 && dsense
== sdebug_dsense
)
1429 ; /* have sense and formats match */
1430 else if (arr
[0] <= 0x70) {
1440 } else if (dsense
) {
1443 arr
[1] = sbuff
[2]; /* sense key */
1444 arr
[2] = sbuff
[12]; /* asc */
1445 arr
[3] = sbuff
[13]; /* ascq */
1457 mk_sense_buffer(scp
, 0, NO_ADDITIONAL_SENSE
, 0);
1458 return fill_from_dev_buffer(scp
, arr
, len
);
1461 static int resp_start_stop(struct scsi_cmnd
* scp
,
1462 struct sdebug_dev_info
* devip
)
1464 unsigned char *cmd
= scp
->cmnd
;
1465 int power_cond
, stop
;
1467 power_cond
= (cmd
[4] & 0xf0) >> 4;
1469 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 4, 7);
1470 return check_condition_result
;
1472 stop
= !(cmd
[4] & 1);
1473 atomic_xchg(&devip
->stopped
, stop
);
1477 static sector_t
get_sdebug_capacity(void)
1479 static const unsigned int gibibyte
= 1073741824;
1481 if (sdebug_virtual_gb
> 0)
1482 return (sector_t
)sdebug_virtual_gb
*
1483 (gibibyte
/ sdebug_sector_size
);
1485 return sdebug_store_sectors
;
1488 #define SDEBUG_READCAP_ARR_SZ 8
1489 static int resp_readcap(struct scsi_cmnd
* scp
,
1490 struct sdebug_dev_info
* devip
)
1492 unsigned char arr
[SDEBUG_READCAP_ARR_SZ
];
1495 /* following just in case virtual_gb changed */
1496 sdebug_capacity
= get_sdebug_capacity();
1497 memset(arr
, 0, SDEBUG_READCAP_ARR_SZ
);
1498 if (sdebug_capacity
< 0xffffffff) {
1499 capac
= (unsigned int)sdebug_capacity
- 1;
1500 put_unaligned_be32(capac
, arr
+ 0);
1502 put_unaligned_be32(0xffffffff, arr
+ 0);
1503 put_unaligned_be16(sdebug_sector_size
, arr
+ 6);
1504 return fill_from_dev_buffer(scp
, arr
, SDEBUG_READCAP_ARR_SZ
);
1507 #define SDEBUG_READCAP16_ARR_SZ 32
1508 static int resp_readcap16(struct scsi_cmnd
* scp
,
1509 struct sdebug_dev_info
* devip
)
1511 unsigned char *cmd
= scp
->cmnd
;
1512 unsigned char arr
[SDEBUG_READCAP16_ARR_SZ
];
1515 alloc_len
= get_unaligned_be32(cmd
+ 10);
1516 /* following just in case virtual_gb changed */
1517 sdebug_capacity
= get_sdebug_capacity();
1518 memset(arr
, 0, SDEBUG_READCAP16_ARR_SZ
);
1519 put_unaligned_be64((u64
)(sdebug_capacity
- 1), arr
+ 0);
1520 put_unaligned_be32(sdebug_sector_size
, arr
+ 8);
1521 arr
[13] = sdebug_physblk_exp
& 0xf;
1522 arr
[14] = (sdebug_lowest_aligned
>> 8) & 0x3f;
1524 if (scsi_debug_lbp()) {
1525 arr
[14] |= 0x80; /* LBPME */
1526 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1527 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1528 * in the wider field maps to 0 in this field.
1530 if (sdebug_lbprz
& 1) /* precisely what the draft requires */
1534 arr
[15] = sdebug_lowest_aligned
& 0xff;
1536 if (have_dif_prot
) {
1537 arr
[12] = (sdebug_dif
- 1) << 1; /* P_TYPE */
1538 arr
[12] |= 1; /* PROT_EN */
1541 return fill_from_dev_buffer(scp
, arr
,
1542 min(alloc_len
, SDEBUG_READCAP16_ARR_SZ
));
1545 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1547 static int resp_report_tgtpgs(struct scsi_cmnd
* scp
,
1548 struct sdebug_dev_info
* devip
)
1550 unsigned char *cmd
= scp
->cmnd
;
1551 unsigned char * arr
;
1552 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1553 int n
, ret
, alen
, rlen
;
1554 int port_group_a
, port_group_b
, port_a
, port_b
;
1556 alen
= get_unaligned_be32(cmd
+ 6);
1557 arr
= kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ
, GFP_ATOMIC
);
1559 return DID_REQUEUE
<< 16;
1561 * EVPD page 0x88 states we have two ports, one
1562 * real and a fake port with no device connected.
1563 * So we create two port groups with one port each
1564 * and set the group with port B to unavailable.
1566 port_a
= 0x1; /* relative port A */
1567 port_b
= 0x2; /* relative port B */
1568 port_group_a
= (((host_no
+ 1) & 0x7f) << 8) +
1569 (devip
->channel
& 0x7f);
1570 port_group_b
= (((host_no
+ 1) & 0x7f) << 8) +
1571 (devip
->channel
& 0x7f) + 0x80;
1574 * The asymmetric access state is cycled according to the host_id.
1577 if (sdebug_vpd_use_hostno
== 0) {
1578 arr
[n
++] = host_no
% 3; /* Asymm access state */
1579 arr
[n
++] = 0x0F; /* claim: all states are supported */
1581 arr
[n
++] = 0x0; /* Active/Optimized path */
1582 arr
[n
++] = 0x01; /* only support active/optimized paths */
1584 put_unaligned_be16(port_group_a
, arr
+ n
);
1586 arr
[n
++] = 0; /* Reserved */
1587 arr
[n
++] = 0; /* Status code */
1588 arr
[n
++] = 0; /* Vendor unique */
1589 arr
[n
++] = 0x1; /* One port per group */
1590 arr
[n
++] = 0; /* Reserved */
1591 arr
[n
++] = 0; /* Reserved */
1592 put_unaligned_be16(port_a
, arr
+ n
);
1594 arr
[n
++] = 3; /* Port unavailable */
1595 arr
[n
++] = 0x08; /* claim: only unavailalbe paths are supported */
1596 put_unaligned_be16(port_group_b
, arr
+ n
);
1598 arr
[n
++] = 0; /* Reserved */
1599 arr
[n
++] = 0; /* Status code */
1600 arr
[n
++] = 0; /* Vendor unique */
1601 arr
[n
++] = 0x1; /* One port per group */
1602 arr
[n
++] = 0; /* Reserved */
1603 arr
[n
++] = 0; /* Reserved */
1604 put_unaligned_be16(port_b
, arr
+ n
);
1608 put_unaligned_be32(rlen
, arr
+ 0);
1611 * Return the smallest value of either
1612 * - The allocated length
1613 * - The constructed command length
1614 * - The maximum array size
1617 ret
= fill_from_dev_buffer(scp
, arr
,
1618 min(rlen
, SDEBUG_MAX_TGTPGS_ARR_SZ
));
1623 static int resp_rsup_opcodes(struct scsi_cmnd
*scp
,
1624 struct sdebug_dev_info
*devip
)
1627 u8 reporting_opts
, req_opcode
, sdeb_i
, supp
;
1629 u32 alloc_len
, a_len
;
1630 int k
, offset
, len
, errsts
, count
, bump
, na
;
1631 const struct opcode_info_t
*oip
;
1632 const struct opcode_info_t
*r_oip
;
1634 u8
*cmd
= scp
->cmnd
;
1636 rctd
= !!(cmd
[2] & 0x80);
1637 reporting_opts
= cmd
[2] & 0x7;
1638 req_opcode
= cmd
[3];
1639 req_sa
= get_unaligned_be16(cmd
+ 4);
1640 alloc_len
= get_unaligned_be32(cmd
+ 6);
1641 if (alloc_len
< 4 || alloc_len
> 0xffff) {
1642 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 6, -1);
1643 return check_condition_result
;
1645 if (alloc_len
> 8192)
1649 arr
= kzalloc((a_len
< 256) ? 320 : a_len
+ 64, GFP_ATOMIC
);
1651 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
1653 return check_condition_result
;
1655 switch (reporting_opts
) {
1656 case 0: /* all commands */
1657 /* count number of commands */
1658 for (count
= 0, oip
= opcode_info_arr
;
1659 oip
->num_attached
!= 0xff; ++oip
) {
1660 if (F_INV_OP
& oip
->flags
)
1662 count
+= (oip
->num_attached
+ 1);
1664 bump
= rctd
? 20 : 8;
1665 put_unaligned_be32(count
* bump
, arr
);
1666 for (offset
= 4, oip
= opcode_info_arr
;
1667 oip
->num_attached
!= 0xff && offset
< a_len
; ++oip
) {
1668 if (F_INV_OP
& oip
->flags
)
1670 na
= oip
->num_attached
;
1671 arr
[offset
] = oip
->opcode
;
1672 put_unaligned_be16(oip
->sa
, arr
+ offset
+ 2);
1674 arr
[offset
+ 5] |= 0x2;
1675 if (FF_SA
& oip
->flags
)
1676 arr
[offset
+ 5] |= 0x1;
1677 put_unaligned_be16(oip
->len_mask
[0], arr
+ offset
+ 6);
1679 put_unaligned_be16(0xa, arr
+ offset
+ 8);
1681 for (k
= 0, oip
= oip
->arrp
; k
< na
; ++k
, ++oip
) {
1682 if (F_INV_OP
& oip
->flags
)
1685 arr
[offset
] = oip
->opcode
;
1686 put_unaligned_be16(oip
->sa
, arr
+ offset
+ 2);
1688 arr
[offset
+ 5] |= 0x2;
1689 if (FF_SA
& oip
->flags
)
1690 arr
[offset
+ 5] |= 0x1;
1691 put_unaligned_be16(oip
->len_mask
[0],
1694 put_unaligned_be16(0xa,
1701 case 1: /* one command: opcode only */
1702 case 2: /* one command: opcode plus service action */
1703 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1704 sdeb_i
= opcode_ind_arr
[req_opcode
];
1705 oip
= &opcode_info_arr
[sdeb_i
];
1706 if (F_INV_OP
& oip
->flags
) {
1710 if (1 == reporting_opts
) {
1711 if (FF_SA
& oip
->flags
) {
1712 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
,
1715 return check_condition_result
;
1718 } else if (2 == reporting_opts
&&
1719 0 == (FF_SA
& oip
->flags
)) {
1720 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 4, -1);
1721 kfree(arr
); /* point at requested sa */
1722 return check_condition_result
;
1724 if (0 == (FF_SA
& oip
->flags
) &&
1725 req_opcode
== oip
->opcode
)
1727 else if (0 == (FF_SA
& oip
->flags
)) {
1728 na
= oip
->num_attached
;
1729 for (k
= 0, oip
= oip
->arrp
; k
< na
;
1731 if (req_opcode
== oip
->opcode
)
1734 supp
= (k
>= na
) ? 1 : 3;
1735 } else if (req_sa
!= oip
->sa
) {
1736 na
= oip
->num_attached
;
1737 for (k
= 0, oip
= oip
->arrp
; k
< na
;
1739 if (req_sa
== oip
->sa
)
1742 supp
= (k
>= na
) ? 1 : 3;
1746 u
= oip
->len_mask
[0];
1747 put_unaligned_be16(u
, arr
+ 2);
1748 arr
[4] = oip
->opcode
;
1749 for (k
= 1; k
< u
; ++k
)
1750 arr
[4 + k
] = (k
< 16) ?
1751 oip
->len_mask
[k
] : 0xff;
1756 arr
[1] = (rctd
? 0x80 : 0) | supp
;
1758 put_unaligned_be16(0xa, arr
+ offset
);
1763 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 2);
1765 return check_condition_result
;
1767 offset
= (offset
< a_len
) ? offset
: a_len
;
1768 len
= (offset
< alloc_len
) ? offset
: alloc_len
;
1769 errsts
= fill_from_dev_buffer(scp
, arr
, len
);
1774 static int resp_rsup_tmfs(struct scsi_cmnd
*scp
,
1775 struct sdebug_dev_info
*devip
)
1780 u8
*cmd
= scp
->cmnd
;
1782 memset(arr
, 0, sizeof(arr
));
1783 repd
= !!(cmd
[2] & 0x80);
1784 alloc_len
= get_unaligned_be32(cmd
+ 6);
1785 if (alloc_len
< 4) {
1786 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 6, -1);
1787 return check_condition_result
;
1789 arr
[0] = 0xc8; /* ATS | ATSS | LURS */
1790 arr
[1] = 0x1; /* ITNRS */
1797 len
= (len
< alloc_len
) ? len
: alloc_len
;
1798 return fill_from_dev_buffer(scp
, arr
, len
);
1801 /* <<Following mode page info copied from ST318451LW>> */
1803 static int resp_err_recov_pg(unsigned char * p
, int pcontrol
, int target
)
1804 { /* Read-Write Error Recovery page for mode_sense */
1805 unsigned char err_recov_pg
[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1808 memcpy(p
, err_recov_pg
, sizeof(err_recov_pg
));
1810 memset(p
+ 2, 0, sizeof(err_recov_pg
) - 2);
1811 return sizeof(err_recov_pg
);
1814 static int resp_disconnect_pg(unsigned char * p
, int pcontrol
, int target
)
1815 { /* Disconnect-Reconnect page for mode_sense */
1816 unsigned char disconnect_pg
[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1817 0, 0, 0, 0, 0, 0, 0, 0};
1819 memcpy(p
, disconnect_pg
, sizeof(disconnect_pg
));
1821 memset(p
+ 2, 0, sizeof(disconnect_pg
) - 2);
1822 return sizeof(disconnect_pg
);
1825 static int resp_format_pg(unsigned char * p
, int pcontrol
, int target
)
1826 { /* Format device page for mode_sense */
1827 unsigned char format_pg
[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1828 0, 0, 0, 0, 0, 0, 0, 0,
1829 0, 0, 0, 0, 0x40, 0, 0, 0};
1831 memcpy(p
, format_pg
, sizeof(format_pg
));
1832 put_unaligned_be16(sdebug_sectors_per
, p
+ 10);
1833 put_unaligned_be16(sdebug_sector_size
, p
+ 12);
1834 if (sdebug_removable
)
1835 p
[20] |= 0x20; /* should agree with INQUIRY */
1837 memset(p
+ 2, 0, sizeof(format_pg
) - 2);
1838 return sizeof(format_pg
);
1841 static unsigned char caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1842 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1845 static int resp_caching_pg(unsigned char * p
, int pcontrol
, int target
)
1846 { /* Caching page for mode_sense */
1847 unsigned char ch_caching_pg
[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1848 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1849 unsigned char d_caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1850 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1852 if (SDEBUG_OPT_N_WCE
& sdebug_opts
)
1853 caching_pg
[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1854 memcpy(p
, caching_pg
, sizeof(caching_pg
));
1856 memcpy(p
+ 2, ch_caching_pg
, sizeof(ch_caching_pg
));
1857 else if (2 == pcontrol
)
1858 memcpy(p
, d_caching_pg
, sizeof(d_caching_pg
));
1859 return sizeof(caching_pg
);
1862 static unsigned char ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1865 static int resp_ctrl_m_pg(unsigned char * p
, int pcontrol
, int target
)
1866 { /* Control mode page for mode_sense */
1867 unsigned char ch_ctrl_m_pg
[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1869 unsigned char d_ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1873 ctrl_m_pg
[2] |= 0x4;
1875 ctrl_m_pg
[2] &= ~0x4;
1878 ctrl_m_pg
[5] |= 0x80; /* ATO=1 */
1880 memcpy(p
, ctrl_m_pg
, sizeof(ctrl_m_pg
));
1882 memcpy(p
+ 2, ch_ctrl_m_pg
, sizeof(ch_ctrl_m_pg
));
1883 else if (2 == pcontrol
)
1884 memcpy(p
, d_ctrl_m_pg
, sizeof(d_ctrl_m_pg
));
1885 return sizeof(ctrl_m_pg
);
1889 static int resp_iec_m_pg(unsigned char * p
, int pcontrol
, int target
)
1890 { /* Informational Exceptions control mode page for mode_sense */
1891 unsigned char ch_iec_m_pg
[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1893 unsigned char d_iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1896 memcpy(p
, iec_m_pg
, sizeof(iec_m_pg
));
1898 memcpy(p
+ 2, ch_iec_m_pg
, sizeof(ch_iec_m_pg
));
1899 else if (2 == pcontrol
)
1900 memcpy(p
, d_iec_m_pg
, sizeof(d_iec_m_pg
));
1901 return sizeof(iec_m_pg
);
1904 static int resp_sas_sf_m_pg(unsigned char * p
, int pcontrol
, int target
)
1905 { /* SAS SSP mode page - short format for mode_sense */
1906 unsigned char sas_sf_m_pg
[] = {0x19, 0x6,
1907 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1909 memcpy(p
, sas_sf_m_pg
, sizeof(sas_sf_m_pg
));
1911 memset(p
+ 2, 0, sizeof(sas_sf_m_pg
) - 2);
1912 return sizeof(sas_sf_m_pg
);
1916 static int resp_sas_pcd_m_spg(unsigned char * p
, int pcontrol
, int target
,
1918 { /* SAS phy control and discover mode page for mode_sense */
1919 unsigned char sas_pcd_m_pg
[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1920 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1921 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1922 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1923 0x2, 0, 0, 0, 0, 0, 0, 0,
1924 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1925 0, 0, 0, 0, 0, 0, 0, 0,
1926 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1927 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1928 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
1929 0x3, 0, 0, 0, 0, 0, 0, 0,
1930 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1931 0, 0, 0, 0, 0, 0, 0, 0,
1935 put_unaligned_be64(naa3_comp_a
, sas_pcd_m_pg
+ 16);
1936 put_unaligned_be64(naa3_comp_c
+ 1, sas_pcd_m_pg
+ 24);
1937 put_unaligned_be64(naa3_comp_a
, sas_pcd_m_pg
+ 64);
1938 put_unaligned_be64(naa3_comp_c
+ 1, sas_pcd_m_pg
+ 72);
1939 port_a
= target_dev_id
+ 1;
1940 port_b
= port_a
+ 1;
1941 memcpy(p
, sas_pcd_m_pg
, sizeof(sas_pcd_m_pg
));
1942 put_unaligned_be32(port_a
, p
+ 20);
1943 put_unaligned_be32(port_b
, p
+ 48 + 20);
1945 memset(p
+ 4, 0, sizeof(sas_pcd_m_pg
) - 4);
1946 return sizeof(sas_pcd_m_pg
);
1949 static int resp_sas_sha_m_spg(unsigned char * p
, int pcontrol
)
1950 { /* SAS SSP shared protocol specific port mode subpage */
1951 unsigned char sas_sha_m_pg
[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1952 0, 0, 0, 0, 0, 0, 0, 0,
1955 memcpy(p
, sas_sha_m_pg
, sizeof(sas_sha_m_pg
));
1957 memset(p
+ 4, 0, sizeof(sas_sha_m_pg
) - 4);
1958 return sizeof(sas_sha_m_pg
);
1961 #define SDEBUG_MAX_MSENSE_SZ 256
1963 static int resp_mode_sense(struct scsi_cmnd
*scp
,
1964 struct sdebug_dev_info
*devip
)
1966 int pcontrol
, pcode
, subpcode
, bd_len
;
1967 unsigned char dev_spec
;
1968 int alloc_len
, offset
, len
, target_dev_id
;
1969 int target
= scp
->device
->id
;
1971 unsigned char arr
[SDEBUG_MAX_MSENSE_SZ
];
1972 unsigned char *cmd
= scp
->cmnd
;
1973 bool dbd
, llbaa
, msense_6
, is_disk
, bad_pcode
;
1975 dbd
= !!(cmd
[1] & 0x8); /* disable block descriptors */
1976 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1977 pcode
= cmd
[2] & 0x3f;
1979 msense_6
= (MODE_SENSE
== cmd
[0]);
1980 llbaa
= msense_6
? false : !!(cmd
[1] & 0x10);
1981 is_disk
= (sdebug_ptype
== TYPE_DISK
);
1982 if (is_disk
&& !dbd
)
1983 bd_len
= llbaa
? 16 : 8;
1986 alloc_len
= msense_6
? cmd
[4] : get_unaligned_be16(cmd
+ 7);
1987 memset(arr
, 0, SDEBUG_MAX_MSENSE_SZ
);
1988 if (0x3 == pcontrol
) { /* Saving values not supported */
1989 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, SAVING_PARAMS_UNSUP
, 0);
1990 return check_condition_result
;
1992 target_dev_id
= ((devip
->sdbg_host
->shost
->host_no
+ 1) * 2000) +
1993 (devip
->target
* 1000) - 3;
1994 /* for disks set DPOFUA bit and clear write protect (WP) bit */
1996 dev_spec
= 0x10; /* =0x90 if WP=1 implies read-only */
2006 arr
[4] = 0x1; /* set LONGLBA bit */
2007 arr
[7] = bd_len
; /* assume 255 or less */
2011 if ((bd_len
> 0) && (!sdebug_capacity
))
2012 sdebug_capacity
= get_sdebug_capacity();
2015 if (sdebug_capacity
> 0xfffffffe)
2016 put_unaligned_be32(0xffffffff, ap
+ 0);
2018 put_unaligned_be32(sdebug_capacity
, ap
+ 0);
2019 put_unaligned_be16(sdebug_sector_size
, ap
+ 6);
2022 } else if (16 == bd_len
) {
2023 put_unaligned_be64((u64
)sdebug_capacity
, ap
+ 0);
2024 put_unaligned_be32(sdebug_sector_size
, ap
+ 12);
2029 if ((subpcode
> 0x0) && (subpcode
< 0xff) && (0x19 != pcode
)) {
2030 /* TODO: Control Extension page */
2031 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2032 return check_condition_result
;
2037 case 0x1: /* Read-Write error recovery page, direct access */
2038 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
2041 case 0x2: /* Disconnect-Reconnect page, all devices */
2042 len
= resp_disconnect_pg(ap
, pcontrol
, target
);
2045 case 0x3: /* Format device page, direct access */
2047 len
= resp_format_pg(ap
, pcontrol
, target
);
2052 case 0x8: /* Caching page, direct access */
2054 len
= resp_caching_pg(ap
, pcontrol
, target
);
2059 case 0xa: /* Control Mode page, all devices */
2060 len
= resp_ctrl_m_pg(ap
, pcontrol
, target
);
2063 case 0x19: /* if spc==1 then sas phy, control+discover */
2064 if ((subpcode
> 0x2) && (subpcode
< 0xff)) {
2065 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2066 return check_condition_result
;
2069 if ((0x0 == subpcode
) || (0xff == subpcode
))
2070 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
2071 if ((0x1 == subpcode
) || (0xff == subpcode
))
2072 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
, target
,
2074 if ((0x2 == subpcode
) || (0xff == subpcode
))
2075 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
2078 case 0x1c: /* Informational Exceptions Mode page, all devices */
2079 len
= resp_iec_m_pg(ap
, pcontrol
, target
);
2082 case 0x3f: /* Read all Mode pages */
2083 if ((0 == subpcode
) || (0xff == subpcode
)) {
2084 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
2085 len
+= resp_disconnect_pg(ap
+ len
, pcontrol
, target
);
2087 len
+= resp_format_pg(ap
+ len
, pcontrol
,
2089 len
+= resp_caching_pg(ap
+ len
, pcontrol
,
2092 len
+= resp_ctrl_m_pg(ap
+ len
, pcontrol
, target
);
2093 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
2094 if (0xff == subpcode
) {
2095 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
,
2096 target
, target_dev_id
);
2097 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
2099 len
+= resp_iec_m_pg(ap
+ len
, pcontrol
, target
);
2102 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2103 return check_condition_result
;
2111 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 5);
2112 return check_condition_result
;
2115 arr
[0] = offset
- 1;
2117 put_unaligned_be16((offset
- 2), arr
+ 0);
2118 return fill_from_dev_buffer(scp
, arr
, min(alloc_len
, offset
));
2121 #define SDEBUG_MAX_MSELECT_SZ 512
2123 static int resp_mode_select(struct scsi_cmnd
*scp
,
2124 struct sdebug_dev_info
*devip
)
2126 int pf
, sp
, ps
, md_len
, bd_len
, off
, spf
, pg_len
;
2127 int param_len
, res
, mpage
;
2128 unsigned char arr
[SDEBUG_MAX_MSELECT_SZ
];
2129 unsigned char *cmd
= scp
->cmnd
;
2130 int mselect6
= (MODE_SELECT
== cmd
[0]);
2132 memset(arr
, 0, sizeof(arr
));
2135 param_len
= mselect6
? cmd
[4] : get_unaligned_be16(cmd
+ 7);
2136 if ((0 == pf
) || sp
|| (param_len
> SDEBUG_MAX_MSELECT_SZ
)) {
2137 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, mselect6
? 4 : 7, -1);
2138 return check_condition_result
;
2140 res
= fetch_to_dev_buffer(scp
, arr
, param_len
);
2142 return DID_ERROR
<< 16;
2143 else if (sdebug_verbose
&& (res
< param_len
))
2144 sdev_printk(KERN_INFO
, scp
->device
,
2145 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2146 __func__
, param_len
, res
);
2147 md_len
= mselect6
? (arr
[0] + 1) : (get_unaligned_be16(arr
+ 0) + 2);
2148 bd_len
= mselect6
? arr
[3] : get_unaligned_be16(arr
+ 6);
2150 mk_sense_invalid_fld(scp
, SDEB_IN_DATA
, 0, -1);
2151 return check_condition_result
;
2153 off
= bd_len
+ (mselect6
? 4 : 8);
2154 mpage
= arr
[off
] & 0x3f;
2155 ps
= !!(arr
[off
] & 0x80);
2157 mk_sense_invalid_fld(scp
, SDEB_IN_DATA
, off
, 7);
2158 return check_condition_result
;
2160 spf
= !!(arr
[off
] & 0x40);
2161 pg_len
= spf
? (get_unaligned_be16(arr
+ off
+ 2) + 4) :
2163 if ((pg_len
+ off
) > param_len
) {
2164 mk_sense_buffer(scp
, ILLEGAL_REQUEST
,
2165 PARAMETER_LIST_LENGTH_ERR
, 0);
2166 return check_condition_result
;
2169 case 0x8: /* Caching Mode page */
2170 if (caching_pg
[1] == arr
[off
+ 1]) {
2171 memcpy(caching_pg
+ 2, arr
+ off
+ 2,
2172 sizeof(caching_pg
) - 2);
2173 goto set_mode_changed_ua
;
2176 case 0xa: /* Control Mode page */
2177 if (ctrl_m_pg
[1] == arr
[off
+ 1]) {
2178 memcpy(ctrl_m_pg
+ 2, arr
+ off
+ 2,
2179 sizeof(ctrl_m_pg
) - 2);
2180 sdebug_dsense
= !!(ctrl_m_pg
[2] & 0x4);
2181 goto set_mode_changed_ua
;
2184 case 0x1c: /* Informational Exceptions Mode page */
2185 if (iec_m_pg
[1] == arr
[off
+ 1]) {
2186 memcpy(iec_m_pg
+ 2, arr
+ off
+ 2,
2187 sizeof(iec_m_pg
) - 2);
2188 goto set_mode_changed_ua
;
2194 mk_sense_invalid_fld(scp
, SDEB_IN_DATA
, off
, 5);
2195 return check_condition_result
;
2196 set_mode_changed_ua
:
2197 set_bit(SDEBUG_UA_MODE_CHANGED
, devip
->uas_bm
);
2201 static int resp_temp_l_pg(unsigned char * arr
)
2203 unsigned char temp_l_pg
[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2204 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2207 memcpy(arr
, temp_l_pg
, sizeof(temp_l_pg
));
2208 return sizeof(temp_l_pg
);
2211 static int resp_ie_l_pg(unsigned char * arr
)
2213 unsigned char ie_l_pg
[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2216 memcpy(arr
, ie_l_pg
, sizeof(ie_l_pg
));
2217 if (iec_m_pg
[2] & 0x4) { /* TEST bit set */
2218 arr
[4] = THRESHOLD_EXCEEDED
;
2221 return sizeof(ie_l_pg
);
2224 #define SDEBUG_MAX_LSENSE_SZ 512
2226 static int resp_log_sense(struct scsi_cmnd
* scp
,
2227 struct sdebug_dev_info
* devip
)
2229 int ppc
, sp
, pcontrol
, pcode
, subpcode
, alloc_len
, len
, n
;
2230 unsigned char arr
[SDEBUG_MAX_LSENSE_SZ
];
2231 unsigned char *cmd
= scp
->cmnd
;
2233 memset(arr
, 0, sizeof(arr
));
2237 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, ppc
? 1 : 0);
2238 return check_condition_result
;
2240 pcontrol
= (cmd
[2] & 0xc0) >> 6;
2241 pcode
= cmd
[2] & 0x3f;
2242 subpcode
= cmd
[3] & 0xff;
2243 alloc_len
= get_unaligned_be16(cmd
+ 7);
2245 if (0 == subpcode
) {
2247 case 0x0: /* Supported log pages log page */
2249 arr
[n
++] = 0x0; /* this page */
2250 arr
[n
++] = 0xd; /* Temperature */
2251 arr
[n
++] = 0x2f; /* Informational exceptions */
2254 case 0xd: /* Temperature log page */
2255 arr
[3] = resp_temp_l_pg(arr
+ 4);
2257 case 0x2f: /* Informational exceptions log page */
2258 arr
[3] = resp_ie_l_pg(arr
+ 4);
2261 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 5);
2262 return check_condition_result
;
2264 } else if (0xff == subpcode
) {
2268 case 0x0: /* Supported log pages and subpages log page */
2271 arr
[n
++] = 0x0; /* 0,0 page */
2273 arr
[n
++] = 0xff; /* this page */
2275 arr
[n
++] = 0x0; /* Temperature */
2277 arr
[n
++] = 0x0; /* Informational exceptions */
2280 case 0xd: /* Temperature subpages */
2283 arr
[n
++] = 0x0; /* Temperature */
2286 case 0x2f: /* Informational exceptions subpages */
2289 arr
[n
++] = 0x0; /* Informational exceptions */
2293 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 5);
2294 return check_condition_result
;
2297 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2298 return check_condition_result
;
2300 len
= min(get_unaligned_be16(arr
+ 2) + 4, alloc_len
);
2301 return fill_from_dev_buffer(scp
, arr
,
2302 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
2305 static int check_device_access_params(struct scsi_cmnd
*scp
,
2306 unsigned long long lba
, unsigned int num
)
2308 if (lba
+ num
> sdebug_capacity
) {
2309 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
2310 return check_condition_result
;
2312 /* transfer length excessive (tie in to block limits VPD page) */
2313 if (num
> sdebug_store_sectors
) {
2314 /* needs work to find which cdb byte 'num' comes from */
2315 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
2316 return check_condition_result
;
2321 /* Returns number of bytes copied or -1 if error. */
2322 static int do_device_access(struct scsi_cmnd
*scmd
, u64 lba
, u32 num
,
2326 u64 block
, rest
= 0;
2327 struct scsi_data_buffer
*sdb
;
2328 enum dma_data_direction dir
;
2331 sdb
= scsi_out(scmd
);
2332 dir
= DMA_TO_DEVICE
;
2334 sdb
= scsi_in(scmd
);
2335 dir
= DMA_FROM_DEVICE
;
2340 if (!(scsi_bidi_cmnd(scmd
) || scmd
->sc_data_direction
== dir
))
2343 block
= do_div(lba
, sdebug_store_sectors
);
2344 if (block
+ num
> sdebug_store_sectors
)
2345 rest
= block
+ num
- sdebug_store_sectors
;
2347 ret
= sg_copy_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
2348 fake_storep
+ (block
* sdebug_sector_size
),
2349 (num
- rest
) * sdebug_sector_size
, 0, do_write
);
2350 if (ret
!= (num
- rest
) * sdebug_sector_size
)
2354 ret
+= sg_copy_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
2355 fake_storep
, rest
* sdebug_sector_size
,
2356 (num
- rest
) * sdebug_sector_size
, do_write
);
2362 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2363 * arr into fake_store(lba,num) and return true. If comparison fails then
2365 static bool comp_write_worker(u64 lba
, u32 num
, const u8
*arr
)
2368 u64 block
, rest
= 0;
2369 u32 store_blks
= sdebug_store_sectors
;
2370 u32 lb_size
= sdebug_sector_size
;
2372 block
= do_div(lba
, store_blks
);
2373 if (block
+ num
> store_blks
)
2374 rest
= block
+ num
- store_blks
;
2376 res
= !memcmp(fake_storep
+ (block
* lb_size
), arr
,
2377 (num
- rest
) * lb_size
);
2381 res
= memcmp(fake_storep
, arr
+ ((num
- rest
) * lb_size
),
2385 arr
+= num
* lb_size
;
2386 memcpy(fake_storep
+ (block
* lb_size
), arr
, (num
- rest
) * lb_size
);
2388 memcpy(fake_storep
, arr
+ ((num
- rest
) * lb_size
),
2393 static __be16
dif_compute_csum(const void *buf
, int len
)
2398 csum
= (__force __be16
)ip_compute_csum(buf
, len
);
2400 csum
= cpu_to_be16(crc_t10dif(buf
, len
));
2405 static int dif_verify(struct sd_dif_tuple
*sdt
, const void *data
,
2406 sector_t sector
, u32 ei_lba
)
2408 __be16 csum
= dif_compute_csum(data
, sdebug_sector_size
);
2410 if (sdt
->guard_tag
!= csum
) {
2411 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2412 (unsigned long)sector
,
2413 be16_to_cpu(sdt
->guard_tag
),
2417 if (sdebug_dif
== SD_DIF_TYPE1_PROTECTION
&&
2418 be32_to_cpu(sdt
->ref_tag
) != (sector
& 0xffffffff)) {
2419 pr_err("REF check failed on sector %lu\n",
2420 (unsigned long)sector
);
2423 if (sdebug_dif
== SD_DIF_TYPE2_PROTECTION
&&
2424 be32_to_cpu(sdt
->ref_tag
) != ei_lba
) {
2425 pr_err("REF check failed on sector %lu\n",
2426 (unsigned long)sector
);
2432 static void dif_copy_prot(struct scsi_cmnd
*SCpnt
, sector_t sector
,
2433 unsigned int sectors
, bool read
)
2437 const void *dif_store_end
= dif_storep
+ sdebug_store_sectors
;
2438 struct sg_mapping_iter miter
;
2440 /* Bytes of protection data to copy into sgl */
2441 resid
= sectors
* sizeof(*dif_storep
);
2443 sg_miter_start(&miter
, scsi_prot_sglist(SCpnt
),
2444 scsi_prot_sg_count(SCpnt
), SG_MITER_ATOMIC
|
2445 (read
? SG_MITER_TO_SG
: SG_MITER_FROM_SG
));
2447 while (sg_miter_next(&miter
) && resid
> 0) {
2448 size_t len
= min(miter
.length
, resid
);
2449 void *start
= dif_store(sector
);
2452 if (dif_store_end
< start
+ len
)
2453 rest
= start
+ len
- dif_store_end
;
2458 memcpy(paddr
, start
, len
- rest
);
2460 memcpy(start
, paddr
, len
- rest
);
2464 memcpy(paddr
+ len
- rest
, dif_storep
, rest
);
2466 memcpy(dif_storep
, paddr
+ len
- rest
, rest
);
2469 sector
+= len
/ sizeof(*dif_storep
);
2472 sg_miter_stop(&miter
);
2475 static int prot_verify_read(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
2476 unsigned int sectors
, u32 ei_lba
)
2479 struct sd_dif_tuple
*sdt
;
2482 for (i
= 0; i
< sectors
; i
++, ei_lba
++) {
2485 sector
= start_sec
+ i
;
2486 sdt
= dif_store(sector
);
2488 if (sdt
->app_tag
== cpu_to_be16(0xffff))
2491 ret
= dif_verify(sdt
, fake_store(sector
), sector
, ei_lba
);
2498 dif_copy_prot(SCpnt
, start_sec
, sectors
, true);
2504 static int resp_read_dt0(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
2506 u8
*cmd
= scp
->cmnd
;
2507 struct sdebug_queued_cmd
*sqcp
;
2511 unsigned long iflags
;
2518 lba
= get_unaligned_be64(cmd
+ 2);
2519 num
= get_unaligned_be32(cmd
+ 10);
2524 lba
= get_unaligned_be32(cmd
+ 2);
2525 num
= get_unaligned_be16(cmd
+ 7);
2530 lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
2531 (u32
)(cmd
[1] & 0x1f) << 16;
2532 num
= (0 == cmd
[4]) ? 256 : cmd
[4];
2537 lba
= get_unaligned_be32(cmd
+ 2);
2538 num
= get_unaligned_be32(cmd
+ 6);
2541 case XDWRITEREAD_10
:
2543 lba
= get_unaligned_be32(cmd
+ 2);
2544 num
= get_unaligned_be16(cmd
+ 7);
2547 default: /* assume READ(32) */
2548 lba
= get_unaligned_be64(cmd
+ 12);
2549 ei_lba
= get_unaligned_be32(cmd
+ 20);
2550 num
= get_unaligned_be32(cmd
+ 28);
2554 if (unlikely(have_dif_prot
&& check_prot
)) {
2555 if (sdebug_dif
== SD_DIF_TYPE2_PROTECTION
&&
2557 mk_sense_invalid_opcode(scp
);
2558 return check_condition_result
;
2560 if ((sdebug_dif
== SD_DIF_TYPE1_PROTECTION
||
2561 sdebug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
2562 (cmd
[1] & 0xe0) == 0)
2563 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected RD "
2566 if (unlikely(sdebug_any_injecting_opt
)) {
2567 sqcp
= (struct sdebug_queued_cmd
*)scp
->host_scribble
;
2570 if (sqcp
->inj_short
)
2576 /* inline check_device_access_params() */
2577 if (unlikely(lba
+ num
> sdebug_capacity
)) {
2578 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
2579 return check_condition_result
;
2581 /* transfer length excessive (tie in to block limits VPD page) */
2582 if (unlikely(num
> sdebug_store_sectors
)) {
2583 /* needs work to find which cdb byte 'num' comes from */
2584 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
2585 return check_condition_result
;
2588 if (unlikely((SDEBUG_OPT_MEDIUM_ERR
& sdebug_opts
) &&
2589 (lba
<= (OPT_MEDIUM_ERR_ADDR
+ OPT_MEDIUM_ERR_NUM
- 1)) &&
2590 ((lba
+ num
) > OPT_MEDIUM_ERR_ADDR
))) {
2591 /* claim unrecoverable read error */
2592 mk_sense_buffer(scp
, MEDIUM_ERROR
, UNRECOVERED_READ_ERR
, 0);
2593 /* set info field and valid bit for fixed descriptor */
2594 if (0x70 == (scp
->sense_buffer
[0] & 0x7f)) {
2595 scp
->sense_buffer
[0] |= 0x80; /* Valid bit */
2596 ret
= (lba
< OPT_MEDIUM_ERR_ADDR
)
2597 ? OPT_MEDIUM_ERR_ADDR
: (int)lba
;
2598 put_unaligned_be32(ret
, scp
->sense_buffer
+ 3);
2600 scsi_set_resid(scp
, scsi_bufflen(scp
));
2601 return check_condition_result
;
2604 read_lock_irqsave(&atomic_rw
, iflags
);
2607 if (unlikely(sdebug_dix
&& scsi_prot_sg_count(scp
))) {
2608 int prot_ret
= prot_verify_read(scp
, lba
, num
, ei_lba
);
2611 read_unlock_irqrestore(&atomic_rw
, iflags
);
2612 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, prot_ret
);
2613 return illegal_condition_result
;
2617 ret
= do_device_access(scp
, lba
, num
, false);
2618 read_unlock_irqrestore(&atomic_rw
, iflags
);
2619 if (unlikely(ret
== -1))
2620 return DID_ERROR
<< 16;
2622 scsi_in(scp
)->resid
= scsi_bufflen(scp
) - ret
;
2624 if (unlikely(sqcp
)) {
2625 if (sqcp
->inj_recovered
) {
2626 mk_sense_buffer(scp
, RECOVERED_ERROR
,
2627 THRESHOLD_EXCEEDED
, 0);
2628 return check_condition_result
;
2629 } else if (sqcp
->inj_transport
) {
2630 mk_sense_buffer(scp
, ABORTED_COMMAND
,
2631 TRANSPORT_PROBLEM
, ACK_NAK_TO
);
2632 return check_condition_result
;
2633 } else if (sqcp
->inj_dif
) {
2634 /* Logical block guard check failed */
2635 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 1);
2636 return illegal_condition_result
;
2637 } else if (sqcp
->inj_dix
) {
2638 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 1);
2639 return illegal_condition_result
;
2645 static void dump_sector(unsigned char *buf
, int len
)
2649 pr_err(">>> Sector Dump <<<\n");
2650 for (i
= 0 ; i
< len
; i
+= 16) {
2653 for (j
= 0, n
= 0; j
< 16; j
++) {
2654 unsigned char c
= buf
[i
+j
];
2656 if (c
>= 0x20 && c
< 0x7e)
2657 n
+= scnprintf(b
+ n
, sizeof(b
) - n
,
2660 n
+= scnprintf(b
+ n
, sizeof(b
) - n
,
2663 pr_err("%04d: %s\n", i
, b
);
2667 static int prot_verify_write(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
2668 unsigned int sectors
, u32 ei_lba
)
2671 struct sd_dif_tuple
*sdt
;
2673 sector_t sector
= start_sec
;
2676 struct sg_mapping_iter diter
;
2677 struct sg_mapping_iter piter
;
2679 BUG_ON(scsi_sg_count(SCpnt
) == 0);
2680 BUG_ON(scsi_prot_sg_count(SCpnt
) == 0);
2682 sg_miter_start(&piter
, scsi_prot_sglist(SCpnt
),
2683 scsi_prot_sg_count(SCpnt
),
2684 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
2685 sg_miter_start(&diter
, scsi_sglist(SCpnt
), scsi_sg_count(SCpnt
),
2686 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
2688 /* For each protection page */
2689 while (sg_miter_next(&piter
)) {
2691 if (WARN_ON(!sg_miter_next(&diter
))) {
2696 for (ppage_offset
= 0; ppage_offset
< piter
.length
;
2697 ppage_offset
+= sizeof(struct sd_dif_tuple
)) {
2698 /* If we're at the end of the current
2699 * data page advance to the next one
2701 if (dpage_offset
>= diter
.length
) {
2702 if (WARN_ON(!sg_miter_next(&diter
))) {
2709 sdt
= piter
.addr
+ ppage_offset
;
2710 daddr
= diter
.addr
+ dpage_offset
;
2712 ret
= dif_verify(sdt
, daddr
, sector
, ei_lba
);
2714 dump_sector(daddr
, sdebug_sector_size
);
2720 dpage_offset
+= sdebug_sector_size
;
2722 diter
.consumed
= dpage_offset
;
2723 sg_miter_stop(&diter
);
2725 sg_miter_stop(&piter
);
2727 dif_copy_prot(SCpnt
, start_sec
, sectors
, false);
2734 sg_miter_stop(&diter
);
2735 sg_miter_stop(&piter
);
2739 static unsigned long lba_to_map_index(sector_t lba
)
2741 if (sdebug_unmap_alignment
)
2742 lba
+= sdebug_unmap_granularity
- sdebug_unmap_alignment
;
2743 sector_div(lba
, sdebug_unmap_granularity
);
2747 static sector_t
map_index_to_lba(unsigned long index
)
2749 sector_t lba
= index
* sdebug_unmap_granularity
;
2751 if (sdebug_unmap_alignment
)
2752 lba
-= sdebug_unmap_granularity
- sdebug_unmap_alignment
;
2756 static unsigned int map_state(sector_t lba
, unsigned int *num
)
2759 unsigned int mapped
;
2760 unsigned long index
;
2763 index
= lba_to_map_index(lba
);
2764 mapped
= test_bit(index
, map_storep
);
2767 next
= find_next_zero_bit(map_storep
, map_size
, index
);
2769 next
= find_next_bit(map_storep
, map_size
, index
);
2771 end
= min_t(sector_t
, sdebug_store_sectors
, map_index_to_lba(next
));
2776 static void map_region(sector_t lba
, unsigned int len
)
2778 sector_t end
= lba
+ len
;
2781 unsigned long index
= lba_to_map_index(lba
);
2783 if (index
< map_size
)
2784 set_bit(index
, map_storep
);
2786 lba
= map_index_to_lba(index
+ 1);
2790 static void unmap_region(sector_t lba
, unsigned int len
)
2792 sector_t end
= lba
+ len
;
2795 unsigned long index
= lba_to_map_index(lba
);
2797 if (lba
== map_index_to_lba(index
) &&
2798 lba
+ sdebug_unmap_granularity
<= end
&&
2800 clear_bit(index
, map_storep
);
2801 if (sdebug_lbprz
) { /* for LBPRZ=2 return 0xff_s */
2802 memset(fake_storep
+
2803 lba
* sdebug_sector_size
,
2804 (sdebug_lbprz
& 1) ? 0 : 0xff,
2805 sdebug_sector_size
*
2806 sdebug_unmap_granularity
);
2809 memset(dif_storep
+ lba
, 0xff,
2810 sizeof(*dif_storep
) *
2811 sdebug_unmap_granularity
);
2814 lba
= map_index_to_lba(index
+ 1);
2818 static int resp_write_dt0(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
2820 u8
*cmd
= scp
->cmnd
;
2824 unsigned long iflags
;
2831 lba
= get_unaligned_be64(cmd
+ 2);
2832 num
= get_unaligned_be32(cmd
+ 10);
2837 lba
= get_unaligned_be32(cmd
+ 2);
2838 num
= get_unaligned_be16(cmd
+ 7);
2843 lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
2844 (u32
)(cmd
[1] & 0x1f) << 16;
2845 num
= (0 == cmd
[4]) ? 256 : cmd
[4];
2850 lba
= get_unaligned_be32(cmd
+ 2);
2851 num
= get_unaligned_be32(cmd
+ 6);
2854 case 0x53: /* XDWRITEREAD(10) */
2856 lba
= get_unaligned_be32(cmd
+ 2);
2857 num
= get_unaligned_be16(cmd
+ 7);
2860 default: /* assume WRITE(32) */
2861 lba
= get_unaligned_be64(cmd
+ 12);
2862 ei_lba
= get_unaligned_be32(cmd
+ 20);
2863 num
= get_unaligned_be32(cmd
+ 28);
2867 if (unlikely(have_dif_prot
&& check_prot
)) {
2868 if (sdebug_dif
== SD_DIF_TYPE2_PROTECTION
&&
2870 mk_sense_invalid_opcode(scp
);
2871 return check_condition_result
;
2873 if ((sdebug_dif
== SD_DIF_TYPE1_PROTECTION
||
2874 sdebug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
2875 (cmd
[1] & 0xe0) == 0)
2876 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected WR "
2880 /* inline check_device_access_params() */
2881 if (unlikely(lba
+ num
> sdebug_capacity
)) {
2882 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
2883 return check_condition_result
;
2885 /* transfer length excessive (tie in to block limits VPD page) */
2886 if (unlikely(num
> sdebug_store_sectors
)) {
2887 /* needs work to find which cdb byte 'num' comes from */
2888 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
2889 return check_condition_result
;
2892 write_lock_irqsave(&atomic_rw
, iflags
);
2895 if (unlikely(sdebug_dix
&& scsi_prot_sg_count(scp
))) {
2896 int prot_ret
= prot_verify_write(scp
, lba
, num
, ei_lba
);
2899 write_unlock_irqrestore(&atomic_rw
, iflags
);
2900 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, prot_ret
);
2901 return illegal_condition_result
;
2905 ret
= do_device_access(scp
, lba
, num
, true);
2906 if (unlikely(scsi_debug_lbp()))
2907 map_region(lba
, num
);
2908 write_unlock_irqrestore(&atomic_rw
, iflags
);
2909 if (unlikely(-1 == ret
))
2910 return DID_ERROR
<< 16;
2911 else if (unlikely(sdebug_verbose
&&
2912 (ret
< (num
* sdebug_sector_size
))))
2913 sdev_printk(KERN_INFO
, scp
->device
,
2914 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2915 my_name
, num
* sdebug_sector_size
, ret
);
2917 if (unlikely(sdebug_any_injecting_opt
)) {
2918 struct sdebug_queued_cmd
*sqcp
=
2919 (struct sdebug_queued_cmd
*)scp
->host_scribble
;
2922 if (sqcp
->inj_recovered
) {
2923 mk_sense_buffer(scp
, RECOVERED_ERROR
,
2924 THRESHOLD_EXCEEDED
, 0);
2925 return check_condition_result
;
2926 } else if (sqcp
->inj_dif
) {
2927 /* Logical block guard check failed */
2928 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 1);
2929 return illegal_condition_result
;
2930 } else if (sqcp
->inj_dix
) {
2931 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 1);
2932 return illegal_condition_result
;
2939 static int resp_write_same(struct scsi_cmnd
*scp
, u64 lba
, u32 num
,
2940 u32 ei_lba
, bool unmap
, bool ndob
)
2942 unsigned long iflags
;
2943 unsigned long long i
;
2947 ret
= check_device_access_params(scp
, lba
, num
);
2951 write_lock_irqsave(&atomic_rw
, iflags
);
2953 if (unmap
&& scsi_debug_lbp()) {
2954 unmap_region(lba
, num
);
2958 lba_off
= lba
* sdebug_sector_size
;
2959 /* if ndob then zero 1 logical block, else fetch 1 logical block */
2961 memset(fake_storep
+ lba_off
, 0, sdebug_sector_size
);
2964 ret
= fetch_to_dev_buffer(scp
, fake_storep
+ lba_off
,
2965 sdebug_sector_size
);
2968 write_unlock_irqrestore(&atomic_rw
, iflags
);
2969 return DID_ERROR
<< 16;
2970 } else if (sdebug_verbose
&& (ret
< (num
* sdebug_sector_size
)))
2971 sdev_printk(KERN_INFO
, scp
->device
,
2972 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2973 my_name
, "write same",
2974 num
* sdebug_sector_size
, ret
);
2976 /* Copy first sector to remaining blocks */
2977 for (i
= 1 ; i
< num
; i
++)
2978 memcpy(fake_storep
+ ((lba
+ i
) * sdebug_sector_size
),
2979 fake_storep
+ lba_off
,
2980 sdebug_sector_size
);
2982 if (scsi_debug_lbp())
2983 map_region(lba
, num
);
2985 write_unlock_irqrestore(&atomic_rw
, iflags
);
2990 static int resp_write_same_10(struct scsi_cmnd
*scp
,
2991 struct sdebug_dev_info
*devip
)
2993 u8
*cmd
= scp
->cmnd
;
3000 if (sdebug_lbpws10
== 0) {
3001 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 3);
3002 return check_condition_result
;
3006 lba
= get_unaligned_be32(cmd
+ 2);
3007 num
= get_unaligned_be16(cmd
+ 7);
3008 if (num
> sdebug_write_same_length
) {
3009 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 7, -1);
3010 return check_condition_result
;
3012 return resp_write_same(scp
, lba
, num
, ei_lba
, unmap
, false);
3015 static int resp_write_same_16(struct scsi_cmnd
*scp
,
3016 struct sdebug_dev_info
*devip
)
3018 u8
*cmd
= scp
->cmnd
;
3025 if (cmd
[1] & 0x8) { /* UNMAP */
3026 if (sdebug_lbpws
== 0) {
3027 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 3);
3028 return check_condition_result
;
3032 if (cmd
[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3034 lba
= get_unaligned_be64(cmd
+ 2);
3035 num
= get_unaligned_be32(cmd
+ 10);
3036 if (num
> sdebug_write_same_length
) {
3037 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 10, -1);
3038 return check_condition_result
;
3040 return resp_write_same(scp
, lba
, num
, ei_lba
, unmap
, ndob
);
3043 /* Note the mode field is in the same position as the (lower) service action
3044 * field. For the Report supported operation codes command, SPC-4 suggests
3045 * each mode of this command should be reported separately; for future. */
3046 static int resp_write_buffer(struct scsi_cmnd
*scp
,
3047 struct sdebug_dev_info
*devip
)
3049 u8
*cmd
= scp
->cmnd
;
3050 struct scsi_device
*sdp
= scp
->device
;
3051 struct sdebug_dev_info
*dp
;
3054 mode
= cmd
[1] & 0x1f;
3056 case 0x4: /* download microcode (MC) and activate (ACT) */
3057 /* set UAs on this device only */
3058 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3059 set_bit(SDEBUG_UA_MICROCODE_CHANGED
, devip
->uas_bm
);
3061 case 0x5: /* download MC, save and ACT */
3062 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
, devip
->uas_bm
);
3064 case 0x6: /* download MC with offsets and ACT */
3065 /* set UAs on most devices (LUs) in this target */
3066 list_for_each_entry(dp
,
3067 &devip
->sdbg_host
->dev_info_list
,
3069 if (dp
->target
== sdp
->id
) {
3070 set_bit(SDEBUG_UA_BUS_RESET
, dp
->uas_bm
);
3072 set_bit(SDEBUG_UA_MICROCODE_CHANGED
,
3076 case 0x7: /* download MC with offsets, save, and ACT */
3077 /* set UA on all devices (LUs) in this target */
3078 list_for_each_entry(dp
,
3079 &devip
->sdbg_host
->dev_info_list
,
3081 if (dp
->target
== sdp
->id
)
3082 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
,
3086 /* do nothing for this command for other mode values */
3092 static int resp_comp_write(struct scsi_cmnd
*scp
,
3093 struct sdebug_dev_info
*devip
)
3095 u8
*cmd
= scp
->cmnd
;
3097 u8
*fake_storep_hold
;
3100 u32 lb_size
= sdebug_sector_size
;
3102 unsigned long iflags
;
3106 lba
= get_unaligned_be64(cmd
+ 2);
3107 num
= cmd
[13]; /* 1 to a maximum of 255 logical blocks */
3109 return 0; /* degenerate case, not an error */
3110 if (sdebug_dif
== SD_DIF_TYPE2_PROTECTION
&&
3112 mk_sense_invalid_opcode(scp
);
3113 return check_condition_result
;
3115 if ((sdebug_dif
== SD_DIF_TYPE1_PROTECTION
||
3116 sdebug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
3117 (cmd
[1] & 0xe0) == 0)
3118 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected WR "
3121 /* inline check_device_access_params() */
3122 if (lba
+ num
> sdebug_capacity
) {
3123 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
3124 return check_condition_result
;
3126 /* transfer length excessive (tie in to block limits VPD page) */
3127 if (num
> sdebug_store_sectors
) {
3128 /* needs work to find which cdb byte 'num' comes from */
3129 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
3130 return check_condition_result
;
3133 arr
= kzalloc(dnum
* lb_size
, GFP_ATOMIC
);
3135 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3137 return check_condition_result
;
3140 write_lock_irqsave(&atomic_rw
, iflags
);
3142 /* trick do_device_access() to fetch both compare and write buffers
3143 * from data-in into arr. Safe (atomic) since write_lock held. */
3144 fake_storep_hold
= fake_storep
;
3146 ret
= do_device_access(scp
, 0, dnum
, true);
3147 fake_storep
= fake_storep_hold
;
3149 retval
= DID_ERROR
<< 16;
3151 } else if (sdebug_verbose
&& (ret
< (dnum
* lb_size
)))
3152 sdev_printk(KERN_INFO
, scp
->device
, "%s: compare_write: cdb "
3153 "indicated=%u, IO sent=%d bytes\n", my_name
,
3154 dnum
* lb_size
, ret
);
3155 if (!comp_write_worker(lba
, num
, arr
)) {
3156 mk_sense_buffer(scp
, MISCOMPARE
, MISCOMPARE_VERIFY_ASC
, 0);
3157 retval
= check_condition_result
;
3160 if (scsi_debug_lbp())
3161 map_region(lba
, num
);
3163 write_unlock_irqrestore(&atomic_rw
, iflags
);
3168 struct unmap_block_desc
{
3174 static int resp_unmap(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3177 struct unmap_block_desc
*desc
;
3178 unsigned int i
, payload_len
, descriptors
;
3180 unsigned long iflags
;
3183 if (!scsi_debug_lbp())
3184 return 0; /* fib and say its done */
3185 payload_len
= get_unaligned_be16(scp
->cmnd
+ 7);
3186 BUG_ON(scsi_bufflen(scp
) != payload_len
);
3188 descriptors
= (payload_len
- 8) / 16;
3189 if (descriptors
> sdebug_unmap_max_desc
) {
3190 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 7, -1);
3191 return check_condition_result
;
3194 buf
= kzalloc(scsi_bufflen(scp
), GFP_ATOMIC
);
3196 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3198 return check_condition_result
;
3201 scsi_sg_copy_to_buffer(scp
, buf
, scsi_bufflen(scp
));
3203 BUG_ON(get_unaligned_be16(&buf
[0]) != payload_len
- 2);
3204 BUG_ON(get_unaligned_be16(&buf
[2]) != descriptors
* 16);
3206 desc
= (void *)&buf
[8];
3208 write_lock_irqsave(&atomic_rw
, iflags
);
3210 for (i
= 0 ; i
< descriptors
; i
++) {
3211 unsigned long long lba
= get_unaligned_be64(&desc
[i
].lba
);
3212 unsigned int num
= get_unaligned_be32(&desc
[i
].blocks
);
3214 ret
= check_device_access_params(scp
, lba
, num
);
3218 unmap_region(lba
, num
);
3224 write_unlock_irqrestore(&atomic_rw
, iflags
);
3230 #define SDEBUG_GET_LBA_STATUS_LEN 32
3232 static int resp_get_lba_status(struct scsi_cmnd
*scp
,
3233 struct sdebug_dev_info
*devip
)
3235 u8
*cmd
= scp
->cmnd
;
3237 u32 alloc_len
, mapped
, num
;
3238 u8 arr
[SDEBUG_GET_LBA_STATUS_LEN
];
3241 lba
= get_unaligned_be64(cmd
+ 2);
3242 alloc_len
= get_unaligned_be32(cmd
+ 10);
3247 ret
= check_device_access_params(scp
, lba
, 1);
3251 if (scsi_debug_lbp())
3252 mapped
= map_state(lba
, &num
);
3255 /* following just in case virtual_gb changed */
3256 sdebug_capacity
= get_sdebug_capacity();
3257 if (sdebug_capacity
- lba
<= 0xffffffff)
3258 num
= sdebug_capacity
- lba
;
3263 memset(arr
, 0, SDEBUG_GET_LBA_STATUS_LEN
);
3264 put_unaligned_be32(20, arr
); /* Parameter Data Length */
3265 put_unaligned_be64(lba
, arr
+ 8); /* LBA */
3266 put_unaligned_be32(num
, arr
+ 16); /* Number of blocks */
3267 arr
[20] = !mapped
; /* prov_stat=0: mapped; 1: dealloc */
3269 return fill_from_dev_buffer(scp
, arr
, SDEBUG_GET_LBA_STATUS_LEN
);
3272 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3273 * (W-LUN), the normal Linux scanning logic does not associate it with a
3274 * device (e.g. /dev/sg7). The following magic will make that association:
3275 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3276 * where <n> is a host number. If there are multiple targets in a host then
3277 * the above will associate a W-LUN to each target. To only get a W-LUN
3278 * for target 2, then use "echo '- 2 49409' > scan" .
3280 static int resp_report_luns(struct scsi_cmnd
*scp
,
3281 struct sdebug_dev_info
*devip
)
3283 unsigned char *cmd
= scp
->cmnd
;
3284 unsigned int alloc_len
;
3285 unsigned char select_report
;
3287 struct scsi_lun
*lun_p
;
3289 unsigned int lun_cnt
; /* normal LUN count (max: 256) */
3290 unsigned int wlun_cnt
; /* report luns W-LUN count */
3291 unsigned int tlun_cnt
; /* total LUN count */
3292 unsigned int rlen
; /* response length (in bytes) */
3295 clear_luns_changed_on_target(devip
);
3297 select_report
= cmd
[2];
3298 alloc_len
= get_unaligned_be32(cmd
+ 6);
3300 if (alloc_len
< 4) {
3301 pr_err("alloc len too small %d\n", alloc_len
);
3302 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 6, -1);
3303 return check_condition_result
;
3306 switch (select_report
) {
3307 case 0: /* all LUNs apart from W-LUNs */
3308 lun_cnt
= sdebug_max_luns
;
3311 case 1: /* only W-LUNs */
3315 case 2: /* all LUNs */
3316 lun_cnt
= sdebug_max_luns
;
3319 case 0x10: /* only administrative LUs */
3320 case 0x11: /* see SPC-5 */
3321 case 0x12: /* only subsiduary LUs owned by referenced LU */
3323 pr_debug("select report invalid %d\n", select_report
);
3324 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, -1);
3325 return check_condition_result
;
3328 if (sdebug_no_lun_0
&& (lun_cnt
> 0))
3331 tlun_cnt
= lun_cnt
+ wlun_cnt
;
3333 rlen
= (tlun_cnt
* sizeof(struct scsi_lun
)) + 8;
3334 arr
= vmalloc(rlen
);
3336 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3338 return check_condition_result
;
3340 memset(arr
, 0, rlen
);
3341 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3342 select_report
, lun_cnt
, wlun_cnt
, sdebug_no_lun_0
);
3344 /* luns start at byte 8 in response following the header */
3345 lun_p
= (struct scsi_lun
*)&arr
[8];
3347 /* LUNs use single level peripheral device addressing method */
3348 lun
= sdebug_no_lun_0
? 1 : 0;
3349 for (i
= 0; i
< lun_cnt
; i
++)
3350 int_to_scsilun(lun
++, lun_p
++);
3353 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS
, lun_p
++);
3355 put_unaligned_be32(rlen
- 8, &arr
[0]);
3357 res
= fill_from_dev_buffer(scp
, arr
, rlen
);
3362 static int resp_xdwriteread(struct scsi_cmnd
*scp
, unsigned long long lba
,
3363 unsigned int num
, struct sdebug_dev_info
*devip
)
3366 unsigned char *kaddr
, *buf
;
3367 unsigned int offset
;
3368 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
3369 struct sg_mapping_iter miter
;
3371 /* better not to use temporary buffer. */
3372 buf
= kzalloc(scsi_bufflen(scp
), GFP_ATOMIC
);
3374 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3376 return check_condition_result
;
3379 scsi_sg_copy_to_buffer(scp
, buf
, scsi_bufflen(scp
));
3382 sg_miter_start(&miter
, sdb
->table
.sgl
, sdb
->table
.nents
,
3383 SG_MITER_ATOMIC
| SG_MITER_TO_SG
);
3385 while (sg_miter_next(&miter
)) {
3387 for (j
= 0; j
< miter
.length
; j
++)
3388 *(kaddr
+ j
) ^= *(buf
+ offset
+ j
);
3390 offset
+= miter
.length
;
3392 sg_miter_stop(&miter
);
3398 static int resp_xdwriteread_10(struct scsi_cmnd
*scp
,
3399 struct sdebug_dev_info
*devip
)
3401 u8
*cmd
= scp
->cmnd
;
3406 if (!scsi_bidi_cmnd(scp
)) {
3407 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3409 return check_condition_result
;
3411 errsts
= resp_read_dt0(scp
, devip
);
3414 if (!(cmd
[1] & 0x4)) { /* DISABLE_WRITE is not set */
3415 errsts
= resp_write_dt0(scp
, devip
);
3419 lba
= get_unaligned_be32(cmd
+ 2);
3420 num
= get_unaligned_be16(cmd
+ 7);
3421 return resp_xdwriteread(scp
, lba
, num
, devip
);
3424 static struct sdebug_queue
*get_queue(struct scsi_cmnd
*cmnd
)
3426 struct sdebug_queue
*sqp
= sdebug_q_arr
;
3428 if (sdebug_mq_active
) {
3429 u32 tag
= blk_mq_unique_tag(cmnd
->request
);
3430 u16 hwq
= blk_mq_unique_tag_to_hwq(tag
);
3432 if (unlikely(hwq
>= submit_queues
)) {
3433 pr_warn("Unexpected hwq=%d, apply modulo\n", hwq
);
3434 hwq
%= submit_queues
;
3436 pr_debug("tag=%u, hwq=%d\n", tag
, hwq
);
3442 /* Queued (deferred) command completions converge here. */
3443 static void sdebug_q_cmd_complete(struct sdebug_defer
*sd_dp
)
3447 unsigned long iflags
;
3448 struct sdebug_queue
*sqp
;
3449 struct sdebug_queued_cmd
*sqcp
;
3450 struct scsi_cmnd
*scp
;
3451 struct sdebug_dev_info
*devip
;
3453 qc_idx
= sd_dp
->qc_idx
;
3454 sqp
= sdebug_q_arr
+ sd_dp
->sqa_idx
;
3455 if (sdebug_statistics
) {
3456 atomic_inc(&sdebug_completions
);
3457 if (raw_smp_processor_id() != sd_dp
->issuing_cpu
)
3458 atomic_inc(&sdebug_miss_cpus
);
3460 if (unlikely((qc_idx
< 0) || (qc_idx
>= SDEBUG_CANQUEUE
))) {
3461 pr_err("wild qc_idx=%d\n", qc_idx
);
3464 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3465 sqcp
= &sqp
->qc_arr
[qc_idx
];
3467 if (unlikely(scp
== NULL
)) {
3468 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3469 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3470 sd_dp
->sqa_idx
, qc_idx
);
3473 devip
= (struct sdebug_dev_info
*)scp
->device
->hostdata
;
3475 atomic_dec(&devip
->num_in_q
);
3477 pr_err("devip=NULL\n");
3478 if (unlikely(atomic_read(&retired_max_queue
) > 0))
3481 sqcp
->a_cmnd
= NULL
;
3482 if (unlikely(!test_and_clear_bit(qc_idx
, sqp
->in_use_bm
))) {
3483 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3484 pr_err("Unexpected completion\n");
3488 if (unlikely(retiring
)) { /* user has reduced max_queue */
3491 retval
= atomic_read(&retired_max_queue
);
3492 if (qc_idx
>= retval
) {
3493 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3494 pr_err("index %d too large\n", retval
);
3497 k
= find_last_bit(sqp
->in_use_bm
, retval
);
3498 if ((k
< sdebug_max_queue
) || (k
== retval
))
3499 atomic_set(&retired_max_queue
, 0);
3501 atomic_set(&retired_max_queue
, k
+ 1);
3503 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3504 scp
->scsi_done(scp
); /* callback to mid level */
3507 /* When high resolution timer goes off this function is called. */
3508 static enum hrtimer_restart
sdebug_q_cmd_hrt_complete(struct hrtimer
*timer
)
3510 struct sdebug_defer
*sd_dp
= container_of(timer
, struct sdebug_defer
,
3512 sdebug_q_cmd_complete(sd_dp
);
3513 return HRTIMER_NORESTART
;
3516 /* When work queue schedules work, it calls this function. */
3517 static void sdebug_q_cmd_wq_complete(struct work_struct
*work
)
3519 struct sdebug_defer
*sd_dp
= container_of(work
, struct sdebug_defer
,
3521 sdebug_q_cmd_complete(sd_dp
);
3524 static bool got_shared_uuid
;
3525 static uuid_be shared_uuid
;
3527 static struct sdebug_dev_info
*sdebug_device_create(
3528 struct sdebug_host_info
*sdbg_host
, gfp_t flags
)
3530 struct sdebug_dev_info
*devip
;
3532 devip
= kzalloc(sizeof(*devip
), flags
);
3534 if (sdebug_uuid_ctl
== 1)
3535 uuid_be_gen(&devip
->lu_name
);
3536 else if (sdebug_uuid_ctl
== 2) {
3537 if (got_shared_uuid
)
3538 devip
->lu_name
= shared_uuid
;
3540 uuid_be_gen(&shared_uuid
);
3541 got_shared_uuid
= true;
3542 devip
->lu_name
= shared_uuid
;
3545 devip
->sdbg_host
= sdbg_host
;
3546 list_add_tail(&devip
->dev_list
, &sdbg_host
->dev_info_list
);
3551 static struct sdebug_dev_info
*find_build_dev_info(struct scsi_device
*sdev
)
3553 struct sdebug_host_info
*sdbg_host
;
3554 struct sdebug_dev_info
*open_devip
= NULL
;
3555 struct sdebug_dev_info
*devip
;
3557 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(sdev
->host
);
3559 pr_err("Host info NULL\n");
3562 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
, dev_list
) {
3563 if ((devip
->used
) && (devip
->channel
== sdev
->channel
) &&
3564 (devip
->target
== sdev
->id
) &&
3565 (devip
->lun
== sdev
->lun
))
3568 if ((!devip
->used
) && (!open_devip
))
3572 if (!open_devip
) { /* try and make a new one */
3573 open_devip
= sdebug_device_create(sdbg_host
, GFP_ATOMIC
);
3575 pr_err("out of memory at line %d\n", __LINE__
);
3580 open_devip
->channel
= sdev
->channel
;
3581 open_devip
->target
= sdev
->id
;
3582 open_devip
->lun
= sdev
->lun
;
3583 open_devip
->sdbg_host
= sdbg_host
;
3584 atomic_set(&open_devip
->num_in_q
, 0);
3585 set_bit(SDEBUG_UA_POR
, open_devip
->uas_bm
);
3586 open_devip
->used
= true;
3590 static int scsi_debug_slave_alloc(struct scsi_device
*sdp
)
3593 pr_info("slave_alloc <%u %u %u %llu>\n",
3594 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
3595 queue_flag_set_unlocked(QUEUE_FLAG_BIDI
, sdp
->request_queue
);
3599 static int scsi_debug_slave_configure(struct scsi_device
*sdp
)
3601 struct sdebug_dev_info
*devip
=
3602 (struct sdebug_dev_info
*)sdp
->hostdata
;
3605 pr_info("slave_configure <%u %u %u %llu>\n",
3606 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
3607 if (sdp
->host
->max_cmd_len
!= SDEBUG_MAX_CMD_LEN
)
3608 sdp
->host
->max_cmd_len
= SDEBUG_MAX_CMD_LEN
;
3609 if (devip
== NULL
) {
3610 devip
= find_build_dev_info(sdp
);
3612 return 1; /* no resources, will be marked offline */
3614 sdp
->hostdata
= devip
;
3615 blk_queue_max_segment_size(sdp
->request_queue
, -1U);
3617 sdp
->no_uld_attach
= 1;
3621 static void scsi_debug_slave_destroy(struct scsi_device
*sdp
)
3623 struct sdebug_dev_info
*devip
=
3624 (struct sdebug_dev_info
*)sdp
->hostdata
;
3627 pr_info("slave_destroy <%u %u %u %llu>\n",
3628 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
3630 /* make this slot available for re-use */
3631 devip
->used
= false;
3632 sdp
->hostdata
= NULL
;
3636 static void stop_qc_helper(struct sdebug_defer
*sd_dp
)
3640 if ((sdebug_jdelay
> 0) || (sdebug_ndelay
> 0))
3641 hrtimer_cancel(&sd_dp
->hrt
);
3642 else if (sdebug_jdelay
< 0)
3643 cancel_work_sync(&sd_dp
->ew
.work
);
3646 /* If @cmnd found deletes its timer or work queue and returns true; else
3648 static bool stop_queued_cmnd(struct scsi_cmnd
*cmnd
)
3650 unsigned long iflags
;
3651 int j
, k
, qmax
, r_qmax
;
3652 struct sdebug_queue
*sqp
;
3653 struct sdebug_queued_cmd
*sqcp
;
3654 struct sdebug_dev_info
*devip
;
3655 struct sdebug_defer
*sd_dp
;
3657 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
) {
3658 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3659 qmax
= sdebug_max_queue
;
3660 r_qmax
= atomic_read(&retired_max_queue
);
3663 for (k
= 0; k
< qmax
; ++k
) {
3664 if (test_bit(k
, sqp
->in_use_bm
)) {
3665 sqcp
= &sqp
->qc_arr
[k
];
3666 if (cmnd
!= sqcp
->a_cmnd
)
3669 devip
= (struct sdebug_dev_info
*)
3670 cmnd
->device
->hostdata
;
3672 atomic_dec(&devip
->num_in_q
);
3673 sqcp
->a_cmnd
= NULL
;
3674 sd_dp
= sqcp
->sd_dp
;
3675 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3676 stop_qc_helper(sd_dp
);
3677 clear_bit(k
, sqp
->in_use_bm
);
3681 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3686 /* Deletes (stops) timers or work queues of all queued commands */
3687 static void stop_all_queued(void)
3689 unsigned long iflags
;
3691 struct sdebug_queue
*sqp
;
3692 struct sdebug_queued_cmd
*sqcp
;
3693 struct sdebug_dev_info
*devip
;
3694 struct sdebug_defer
*sd_dp
;
3696 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
) {
3697 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3698 for (k
= 0; k
< SDEBUG_CANQUEUE
; ++k
) {
3699 if (test_bit(k
, sqp
->in_use_bm
)) {
3700 sqcp
= &sqp
->qc_arr
[k
];
3701 if (sqcp
->a_cmnd
== NULL
)
3703 devip
= (struct sdebug_dev_info
*)
3704 sqcp
->a_cmnd
->device
->hostdata
;
3706 atomic_dec(&devip
->num_in_q
);
3707 sqcp
->a_cmnd
= NULL
;
3708 sd_dp
= sqcp
->sd_dp
;
3709 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3710 stop_qc_helper(sd_dp
);
3711 clear_bit(k
, sqp
->in_use_bm
);
3712 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3715 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3719 /* Free queued command memory on heap */
3720 static void free_all_queued(void)
3723 struct sdebug_queue
*sqp
;
3724 struct sdebug_queued_cmd
*sqcp
;
3726 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
) {
3727 for (k
= 0; k
< SDEBUG_CANQUEUE
; ++k
) {
3728 sqcp
= &sqp
->qc_arr
[k
];
3735 static int scsi_debug_abort(struct scsi_cmnd
*SCpnt
)
3741 ok
= stop_queued_cmnd(SCpnt
);
3742 if (SCpnt
->device
&& (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
))
3743 sdev_printk(KERN_INFO
, SCpnt
->device
,
3744 "%s: command%s found\n", __func__
,
3750 static int scsi_debug_device_reset(struct scsi_cmnd
* SCpnt
)
3753 if (SCpnt
&& SCpnt
->device
) {
3754 struct scsi_device
*sdp
= SCpnt
->device
;
3755 struct sdebug_dev_info
*devip
=
3756 (struct sdebug_dev_info
*)sdp
->hostdata
;
3758 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
3759 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
3761 set_bit(SDEBUG_UA_POR
, devip
->uas_bm
);
3766 static int scsi_debug_target_reset(struct scsi_cmnd
*SCpnt
)
3768 struct sdebug_host_info
*sdbg_host
;
3769 struct sdebug_dev_info
*devip
;
3770 struct scsi_device
*sdp
;
3771 struct Scsi_Host
*hp
;
3774 ++num_target_resets
;
3777 sdp
= SCpnt
->device
;
3780 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
3781 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
3785 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(hp
);
3787 list_for_each_entry(devip
,
3788 &sdbg_host
->dev_info_list
,
3790 if (devip
->target
== sdp
->id
) {
3791 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3795 if (SDEBUG_OPT_RESET_NOISE
& sdebug_opts
)
3796 sdev_printk(KERN_INFO
, sdp
,
3797 "%s: %d device(s) found in target\n", __func__
, k
);
3802 static int scsi_debug_bus_reset(struct scsi_cmnd
* SCpnt
)
3804 struct sdebug_host_info
*sdbg_host
;
3805 struct sdebug_dev_info
*devip
;
3806 struct scsi_device
* sdp
;
3807 struct Scsi_Host
* hp
;
3811 if (!(SCpnt
&& SCpnt
->device
))
3813 sdp
= SCpnt
->device
;
3814 if (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
)
3815 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
3818 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(hp
);
3820 list_for_each_entry(devip
,
3821 &sdbg_host
->dev_info_list
,
3823 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3828 if (SDEBUG_OPT_RESET_NOISE
& sdebug_opts
)
3829 sdev_printk(KERN_INFO
, sdp
,
3830 "%s: %d device(s) found in host\n", __func__
, k
);
3835 static int scsi_debug_host_reset(struct scsi_cmnd
* SCpnt
)
3837 struct sdebug_host_info
* sdbg_host
;
3838 struct sdebug_dev_info
*devip
;
3842 if ((SCpnt
->device
) && (SDEBUG_OPT_ALL_NOISE
& sdebug_opts
))
3843 sdev_printk(KERN_INFO
, SCpnt
->device
, "%s\n", __func__
);
3844 spin_lock(&sdebug_host_list_lock
);
3845 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
3846 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
,
3848 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3852 spin_unlock(&sdebug_host_list_lock
);
3854 if (SDEBUG_OPT_RESET_NOISE
& sdebug_opts
)
3855 sdev_printk(KERN_INFO
, SCpnt
->device
,
3856 "%s: %d device(s) found\n", __func__
, k
);
3860 static void __init
sdebug_build_parts(unsigned char *ramp
,
3861 unsigned long store_size
)
3863 struct partition
* pp
;
3864 int starts
[SDEBUG_MAX_PARTS
+ 2];
3865 int sectors_per_part
, num_sectors
, k
;
3866 int heads_by_sects
, start_sec
, end_sec
;
3868 /* assume partition table already zeroed */
3869 if ((sdebug_num_parts
< 1) || (store_size
< 1048576))
3871 if (sdebug_num_parts
> SDEBUG_MAX_PARTS
) {
3872 sdebug_num_parts
= SDEBUG_MAX_PARTS
;
3873 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS
);
3875 num_sectors
= (int)sdebug_store_sectors
;
3876 sectors_per_part
= (num_sectors
- sdebug_sectors_per
)
3878 heads_by_sects
= sdebug_heads
* sdebug_sectors_per
;
3879 starts
[0] = sdebug_sectors_per
;
3880 for (k
= 1; k
< sdebug_num_parts
; ++k
)
3881 starts
[k
] = ((k
* sectors_per_part
) / heads_by_sects
)
3883 starts
[sdebug_num_parts
] = num_sectors
;
3884 starts
[sdebug_num_parts
+ 1] = 0;
3886 ramp
[510] = 0x55; /* magic partition markings */
3888 pp
= (struct partition
*)(ramp
+ 0x1be);
3889 for (k
= 0; starts
[k
+ 1]; ++k
, ++pp
) {
3890 start_sec
= starts
[k
];
3891 end_sec
= starts
[k
+ 1] - 1;
3894 pp
->cyl
= start_sec
/ heads_by_sects
;
3895 pp
->head
= (start_sec
- (pp
->cyl
* heads_by_sects
))
3896 / sdebug_sectors_per
;
3897 pp
->sector
= (start_sec
% sdebug_sectors_per
) + 1;
3899 pp
->end_cyl
= end_sec
/ heads_by_sects
;
3900 pp
->end_head
= (end_sec
- (pp
->end_cyl
* heads_by_sects
))
3901 / sdebug_sectors_per
;
3902 pp
->end_sector
= (end_sec
% sdebug_sectors_per
) + 1;
3904 pp
->start_sect
= cpu_to_le32(start_sec
);
3905 pp
->nr_sects
= cpu_to_le32(end_sec
- start_sec
+ 1);
3906 pp
->sys_ind
= 0x83; /* plain Linux partition */
3910 static void block_unblock_all_queues(bool block
)
3913 struct sdebug_queue
*sqp
;
3915 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
)
3916 atomic_set(&sqp
->blocked
, (int)block
);
3919 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
3920 * commands will be processed normally before triggers occur.
3922 static void tweak_cmnd_count(void)
3926 modulo
= abs(sdebug_every_nth
);
3929 block_unblock_all_queues(true);
3930 count
= atomic_read(&sdebug_cmnd_count
);
3931 atomic_set(&sdebug_cmnd_count
, (count
/ modulo
) * modulo
);
3932 block_unblock_all_queues(false);
3935 static void clear_queue_stats(void)
3937 atomic_set(&sdebug_cmnd_count
, 0);
3938 atomic_set(&sdebug_completions
, 0);
3939 atomic_set(&sdebug_miss_cpus
, 0);
3940 atomic_set(&sdebug_a_tsf
, 0);
3943 static void setup_inject(struct sdebug_queue
*sqp
,
3944 struct sdebug_queued_cmd
*sqcp
)
3946 if ((atomic_read(&sdebug_cmnd_count
) % abs(sdebug_every_nth
)) > 0)
3948 sqcp
->inj_recovered
= !!(SDEBUG_OPT_RECOVERED_ERR
& sdebug_opts
);
3949 sqcp
->inj_transport
= !!(SDEBUG_OPT_TRANSPORT_ERR
& sdebug_opts
);
3950 sqcp
->inj_dif
= !!(SDEBUG_OPT_DIF_ERR
& sdebug_opts
);
3951 sqcp
->inj_dix
= !!(SDEBUG_OPT_DIX_ERR
& sdebug_opts
);
3952 sqcp
->inj_short
= !!(SDEBUG_OPT_SHORT_TRANSFER
& sdebug_opts
);
3955 /* Complete the processing of the thread that queued a SCSI command to this
3956 * driver. It either completes the command by calling cmnd_done() or
3957 * schedules a hr timer or work queue then returns 0. Returns
3958 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
3960 static int schedule_resp(struct scsi_cmnd
*cmnd
, struct sdebug_dev_info
*devip
,
3961 int scsi_result
, int delta_jiff
)
3963 unsigned long iflags
;
3964 int k
, num_in_q
, qdepth
, inject
;
3965 struct sdebug_queue
*sqp
;
3966 struct sdebug_queued_cmd
*sqcp
;
3967 struct scsi_device
*sdp
;
3968 struct sdebug_defer
*sd_dp
;
3970 if (unlikely(devip
== NULL
)) {
3971 if (scsi_result
== 0)
3972 scsi_result
= DID_NO_CONNECT
<< 16;
3973 goto respond_in_thread
;
3977 if (unlikely(sdebug_verbose
&& scsi_result
))
3978 sdev_printk(KERN_INFO
, sdp
, "%s: non-zero result=0x%x\n",
3979 __func__
, scsi_result
);
3980 if (delta_jiff
== 0)
3981 goto respond_in_thread
;
3983 /* schedule the response at a later time if resources permit */
3984 sqp
= get_queue(cmnd
);
3985 spin_lock_irqsave(&sqp
->qc_lock
, iflags
);
3986 if (unlikely(atomic_read(&sqp
->blocked
))) {
3987 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3988 return SCSI_MLQUEUE_HOST_BUSY
;
3990 num_in_q
= atomic_read(&devip
->num_in_q
);
3991 qdepth
= cmnd
->device
->queue_depth
;
3993 if (unlikely((qdepth
> 0) && (num_in_q
>= qdepth
))) {
3995 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
3996 goto respond_in_thread
;
3998 scsi_result
= device_qfull_result
;
3999 } else if (unlikely(sdebug_every_nth
&&
4000 (SDEBUG_OPT_RARE_TSF
& sdebug_opts
) &&
4001 (scsi_result
== 0))) {
4002 if ((num_in_q
== (qdepth
- 1)) &&
4003 (atomic_inc_return(&sdebug_a_tsf
) >=
4004 abs(sdebug_every_nth
))) {
4005 atomic_set(&sdebug_a_tsf
, 0);
4007 scsi_result
= device_qfull_result
;
4011 k
= find_first_zero_bit(sqp
->in_use_bm
, sdebug_max_queue
);
4012 if (unlikely(k
>= sdebug_max_queue
)) {
4013 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
4015 goto respond_in_thread
;
4016 else if (SDEBUG_OPT_ALL_TSF
& sdebug_opts
)
4017 scsi_result
= device_qfull_result
;
4018 if (SDEBUG_OPT_Q_NOISE
& sdebug_opts
)
4019 sdev_printk(KERN_INFO
, sdp
,
4020 "%s: max_queue=%d exceeded, %s\n",
4021 __func__
, sdebug_max_queue
,
4022 (scsi_result
? "status: TASK SET FULL" :
4023 "report: host busy"));
4025 goto respond_in_thread
;
4027 return SCSI_MLQUEUE_HOST_BUSY
;
4029 __set_bit(k
, sqp
->in_use_bm
);
4030 atomic_inc(&devip
->num_in_q
);
4031 sqcp
= &sqp
->qc_arr
[k
];
4032 sqcp
->a_cmnd
= cmnd
;
4033 cmnd
->host_scribble
= (unsigned char *)sqcp
;
4034 cmnd
->result
= scsi_result
;
4035 sd_dp
= sqcp
->sd_dp
;
4036 spin_unlock_irqrestore(&sqp
->qc_lock
, iflags
);
4037 if (unlikely(sdebug_every_nth
&& sdebug_any_injecting_opt
))
4038 setup_inject(sqp
, sqcp
);
4039 if (delta_jiff
> 0 || sdebug_ndelay
> 0) {
4042 if (delta_jiff
> 0) {
4045 jiffies_to_timespec(delta_jiff
, &ts
);
4046 kt
= ktime_set(ts
.tv_sec
, ts
.tv_nsec
);
4048 kt
= ktime_set(0, sdebug_ndelay
);
4049 if (NULL
== sd_dp
) {
4050 sd_dp
= kzalloc(sizeof(*sd_dp
), GFP_ATOMIC
);
4052 return SCSI_MLQUEUE_HOST_BUSY
;
4053 sqcp
->sd_dp
= sd_dp
;
4054 hrtimer_init(&sd_dp
->hrt
, CLOCK_MONOTONIC
,
4055 HRTIMER_MODE_REL_PINNED
);
4056 sd_dp
->hrt
.function
= sdebug_q_cmd_hrt_complete
;
4057 sd_dp
->sqa_idx
= sqp
- sdebug_q_arr
;
4060 if (sdebug_statistics
)
4061 sd_dp
->issuing_cpu
= raw_smp_processor_id();
4062 hrtimer_start(&sd_dp
->hrt
, kt
, HRTIMER_MODE_REL_PINNED
);
4063 } else { /* jdelay < 0, use work queue */
4064 if (NULL
== sd_dp
) {
4065 sd_dp
= kzalloc(sizeof(*sqcp
->sd_dp
), GFP_ATOMIC
);
4067 return SCSI_MLQUEUE_HOST_BUSY
;
4068 sqcp
->sd_dp
= sd_dp
;
4069 sd_dp
->sqa_idx
= sqp
- sdebug_q_arr
;
4071 INIT_WORK(&sd_dp
->ew
.work
, sdebug_q_cmd_wq_complete
);
4073 if (sdebug_statistics
)
4074 sd_dp
->issuing_cpu
= raw_smp_processor_id();
4075 schedule_work(&sd_dp
->ew
.work
);
4077 if (unlikely((SDEBUG_OPT_Q_NOISE
& sdebug_opts
) &&
4078 (scsi_result
== device_qfull_result
)))
4079 sdev_printk(KERN_INFO
, sdp
,
4080 "%s: num_in_q=%d +1, %s%s\n", __func__
,
4081 num_in_q
, (inject
? "<inject> " : ""),
4082 "status: TASK SET FULL");
4085 respond_in_thread
: /* call back to mid-layer using invocation thread */
4086 cmnd
->result
= scsi_result
;
4087 cmnd
->scsi_done(cmnd
);
4091 /* Note: The following macros create attribute files in the
4092 /sys/module/scsi_debug/parameters directory. Unfortunately this
4093 driver is unaware of a change and cannot trigger auxiliary actions
4094 as it can when the corresponding attribute in the
4095 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4097 module_param_named(add_host
, sdebug_add_host
, int, S_IRUGO
| S_IWUSR
);
4098 module_param_named(ato
, sdebug_ato
, int, S_IRUGO
);
4099 module_param_named(clustering
, sdebug_clustering
, bool, S_IRUGO
| S_IWUSR
);
4100 module_param_named(delay
, sdebug_jdelay
, int, S_IRUGO
| S_IWUSR
);
4101 module_param_named(dev_size_mb
, sdebug_dev_size_mb
, int, S_IRUGO
);
4102 module_param_named(dif
, sdebug_dif
, int, S_IRUGO
);
4103 module_param_named(dix
, sdebug_dix
, int, S_IRUGO
);
4104 module_param_named(dsense
, sdebug_dsense
, int, S_IRUGO
| S_IWUSR
);
4105 module_param_named(every_nth
, sdebug_every_nth
, int, S_IRUGO
| S_IWUSR
);
4106 module_param_named(fake_rw
, sdebug_fake_rw
, int, S_IRUGO
| S_IWUSR
);
4107 module_param_named(guard
, sdebug_guard
, uint
, S_IRUGO
);
4108 module_param_named(host_lock
, sdebug_host_lock
, bool, S_IRUGO
| S_IWUSR
);
4109 module_param_named(lbpu
, sdebug_lbpu
, int, S_IRUGO
);
4110 module_param_named(lbpws
, sdebug_lbpws
, int, S_IRUGO
);
4111 module_param_named(lbpws10
, sdebug_lbpws10
, int, S_IRUGO
);
4112 module_param_named(lbprz
, sdebug_lbprz
, int, S_IRUGO
);
4113 module_param_named(lowest_aligned
, sdebug_lowest_aligned
, int, S_IRUGO
);
4114 module_param_named(max_luns
, sdebug_max_luns
, int, S_IRUGO
| S_IWUSR
);
4115 module_param_named(max_queue
, sdebug_max_queue
, int, S_IRUGO
| S_IWUSR
);
4116 module_param_named(ndelay
, sdebug_ndelay
, int, S_IRUGO
| S_IWUSR
);
4117 module_param_named(no_lun_0
, sdebug_no_lun_0
, int, S_IRUGO
| S_IWUSR
);
4118 module_param_named(no_uld
, sdebug_no_uld
, int, S_IRUGO
);
4119 module_param_named(num_parts
, sdebug_num_parts
, int, S_IRUGO
);
4120 module_param_named(num_tgts
, sdebug_num_tgts
, int, S_IRUGO
| S_IWUSR
);
4121 module_param_named(opt_blks
, sdebug_opt_blks
, int, S_IRUGO
);
4122 module_param_named(opts
, sdebug_opts
, int, S_IRUGO
| S_IWUSR
);
4123 module_param_named(physblk_exp
, sdebug_physblk_exp
, int, S_IRUGO
);
4124 module_param_named(ptype
, sdebug_ptype
, int, S_IRUGO
| S_IWUSR
);
4125 module_param_named(removable
, sdebug_removable
, bool, S_IRUGO
| S_IWUSR
);
4126 module_param_named(scsi_level
, sdebug_scsi_level
, int, S_IRUGO
);
4127 module_param_named(sector_size
, sdebug_sector_size
, int, S_IRUGO
);
4128 module_param_named(statistics
, sdebug_statistics
, bool, S_IRUGO
| S_IWUSR
);
4129 module_param_named(strict
, sdebug_strict
, bool, S_IRUGO
| S_IWUSR
);
4130 module_param_named(submit_queues
, submit_queues
, int, S_IRUGO
);
4131 module_param_named(unmap_alignment
, sdebug_unmap_alignment
, int, S_IRUGO
);
4132 module_param_named(unmap_granularity
, sdebug_unmap_granularity
, int, S_IRUGO
);
4133 module_param_named(unmap_max_blocks
, sdebug_unmap_max_blocks
, int, S_IRUGO
);
4134 module_param_named(unmap_max_desc
, sdebug_unmap_max_desc
, int, S_IRUGO
);
4135 module_param_named(virtual_gb
, sdebug_virtual_gb
, int, S_IRUGO
| S_IWUSR
);
4136 module_param_named(uuid_ctl
, sdebug_uuid_ctl
, int, S_IRUGO
);
4137 module_param_named(vpd_use_hostno
, sdebug_vpd_use_hostno
, int,
4139 module_param_named(write_same_length
, sdebug_write_same_length
, int,
4142 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4143 MODULE_DESCRIPTION("SCSI debug adapter driver");
4144 MODULE_LICENSE("GPL");
4145 MODULE_VERSION(SDEBUG_VERSION
);
4147 MODULE_PARM_DESC(add_host
, "0..127 hosts allowed(def=1)");
4148 MODULE_PARM_DESC(ato
, "application tag ownership: 0=disk 1=host (def=1)");
4149 MODULE_PARM_DESC(clustering
, "when set enables larger transfers (def=0)");
4150 MODULE_PARM_DESC(delay
, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4151 MODULE_PARM_DESC(dev_size_mb
, "size in MiB of ram shared by devs(def=8)");
4152 MODULE_PARM_DESC(dif
, "data integrity field type: 0-3 (def=0)");
4153 MODULE_PARM_DESC(dix
, "data integrity extensions mask (def=0)");
4154 MODULE_PARM_DESC(dsense
, "use descriptor sense format(def=0 -> fixed)");
4155 MODULE_PARM_DESC(every_nth
, "timeout every nth command(def=0)");
4156 MODULE_PARM_DESC(fake_rw
, "fake reads/writes instead of copying (def=0)");
4157 MODULE_PARM_DESC(guard
, "protection checksum: 0=crc, 1=ip (def=0)");
4158 MODULE_PARM_DESC(host_lock
, "host_lock is ignored (def=0)");
4159 MODULE_PARM_DESC(lbpu
, "enable LBP, support UNMAP command (def=0)");
4160 MODULE_PARM_DESC(lbpws
, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4161 MODULE_PARM_DESC(lbpws10
, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4162 MODULE_PARM_DESC(lbprz
,
4163 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4164 MODULE_PARM_DESC(lowest_aligned
, "lowest aligned lba (def=0)");
4165 MODULE_PARM_DESC(max_luns
, "number of LUNs per target to simulate(def=1)");
4166 MODULE_PARM_DESC(max_queue
, "max number of queued commands (1 to max(def))");
4167 MODULE_PARM_DESC(ndelay
, "response delay in nanoseconds (def=0 -> ignore)");
4168 MODULE_PARM_DESC(no_lun_0
, "no LU number 0 (def=0 -> have lun 0)");
4169 MODULE_PARM_DESC(no_uld
, "stop ULD (e.g. sd driver) attaching (def=0))");
4170 MODULE_PARM_DESC(num_parts
, "number of partitions(def=0)");
4171 MODULE_PARM_DESC(num_tgts
, "number of targets per host to simulate(def=1)");
4172 MODULE_PARM_DESC(opt_blks
, "optimal transfer length in blocks (def=1024)");
4173 MODULE_PARM_DESC(opts
, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4174 MODULE_PARM_DESC(physblk_exp
, "physical block exponent (def=0)");
4175 MODULE_PARM_DESC(ptype
, "SCSI peripheral type(def=0[disk])");
4176 MODULE_PARM_DESC(removable
, "claim to have removable media (def=0)");
4177 MODULE_PARM_DESC(scsi_level
, "SCSI level to simulate(def=7[SPC-5])");
4178 MODULE_PARM_DESC(sector_size
, "logical block size in bytes (def=512)");
4179 MODULE_PARM_DESC(statistics
, "collect statistics on commands, queues (def=0)");
4180 MODULE_PARM_DESC(strict
, "stricter checks: reserved field in cdb (def=0)");
4181 MODULE_PARM_DESC(submit_queues
, "support for block multi-queue (def=1)");
4182 MODULE_PARM_DESC(unmap_alignment
, "lowest aligned thin provisioning lba (def=0)");
4183 MODULE_PARM_DESC(unmap_granularity
, "thin provisioning granularity in blocks (def=1)");
4184 MODULE_PARM_DESC(unmap_max_blocks
, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4185 MODULE_PARM_DESC(unmap_max_desc
, "max # of ranges that can be unmapped in one cmd (def=256)");
4186 MODULE_PARM_DESC(uuid_ctl
,
4187 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4188 MODULE_PARM_DESC(virtual_gb
, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4189 MODULE_PARM_DESC(vpd_use_hostno
, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4190 MODULE_PARM_DESC(write_same_length
, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4192 #define SDEBUG_INFO_LEN 256
4193 static char sdebug_info
[SDEBUG_INFO_LEN
];
4195 static const char * scsi_debug_info(struct Scsi_Host
* shp
)
4199 k
= scnprintf(sdebug_info
, SDEBUG_INFO_LEN
, "%s: version %s [%s]\n",
4200 my_name
, SDEBUG_VERSION
, sdebug_version_date
);
4201 if (k
>= (SDEBUG_INFO_LEN
- 1))
4203 scnprintf(sdebug_info
+ k
, SDEBUG_INFO_LEN
- k
,
4204 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4205 sdebug_dev_size_mb
, sdebug_opts
, submit_queues
,
4206 "statistics", (int)sdebug_statistics
);
4210 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4211 static int scsi_debug_write_info(struct Scsi_Host
*host
, char *buffer
,
4216 int minLen
= length
> 15 ? 15 : length
;
4218 if (!capable(CAP_SYS_ADMIN
) || !capable(CAP_SYS_RAWIO
))
4220 memcpy(arr
, buffer
, minLen
);
4222 if (1 != sscanf(arr
, "%d", &opts
))
4225 sdebug_verbose
= !!(SDEBUG_OPT_NOISE
& opts
);
4226 sdebug_any_injecting_opt
= !!(SDEBUG_OPT_ALL_INJECTING
& opts
);
4227 if (sdebug_every_nth
!= 0)
4232 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4233 * same for each scsi_debug host (if more than one). Some of the counters
4234 * output are not atomics so might be inaccurate in a busy system. */
4235 static int scsi_debug_show_info(struct seq_file
*m
, struct Scsi_Host
*host
)
4238 struct sdebug_queue
*sqp
;
4240 seq_printf(m
, "scsi_debug adapter driver, version %s [%s]\n",
4241 SDEBUG_VERSION
, sdebug_version_date
);
4242 seq_printf(m
, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4243 sdebug_num_tgts
, "shared (ram) ", sdebug_dev_size_mb
,
4244 sdebug_opts
, sdebug_every_nth
);
4245 seq_printf(m
, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4246 sdebug_jdelay
, sdebug_ndelay
, sdebug_max_luns
,
4247 sdebug_sector_size
, "bytes");
4248 seq_printf(m
, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4249 sdebug_cylinders_per
, sdebug_heads
, sdebug_sectors_per
,
4251 seq_printf(m
, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4252 num_dev_resets
, num_target_resets
, num_bus_resets
,
4254 seq_printf(m
, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4255 dix_reads
, dix_writes
, dif_errors
);
4256 seq_printf(m
, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
4257 TICK_NSEC
/ 1000, "statistics", sdebug_statistics
,
4259 seq_printf(m
, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4260 atomic_read(&sdebug_cmnd_count
),
4261 atomic_read(&sdebug_completions
),
4262 "miss_cpus", atomic_read(&sdebug_miss_cpus
),
4263 atomic_read(&sdebug_a_tsf
));
4265 seq_printf(m
, "submit_queues=%d\n", submit_queues
);
4266 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
; ++j
, ++sqp
) {
4267 seq_printf(m
, " queue %d:\n", j
);
4268 f
= find_first_bit(sqp
->in_use_bm
, sdebug_max_queue
);
4269 if (f
!= sdebug_max_queue
) {
4270 l
= find_last_bit(sqp
->in_use_bm
, sdebug_max_queue
);
4271 seq_printf(m
, " in_use_bm BUSY: %s: %d,%d\n",
4272 "first,last bits", f
, l
);
4278 static ssize_t
delay_show(struct device_driver
*ddp
, char *buf
)
4280 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_jdelay
);
4282 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4283 * of delay is jiffies.
4285 static ssize_t
delay_store(struct device_driver
*ddp
, const char *buf
,
4290 if (count
> 0 && sscanf(buf
, "%d", &jdelay
) == 1) {
4292 if (sdebug_jdelay
!= jdelay
) {
4294 struct sdebug_queue
*sqp
;
4296 block_unblock_all_queues(true);
4297 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
;
4299 k
= find_first_bit(sqp
->in_use_bm
,
4301 if (k
!= sdebug_max_queue
) {
4302 res
= -EBUSY
; /* queued commands */
4307 /* make sure sdebug_defer instances get
4308 * re-allocated for new delay variant */
4310 sdebug_jdelay
= jdelay
;
4313 block_unblock_all_queues(false);
4319 static DRIVER_ATTR_RW(delay
);
4321 static ssize_t
ndelay_show(struct device_driver
*ddp
, char *buf
)
4323 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_ndelay
);
4325 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4326 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4327 static ssize_t
ndelay_store(struct device_driver
*ddp
, const char *buf
,
4332 if ((count
> 0) && (1 == sscanf(buf
, "%d", &ndelay
)) &&
4333 (ndelay
>= 0) && (ndelay
< (1000 * 1000 * 1000))) {
4335 if (sdebug_ndelay
!= ndelay
) {
4337 struct sdebug_queue
*sqp
;
4339 block_unblock_all_queues(true);
4340 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
;
4342 k
= find_first_bit(sqp
->in_use_bm
,
4344 if (k
!= sdebug_max_queue
) {
4345 res
= -EBUSY
; /* queued commands */
4350 /* make sure sdebug_defer instances get
4351 * re-allocated for new delay variant */
4353 sdebug_ndelay
= ndelay
;
4354 sdebug_jdelay
= ndelay
? JDELAY_OVERRIDDEN
4357 block_unblock_all_queues(false);
4363 static DRIVER_ATTR_RW(ndelay
);
4365 static ssize_t
opts_show(struct device_driver
*ddp
, char *buf
)
4367 return scnprintf(buf
, PAGE_SIZE
, "0x%x\n", sdebug_opts
);
4370 static ssize_t
opts_store(struct device_driver
*ddp
, const char *buf
,
4376 if (1 == sscanf(buf
, "%10s", work
)) {
4377 if (0 == strncasecmp(work
,"0x", 2)) {
4378 if (1 == sscanf(&work
[2], "%x", &opts
))
4381 if (1 == sscanf(work
, "%d", &opts
))
4388 sdebug_verbose
= !!(SDEBUG_OPT_NOISE
& opts
);
4389 sdebug_any_injecting_opt
= !!(SDEBUG_OPT_ALL_INJECTING
& opts
);
4393 static DRIVER_ATTR_RW(opts
);
4395 static ssize_t
ptype_show(struct device_driver
*ddp
, char *buf
)
4397 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_ptype
);
4399 static ssize_t
ptype_store(struct device_driver
*ddp
, const char *buf
,
4404 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4410 static DRIVER_ATTR_RW(ptype
);
4412 static ssize_t
dsense_show(struct device_driver
*ddp
, char *buf
)
4414 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_dsense
);
4416 static ssize_t
dsense_store(struct device_driver
*ddp
, const char *buf
,
4421 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4427 static DRIVER_ATTR_RW(dsense
);
4429 static ssize_t
fake_rw_show(struct device_driver
*ddp
, char *buf
)
4431 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_fake_rw
);
4433 static ssize_t
fake_rw_store(struct device_driver
*ddp
, const char *buf
,
4438 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4440 sdebug_fake_rw
= (sdebug_fake_rw
> 0);
4441 if (sdebug_fake_rw
!= n
) {
4442 if ((0 == n
) && (NULL
== fake_storep
)) {
4444 (unsigned long)sdebug_dev_size_mb
*
4447 fake_storep
= vmalloc(sz
);
4448 if (NULL
== fake_storep
) {
4449 pr_err("out of memory, 9\n");
4452 memset(fake_storep
, 0, sz
);
4460 static DRIVER_ATTR_RW(fake_rw
);
4462 static ssize_t
no_lun_0_show(struct device_driver
*ddp
, char *buf
)
4464 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_no_lun_0
);
4466 static ssize_t
no_lun_0_store(struct device_driver
*ddp
, const char *buf
,
4471 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4472 sdebug_no_lun_0
= n
;
4477 static DRIVER_ATTR_RW(no_lun_0
);
4479 static ssize_t
num_tgts_show(struct device_driver
*ddp
, char *buf
)
4481 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_num_tgts
);
4483 static ssize_t
num_tgts_store(struct device_driver
*ddp
, const char *buf
,
4488 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4489 sdebug_num_tgts
= n
;
4490 sdebug_max_tgts_luns();
4495 static DRIVER_ATTR_RW(num_tgts
);
4497 static ssize_t
dev_size_mb_show(struct device_driver
*ddp
, char *buf
)
4499 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_dev_size_mb
);
4501 static DRIVER_ATTR_RO(dev_size_mb
);
4503 static ssize_t
num_parts_show(struct device_driver
*ddp
, char *buf
)
4505 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_num_parts
);
4507 static DRIVER_ATTR_RO(num_parts
);
4509 static ssize_t
every_nth_show(struct device_driver
*ddp
, char *buf
)
4511 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_every_nth
);
4513 static ssize_t
every_nth_store(struct device_driver
*ddp
, const char *buf
,
4518 if ((count
> 0) && (1 == sscanf(buf
, "%d", &nth
))) {
4519 sdebug_every_nth
= nth
;
4520 if (nth
&& !sdebug_statistics
) {
4521 pr_info("every_nth needs statistics=1, set it\n");
4522 sdebug_statistics
= true;
4529 static DRIVER_ATTR_RW(every_nth
);
4531 static ssize_t
max_luns_show(struct device_driver
*ddp
, char *buf
)
4533 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_max_luns
);
4535 static ssize_t
max_luns_store(struct device_driver
*ddp
, const char *buf
,
4541 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4543 pr_warn("max_luns can be no more than 256\n");
4546 changed
= (sdebug_max_luns
!= n
);
4547 sdebug_max_luns
= n
;
4548 sdebug_max_tgts_luns();
4549 if (changed
&& (sdebug_scsi_level
>= 5)) { /* >= SPC-3 */
4550 struct sdebug_host_info
*sdhp
;
4551 struct sdebug_dev_info
*dp
;
4553 spin_lock(&sdebug_host_list_lock
);
4554 list_for_each_entry(sdhp
, &sdebug_host_list
,
4556 list_for_each_entry(dp
, &sdhp
->dev_info_list
,
4558 set_bit(SDEBUG_UA_LUNS_CHANGED
,
4562 spin_unlock(&sdebug_host_list_lock
);
4568 static DRIVER_ATTR_RW(max_luns
);
4570 static ssize_t
max_queue_show(struct device_driver
*ddp
, char *buf
)
4572 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_max_queue
);
4574 /* N.B. max_queue can be changed while there are queued commands. In flight
4575 * commands beyond the new max_queue will be completed. */
4576 static ssize_t
max_queue_store(struct device_driver
*ddp
, const char *buf
,
4580 struct sdebug_queue
*sqp
;
4582 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
> 0) &&
4583 (n
<= SDEBUG_CANQUEUE
)) {
4584 block_unblock_all_queues(true);
4586 for (j
= 0, sqp
= sdebug_q_arr
; j
< submit_queues
;
4588 a
= find_last_bit(sqp
->in_use_bm
, SDEBUG_CANQUEUE
);
4592 sdebug_max_queue
= n
;
4593 if (k
== SDEBUG_CANQUEUE
)
4594 atomic_set(&retired_max_queue
, 0);
4596 atomic_set(&retired_max_queue
, k
+ 1);
4598 atomic_set(&retired_max_queue
, 0);
4599 block_unblock_all_queues(false);
4604 static DRIVER_ATTR_RW(max_queue
);
4606 static ssize_t
no_uld_show(struct device_driver
*ddp
, char *buf
)
4608 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_no_uld
);
4610 static DRIVER_ATTR_RO(no_uld
);
4612 static ssize_t
scsi_level_show(struct device_driver
*ddp
, char *buf
)
4614 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_scsi_level
);
4616 static DRIVER_ATTR_RO(scsi_level
);
4618 static ssize_t
virtual_gb_show(struct device_driver
*ddp
, char *buf
)
4620 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_virtual_gb
);
4622 static ssize_t
virtual_gb_store(struct device_driver
*ddp
, const char *buf
,
4628 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4629 changed
= (sdebug_virtual_gb
!= n
);
4630 sdebug_virtual_gb
= n
;
4631 sdebug_capacity
= get_sdebug_capacity();
4633 struct sdebug_host_info
*sdhp
;
4634 struct sdebug_dev_info
*dp
;
4636 spin_lock(&sdebug_host_list_lock
);
4637 list_for_each_entry(sdhp
, &sdebug_host_list
,
4639 list_for_each_entry(dp
, &sdhp
->dev_info_list
,
4641 set_bit(SDEBUG_UA_CAPACITY_CHANGED
,
4645 spin_unlock(&sdebug_host_list_lock
);
4651 static DRIVER_ATTR_RW(virtual_gb
);
4653 static ssize_t
add_host_show(struct device_driver
*ddp
, char *buf
)
4655 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_add_host
);
4658 static int sdebug_add_adapter(void);
4659 static void sdebug_remove_adapter(void);
4661 static ssize_t
add_host_store(struct device_driver
*ddp
, const char *buf
,
4666 if (sscanf(buf
, "%d", &delta_hosts
) != 1)
4668 if (delta_hosts
> 0) {
4670 sdebug_add_adapter();
4671 } while (--delta_hosts
);
4672 } else if (delta_hosts
< 0) {
4674 sdebug_remove_adapter();
4675 } while (++delta_hosts
);
4679 static DRIVER_ATTR_RW(add_host
);
4681 static ssize_t
vpd_use_hostno_show(struct device_driver
*ddp
, char *buf
)
4683 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_vpd_use_hostno
);
4685 static ssize_t
vpd_use_hostno_store(struct device_driver
*ddp
, const char *buf
,
4690 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4691 sdebug_vpd_use_hostno
= n
;
4696 static DRIVER_ATTR_RW(vpd_use_hostno
);
4698 static ssize_t
statistics_show(struct device_driver
*ddp
, char *buf
)
4700 return scnprintf(buf
, PAGE_SIZE
, "%d\n", (int)sdebug_statistics
);
4702 static ssize_t
statistics_store(struct device_driver
*ddp
, const char *buf
,
4707 if ((count
> 0) && (sscanf(buf
, "%d", &n
) == 1) && (n
>= 0)) {
4709 sdebug_statistics
= true;
4711 clear_queue_stats();
4712 sdebug_statistics
= false;
4718 static DRIVER_ATTR_RW(statistics
);
4720 static ssize_t
sector_size_show(struct device_driver
*ddp
, char *buf
)
4722 return scnprintf(buf
, PAGE_SIZE
, "%u\n", sdebug_sector_size
);
4724 static DRIVER_ATTR_RO(sector_size
);
4726 static ssize_t
submit_queues_show(struct device_driver
*ddp
, char *buf
)
4728 return scnprintf(buf
, PAGE_SIZE
, "%d\n", submit_queues
);
4730 static DRIVER_ATTR_RO(submit_queues
);
4732 static ssize_t
dix_show(struct device_driver
*ddp
, char *buf
)
4734 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_dix
);
4736 static DRIVER_ATTR_RO(dix
);
4738 static ssize_t
dif_show(struct device_driver
*ddp
, char *buf
)
4740 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_dif
);
4742 static DRIVER_ATTR_RO(dif
);
4744 static ssize_t
guard_show(struct device_driver
*ddp
, char *buf
)
4746 return scnprintf(buf
, PAGE_SIZE
, "%u\n", sdebug_guard
);
4748 static DRIVER_ATTR_RO(guard
);
4750 static ssize_t
ato_show(struct device_driver
*ddp
, char *buf
)
4752 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_ato
);
4754 static DRIVER_ATTR_RO(ato
);
4756 static ssize_t
map_show(struct device_driver
*ddp
, char *buf
)
4760 if (!scsi_debug_lbp())
4761 return scnprintf(buf
, PAGE_SIZE
, "0-%u\n",
4762 sdebug_store_sectors
);
4764 count
= scnprintf(buf
, PAGE_SIZE
- 1, "%*pbl",
4765 (int)map_size
, map_storep
);
4766 buf
[count
++] = '\n';
4771 static DRIVER_ATTR_RO(map
);
4773 static ssize_t
removable_show(struct device_driver
*ddp
, char *buf
)
4775 return scnprintf(buf
, PAGE_SIZE
, "%d\n", sdebug_removable
? 1 : 0);
4777 static ssize_t
removable_store(struct device_driver
*ddp
, const char *buf
,
4782 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4783 sdebug_removable
= (n
> 0);
4788 static DRIVER_ATTR_RW(removable
);
4790 static ssize_t
host_lock_show(struct device_driver
*ddp
, char *buf
)
4792 return scnprintf(buf
, PAGE_SIZE
, "%d\n", !!sdebug_host_lock
);
4794 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
4795 static ssize_t
host_lock_store(struct device_driver
*ddp
, const char *buf
,
4800 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4801 sdebug_host_lock
= (n
> 0);
4806 static DRIVER_ATTR_RW(host_lock
);
4808 static ssize_t
strict_show(struct device_driver
*ddp
, char *buf
)
4810 return scnprintf(buf
, PAGE_SIZE
, "%d\n", !!sdebug_strict
);
4812 static ssize_t
strict_store(struct device_driver
*ddp
, const char *buf
,
4817 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4818 sdebug_strict
= (n
> 0);
4823 static DRIVER_ATTR_RW(strict
);
4825 static ssize_t
uuid_ctl_show(struct device_driver
*ddp
, char *buf
)
4827 return scnprintf(buf
, PAGE_SIZE
, "%d\n", !!sdebug_uuid_ctl
);
4829 static DRIVER_ATTR_RO(uuid_ctl
);
4832 /* Note: The following array creates attribute files in the
4833 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4834 files (over those found in the /sys/module/scsi_debug/parameters
4835 directory) is that auxiliary actions can be triggered when an attribute
4836 is changed. For example see: sdebug_add_host_store() above.
4839 static struct attribute
*sdebug_drv_attrs
[] = {
4840 &driver_attr_delay
.attr
,
4841 &driver_attr_opts
.attr
,
4842 &driver_attr_ptype
.attr
,
4843 &driver_attr_dsense
.attr
,
4844 &driver_attr_fake_rw
.attr
,
4845 &driver_attr_no_lun_0
.attr
,
4846 &driver_attr_num_tgts
.attr
,
4847 &driver_attr_dev_size_mb
.attr
,
4848 &driver_attr_num_parts
.attr
,
4849 &driver_attr_every_nth
.attr
,
4850 &driver_attr_max_luns
.attr
,
4851 &driver_attr_max_queue
.attr
,
4852 &driver_attr_no_uld
.attr
,
4853 &driver_attr_scsi_level
.attr
,
4854 &driver_attr_virtual_gb
.attr
,
4855 &driver_attr_add_host
.attr
,
4856 &driver_attr_vpd_use_hostno
.attr
,
4857 &driver_attr_sector_size
.attr
,
4858 &driver_attr_statistics
.attr
,
4859 &driver_attr_submit_queues
.attr
,
4860 &driver_attr_dix
.attr
,
4861 &driver_attr_dif
.attr
,
4862 &driver_attr_guard
.attr
,
4863 &driver_attr_ato
.attr
,
4864 &driver_attr_map
.attr
,
4865 &driver_attr_removable
.attr
,
4866 &driver_attr_host_lock
.attr
,
4867 &driver_attr_ndelay
.attr
,
4868 &driver_attr_strict
.attr
,
4869 &driver_attr_uuid_ctl
.attr
,
4872 ATTRIBUTE_GROUPS(sdebug_drv
);
4874 static struct device
*pseudo_primary
;
4876 static int __init
scsi_debug_init(void)
4883 atomic_set(&retired_max_queue
, 0);
4885 if (sdebug_ndelay
>= 1000 * 1000 * 1000) {
4886 pr_warn("ndelay must be less than 1 second, ignored\n");
4888 } else if (sdebug_ndelay
> 0)
4889 sdebug_jdelay
= JDELAY_OVERRIDDEN
;
4891 switch (sdebug_sector_size
) {
4898 pr_err("invalid sector_size %d\n", sdebug_sector_size
);
4902 switch (sdebug_dif
) {
4904 case SD_DIF_TYPE0_PROTECTION
:
4906 case SD_DIF_TYPE1_PROTECTION
:
4907 case SD_DIF_TYPE2_PROTECTION
:
4908 case SD_DIF_TYPE3_PROTECTION
:
4909 have_dif_prot
= true;
4913 pr_err("dif must be 0, 1, 2 or 3\n");
4917 if (sdebug_guard
> 1) {
4918 pr_err("guard must be 0 or 1\n");
4922 if (sdebug_ato
> 1) {
4923 pr_err("ato must be 0 or 1\n");
4927 if (sdebug_physblk_exp
> 15) {
4928 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp
);
4931 if (sdebug_max_luns
> 256) {
4932 pr_warn("max_luns can be no more than 256, use default\n");
4933 sdebug_max_luns
= DEF_MAX_LUNS
;
4936 if (sdebug_lowest_aligned
> 0x3fff) {
4937 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned
);
4941 if (submit_queues
< 1) {
4942 pr_err("submit_queues must be 1 or more\n");
4945 sdebug_q_arr
= kcalloc(submit_queues
, sizeof(struct sdebug_queue
),
4947 if (sdebug_q_arr
== NULL
)
4949 for (k
= 0; k
< submit_queues
; ++k
)
4950 spin_lock_init(&sdebug_q_arr
[k
].qc_lock
);
4952 if (sdebug_dev_size_mb
< 1)
4953 sdebug_dev_size_mb
= 1; /* force minimum 1 MB ramdisk */
4954 sz
= (unsigned long)sdebug_dev_size_mb
* 1048576;
4955 sdebug_store_sectors
= sz
/ sdebug_sector_size
;
4956 sdebug_capacity
= get_sdebug_capacity();
4958 /* play around with geometry, don't waste too much on track 0 */
4960 sdebug_sectors_per
= 32;
4961 if (sdebug_dev_size_mb
>= 256)
4963 else if (sdebug_dev_size_mb
>= 16)
4965 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
4966 (sdebug_sectors_per
* sdebug_heads
);
4967 if (sdebug_cylinders_per
>= 1024) {
4968 /* other LLDs do this; implies >= 1GB ram disk ... */
4970 sdebug_sectors_per
= 63;
4971 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
4972 (sdebug_sectors_per
* sdebug_heads
);
4975 if (sdebug_fake_rw
== 0) {
4976 fake_storep
= vmalloc(sz
);
4977 if (NULL
== fake_storep
) {
4978 pr_err("out of memory, 1\n");
4982 memset(fake_storep
, 0, sz
);
4983 if (sdebug_num_parts
> 0)
4984 sdebug_build_parts(fake_storep
, sz
);
4990 dif_size
= sdebug_store_sectors
* sizeof(struct sd_dif_tuple
);
4991 dif_storep
= vmalloc(dif_size
);
4993 pr_err("dif_storep %u bytes @ %p\n", dif_size
, dif_storep
);
4995 if (dif_storep
== NULL
) {
4996 pr_err("out of mem. (DIX)\n");
5001 memset(dif_storep
, 0xff, dif_size
);
5004 /* Logical Block Provisioning */
5005 if (scsi_debug_lbp()) {
5006 sdebug_unmap_max_blocks
=
5007 clamp(sdebug_unmap_max_blocks
, 0U, 0xffffffffU
);
5009 sdebug_unmap_max_desc
=
5010 clamp(sdebug_unmap_max_desc
, 0U, 256U);
5012 sdebug_unmap_granularity
=
5013 clamp(sdebug_unmap_granularity
, 1U, 0xffffffffU
);
5015 if (sdebug_unmap_alignment
&&
5016 sdebug_unmap_granularity
<=
5017 sdebug_unmap_alignment
) {
5018 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5023 map_size
= lba_to_map_index(sdebug_store_sectors
- 1) + 1;
5024 map_storep
= vmalloc(BITS_TO_LONGS(map_size
) * sizeof(long));
5026 pr_info("%lu provisioning blocks\n", map_size
);
5028 if (map_storep
== NULL
) {
5029 pr_err("out of mem. (MAP)\n");
5034 bitmap_zero(map_storep
, map_size
);
5036 /* Map first 1KB for partition table */
5037 if (sdebug_num_parts
)
5041 pseudo_primary
= root_device_register("pseudo_0");
5042 if (IS_ERR(pseudo_primary
)) {
5043 pr_warn("root_device_register() error\n");
5044 ret
= PTR_ERR(pseudo_primary
);
5047 ret
= bus_register(&pseudo_lld_bus
);
5049 pr_warn("bus_register error: %d\n", ret
);
5052 ret
= driver_register(&sdebug_driverfs_driver
);
5054 pr_warn("driver_register error: %d\n", ret
);
5058 host_to_add
= sdebug_add_host
;
5059 sdebug_add_host
= 0;
5061 for (k
= 0; k
< host_to_add
; k
++) {
5062 if (sdebug_add_adapter()) {
5063 pr_err("sdebug_add_adapter failed k=%d\n", k
);
5069 pr_info("built %d host(s)\n", sdebug_add_host
);
5074 bus_unregister(&pseudo_lld_bus
);
5076 root_device_unregister(pseudo_primary
);
5082 kfree(sdebug_q_arr
);
5086 static void __exit
scsi_debug_exit(void)
5088 int k
= sdebug_add_host
;
5093 sdebug_remove_adapter();
5094 driver_unregister(&sdebug_driverfs_driver
);
5095 bus_unregister(&pseudo_lld_bus
);
5096 root_device_unregister(pseudo_primary
);
5100 kfree(sdebug_q_arr
);
5103 device_initcall(scsi_debug_init
);
5104 module_exit(scsi_debug_exit
);
5106 static void sdebug_release_adapter(struct device
* dev
)
5108 struct sdebug_host_info
*sdbg_host
;
5110 sdbg_host
= to_sdebug_host(dev
);
5114 static int sdebug_add_adapter(void)
5116 int k
, devs_per_host
;
5118 struct sdebug_host_info
*sdbg_host
;
5119 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
5121 sdbg_host
= kzalloc(sizeof(*sdbg_host
),GFP_KERNEL
);
5122 if (NULL
== sdbg_host
) {
5123 pr_err("out of memory at line %d\n", __LINE__
);
5127 INIT_LIST_HEAD(&sdbg_host
->dev_info_list
);
5129 devs_per_host
= sdebug_num_tgts
* sdebug_max_luns
;
5130 for (k
= 0; k
< devs_per_host
; k
++) {
5131 sdbg_devinfo
= sdebug_device_create(sdbg_host
, GFP_KERNEL
);
5132 if (!sdbg_devinfo
) {
5133 pr_err("out of memory at line %d\n", __LINE__
);
5139 spin_lock(&sdebug_host_list_lock
);
5140 list_add_tail(&sdbg_host
->host_list
, &sdebug_host_list
);
5141 spin_unlock(&sdebug_host_list_lock
);
5143 sdbg_host
->dev
.bus
= &pseudo_lld_bus
;
5144 sdbg_host
->dev
.parent
= pseudo_primary
;
5145 sdbg_host
->dev
.release
= &sdebug_release_adapter
;
5146 dev_set_name(&sdbg_host
->dev
, "adapter%d", sdebug_add_host
);
5148 error
= device_register(&sdbg_host
->dev
);
5157 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
5159 list_del(&sdbg_devinfo
->dev_list
);
5160 kfree(sdbg_devinfo
);
5167 static void sdebug_remove_adapter(void)
5169 struct sdebug_host_info
* sdbg_host
= NULL
;
5171 spin_lock(&sdebug_host_list_lock
);
5172 if (!list_empty(&sdebug_host_list
)) {
5173 sdbg_host
= list_entry(sdebug_host_list
.prev
,
5174 struct sdebug_host_info
, host_list
);
5175 list_del(&sdbg_host
->host_list
);
5177 spin_unlock(&sdebug_host_list_lock
);
5182 device_unregister(&sdbg_host
->dev
);
5186 static int sdebug_change_qdepth(struct scsi_device
*sdev
, int qdepth
)
5189 struct sdebug_dev_info
*devip
;
5191 block_unblock_all_queues(true);
5192 devip
= (struct sdebug_dev_info
*)sdev
->hostdata
;
5193 if (NULL
== devip
) {
5194 block_unblock_all_queues(false);
5197 num_in_q
= atomic_read(&devip
->num_in_q
);
5201 /* allow to exceed max host qc_arr elements for testing */
5202 if (qdepth
> SDEBUG_CANQUEUE
+ 10)
5203 qdepth
= SDEBUG_CANQUEUE
+ 10;
5204 scsi_change_queue_depth(sdev
, qdepth
);
5206 if (SDEBUG_OPT_Q_NOISE
& sdebug_opts
) {
5207 sdev_printk(KERN_INFO
, sdev
, "%s: qdepth=%d, num_in_q=%d\n",
5208 __func__
, qdepth
, num_in_q
);
5210 block_unblock_all_queues(false);
5211 return sdev
->queue_depth
;
5214 static bool fake_timeout(struct scsi_cmnd
*scp
)
5216 if (0 == (atomic_read(&sdebug_cmnd_count
) % abs(sdebug_every_nth
))) {
5217 if (sdebug_every_nth
< -1)
5218 sdebug_every_nth
= -1;
5219 if (SDEBUG_OPT_TIMEOUT
& sdebug_opts
)
5220 return true; /* ignore command causing timeout */
5221 else if (SDEBUG_OPT_MAC_TIMEOUT
& sdebug_opts
&&
5222 scsi_medium_access_command(scp
))
5223 return true; /* time out reads and writes */
5228 static int scsi_debug_queuecommand(struct Scsi_Host
*shost
,
5229 struct scsi_cmnd
*scp
)
5232 struct scsi_device
*sdp
= scp
->device
;
5233 const struct opcode_info_t
*oip
;
5234 const struct opcode_info_t
*r_oip
;
5235 struct sdebug_dev_info
*devip
;
5236 u8
*cmd
= scp
->cmnd
;
5237 int (*r_pfp
)(struct scsi_cmnd
*, struct sdebug_dev_info
*);
5245 scsi_set_resid(scp
, 0);
5246 if (sdebug_statistics
)
5247 atomic_inc(&sdebug_cmnd_count
);
5248 if (unlikely(sdebug_verbose
&&
5249 !(SDEBUG_OPT_NO_CDB_NOISE
& sdebug_opts
))) {
5254 sb
= (int)sizeof(b
);
5256 strcpy(b
, "too long, over 32 bytes");
5258 for (k
= 0, n
= 0; k
< len
&& n
< sb
; ++k
)
5259 n
+= scnprintf(b
+ n
, sb
- n
, "%02x ",
5262 if (sdebug_mq_active
)
5263 sdev_printk(KERN_INFO
, sdp
, "%s: tag=%u, cmd %s\n",
5264 my_name
, blk_mq_unique_tag(scp
->request
),
5267 sdev_printk(KERN_INFO
, sdp
, "%s: cmd %s\n", my_name
,
5270 has_wlun_rl
= (sdp
->lun
== SCSI_W_LUN_REPORT_LUNS
);
5271 if (unlikely((sdp
->lun
>= sdebug_max_luns
) && !has_wlun_rl
))
5274 sdeb_i
= opcode_ind_arr
[opcode
]; /* fully mapped */
5275 oip
= &opcode_info_arr
[sdeb_i
]; /* safe if table consistent */
5276 devip
= (struct sdebug_dev_info
*)sdp
->hostdata
;
5277 if (unlikely(!devip
)) {
5278 devip
= find_build_dev_info(sdp
);
5282 na
= oip
->num_attached
;
5284 if (na
) { /* multiple commands with this opcode */
5286 if (FF_SA
& r_oip
->flags
) {
5287 if (F_SA_LOW
& oip
->flags
)
5290 sa
= get_unaligned_be16(cmd
+ 8);
5291 for (k
= 0; k
<= na
; oip
= r_oip
->arrp
+ k
++) {
5292 if (opcode
== oip
->opcode
&& sa
== oip
->sa
)
5295 } else { /* since no service action only check opcode */
5296 for (k
= 0; k
<= na
; oip
= r_oip
->arrp
+ k
++) {
5297 if (opcode
== oip
->opcode
)
5302 if (F_SA_LOW
& r_oip
->flags
)
5303 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 4);
5304 else if (F_SA_HIGH
& r_oip
->flags
)
5305 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 8, 7);
5307 mk_sense_invalid_opcode(scp
);
5310 } /* else (when na==0) we assume the oip is a match */
5312 if (unlikely(F_INV_OP
& flags
)) {
5313 mk_sense_invalid_opcode(scp
);
5316 if (unlikely(has_wlun_rl
&& !(F_RL_WLUN_OK
& flags
))) {
5318 sdev_printk(KERN_INFO
, sdp
, "%s: Opcode 0x%x not%s\n",
5319 my_name
, opcode
, " supported for wlun");
5320 mk_sense_invalid_opcode(scp
);
5323 if (unlikely(sdebug_strict
)) { /* check cdb against mask */
5327 for (k
= 1; k
< oip
->len_mask
[0] && k
< 16; ++k
) {
5328 rem
= ~oip
->len_mask
[k
] & cmd
[k
];
5330 for (j
= 7; j
>= 0; --j
, rem
<<= 1) {
5334 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, k
, j
);
5339 if (unlikely(!(F_SKIP_UA
& flags
) &&
5340 find_first_bit(devip
->uas_bm
,
5341 SDEBUG_NUM_UAS
) != SDEBUG_NUM_UAS
)) {
5342 errsts
= make_ua(scp
, devip
);
5346 if (unlikely((F_M_ACCESS
& flags
) && atomic_read(&devip
->stopped
))) {
5347 mk_sense_buffer(scp
, NOT_READY
, LOGICAL_UNIT_NOT_READY
, 0x2);
5349 sdev_printk(KERN_INFO
, sdp
, "%s reports: Not ready: "
5350 "%s\n", my_name
, "initializing command "
5352 errsts
= check_condition_result
;
5355 if (sdebug_fake_rw
&& (F_FAKE_RW
& flags
))
5357 if (unlikely(sdebug_every_nth
)) {
5358 if (fake_timeout(scp
))
5359 return 0; /* ignore command: make trouble */
5361 if (likely(oip
->pfp
))
5362 errsts
= oip
->pfp(scp
, devip
); /* calls a resp_* function */
5363 else if (r_pfp
) /* if leaf function ptr NULL, try the root's */
5364 errsts
= r_pfp(scp
, devip
);
5367 return schedule_resp(scp
, devip
, errsts
,
5368 ((F_DELAY_OVERR
& flags
) ? 0 : sdebug_jdelay
));
5370 return schedule_resp(scp
, devip
, check_condition_result
, 0);
5372 return schedule_resp(scp
, NULL
, DID_NO_CONNECT
<< 16, 0);
5375 static struct scsi_host_template sdebug_driver_template
= {
5376 .show_info
= scsi_debug_show_info
,
5377 .write_info
= scsi_debug_write_info
,
5378 .proc_name
= sdebug_proc_name
,
5379 .name
= "SCSI DEBUG",
5380 .info
= scsi_debug_info
,
5381 .slave_alloc
= scsi_debug_slave_alloc
,
5382 .slave_configure
= scsi_debug_slave_configure
,
5383 .slave_destroy
= scsi_debug_slave_destroy
,
5384 .ioctl
= scsi_debug_ioctl
,
5385 .queuecommand
= scsi_debug_queuecommand
,
5386 .change_queue_depth
= sdebug_change_qdepth
,
5387 .eh_abort_handler
= scsi_debug_abort
,
5388 .eh_device_reset_handler
= scsi_debug_device_reset
,
5389 .eh_target_reset_handler
= scsi_debug_target_reset
,
5390 .eh_bus_reset_handler
= scsi_debug_bus_reset
,
5391 .eh_host_reset_handler
= scsi_debug_host_reset
,
5392 .can_queue
= SDEBUG_CANQUEUE
,
5394 .sg_tablesize
= SG_MAX_SEGMENTS
,
5395 .cmd_per_lun
= DEF_CMD_PER_LUN
,
5397 .use_clustering
= DISABLE_CLUSTERING
,
5398 .module
= THIS_MODULE
,
5399 .track_queue_depth
= 1,
5402 static int sdebug_driver_probe(struct device
* dev
)
5405 struct sdebug_host_info
*sdbg_host
;
5406 struct Scsi_Host
*hpnt
;
5409 sdbg_host
= to_sdebug_host(dev
);
5411 sdebug_driver_template
.can_queue
= sdebug_max_queue
;
5412 if (sdebug_clustering
)
5413 sdebug_driver_template
.use_clustering
= ENABLE_CLUSTERING
;
5414 hpnt
= scsi_host_alloc(&sdebug_driver_template
, sizeof(sdbg_host
));
5416 pr_err("scsi_host_alloc failed\n");
5420 if (submit_queues
> nr_cpu_ids
) {
5421 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
5422 my_name
, submit_queues
, nr_cpu_ids
);
5423 submit_queues
= nr_cpu_ids
;
5425 /* Decide whether to tell scsi subsystem that we want mq */
5426 /* Following should give the same answer for each host */
5427 sdebug_mq_active
= shost_use_blk_mq(hpnt
) && (submit_queues
> 1);
5428 if (sdebug_mq_active
)
5429 hpnt
->nr_hw_queues
= submit_queues
;
5431 sdbg_host
->shost
= hpnt
;
5432 *((struct sdebug_host_info
**)hpnt
->hostdata
) = sdbg_host
;
5433 if ((hpnt
->this_id
>= 0) && (sdebug_num_tgts
> hpnt
->this_id
))
5434 hpnt
->max_id
= sdebug_num_tgts
+ 1;
5436 hpnt
->max_id
= sdebug_num_tgts
;
5437 /* = sdebug_max_luns; */
5438 hpnt
->max_lun
= SCSI_W_LUN_REPORT_LUNS
+ 1;
5442 switch (sdebug_dif
) {
5444 case SD_DIF_TYPE1_PROTECTION
:
5445 hprot
= SHOST_DIF_TYPE1_PROTECTION
;
5447 hprot
|= SHOST_DIX_TYPE1_PROTECTION
;
5450 case SD_DIF_TYPE2_PROTECTION
:
5451 hprot
= SHOST_DIF_TYPE2_PROTECTION
;
5453 hprot
|= SHOST_DIX_TYPE2_PROTECTION
;
5456 case SD_DIF_TYPE3_PROTECTION
:
5457 hprot
= SHOST_DIF_TYPE3_PROTECTION
;
5459 hprot
|= SHOST_DIX_TYPE3_PROTECTION
;
5464 hprot
|= SHOST_DIX_TYPE0_PROTECTION
;
5468 scsi_host_set_prot(hpnt
, hprot
);
5470 if (have_dif_prot
|| sdebug_dix
)
5471 pr_info("host protection%s%s%s%s%s%s%s\n",
5472 (hprot
& SHOST_DIF_TYPE1_PROTECTION
) ? " DIF1" : "",
5473 (hprot
& SHOST_DIF_TYPE2_PROTECTION
) ? " DIF2" : "",
5474 (hprot
& SHOST_DIF_TYPE3_PROTECTION
) ? " DIF3" : "",
5475 (hprot
& SHOST_DIX_TYPE0_PROTECTION
) ? " DIX0" : "",
5476 (hprot
& SHOST_DIX_TYPE1_PROTECTION
) ? " DIX1" : "",
5477 (hprot
& SHOST_DIX_TYPE2_PROTECTION
) ? " DIX2" : "",
5478 (hprot
& SHOST_DIX_TYPE3_PROTECTION
) ? " DIX3" : "");
5480 if (sdebug_guard
== 1)
5481 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_IP
);
5483 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_CRC
);
5485 sdebug_verbose
= !!(SDEBUG_OPT_NOISE
& sdebug_opts
);
5486 sdebug_any_injecting_opt
= !!(SDEBUG_OPT_ALL_INJECTING
& sdebug_opts
);
5487 if (sdebug_every_nth
) /* need stats counters for every_nth */
5488 sdebug_statistics
= true;
5489 error
= scsi_add_host(hpnt
, &sdbg_host
->dev
);
5491 pr_err("scsi_add_host failed\n");
5493 scsi_host_put(hpnt
);
5495 scsi_scan_host(hpnt
);
5500 static int sdebug_driver_remove(struct device
* dev
)
5502 struct sdebug_host_info
*sdbg_host
;
5503 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
5505 sdbg_host
= to_sdebug_host(dev
);
5508 pr_err("Unable to locate host info\n");
5512 scsi_remove_host(sdbg_host
->shost
);
5514 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
5516 list_del(&sdbg_devinfo
->dev_list
);
5517 kfree(sdbg_devinfo
);
5520 scsi_host_put(sdbg_host
->shost
);
5524 static int pseudo_lld_bus_match(struct device
*dev
,
5525 struct device_driver
*dev_driver
)
5530 static struct bus_type pseudo_lld_bus
= {
5532 .match
= pseudo_lld_bus_match
,
5533 .probe
= sdebug_driver_probe
,
5534 .remove
= sdebug_driver_remove
,
5535 .drv_groups
= sdebug_drv_groups
,