2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disk
10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
29 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
31 #include <linux/module.h>
33 #include <linux/kernel.h>
34 #include <linux/errno.h>
35 #include <linux/timer.h>
36 #include <linux/slab.h>
37 #include <linux/types.h>
38 #include <linux/string.h>
39 #include <linux/genhd.h>
41 #include <linux/init.h>
42 #include <linux/proc_fs.h>
43 #include <linux/vmalloc.h>
44 #include <linux/moduleparam.h>
45 #include <linux/scatterlist.h>
46 #include <linux/blkdev.h>
47 #include <linux/crc-t10dif.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/atomic.h>
51 #include <linux/hrtimer.h>
53 #include <net/checksum.h>
55 #include <asm/unaligned.h>
57 #include <scsi/scsi.h>
58 #include <scsi/scsi_cmnd.h>
59 #include <scsi/scsi_device.h>
60 #include <scsi/scsi_host.h>
61 #include <scsi/scsicam.h>
62 #include <scsi/scsi_eh.h>
63 #include <scsi/scsi_tcq.h>
64 #include <scsi/scsi_dbg.h>
67 #include "scsi_logging.h"
#define SCSI_DEBUG_VERSION "1.85"
/* Date of last functional change to this driver (YYYYMMDD). */
static const char *scsi_debug_version_date = "20141022";
72 #define MY_NAME "scsi_debug"
74 /* Additional Sense Code (ASC) */
75 #define NO_ADDITIONAL_SENSE 0x0
76 #define LOGICAL_UNIT_NOT_READY 0x4
77 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
78 #define UNRECOVERED_READ_ERR 0x11
79 #define PARAMETER_LIST_LENGTH_ERR 0x1a
80 #define INVALID_OPCODE 0x20
81 #define LBA_OUT_OF_RANGE 0x21
82 #define INVALID_FIELD_IN_CDB 0x24
83 #define INVALID_FIELD_IN_PARAM_LIST 0x26
84 #define UA_RESET_ASC 0x29
85 #define UA_CHANGED_ASC 0x2a
86 #define TARGET_CHANGED_ASC 0x3f
87 #define LUNS_CHANGED_ASCQ 0x0e
88 #define INSUFF_RES_ASC 0x55
89 #define INSUFF_RES_ASCQ 0x3
90 #define POWER_ON_RESET_ASCQ 0x0
91 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
92 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
93 #define CAPACITY_CHANGED_ASCQ 0x9
94 #define SAVING_PARAMS_UNSUP 0x39
95 #define TRANSPORT_PROBLEM 0x4b
96 #define THRESHOLD_EXCEEDED 0x5d
97 #define LOW_POWER_COND_ON 0x5e
98 #define MISCOMPARE_VERIFY_ASC 0x1d
99 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
100 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
102 /* Additional Sense Code Qualifier (ASCQ) */
103 #define ACK_NAK_TO 0x3
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST 1
108 #define DEF_NUM_TGTS 1
109 #define DEF_MAX_LUNS 1
110 /* With these defaults, this driver will make 1 host with 1 target
111 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
114 #define DEF_DELAY 1 /* if > 0 unit is a jiffy */
115 #define DEF_DEV_SIZE_MB 8
118 #define DEF_D_SENSE 0
119 #define DEF_EVERY_NTH 0
120 #define DEF_FAKE_RW 0
122 #define DEF_HOST_LOCK 0
125 #define DEF_LBPWS10 0
127 #define DEF_LOWEST_ALIGNED 0
128 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
129 #define DEF_NO_LUN_0 0
130 #define DEF_NUM_PARTS 0
132 #define DEF_OPT_BLKS 64
133 #define DEF_PHYSBLK_EXP 0
135 #define DEF_REMOVABLE false
136 #define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
137 #define DEF_SECTOR_SIZE 512
138 #define DEF_UNMAP_ALIGNMENT 0
139 #define DEF_UNMAP_GRANULARITY 1
140 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
141 #define DEF_UNMAP_MAX_DESC 256
142 #define DEF_VIRTUAL_GB 0
143 #define DEF_VPD_USE_HOSTNO 1
144 #define DEF_WRITESAME_LENGTH 0xFFFF
146 #define DELAY_OVERRIDDEN -9999
148 /* bit mask values for scsi_debug_opts */
149 #define SCSI_DEBUG_OPT_NOISE 1
150 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
151 #define SCSI_DEBUG_OPT_TIMEOUT 4
152 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
153 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
154 #define SCSI_DEBUG_OPT_DIF_ERR 32
155 #define SCSI_DEBUG_OPT_DIX_ERR 64
156 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
157 #define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100
158 #define SCSI_DEBUG_OPT_Q_NOISE 0x200
159 #define SCSI_DEBUG_OPT_ALL_TSF 0x400
160 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
161 #define SCSI_DEBUG_OPT_N_WCE 0x1000
162 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
163 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
164 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
165 /* When "every_nth" > 0 then modulo "every_nth" commands:
166 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
167 * - a RECOVERED_ERROR is simulated on successful read and write
168 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
169 * - a TRANSPORT_ERROR is simulated on successful read and write
170 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
172 * When "every_nth" < 0 then after "- every_nth" commands:
173 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
174 * - a RECOVERED_ERROR is simulated on successful read and write
175 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
176 * - a TRANSPORT_ERROR is simulated on successful read and write
177 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
178 * This will continue until some other action occurs (e.g. the user
179 * writing a new value (other than -1 or 1) to every_nth via sysfs).
182 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in
183 * priority order. In the subset implemented here lower numbers have higher
184 * priority. The UA numbers should be a sequence starting from 0 with
185 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
186 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
187 #define SDEBUG_UA_BUS_RESET 1
188 #define SDEBUG_UA_MODE_CHANGED 2
189 #define SDEBUG_UA_CAPACITY_CHANGED 3
190 #define SDEBUG_UA_LUNS_CHANGED 4
191 #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
192 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
193 #define SDEBUG_NUM_UAS 7
195 /* for check_readiness() */
196 #define UAS_ONLY 1 /* check for UAs only */
197 #define UAS_TUR 0 /* if no UAs then check if media access possible */
199 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
200 * sector on read commands: */
201 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
202 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
204 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
205 * or "peripheral device" addressing (value 0) */
206 #define SAM2_LUN_ADDRESS_METHOD 0
208 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
209 * (for response) at one time. Can be reduced by max_queue option. Command
210 * responses are not queued when delay=0 and ndelay=0. The per-device
211 * DEF_CMD_PER_LUN can be changed via sysfs:
212 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
213 * SCSI_DEBUG_CANQUEUE. */
214 #define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */
215 #define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
216 #define DEF_CMD_PER_LUN 255
/* Warn at build time if the default queue depth exceeds what the
 * queued-command bitmap (SCSI_DEBUG_CANQUEUE) can track. */
#if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
#endif
222 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
223 enum sdeb_opcode_index
{
224 SDEB_I_INVALID_OPCODE
= 0,
226 SDEB_I_REPORT_LUNS
= 2,
227 SDEB_I_REQUEST_SENSE
= 3,
228 SDEB_I_TEST_UNIT_READY
= 4,
229 SDEB_I_MODE_SENSE
= 5, /* 6, 10 */
230 SDEB_I_MODE_SELECT
= 6, /* 6, 10 */
231 SDEB_I_LOG_SENSE
= 7,
232 SDEB_I_READ_CAPACITY
= 8, /* 10; 16 is in SA_IN(16) */
233 SDEB_I_READ
= 9, /* 6, 10, 12, 16 */
234 SDEB_I_WRITE
= 10, /* 6, 10, 12, 16 */
235 SDEB_I_START_STOP
= 11,
236 SDEB_I_SERV_ACT_IN
= 12, /* 12, 16 */
237 SDEB_I_SERV_ACT_OUT
= 13, /* 12, 16 */
238 SDEB_I_MAINT_IN
= 14,
239 SDEB_I_MAINT_OUT
= 15,
240 SDEB_I_VERIFY
= 16, /* 10 only */
241 SDEB_I_VARIABLE_LEN
= 17,
242 SDEB_I_RESERVE
= 18, /* 6, 10 */
243 SDEB_I_RELEASE
= 19, /* 6, 10 */
244 SDEB_I_ALLOW_REMOVAL
= 20, /* PREVENT ALLOW MEDIUM REMOVAL */
245 SDEB_I_REZERO_UNIT
= 21, /* REWIND in SSC */
246 SDEB_I_ATA_PT
= 22, /* 12, 16 */
247 SDEB_I_SEND_DIAG
= 23,
249 SDEB_I_XDWRITEREAD
= 25, /* 10 only */
250 SDEB_I_WRITE_BUFFER
= 26,
251 SDEB_I_WRITE_SAME
= 27, /* 10, 16 */
252 SDEB_I_SYNC_CACHE
= 28, /* 10 only */
253 SDEB_I_COMP_WRITE
= 29,
254 SDEB_I_LAST_ELEMENT
= 30, /* keep this last */
257 static const unsigned char opcode_ind_arr
[256] = {
258 /* 0x0; 0x0->0x1f: 6 byte cdbs */
259 SDEB_I_TEST_UNIT_READY
, SDEB_I_REZERO_UNIT
, 0, SDEB_I_REQUEST_SENSE
,
261 SDEB_I_READ
, 0, SDEB_I_WRITE
, 0, 0, 0, 0, 0,
262 0, 0, SDEB_I_INQUIRY
, 0, 0, SDEB_I_MODE_SELECT
, SDEB_I_RESERVE
,
264 0, 0, SDEB_I_MODE_SENSE
, SDEB_I_START_STOP
, 0, SDEB_I_SEND_DIAG
,
265 SDEB_I_ALLOW_REMOVAL
, 0,
266 /* 0x20; 0x20->0x3f: 10 byte cdbs */
267 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY
, 0, 0,
268 SDEB_I_READ
, 0, SDEB_I_WRITE
, 0, 0, 0, 0, SDEB_I_VERIFY
,
269 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE
, 0, 0,
270 0, 0, 0, SDEB_I_WRITE_BUFFER
, 0, 0, 0, 0,
271 /* 0x40; 0x40->0x5f: 10 byte cdbs */
272 0, SDEB_I_WRITE_SAME
, SDEB_I_UNMAP
, 0, 0, 0, 0, 0,
273 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE
, 0, 0,
274 0, 0, 0, SDEB_I_XDWRITEREAD
, 0, SDEB_I_MODE_SELECT
, SDEB_I_RESERVE
,
276 0, 0, SDEB_I_MODE_SENSE
, 0, 0, 0, 0, 0,
277 /* 0x60; 0x60->0x7d are reserved */
278 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
279 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
280 0, SDEB_I_VARIABLE_LEN
,
281 /* 0x80; 0x80->0x9f: 16 byte cdbs */
282 0, 0, 0, 0, 0, SDEB_I_ATA_PT
, 0, 0,
283 SDEB_I_READ
, SDEB_I_COMP_WRITE
, SDEB_I_WRITE
, 0, 0, 0, 0, 0,
284 0, 0, 0, SDEB_I_WRITE_SAME
, 0, 0, 0, 0,
285 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN
, SDEB_I_SERV_ACT_OUT
,
286 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
287 SDEB_I_REPORT_LUNS
, SDEB_I_ATA_PT
, 0, SDEB_I_MAINT_IN
,
288 SDEB_I_MAINT_OUT
, 0, 0, 0,
289 SDEB_I_READ
, SDEB_I_SERV_ACT_OUT
, SDEB_I_WRITE
, SDEB_I_SERV_ACT_IN
,
291 0, 0, 0, 0, 0, 0, 0, 0,
292 0, 0, 0, 0, 0, 0, 0, 0,
293 /* 0xc0; 0xc0->0xff: vendor specific */
294 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
295 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
296 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
297 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
302 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
304 #define F_RL_WLUN_OK 0x10
305 #define F_SKIP_UA 0x20
306 #define F_DELAY_OVERR 0x40
307 #define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
308 #define F_SA_HIGH 0x100 /* as used by variable length cdbs */
309 #define F_INV_OP 0x200
310 #define F_FAKE_RW 0x400
311 #define F_M_ACCESS 0x800 /* media access */
313 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
314 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
315 #define FF_SA (F_SA_HIGH | F_SA_LOW)
317 struct sdebug_dev_info
;
318 static int resp_inquiry(struct scsi_cmnd
*, struct sdebug_dev_info
*);
319 static int resp_report_luns(struct scsi_cmnd
*, struct sdebug_dev_info
*);
320 static int resp_requests(struct scsi_cmnd
*, struct sdebug_dev_info
*);
321 static int resp_mode_sense(struct scsi_cmnd
*, struct sdebug_dev_info
*);
322 static int resp_mode_select(struct scsi_cmnd
*, struct sdebug_dev_info
*);
323 static int resp_log_sense(struct scsi_cmnd
*, struct sdebug_dev_info
*);
324 static int resp_readcap(struct scsi_cmnd
*, struct sdebug_dev_info
*);
325 static int resp_read_dt0(struct scsi_cmnd
*, struct sdebug_dev_info
*);
326 static int resp_write_dt0(struct scsi_cmnd
*, struct sdebug_dev_info
*);
327 static int resp_start_stop(struct scsi_cmnd
*, struct sdebug_dev_info
*);
328 static int resp_readcap16(struct scsi_cmnd
*, struct sdebug_dev_info
*);
329 static int resp_get_lba_status(struct scsi_cmnd
*, struct sdebug_dev_info
*);
330 static int resp_report_tgtpgs(struct scsi_cmnd
*, struct sdebug_dev_info
*);
331 static int resp_unmap(struct scsi_cmnd
*, struct sdebug_dev_info
*);
332 static int resp_rsup_opcodes(struct scsi_cmnd
*, struct sdebug_dev_info
*);
333 static int resp_rsup_tmfs(struct scsi_cmnd
*, struct sdebug_dev_info
*);
334 static int resp_write_same_10(struct scsi_cmnd
*, struct sdebug_dev_info
*);
335 static int resp_write_same_16(struct scsi_cmnd
*, struct sdebug_dev_info
*);
336 static int resp_xdwriteread_10(struct scsi_cmnd
*, struct sdebug_dev_info
*);
337 static int resp_comp_write(struct scsi_cmnd
*, struct sdebug_dev_info
*);
338 static int resp_write_buffer(struct scsi_cmnd
*, struct sdebug_dev_info
*);
340 struct opcode_info_t
{
341 u8 num_attached
; /* 0 if this is it (i.e. a leaf); use 0xff
342 * for terminating element */
343 u8 opcode
; /* if num_attached > 0, preferred */
344 u16 sa
; /* service action */
345 u32 flags
; /* OR-ed set of SDEB_F_* */
346 int (*pfp
)(struct scsi_cmnd
*, struct sdebug_dev_info
*);
347 const struct opcode_info_t
*arrp
; /* num_attached elements or NULL */
348 u8 len_mask
[16]; /* len=len_mask[0], then mask for cdb[1]... */
349 /* ignore cdb bytes after position 15 */
352 static const struct opcode_info_t msense_iarr
[1] = {
353 {0, 0x1a, 0, F_D_IN
, NULL
, NULL
,
354 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
357 static const struct opcode_info_t mselect_iarr
[1] = {
358 {0, 0x15, 0, F_D_OUT
, NULL
, NULL
,
359 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
362 static const struct opcode_info_t read_iarr
[3] = {
363 {0, 0x28, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, NULL
,/* READ(10) */
364 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
366 {0, 0x8, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, NULL
, /* READ(6) */
367 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
368 {0, 0xa8, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, NULL
,/* READ(12) */
369 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
373 static const struct opcode_info_t write_iarr
[3] = {
374 {0, 0x2a, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, NULL
, /* 10 */
375 {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
377 {0, 0xa, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, NULL
, /* 6 */
378 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
379 {0, 0xaa, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, NULL
, /* 12 */
380 {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
384 static const struct opcode_info_t sa_in_iarr
[1] = {
385 {0, 0x9e, 0x12, F_SA_LOW
| F_D_IN
, resp_get_lba_status
, NULL
,
386 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
387 0xff, 0xff, 0xff, 0, 0xc7} },
390 static const struct opcode_info_t vl_iarr
[1] = { /* VARIABLE LENGTH */
391 {0, 0x7f, 0xb, F_SA_HIGH
| F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
,
392 NULL
, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
393 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
396 static const struct opcode_info_t maint_in_iarr
[2] = {
397 {0, 0xa3, 0xc, F_SA_LOW
| F_D_IN
, resp_rsup_opcodes
, NULL
,
398 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
400 {0, 0xa3, 0xd, F_SA_LOW
| F_D_IN
, resp_rsup_tmfs
, NULL
,
401 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
405 static const struct opcode_info_t write_same_iarr
[1] = {
406 {0, 0x93, 0, F_D_OUT_MAYBE
| FF_DIRECT_IO
, resp_write_same_16
, NULL
,
407 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
408 0xff, 0xff, 0xff, 0x1f, 0xc7} },
411 static const struct opcode_info_t reserve_iarr
[1] = {
412 {0, 0x16, 0, F_D_OUT
, NULL
, NULL
, /* RESERVE(6) */
413 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
416 static const struct opcode_info_t release_iarr
[1] = {
417 {0, 0x17, 0, F_D_OUT
, NULL
, NULL
, /* RELEASE(6) */
418 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
422 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
423 * plus the terminating elements for logic that scans this table such as
424 * REPORT SUPPORTED OPERATION CODES. */
425 static const struct opcode_info_t opcode_info_arr
[SDEB_I_LAST_ELEMENT
+ 1] = {
427 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
,
428 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
429 {0, 0x12, 0, FF_RESPOND
| F_D_IN
, resp_inquiry
, NULL
,
430 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
431 {0, 0xa0, 0, FF_RESPOND
| F_D_IN
, resp_report_luns
, NULL
,
432 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
434 {0, 0x3, 0, FF_RESPOND
| F_D_IN
, resp_requests
, NULL
,
435 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
436 {0, 0x0, 0, F_M_ACCESS
| F_RL_WLUN_OK
, NULL
, NULL
,/* TEST UNIT READY */
437 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
438 {1, 0x5a, 0, F_D_IN
, resp_mode_sense
, msense_iarr
,
439 {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
441 {1, 0x55, 0, F_D_OUT
, resp_mode_select
, mselect_iarr
,
442 {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
443 {0, 0x4d, 0, F_D_IN
, resp_log_sense
, NULL
,
444 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
446 {0, 0x25, 0, F_D_IN
, resp_readcap
, NULL
,
447 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
449 {3, 0x88, 0, F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
, read_iarr
,
450 {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
451 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
453 {3, 0x8a, 0, F_D_OUT
| FF_DIRECT_IO
, resp_write_dt0
, write_iarr
,
454 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
455 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
456 {0, 0x1b, 0, 0, resp_start_stop
, NULL
, /* START STOP UNIT */
457 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
458 {1, 0x9e, 0x10, F_SA_LOW
| F_D_IN
, resp_readcap16
, sa_in_iarr
,
459 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
460 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
461 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* SA OUT */
462 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
463 {2, 0xa3, 0xa, F_SA_LOW
| F_D_IN
, resp_report_tgtpgs
, maint_in_iarr
,
464 {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
466 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* MAINT OUT */
467 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
468 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* VERIFY */
469 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
470 {1, 0x7f, 0x9, F_SA_HIGH
| F_D_IN
| FF_DIRECT_IO
, resp_read_dt0
,
471 vl_iarr
, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
472 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
473 {1, 0x56, 0, F_D_OUT
, NULL
, reserve_iarr
, /* RESERVE(10) */
474 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
476 {1, 0x57, 0, F_D_OUT
, NULL
, release_iarr
, /* RELEASE(10) */
477 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
480 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* ALLOW REMOVAL */
481 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
482 {0, 0x1, 0, 0, resp_start_stop
, NULL
, /* REWIND ?? */
483 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484 {0, 0, 0, F_INV_OP
| FF_RESPOND
, NULL
, NULL
, /* ATA_PT */
485 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486 {0, 0x1d, F_D_OUT
, 0, NULL
, NULL
, /* SEND DIAGNOSTIC */
487 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
488 {0, 0x42, 0, F_D_OUT
| FF_DIRECT_IO
, resp_unmap
, NULL
, /* UNMAP */
489 {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
490 {0, 0x53, 0, F_D_IN
| F_D_OUT
| FF_DIRECT_IO
, resp_xdwriteread_10
,
491 NULL
, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
493 {0, 0x3b, 0, F_D_OUT_MAYBE
, resp_write_buffer
, NULL
,
494 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
495 0, 0, 0, 0} }, /* WRITE_BUFFER */
496 {1, 0x41, 0, F_D_OUT_MAYBE
| FF_DIRECT_IO
, resp_write_same_10
,
497 write_same_iarr
, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
498 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
499 {0, 0x35, 0, F_DELAY_OVERR
| FF_DIRECT_IO
, NULL
, NULL
, /* SYNC_CACHE */
500 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
502 {0, 0x89, 0, F_D_OUT
| FF_DIRECT_IO
, resp_comp_write
, NULL
,
503 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
504 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
507 {0xff, 0, 0, 0, NULL
, NULL
, /* terminating element */
508 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
511 struct sdebug_scmd_extra_t
{
519 static int scsi_debug_add_host
= DEF_NUM_HOST
;
520 static int scsi_debug_ato
= DEF_ATO
;
521 static int scsi_debug_delay
= DEF_DELAY
;
522 static int scsi_debug_dev_size_mb
= DEF_DEV_SIZE_MB
;
523 static int scsi_debug_dif
= DEF_DIF
;
524 static int scsi_debug_dix
= DEF_DIX
;
525 static int scsi_debug_dsense
= DEF_D_SENSE
;
526 static int scsi_debug_every_nth
= DEF_EVERY_NTH
;
527 static int scsi_debug_fake_rw
= DEF_FAKE_RW
;
528 static unsigned int scsi_debug_guard
= DEF_GUARD
;
529 static int scsi_debug_lowest_aligned
= DEF_LOWEST_ALIGNED
;
530 static int scsi_debug_max_luns
= DEF_MAX_LUNS
;
531 static int scsi_debug_max_queue
= SCSI_DEBUG_CANQUEUE
;
532 static atomic_t retired_max_queue
; /* if > 0 then was prior max_queue */
533 static int scsi_debug_ndelay
= DEF_NDELAY
;
534 static int scsi_debug_no_lun_0
= DEF_NO_LUN_0
;
535 static int scsi_debug_no_uld
= 0;
536 static int scsi_debug_num_parts
= DEF_NUM_PARTS
;
537 static int scsi_debug_num_tgts
= DEF_NUM_TGTS
; /* targets per host */
538 static int scsi_debug_opt_blks
= DEF_OPT_BLKS
;
539 static int scsi_debug_opts
= DEF_OPTS
;
540 static int scsi_debug_physblk_exp
= DEF_PHYSBLK_EXP
;
541 static int scsi_debug_ptype
= DEF_PTYPE
; /* SCSI peripheral type (0==disk) */
542 static int scsi_debug_scsi_level
= DEF_SCSI_LEVEL
;
543 static int scsi_debug_sector_size
= DEF_SECTOR_SIZE
;
544 static int scsi_debug_virtual_gb
= DEF_VIRTUAL_GB
;
545 static int scsi_debug_vpd_use_hostno
= DEF_VPD_USE_HOSTNO
;
546 static unsigned int scsi_debug_lbpu
= DEF_LBPU
;
547 static unsigned int scsi_debug_lbpws
= DEF_LBPWS
;
548 static unsigned int scsi_debug_lbpws10
= DEF_LBPWS10
;
549 static unsigned int scsi_debug_lbprz
= DEF_LBPRZ
;
550 static unsigned int scsi_debug_unmap_alignment
= DEF_UNMAP_ALIGNMENT
;
551 static unsigned int scsi_debug_unmap_granularity
= DEF_UNMAP_GRANULARITY
;
552 static unsigned int scsi_debug_unmap_max_blocks
= DEF_UNMAP_MAX_BLOCKS
;
553 static unsigned int scsi_debug_unmap_max_desc
= DEF_UNMAP_MAX_DESC
;
554 static unsigned int scsi_debug_write_same_length
= DEF_WRITESAME_LENGTH
;
555 static bool scsi_debug_removable
= DEF_REMOVABLE
;
556 static bool scsi_debug_clustering
;
557 static bool scsi_debug_host_lock
= DEF_HOST_LOCK
;
558 static bool scsi_debug_strict
= DEF_STRICT
;
559 static bool sdebug_any_injecting_opt
;
561 static atomic_t sdebug_cmnd_count
;
562 static atomic_t sdebug_completions
;
563 static atomic_t sdebug_a_tsf
; /* counter of 'almost' TSFs */
565 #define DEV_READONLY(TGT) (0)
567 static unsigned int sdebug_store_sectors
;
568 static sector_t sdebug_capacity
; /* in sectors */
570 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
571 may still need them */
572 static int sdebug_heads
; /* heads per disk */
573 static int sdebug_cylinders_per
; /* cylinders per surface */
574 static int sdebug_sectors_per
; /* sectors per cylinder */
576 #define SDEBUG_MAX_PARTS 4
578 #define SCSI_DEBUG_MAX_CMD_LEN 32
580 static unsigned int scsi_debug_lbp(void)
582 return ((0 == scsi_debug_fake_rw
) &&
583 (scsi_debug_lbpu
| scsi_debug_lbpws
| scsi_debug_lbpws10
));
586 struct sdebug_dev_info
{
587 struct list_head dev_list
;
588 unsigned int channel
;
591 struct sdebug_host_info
*sdbg_host
;
592 unsigned long uas_bm
[1];
594 char stopped
; /* TODO: should be atomic */
598 struct sdebug_host_info
{
599 struct list_head host_list
;
600 struct Scsi_Host
*shost
;
602 struct list_head dev_info_list
;
605 #define to_sdebug_host(d) \
606 container_of(d, struct sdebug_host_info, dev)
608 static LIST_HEAD(sdebug_host_list
);
609 static DEFINE_SPINLOCK(sdebug_host_list_lock
);
612 struct sdebug_hrtimer
{ /* ... is derived from hrtimer */
613 struct hrtimer hrt
; /* must be first element */
617 struct sdebug_queued_cmd
{
618 /* in_use flagged by a bit in queued_in_use_bm[] */
619 struct timer_list
*cmnd_timerp
;
620 struct tasklet_struct
*tletp
;
621 struct sdebug_hrtimer
*sd_hrtp
;
622 struct scsi_cmnd
* a_cmnd
;
624 static struct sdebug_queued_cmd queued_arr
[SCSI_DEBUG_CANQUEUE
];
625 static unsigned long queued_in_use_bm
[SCSI_DEBUG_CANQUEUE_WORDS
];
628 static unsigned char * fake_storep
; /* ramdisk storage */
629 static struct sd_dif_tuple
*dif_storep
; /* protection info */
630 static void *map_storep
; /* provisioning map */
632 static unsigned long map_size
;
633 static int num_aborts
;
634 static int num_dev_resets
;
635 static int num_target_resets
;
636 static int num_bus_resets
;
637 static int num_host_resets
;
638 static int dix_writes
;
639 static int dix_reads
;
640 static int dif_errors
;
642 static DEFINE_SPINLOCK(queued_arr_lock
);
643 static DEFINE_RWLOCK(atomic_rw
);
645 static char sdebug_proc_name
[] = MY_NAME
;
646 static const char *my_name
= MY_NAME
;
648 static struct bus_type pseudo_lld_bus
;
650 static struct device_driver sdebug_driverfs_driver
= {
651 .name
= sdebug_proc_name
,
652 .bus
= &pseudo_lld_bus
,
655 static const int check_condition_result
=
656 (DRIVER_SENSE
<< 24) | SAM_STAT_CHECK_CONDITION
;
658 static const int illegal_condition_result
=
659 (DRIVER_SENSE
<< 24) | (DID_ABORT
<< 16) | SAM_STAT_CHECK_CONDITION
;
661 static const int device_qfull_result
=
662 (DID_OK
<< 16) | (COMMAND_COMPLETE
<< 8) | SAM_STAT_TASK_SET_FULL
;
664 static unsigned char caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
665 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
667 static unsigned char ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
669 static unsigned char iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
672 static void *fake_store(unsigned long long lba
)
674 lba
= do_div(lba
, sdebug_store_sectors
);
676 return fake_storep
+ lba
* scsi_debug_sector_size
;
679 static struct sd_dif_tuple
*dif_store(sector_t sector
)
681 sector
= do_div(sector
, sdebug_store_sectors
);
683 return dif_storep
+ sector
;
686 static int sdebug_add_adapter(void);
687 static void sdebug_remove_adapter(void);
689 static void sdebug_max_tgts_luns(void)
691 struct sdebug_host_info
*sdbg_host
;
692 struct Scsi_Host
*hpnt
;
694 spin_lock(&sdebug_host_list_lock
);
695 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
696 hpnt
= sdbg_host
->shost
;
697 if ((hpnt
->this_id
>= 0) &&
698 (scsi_debug_num_tgts
> hpnt
->this_id
))
699 hpnt
->max_id
= scsi_debug_num_tgts
+ 1;
701 hpnt
->max_id
= scsi_debug_num_tgts
;
702 /* scsi_debug_max_luns; */
703 hpnt
->max_lun
= SCSI_W_LUN_REPORT_LUNS
+ 1;
705 spin_unlock(&sdebug_host_list_lock
);
/* Identifies whether an invalid field was found in the data-out buffer
 * (SDEB_IN_DATA) or in the cdb itself (SDEB_IN_CDB); selects the ASC and
 * the C/D bit in the sense-key-specific bytes. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
710 /* Set in_bit to -1 to indicate no bit position of invalid field */
712 mk_sense_invalid_fld(struct scsi_cmnd
*scp
, enum sdeb_cmd_data c_d
,
713 int in_byte
, int in_bit
)
715 unsigned char *sbuff
;
719 sbuff
= scp
->sense_buffer
;
721 sdev_printk(KERN_ERR
, scp
->device
,
722 "%s: sense_buffer is NULL\n", __func__
);
725 asc
= c_d
? INVALID_FIELD_IN_CDB
: INVALID_FIELD_IN_PARAM_LIST
;
726 memset(sbuff
, 0, SCSI_SENSE_BUFFERSIZE
);
727 scsi_build_sense_buffer(scsi_debug_dsense
, sbuff
, ILLEGAL_REQUEST
,
729 memset(sks
, 0, sizeof(sks
));
735 sks
[0] |= 0x7 & in_bit
;
737 put_unaligned_be16(in_byte
, sks
+ 1);
738 if (scsi_debug_dsense
) {
743 memcpy(sbuff
+ sl
+ 4, sks
, 3);
745 memcpy(sbuff
+ 15, sks
, 3);
746 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
747 sdev_printk(KERN_INFO
, scp
->device
, "%s: [sense_key,asc,ascq"
748 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
749 my_name
, asc
, c_d
? 'C' : 'D', in_byte
, in_bit
);
752 static void mk_sense_buffer(struct scsi_cmnd
*scp
, int key
, int asc
, int asq
)
754 unsigned char *sbuff
;
756 sbuff
= scp
->sense_buffer
;
758 sdev_printk(KERN_ERR
, scp
->device
,
759 "%s: sense_buffer is NULL\n", __func__
);
762 memset(sbuff
, 0, SCSI_SENSE_BUFFERSIZE
);
764 scsi_build_sense_buffer(scsi_debug_dsense
, sbuff
, key
, asc
, asq
);
766 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
767 sdev_printk(KERN_INFO
, scp
->device
,
768 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
769 my_name
, key
, asc
, asq
);
773 mk_sense_invalid_opcode(struct scsi_cmnd
*scp
)
775 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_OPCODE
, 0);
778 static int scsi_debug_ioctl(struct scsi_device
*dev
, int cmd
, void __user
*arg
)
780 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) {
782 sdev_printk(KERN_INFO
, dev
,
783 "%s: BLKFLSBUF [0x1261]\n", __func__
);
784 else if (0x5331 == cmd
)
785 sdev_printk(KERN_INFO
, dev
,
786 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
789 sdev_printk(KERN_INFO
, dev
, "%s: cmd=0x%x\n",
793 /* return -ENOTTY; // correct return but upsets fdisk */
796 static void clear_luns_changed_on_target(struct sdebug_dev_info
*devip
)
798 struct sdebug_host_info
*sdhp
;
799 struct sdebug_dev_info
*dp
;
801 spin_lock(&sdebug_host_list_lock
);
802 list_for_each_entry(sdhp
, &sdebug_host_list
, host_list
) {
803 list_for_each_entry(dp
, &sdhp
->dev_info_list
, dev_list
) {
804 if ((devip
->sdbg_host
== dp
->sdbg_host
) &&
805 (devip
->target
== dp
->target
))
806 clear_bit(SDEBUG_UA_LUNS_CHANGED
, dp
->uas_bm
);
809 spin_unlock(&sdebug_host_list_lock
);
812 static int check_readiness(struct scsi_cmnd
*SCpnt
, int uas_only
,
813 struct sdebug_dev_info
* devip
)
816 bool debug
= !!(SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
);
818 k
= find_first_bit(devip
->uas_bm
, SDEBUG_NUM_UAS
);
819 if (k
!= SDEBUG_NUM_UAS
) {
820 const char *cp
= NULL
;
824 mk_sense_buffer(SCpnt
, UNIT_ATTENTION
,
825 UA_RESET_ASC
, POWER_ON_RESET_ASCQ
);
827 cp
= "power on reset";
829 case SDEBUG_UA_BUS_RESET
:
830 mk_sense_buffer(SCpnt
, UNIT_ATTENTION
,
831 UA_RESET_ASC
, BUS_RESET_ASCQ
);
835 case SDEBUG_UA_MODE_CHANGED
:
836 mk_sense_buffer(SCpnt
, UNIT_ATTENTION
,
837 UA_CHANGED_ASC
, MODE_CHANGED_ASCQ
);
839 cp
= "mode parameters changed";
841 case SDEBUG_UA_CAPACITY_CHANGED
:
842 mk_sense_buffer(SCpnt
, UNIT_ATTENTION
,
843 UA_CHANGED_ASC
, CAPACITY_CHANGED_ASCQ
);
845 cp
= "capacity data changed";
847 case SDEBUG_UA_MICROCODE_CHANGED
:
848 mk_sense_buffer(SCpnt
, UNIT_ATTENTION
,
849 TARGET_CHANGED_ASC
, MICROCODE_CHANGED_ASCQ
);
851 cp
= "microcode has been changed";
853 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
:
854 mk_sense_buffer(SCpnt
, UNIT_ATTENTION
,
856 MICROCODE_CHANGED_WO_RESET_ASCQ
);
858 cp
= "microcode has been changed without reset";
860 case SDEBUG_UA_LUNS_CHANGED
:
862 * SPC-3 behavior is to report a UNIT ATTENTION with
863 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
864 * on the target, until a REPORT LUNS command is
865 * received. SPC-4 behavior is to report it only once.
866 * NOTE: scsi_debug_scsi_level does not use the same
867 * values as struct scsi_device->scsi_level.
869 if (scsi_debug_scsi_level
>= 6) /* SPC-4 and above */
870 clear_luns_changed_on_target(devip
);
871 mk_sense_buffer(SCpnt
, UNIT_ATTENTION
,
875 cp
= "reported luns data has changed";
878 pr_warn("%s: unexpected unit attention code=%d\n",
884 clear_bit(k
, devip
->uas_bm
);
886 sdev_printk(KERN_INFO
, SCpnt
->device
,
887 "%s reports: Unit attention: %s\n",
889 return check_condition_result
;
891 if ((UAS_TUR
== uas_only
) && devip
->stopped
) {
892 mk_sense_buffer(SCpnt
, NOT_READY
, LOGICAL_UNIT_NOT_READY
,
895 sdev_printk(KERN_INFO
, SCpnt
->device
,
896 "%s reports: Not ready: %s\n", my_name
,
897 "initializing command required");
898 return check_condition_result
;
/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
/*
 * Copy a response built in kernel buffer 'arr' into the data-in
 * scatter-gather list of command 'scp'.
 * NOTE(review): extraction gaps — the trailing length parameter, local
 * declarations, braces and the final "return 0" of this function are
 * missing from this chunk; only the visible statements are annotated.
 */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
struct scsi_data_buffer *sdb = scsi_in(scp);	/* data-in descriptor of scp */
/* only bidirectional or DATA-IN commands may carry data toward the host */
if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
	return (DID_ERROR << 16);
/* copy as much of arr as the sg list will take; act_len = bytes copied */
act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
/* residual = requested transfer length minus bytes actually placed */
sdb->resid = scsi_bufflen(scp) - act_len;
/* Returns number of bytes fetched into 'arr' or -1 if error. */
/*
 * Pull data-out payload of command 'scp' into kernel buffer 'arr'.
 * NOTE(review): extraction gaps — the length parameter, braces, the
 * "return 0" for an empty buffer and the "return -1" for a wrong data
 * direction are missing from this chunk.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
/* nothing to fetch when the command carries no data buffer */
if (!scsi_bufflen(scp))
/* only bidirectional or DATA-OUT commands may supply data to fetch */
if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
/* copy up to arr_len bytes out of the command's sg list */
return scsi_sg_copy_to_buffer(scp, arr, arr_len);
/* Identification strings reported in the standard INQUIRY response:
 * T10 vendor id is 8 characters, product id 16 characters, both
 * space padded per SPC; revision is 4 characters. */
static const char * inq_vendor_id = "Linux ";
static const char * inq_product_id = "scsi_debug ";
static const char *inq_product_rev = "0184";	/* version less '.' */
939 /* Device identification VPD page. Returns number of bytes placed in arr */
940 static int inquiry_evpd_83(unsigned char * arr
, int port_group_id
,
941 int target_dev_id
, int dev_id_num
,
942 const char * dev_id_str
,
948 port_a
= target_dev_id
+ 1;
949 /* T10 vendor identifier field format (faked) */
950 arr
[0] = 0x2; /* ASCII */
953 memcpy(&arr
[4], inq_vendor_id
, 8);
954 memcpy(&arr
[12], inq_product_id
, 16);
955 memcpy(&arr
[28], dev_id_str
, dev_id_str_len
);
956 num
= 8 + 16 + dev_id_str_len
;
959 if (dev_id_num
>= 0) {
960 /* NAA-5, Logical unit identifier (binary) */
961 arr
[num
++] = 0x1; /* binary (not necessarily sas) */
962 arr
[num
++] = 0x3; /* PIV=0, lu, naa */
965 arr
[num
++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
969 arr
[num
++] = (dev_id_num
>> 24);
970 arr
[num
++] = (dev_id_num
>> 16) & 0xff;
971 arr
[num
++] = (dev_id_num
>> 8) & 0xff;
972 arr
[num
++] = dev_id_num
& 0xff;
973 /* Target relative port number */
974 arr
[num
++] = 0x61; /* proto=sas, binary */
975 arr
[num
++] = 0x94; /* PIV=1, target port, rel port */
976 arr
[num
++] = 0x0; /* reserved */
977 arr
[num
++] = 0x4; /* length */
978 arr
[num
++] = 0x0; /* reserved */
979 arr
[num
++] = 0x0; /* reserved */
981 arr
[num
++] = 0x1; /* relative port A */
983 /* NAA-5, Target port identifier */
984 arr
[num
++] = 0x61; /* proto=sas, binary */
985 arr
[num
++] = 0x93; /* piv=1, target port, naa */
988 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
992 arr
[num
++] = (port_a
>> 24);
993 arr
[num
++] = (port_a
>> 16) & 0xff;
994 arr
[num
++] = (port_a
>> 8) & 0xff;
995 arr
[num
++] = port_a
& 0xff;
996 /* NAA-5, Target port group identifier */
997 arr
[num
++] = 0x61; /* proto=sas, binary */
998 arr
[num
++] = 0x95; /* piv=1, target port group id */
1003 arr
[num
++] = (port_group_id
>> 8) & 0xff;
1004 arr
[num
++] = port_group_id
& 0xff;
1005 /* NAA-5, Target device identifier */
1006 arr
[num
++] = 0x61; /* proto=sas, binary */
1007 arr
[num
++] = 0xa3; /* piv=1, target device, naa */
1010 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
1014 arr
[num
++] = (target_dev_id
>> 24);
1015 arr
[num
++] = (target_dev_id
>> 16) & 0xff;
1016 arr
[num
++] = (target_dev_id
>> 8) & 0xff;
1017 arr
[num
++] = target_dev_id
& 0xff;
1018 /* SCSI name string: Target device identifier */
1019 arr
[num
++] = 0x63; /* proto=sas, UTF-8 */
1020 arr
[num
++] = 0xa8; /* piv=1, target device, SCSI name string */
1023 memcpy(arr
+ num
, "naa.52222220", 12);
1025 snprintf(b
, sizeof(b
), "%08X", target_dev_id
);
1026 memcpy(arr
+ num
, b
, 8);
1028 memset(arr
+ num
, 0, 4);
/* Canned payload for the Software Interface Identification VPD page
 * (0x84): 6-byte identifiers using fake IEEE company id 0x222222.
 * NOTE(review): closing "};" of the initializer missing from extract. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
	0x22,0x22,0x22,0x0,0xbb,0x1,
	0x22,0x22,0x22,0x0,0xbb,0x2,
1040 /* Software interface identification VPD page */
1041 static int inquiry_evpd_84(unsigned char * arr
)
1043 memcpy(arr
, vpd84_data
, sizeof(vpd84_data
));
1044 return sizeof(vpd84_data
);
/* Management network addresses VPD page */
/*
 * Build page 0x85 in 'arr': two network-address descriptors, each a
 * NUL-terminated URL padded up to a 4-byte multiple.
 * NOTE(review): extraction gaps — declarations of num/olen/plen, the
 * "num += plen" advances, braces and the return of the total length
 * are missing from this chunk.
 */
static int inquiry_evpd_85(unsigned char * arr)
const char * na1 = "https://www.kernel.org/config";
const char * na2 = "http://www.kernel.org/log";
/* descriptor 1: storage configuration service address */
arr[num++] = 0x1;	/* lu, storage config */
arr[num++] = 0x0;	/* reserved */
/* round the string length up to the next multiple of 4 */
plen = ((plen / 4) + 1) * 4;
arr[num++] = plen;	/* length, null terminated, padded */
memcpy(arr + num, na1, olen);
memset(arr + num + olen, 0, plen - olen);	/* zero pad to plen */
/* descriptor 2: logging service address */
arr[num++] = 0x4;	/* lu, logging */
arr[num++] = 0x0;	/* reserved */
plen = ((plen / 4) + 1) * 4;
arr[num++] = plen;	/* length, null terminated, padded */
memcpy(arr + num, na2, olen);
memset(arr + num + olen, 0, plen - olen);	/* zero pad to plen */
1082 /* SCSI ports VPD page */
1083 static int inquiry_evpd_88(unsigned char * arr
, int target_dev_id
)
1088 port_a
= target_dev_id
+ 1;
1089 port_b
= port_a
+ 1;
1090 arr
[num
++] = 0x0; /* reserved */
1091 arr
[num
++] = 0x0; /* reserved */
1093 arr
[num
++] = 0x1; /* relative port 1 (primary) */
1094 memset(arr
+ num
, 0, 6);
1097 arr
[num
++] = 12; /* length tp descriptor */
1098 /* naa-5 target port identifier (A) */
1099 arr
[num
++] = 0x61; /* proto=sas, binary */
1100 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
1101 arr
[num
++] = 0x0; /* reserved */
1102 arr
[num
++] = 0x8; /* length */
1103 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
1107 arr
[num
++] = (port_a
>> 24);
1108 arr
[num
++] = (port_a
>> 16) & 0xff;
1109 arr
[num
++] = (port_a
>> 8) & 0xff;
1110 arr
[num
++] = port_a
& 0xff;
1112 arr
[num
++] = 0x0; /* reserved */
1113 arr
[num
++] = 0x0; /* reserved */
1115 arr
[num
++] = 0x2; /* relative port 2 (secondary) */
1116 memset(arr
+ num
, 0, 6);
1119 arr
[num
++] = 12; /* length tp descriptor */
1120 /* naa-5 target port identifier (B) */
1121 arr
[num
++] = 0x61; /* proto=sas, binary */
1122 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
1123 arr
[num
++] = 0x0; /* reserved */
1124 arr
[num
++] = 0x8; /* length */
1125 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
1129 arr
[num
++] = (port_b
>> 24);
1130 arr
[num
++] = (port_b
>> 16) & 0xff;
1131 arr
[num
++] = (port_b
>> 8) & 0xff;
1132 arr
[num
++] = port_b
& 0xff;
1138 static unsigned char vpd89_data
[] = {
1139 /* from 4th byte */ 0,0,0,0,
1140 'l','i','n','u','x',' ',' ',' ',
1141 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1143 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1145 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1146 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1147 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1148 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1150 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1152 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1154 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1155 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1156 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1157 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1158 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1159 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1160 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1161 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1162 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1163 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1164 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1165 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1166 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1167 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1168 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1169 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1170 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1171 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1172 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1173 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1174 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1175 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1176 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1177 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1178 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1179 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1182 /* ATA Information VPD page */
1183 static int inquiry_evpd_89(unsigned char * arr
)
1185 memcpy(arr
, vpd89_data
, sizeof(vpd89_data
));
1186 return sizeof(vpd89_data
);
/* Default payload for the Block Limits VPD page (0xb0), SBC-3; the
 * bytes after the 4-byte page header.  Several fields are overwritten
 * at runtime (see the evpd_b0 handler).
 * NOTE(review): closing "};" of the initializer missing from extract. */
static unsigned char vpdb0_data[] = {
/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1197 /* Block limits VPD page (SBC-3) */
1198 static int inquiry_evpd_b0(unsigned char * arr
)
1202 memcpy(arr
, vpdb0_data
, sizeof(vpdb0_data
));
1204 /* Optimal transfer length granularity */
1205 gran
= 1 << scsi_debug_physblk_exp
;
1206 arr
[2] = (gran
>> 8) & 0xff;
1207 arr
[3] = gran
& 0xff;
1209 /* Maximum Transfer Length */
1210 if (sdebug_store_sectors
> 0x400) {
1211 arr
[4] = (sdebug_store_sectors
>> 24) & 0xff;
1212 arr
[5] = (sdebug_store_sectors
>> 16) & 0xff;
1213 arr
[6] = (sdebug_store_sectors
>> 8) & 0xff;
1214 arr
[7] = sdebug_store_sectors
& 0xff;
1217 /* Optimal Transfer Length */
1218 put_unaligned_be32(scsi_debug_opt_blks
, &arr
[8]);
1220 if (scsi_debug_lbpu
) {
1221 /* Maximum Unmap LBA Count */
1222 put_unaligned_be32(scsi_debug_unmap_max_blocks
, &arr
[16]);
1224 /* Maximum Unmap Block Descriptor Count */
1225 put_unaligned_be32(scsi_debug_unmap_max_desc
, &arr
[20]);
1228 /* Unmap Granularity Alignment */
1229 if (scsi_debug_unmap_alignment
) {
1230 put_unaligned_be32(scsi_debug_unmap_alignment
, &arr
[28]);
1231 arr
[28] |= 0x80; /* UGAVALID */
1234 /* Optimal Unmap Granularity */
1235 put_unaligned_be32(scsi_debug_unmap_granularity
, &arr
[24]);
1237 /* Maximum WRITE SAME Length */
1238 put_unaligned_be64(scsi_debug_write_same_length
, &arr
[32]);
1240 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1242 return sizeof(vpdb0_data
);
/* Block device characteristics VPD page (SBC-3) */
/*
 * Fill 'arr' with page 0xb1 content (bytes after the page header).
 * NOTE(review): extraction gaps — braces, some zero assignments and
 * the return of the 0x3c page length are missing from this chunk.
 */
static int inquiry_evpd_b1(unsigned char *arr)
memset(arr, 0, 0x3c);	/* page body is 0x3c bytes, default all-zero */
/* bytes 0-1 hold the medium rotation rate; 1 => non-rotating */
arr[1] = 1;	/* non rotating medium (e.g. solid state) */
arr[3] = 5;	/* less than 1.8" */
/* Logical block provisioning VPD page (SBC-3) */
/*
 * Fill 'arr' with page 0xb2 content reflecting the scsi_debug
 * provisioning module parameters.
 * NOTE(review): extraction gaps — the bodies of the four "if"
 * statements (which set provisioning bits in arr[1]), braces and the
 * return of the page length are missing from this chunk.
 */
static int inquiry_evpd_b2(unsigned char *arr)
memset(arr, 0, 0x4);
arr[0] = 0;	/* threshold exponent */
if (scsi_debug_lbpu)
if (scsi_debug_lbpws)
if (scsi_debug_lbpws10)
if (scsi_debug_lbprz)
1278 #define SDEBUG_LONG_INQ_SZ 96
1279 #define SDEBUG_MAX_INQ_ARR_SZ 584
1281 static int resp_inquiry(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
1283 unsigned char pq_pdt
;
1284 unsigned char * arr
;
1285 unsigned char *cmd
= scp
->cmnd
;
1286 int alloc_len
, n
, ret
;
1289 alloc_len
= (cmd
[3] << 8) + cmd
[4];
1290 arr
= kzalloc(SDEBUG_MAX_INQ_ARR_SZ
, GFP_ATOMIC
);
1292 return DID_REQUEUE
<< 16;
1293 have_wlun
= (scp
->device
->lun
== SCSI_W_LUN_REPORT_LUNS
);
1295 pq_pdt
= 0x1e; /* present, wlun */
1296 else if (scsi_debug_no_lun_0
&& (0 == devip
->lun
))
1297 pq_pdt
= 0x7f; /* not present, no device type */
1299 pq_pdt
= (scsi_debug_ptype
& 0x1f);
1301 if (0x2 & cmd
[1]) { /* CMDDT bit set */
1302 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 1);
1304 return check_condition_result
;
1305 } else if (0x1 & cmd
[1]) { /* EVPD bit set */
1306 int lu_id_num
, port_group_id
, target_dev_id
, len
;
1308 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1310 port_group_id
= (((host_no
+ 1) & 0x7f) << 8) +
1311 (devip
->channel
& 0x7f);
1312 if (0 == scsi_debug_vpd_use_hostno
)
1314 lu_id_num
= have_wlun
? -1 : (((host_no
+ 1) * 2000) +
1315 (devip
->target
* 1000) + devip
->lun
);
1316 target_dev_id
= ((host_no
+ 1) * 2000) +
1317 (devip
->target
* 1000) - 3;
1318 len
= scnprintf(lu_id_str
, 6, "%d", lu_id_num
);
1319 if (0 == cmd
[2]) { /* supported vital product data pages */
1320 arr
[1] = cmd
[2]; /*sanity */
1322 arr
[n
++] = 0x0; /* this page */
1323 arr
[n
++] = 0x80; /* unit serial number */
1324 arr
[n
++] = 0x83; /* device identification */
1325 arr
[n
++] = 0x84; /* software interface ident. */
1326 arr
[n
++] = 0x85; /* management network addresses */
1327 arr
[n
++] = 0x86; /* extended inquiry */
1328 arr
[n
++] = 0x87; /* mode page policy */
1329 arr
[n
++] = 0x88; /* SCSI ports */
1330 arr
[n
++] = 0x89; /* ATA information */
1331 arr
[n
++] = 0xb0; /* Block limits (SBC) */
1332 arr
[n
++] = 0xb1; /* Block characteristics (SBC) */
1333 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1335 arr
[3] = n
- 4; /* number of supported VPD pages */
1336 } else if (0x80 == cmd
[2]) { /* unit serial number */
1337 arr
[1] = cmd
[2]; /*sanity */
1339 memcpy(&arr
[4], lu_id_str
, len
);
1340 } else if (0x83 == cmd
[2]) { /* device identification */
1341 arr
[1] = cmd
[2]; /*sanity */
1342 arr
[3] = inquiry_evpd_83(&arr
[4], port_group_id
,
1343 target_dev_id
, lu_id_num
,
1345 } else if (0x84 == cmd
[2]) { /* Software interface ident. */
1346 arr
[1] = cmd
[2]; /*sanity */
1347 arr
[3] = inquiry_evpd_84(&arr
[4]);
1348 } else if (0x85 == cmd
[2]) { /* Management network addresses */
1349 arr
[1] = cmd
[2]; /*sanity */
1350 arr
[3] = inquiry_evpd_85(&arr
[4]);
1351 } else if (0x86 == cmd
[2]) { /* extended inquiry */
1352 arr
[1] = cmd
[2]; /*sanity */
1353 arr
[3] = 0x3c; /* number of following entries */
1354 if (scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
)
1355 arr
[4] = 0x4; /* SPT: GRD_CHK:1 */
1356 else if (scsi_debug_dif
)
1357 arr
[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1359 arr
[4] = 0x0; /* no protection stuff */
1360 arr
[5] = 0x7; /* head of q, ordered + simple q's */
1361 } else if (0x87 == cmd
[2]) { /* mode page policy */
1362 arr
[1] = cmd
[2]; /*sanity */
1363 arr
[3] = 0x8; /* number of following entries */
1364 arr
[4] = 0x2; /* disconnect-reconnect mp */
1365 arr
[6] = 0x80; /* mlus, shared */
1366 arr
[8] = 0x18; /* protocol specific lu */
1367 arr
[10] = 0x82; /* mlus, per initiator port */
1368 } else if (0x88 == cmd
[2]) { /* SCSI Ports */
1369 arr
[1] = cmd
[2]; /*sanity */
1370 arr
[3] = inquiry_evpd_88(&arr
[4], target_dev_id
);
1371 } else if (0x89 == cmd
[2]) { /* ATA information */
1372 arr
[1] = cmd
[2]; /*sanity */
1373 n
= inquiry_evpd_89(&arr
[4]);
1375 arr
[3] = (n
& 0xff);
1376 } else if (0xb0 == cmd
[2]) { /* Block limits (SBC) */
1377 arr
[1] = cmd
[2]; /*sanity */
1378 arr
[3] = inquiry_evpd_b0(&arr
[4]);
1379 } else if (0xb1 == cmd
[2]) { /* Block characteristics (SBC) */
1380 arr
[1] = cmd
[2]; /*sanity */
1381 arr
[3] = inquiry_evpd_b1(&arr
[4]);
1382 } else if (0xb2 == cmd
[2]) { /* Logical Block Prov. (SBC) */
1383 arr
[1] = cmd
[2]; /*sanity */
1384 arr
[3] = inquiry_evpd_b2(&arr
[4]);
1386 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, -1);
1388 return check_condition_result
;
1390 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
1391 ret
= fill_from_dev_buffer(scp
, arr
,
1392 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
1396 /* drops through here for a standard inquiry */
1397 arr
[1] = scsi_debug_removable
? 0x80 : 0; /* Removable disk */
1398 arr
[2] = scsi_debug_scsi_level
;
1399 arr
[3] = 2; /* response_data_format==2 */
1400 arr
[4] = SDEBUG_LONG_INQ_SZ
- 5;
1401 arr
[5] = scsi_debug_dif
? 1 : 0; /* PROTECT bit */
1402 if (0 == scsi_debug_vpd_use_hostno
)
1403 arr
[5] = 0x10; /* claim: implicit TGPS */
1404 arr
[6] = 0x10; /* claim: MultiP */
1405 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1406 arr
[7] = 0xa; /* claim: LINKED + CMDQUE */
1407 memcpy(&arr
[8], inq_vendor_id
, 8);
1408 memcpy(&arr
[16], inq_product_id
, 16);
1409 memcpy(&arr
[32], inq_product_rev
, 4);
1410 /* version descriptors (2 bytes each) follow */
1411 arr
[58] = 0x0; arr
[59] = 0xa2; /* SAM-5 rev 4 */
1412 arr
[60] = 0x4; arr
[61] = 0x68; /* SPC-4 rev 37 */
1414 if (scsi_debug_ptype
== 0) {
1415 arr
[n
++] = 0x4; arr
[n
++] = 0xc5; /* SBC-4 rev 36 */
1416 } else if (scsi_debug_ptype
== 1) {
1417 arr
[n
++] = 0x5; arr
[n
++] = 0x25; /* SSC-4 rev 3 */
1419 arr
[n
++] = 0x20; arr
[n
++] = 0xe6; /* SPL-3 rev 7 */
1420 ret
= fill_from_dev_buffer(scp
, arr
,
1421 min(alloc_len
, SDEBUG_LONG_INQ_SZ
));
1426 static int resp_requests(struct scsi_cmnd
* scp
,
1427 struct sdebug_dev_info
* devip
)
1429 unsigned char * sbuff
;
1430 unsigned char *cmd
= scp
->cmnd
;
1431 unsigned char arr
[SCSI_SENSE_BUFFERSIZE
];
1435 memset(arr
, 0, sizeof(arr
));
1436 dsense
= !!(cmd
[1] & 1);
1437 sbuff
= scp
->sense_buffer
;
1438 if ((iec_m_pg
[2] & 0x4) && (6 == (iec_m_pg
[3] & 0xf))) {
1441 arr
[1] = 0x0; /* NO_SENSE in sense_key */
1442 arr
[2] = THRESHOLD_EXCEEDED
;
1443 arr
[3] = 0xff; /* TEST set and MRIE==6 */
1447 arr
[2] = 0x0; /* NO_SENSE in sense_key */
1448 arr
[7] = 0xa; /* 18 byte sense buffer */
1449 arr
[12] = THRESHOLD_EXCEEDED
;
1450 arr
[13] = 0xff; /* TEST set and MRIE==6 */
1453 memcpy(arr
, sbuff
, SCSI_SENSE_BUFFERSIZE
);
1454 if (arr
[0] >= 0x70 && dsense
== scsi_debug_dsense
)
1455 ; /* have sense and formats match */
1456 else if (arr
[0] <= 0x70) {
1466 } else if (dsense
) {
1469 arr
[1] = sbuff
[2]; /* sense key */
1470 arr
[2] = sbuff
[12]; /* asc */
1471 arr
[3] = sbuff
[13]; /* ascq */
1483 mk_sense_buffer(scp
, 0, NO_ADDITIONAL_SENSE
, 0);
1484 return fill_from_dev_buffer(scp
, arr
, len
);
/*
 * START STOP UNIT handler: only POWER CONDITION == 0 is supported;
 * otherwise reply CHECK CONDITION / ILLEGAL REQUEST.
 * NOTE(review): extraction gaps — braces, the "if (power_cond)" guard,
 * the extraction of the START bit from cmd[4] and the final "return 0"
 * are missing from this chunk.
 */
static int resp_start_stop(struct scsi_cmnd * scp,
			   struct sdebug_dev_info * devip)
unsigned char *cmd = scp->cmnd;	/* the START STOP UNIT CDB */
int power_cond, start;

power_cond = (cmd[4] & 0xf0) >> 4;	/* POWER CONDITION field */
/* unsupported power condition: sense points at CDB byte 4, bit 7 */
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
return check_condition_result;
/* flip the stopped flag only when it currently equals the START bit */
if (start == devip->stopped)
	devip->stopped = !start;
1504 static sector_t
get_sdebug_capacity(void)
1506 if (scsi_debug_virtual_gb
> 0)
1507 return (sector_t
)scsi_debug_virtual_gb
*
1508 (1073741824 / scsi_debug_sector_size
);
1510 return sdebug_store_sectors
;
#define SDEBUG_READCAP_ARR_SZ 8
/*
 * READ CAPACITY(10) handler: bytes 0-3 carry the last LBA
 * (capacity - 1) big-endian, bytes 4-7 the block length.
 * NOTE(review): extraction gaps — the declaration of 'capac', the
 * else-branch used when the capacity exceeds 32 bits, and closing
 * braces are missing from this chunk.
 */
static int resp_readcap(struct scsi_cmnd * scp,
			struct sdebug_dev_info * devip)
unsigned char arr[SDEBUG_READCAP_ARR_SZ];
/* following just in case virtual_gb changed */
sdebug_capacity = get_sdebug_capacity();
memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
if (sdebug_capacity < 0xffffffff) {
	/* capacity fits in 32 bits: report last LBA directly */
	capac = (unsigned int)sdebug_capacity - 1;
	arr[0] = (capac >> 24);
	arr[1] = (capac >> 16) & 0xff;
	arr[2] = (capac >> 8) & 0xff;
	arr[3] = capac & 0xff;
/* block length in bytes, big-endian (bytes 4-5 stay zero) */
arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
arr[7] = scsi_debug_sector_size & 0xff;
return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1540 #define SDEBUG_READCAP16_ARR_SZ 32
1541 static int resp_readcap16(struct scsi_cmnd
* scp
,
1542 struct sdebug_dev_info
* devip
)
1544 unsigned char *cmd
= scp
->cmnd
;
1545 unsigned char arr
[SDEBUG_READCAP16_ARR_SZ
];
1546 unsigned long long capac
;
1549 alloc_len
= ((cmd
[10] << 24) + (cmd
[11] << 16) + (cmd
[12] << 8)
1551 /* following just in case virtual_gb changed */
1552 sdebug_capacity
= get_sdebug_capacity();
1553 memset(arr
, 0, SDEBUG_READCAP16_ARR_SZ
);
1554 capac
= sdebug_capacity
- 1;
1555 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1556 arr
[7 - k
] = capac
& 0xff;
1557 arr
[8] = (scsi_debug_sector_size
>> 24) & 0xff;
1558 arr
[9] = (scsi_debug_sector_size
>> 16) & 0xff;
1559 arr
[10] = (scsi_debug_sector_size
>> 8) & 0xff;
1560 arr
[11] = scsi_debug_sector_size
& 0xff;
1561 arr
[13] = scsi_debug_physblk_exp
& 0xf;
1562 arr
[14] = (scsi_debug_lowest_aligned
>> 8) & 0x3f;
1564 if (scsi_debug_lbp()) {
1565 arr
[14] |= 0x80; /* LBPME */
1566 if (scsi_debug_lbprz
)
1567 arr
[14] |= 0x40; /* LBPRZ */
1570 arr
[15] = scsi_debug_lowest_aligned
& 0xff;
1572 if (scsi_debug_dif
) {
1573 arr
[12] = (scsi_debug_dif
- 1) << 1; /* P_TYPE */
1574 arr
[12] |= 1; /* PROT_EN */
1577 return fill_from_dev_buffer(scp
, arr
,
1578 min(alloc_len
, SDEBUG_READCAP16_ARR_SZ
));
1581 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1583 static int resp_report_tgtpgs(struct scsi_cmnd
* scp
,
1584 struct sdebug_dev_info
* devip
)
1586 unsigned char *cmd
= scp
->cmnd
;
1587 unsigned char * arr
;
1588 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1589 int n
, ret
, alen
, rlen
;
1590 int port_group_a
, port_group_b
, port_a
, port_b
;
1592 alen
= ((cmd
[6] << 24) + (cmd
[7] << 16) + (cmd
[8] << 8)
1595 arr
= kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ
, GFP_ATOMIC
);
1597 return DID_REQUEUE
<< 16;
1599 * EVPD page 0x88 states we have two ports, one
1600 * real and a fake port with no device connected.
1601 * So we create two port groups with one port each
1602 * and set the group with port B to unavailable.
1604 port_a
= 0x1; /* relative port A */
1605 port_b
= 0x2; /* relative port B */
1606 port_group_a
= (((host_no
+ 1) & 0x7f) << 8) +
1607 (devip
->channel
& 0x7f);
1608 port_group_b
= (((host_no
+ 1) & 0x7f) << 8) +
1609 (devip
->channel
& 0x7f) + 0x80;
1612 * The asymmetric access state is cycled according to the host_id.
1615 if (0 == scsi_debug_vpd_use_hostno
) {
1616 arr
[n
++] = host_no
% 3; /* Asymm access state */
1617 arr
[n
++] = 0x0F; /* claim: all states are supported */
1619 arr
[n
++] = 0x0; /* Active/Optimized path */
1620 arr
[n
++] = 0x01; /* claim: only support active/optimized paths */
1622 arr
[n
++] = (port_group_a
>> 8) & 0xff;
1623 arr
[n
++] = port_group_a
& 0xff;
1624 arr
[n
++] = 0; /* Reserved */
1625 arr
[n
++] = 0; /* Status code */
1626 arr
[n
++] = 0; /* Vendor unique */
1627 arr
[n
++] = 0x1; /* One port per group */
1628 arr
[n
++] = 0; /* Reserved */
1629 arr
[n
++] = 0; /* Reserved */
1630 arr
[n
++] = (port_a
>> 8) & 0xff;
1631 arr
[n
++] = port_a
& 0xff;
1632 arr
[n
++] = 3; /* Port unavailable */
1633 arr
[n
++] = 0x08; /* claim: only unavailalbe paths are supported */
1634 arr
[n
++] = (port_group_b
>> 8) & 0xff;
1635 arr
[n
++] = port_group_b
& 0xff;
1636 arr
[n
++] = 0; /* Reserved */
1637 arr
[n
++] = 0; /* Status code */
1638 arr
[n
++] = 0; /* Vendor unique */
1639 arr
[n
++] = 0x1; /* One port per group */
1640 arr
[n
++] = 0; /* Reserved */
1641 arr
[n
++] = 0; /* Reserved */
1642 arr
[n
++] = (port_b
>> 8) & 0xff;
1643 arr
[n
++] = port_b
& 0xff;
1646 arr
[0] = (rlen
>> 24) & 0xff;
1647 arr
[1] = (rlen
>> 16) & 0xff;
1648 arr
[2] = (rlen
>> 8) & 0xff;
1649 arr
[3] = rlen
& 0xff;
1652 * Return the smallest value of either
1653 * - The allocated length
1654 * - The constructed command length
1655 * - The maximum array size
1658 ret
= fill_from_dev_buffer(scp
, arr
,
1659 min(rlen
, SDEBUG_MAX_TGTPGS_ARR_SZ
));
1665 resp_rsup_opcodes(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
1668 u8 reporting_opts
, req_opcode
, sdeb_i
, supp
;
1670 u32 alloc_len
, a_len
;
1671 int k
, offset
, len
, errsts
, count
, bump
, na
;
1672 const struct opcode_info_t
*oip
;
1673 const struct opcode_info_t
*r_oip
;
1675 u8
*cmd
= scp
->cmnd
;
1677 rctd
= !!(cmd
[2] & 0x80);
1678 reporting_opts
= cmd
[2] & 0x7;
1679 req_opcode
= cmd
[3];
1680 req_sa
= get_unaligned_be16(cmd
+ 4);
1681 alloc_len
= get_unaligned_be32(cmd
+ 6);
1682 if (alloc_len
< 4 || alloc_len
> 0xffff) {
1683 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 6, -1);
1684 return check_condition_result
;
1686 if (alloc_len
> 8192)
1690 arr
= kzalloc((a_len
< 256) ? 320 : a_len
+ 64, GFP_ATOMIC
);
1692 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
1694 return check_condition_result
;
1696 switch (reporting_opts
) {
1697 case 0: /* all commands */
1698 /* count number of commands */
1699 for (count
= 0, oip
= opcode_info_arr
;
1700 oip
->num_attached
!= 0xff; ++oip
) {
1701 if (F_INV_OP
& oip
->flags
)
1703 count
+= (oip
->num_attached
+ 1);
1705 bump
= rctd
? 20 : 8;
1706 put_unaligned_be32(count
* bump
, arr
);
1707 for (offset
= 4, oip
= opcode_info_arr
;
1708 oip
->num_attached
!= 0xff && offset
< a_len
; ++oip
) {
1709 if (F_INV_OP
& oip
->flags
)
1711 na
= oip
->num_attached
;
1712 arr
[offset
] = oip
->opcode
;
1713 put_unaligned_be16(oip
->sa
, arr
+ offset
+ 2);
1715 arr
[offset
+ 5] |= 0x2;
1716 if (FF_SA
& oip
->flags
)
1717 arr
[offset
+ 5] |= 0x1;
1718 put_unaligned_be16(oip
->len_mask
[0], arr
+ offset
+ 6);
1720 put_unaligned_be16(0xa, arr
+ offset
+ 8);
1722 for (k
= 0, oip
= oip
->arrp
; k
< na
; ++k
, ++oip
) {
1723 if (F_INV_OP
& oip
->flags
)
1726 arr
[offset
] = oip
->opcode
;
1727 put_unaligned_be16(oip
->sa
, arr
+ offset
+ 2);
1729 arr
[offset
+ 5] |= 0x2;
1730 if (FF_SA
& oip
->flags
)
1731 arr
[offset
+ 5] |= 0x1;
1732 put_unaligned_be16(oip
->len_mask
[0],
1735 put_unaligned_be16(0xa,
1742 case 1: /* one command: opcode only */
1743 case 2: /* one command: opcode plus service action */
1744 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1745 sdeb_i
= opcode_ind_arr
[req_opcode
];
1746 oip
= &opcode_info_arr
[sdeb_i
];
1747 if (F_INV_OP
& oip
->flags
) {
1751 if (1 == reporting_opts
) {
1752 if (FF_SA
& oip
->flags
) {
1753 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
,
1756 return check_condition_result
;
1759 } else if (2 == reporting_opts
&&
1760 0 == (FF_SA
& oip
->flags
)) {
1761 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 4, -1);
1762 kfree(arr
); /* point at requested sa */
1763 return check_condition_result
;
1765 if (0 == (FF_SA
& oip
->flags
) &&
1766 req_opcode
== oip
->opcode
)
1768 else if (0 == (FF_SA
& oip
->flags
)) {
1769 na
= oip
->num_attached
;
1770 for (k
= 0, oip
= oip
->arrp
; k
< na
;
1772 if (req_opcode
== oip
->opcode
)
1775 supp
= (k
>= na
) ? 1 : 3;
1776 } else if (req_sa
!= oip
->sa
) {
1777 na
= oip
->num_attached
;
1778 for (k
= 0, oip
= oip
->arrp
; k
< na
;
1780 if (req_sa
== oip
->sa
)
1783 supp
= (k
>= na
) ? 1 : 3;
1787 u
= oip
->len_mask
[0];
1788 put_unaligned_be16(u
, arr
+ 2);
1789 arr
[4] = oip
->opcode
;
1790 for (k
= 1; k
< u
; ++k
)
1791 arr
[4 + k
] = (k
< 16) ?
1792 oip
->len_mask
[k
] : 0xff;
1797 arr
[1] = (rctd
? 0x80 : 0) | supp
;
1799 put_unaligned_be16(0xa, arr
+ offset
);
1804 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 2);
1806 return check_condition_result
;
1808 offset
= (offset
< a_len
) ? offset
: a_len
;
1809 len
= (offset
< alloc_len
) ? offset
: alloc_len
;
1810 errsts
= fill_from_dev_buffer(scp
, arr
, len
);
/*
 * REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS handler.
 * NOTE(review): extraction gaps — the "static int" line above the
 * name, local declarations (including 'arr' and 'len'), the REPD
 * branch that selects the extended (16-byte) format, and closing
 * braces are missing from this chunk.
 */
resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;	/* the REPORT SUPPORTED TMFs CDB */

memset(arr, 0, sizeof(arr));
repd = !!(cmd[2] & 0x80);	/* REPD bit: extended response format */
alloc_len = get_unaligned_be32(cmd + 6);
if (alloc_len < 4) {
	/* allocation length too small to hold even the short format */
	mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
	return check_condition_result;
arr[0] = 0xc8;	/* ATS | ATSS | LURS */
arr[1] = 0x1;	/* ITNRS */
/* clip response to the host-supplied allocation length */
len = (len < alloc_len) ? len : alloc_len;
return fill_from_dev_buffer(scp, arr, len);
/* <<Following mode page info copied from ST318451LW>> */

/*
 * Read-Write Error Recovery mode page (0x01) for MODE SENSE.
 * NOTE(review): extraction gaps — the tail of the initializer, the
 * "pcontrol == 1" (changeable values) guard before the memset, and the
 * closing brace are missing from this chunk.
 */
static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	/* changeable-values request: zero everything after the header */
	memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
/*
 * Disconnect-Reconnect mode page (0x02) for MODE SENSE.
 * NOTE(review): extraction gaps — the "pcontrol == 1" (changeable
 * values) guard before the memset and the closing brace are missing
 * from this chunk.
 */
static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	/* changeable-values request: zero everything after the header */
	memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
/*
 * Format Device mode page (0x03) for MODE SENSE; patches in the
 * simulated geometry and sector size.
 * NOTE(review): extraction gaps — the "pcontrol == 1" (changeable
 * values) guard before the memset and the closing brace are missing
 * from this chunk.
 */
static int resp_format_pg(unsigned char * p, int pcontrol, int target)
{	/* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	/* sectors per track, big-endian, bytes 10-11 */
	p[10] = (sdebug_sectors_per >> 8) & 0xff;
	p[11] = sdebug_sectors_per & 0xff;
	/* data bytes per physical sector, big-endian, bytes 12-13 */
	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
	p[13] = scsi_debug_sector_size & 0xff;
	if (scsi_debug_removable)
		p[20] |= 0x20;	/* should agree with INQUIRY */
	/* changeable-values request: zero everything after the header */
	memset(p + 2, 0, sizeof(format_pg) - 2);
	return sizeof(format_pg);
1884 static int resp_caching_pg(unsigned char * p
, int pcontrol
, int target
)
1885 { /* Caching page for mode_sense */
1886 unsigned char ch_caching_pg
[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1887 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1888 unsigned char d_caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1889 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1891 if (SCSI_DEBUG_OPT_N_WCE
& scsi_debug_opts
)
1892 caching_pg
[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1893 memcpy(p
, caching_pg
, sizeof(caching_pg
));
1895 memcpy(p
+ 2, ch_caching_pg
, sizeof(ch_caching_pg
));
1896 else if (2 == pcontrol
)
1897 memcpy(p
, d_caching_pg
, sizeof(d_caching_pg
));
1898 return sizeof(caching_pg
);
1901 static int resp_ctrl_m_pg(unsigned char * p
, int pcontrol
, int target
)
1902 { /* Control mode page for mode_sense */
1903 unsigned char ch_ctrl_m_pg
[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1905 unsigned char d_ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1908 if (scsi_debug_dsense
)
1909 ctrl_m_pg
[2] |= 0x4;
1911 ctrl_m_pg
[2] &= ~0x4;
1914 ctrl_m_pg
[5] |= 0x80; /* ATO=1 */
1916 memcpy(p
, ctrl_m_pg
, sizeof(ctrl_m_pg
));
1918 memcpy(p
+ 2, ch_ctrl_m_pg
, sizeof(ch_ctrl_m_pg
));
1919 else if (2 == pcontrol
)
1920 memcpy(p
, d_ctrl_m_pg
, sizeof(d_ctrl_m_pg
));
1921 return sizeof(ctrl_m_pg
);
/*
 * Informational Exceptions Control mode page (0x1c) for MODE SENSE.
 * ch_* holds the changeable-bits mask, d_* the default values.
 * NOTE(review): extraction gaps — the initializer continuations, the
 * "pcontrol == 1" guard before the first memcpy, and the closing
 * brace are missing from this chunk.
 */
static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	/* pcontrol == 1: report which bits are changeable */
	memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		/* pcontrol == 2: report default values */
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
	return sizeof(iec_m_pg);
/*
 * SAS SSP protocol-specific port mode page, short format, for
 * MODE SENSE.
 * NOTE(review): extraction gaps — the "pcontrol == 1" (changeable
 * values) guard before the memset and the closing brace are missing
 * from this chunk.
 */
static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6,
		0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	/* changeable-values request: zero everything after the header */
	memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
	return sizeof(sas_sf_m_pg);
1952 static int resp_sas_pcd_m_spg(unsigned char * p
, int pcontrol
, int target
,
1954 { /* SAS phy control and discover mode page for mode_sense */
1955 unsigned char sas_pcd_m_pg
[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1956 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1957 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1958 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1959 0x2, 0, 0, 0, 0, 0, 0, 0,
1960 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1961 0, 0, 0, 0, 0, 0, 0, 0,
1962 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1963 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1964 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1965 0x3, 0, 0, 0, 0, 0, 0, 0,
1966 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1967 0, 0, 0, 0, 0, 0, 0, 0,
1971 port_a
= target_dev_id
+ 1;
1972 port_b
= port_a
+ 1;
1973 memcpy(p
, sas_pcd_m_pg
, sizeof(sas_pcd_m_pg
));
1974 p
[20] = (port_a
>> 24);
1975 p
[21] = (port_a
>> 16) & 0xff;
1976 p
[22] = (port_a
>> 8) & 0xff;
1977 p
[23] = port_a
& 0xff;
1978 p
[48 + 20] = (port_b
>> 24);
1979 p
[48 + 21] = (port_b
>> 16) & 0xff;
1980 p
[48 + 22] = (port_b
>> 8) & 0xff;
1981 p
[48 + 23] = port_b
& 0xff;
1983 memset(p
+ 4, 0, sizeof(sas_pcd_m_pg
) - 4);
1984 return sizeof(sas_pcd_m_pg
);
/*
 * SAS SSP shared protocol-specific port mode subpage for MODE SENSE.
 * NOTE(review): extraction gaps — a continuation line of the
 * initializer, the "pcontrol == 1" (changeable values) guard before
 * the memset, and the closing brace are missing from this chunk.
 */
static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		0, 0, 0, 0, 0, 0, 0, 0,
	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	/* changeable-values request: zero everything after subpage header */
	memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
	return sizeof(sas_sha_m_pg);
1999 #define SDEBUG_MAX_MSENSE_SZ 256
2002 resp_mode_sense(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
2004 unsigned char dbd
, llbaa
;
2005 int pcontrol
, pcode
, subpcode
, bd_len
;
2006 unsigned char dev_spec
;
2007 int k
, alloc_len
, msense_6
, offset
, len
, target_dev_id
;
2008 int target
= scp
->device
->id
;
2010 unsigned char arr
[SDEBUG_MAX_MSENSE_SZ
];
2011 unsigned char *cmd
= scp
->cmnd
;
2013 dbd
= !!(cmd
[1] & 0x8);
2014 pcontrol
= (cmd
[2] & 0xc0) >> 6;
2015 pcode
= cmd
[2] & 0x3f;
2017 msense_6
= (MODE_SENSE
== cmd
[0]);
2018 llbaa
= msense_6
? 0 : !!(cmd
[1] & 0x10);
2019 if ((0 == scsi_debug_ptype
) && (0 == dbd
))
2020 bd_len
= llbaa
? 16 : 8;
2023 alloc_len
= msense_6
? cmd
[4] : ((cmd
[7] << 8) | cmd
[8]);
2024 memset(arr
, 0, SDEBUG_MAX_MSENSE_SZ
);
2025 if (0x3 == pcontrol
) { /* Saving values not supported */
2026 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, SAVING_PARAMS_UNSUP
, 0);
2027 return check_condition_result
;
2029 target_dev_id
= ((devip
->sdbg_host
->shost
->host_no
+ 1) * 2000) +
2030 (devip
->target
* 1000) - 3;
2031 /* set DPOFUA bit for disks */
2032 if (0 == scsi_debug_ptype
)
2033 dev_spec
= (DEV_READONLY(target
) ? 0x80 : 0x0) | 0x10;
2043 arr
[4] = 0x1; /* set LONGLBA bit */
2044 arr
[7] = bd_len
; /* assume 255 or less */
2048 if ((bd_len
> 0) && (!sdebug_capacity
))
2049 sdebug_capacity
= get_sdebug_capacity();
2052 if (sdebug_capacity
> 0xfffffffe) {
2058 ap
[0] = (sdebug_capacity
>> 24) & 0xff;
2059 ap
[1] = (sdebug_capacity
>> 16) & 0xff;
2060 ap
[2] = (sdebug_capacity
>> 8) & 0xff;
2061 ap
[3] = sdebug_capacity
& 0xff;
2063 ap
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
2064 ap
[7] = scsi_debug_sector_size
& 0xff;
2067 } else if (16 == bd_len
) {
2068 unsigned long long capac
= sdebug_capacity
;
2070 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
2071 ap
[7 - k
] = capac
& 0xff;
2072 ap
[12] = (scsi_debug_sector_size
>> 24) & 0xff;
2073 ap
[13] = (scsi_debug_sector_size
>> 16) & 0xff;
2074 ap
[14] = (scsi_debug_sector_size
>> 8) & 0xff;
2075 ap
[15] = scsi_debug_sector_size
& 0xff;
2080 if ((subpcode
> 0x0) && (subpcode
< 0xff) && (0x19 != pcode
)) {
2081 /* TODO: Control Extension page */
2082 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2083 return check_condition_result
;
2086 case 0x1: /* Read-Write error recovery page, direct access */
2087 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
2090 case 0x2: /* Disconnect-Reconnect page, all devices */
2091 len
= resp_disconnect_pg(ap
, pcontrol
, target
);
2094 case 0x3: /* Format device page, direct access */
2095 len
= resp_format_pg(ap
, pcontrol
, target
);
2098 case 0x8: /* Caching page, direct access */
2099 len
= resp_caching_pg(ap
, pcontrol
, target
);
2102 case 0xa: /* Control Mode page, all devices */
2103 len
= resp_ctrl_m_pg(ap
, pcontrol
, target
);
2106 case 0x19: /* if spc==1 then sas phy, control+discover */
2107 if ((subpcode
> 0x2) && (subpcode
< 0xff)) {
2108 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2109 return check_condition_result
;
2112 if ((0x0 == subpcode
) || (0xff == subpcode
))
2113 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
2114 if ((0x1 == subpcode
) || (0xff == subpcode
))
2115 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
, target
,
2117 if ((0x2 == subpcode
) || (0xff == subpcode
))
2118 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
2121 case 0x1c: /* Informational Exceptions Mode page, all devices */
2122 len
= resp_iec_m_pg(ap
, pcontrol
, target
);
2125 case 0x3f: /* Read all Mode pages */
2126 if ((0 == subpcode
) || (0xff == subpcode
)) {
2127 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
2128 len
+= resp_disconnect_pg(ap
+ len
, pcontrol
, target
);
2129 len
+= resp_format_pg(ap
+ len
, pcontrol
, target
);
2130 len
+= resp_caching_pg(ap
+ len
, pcontrol
, target
);
2131 len
+= resp_ctrl_m_pg(ap
+ len
, pcontrol
, target
);
2132 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
2133 if (0xff == subpcode
) {
2134 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
,
2135 target
, target_dev_id
);
2136 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
2138 len
+= resp_iec_m_pg(ap
+ len
, pcontrol
, target
);
2140 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2141 return check_condition_result
;
2146 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 5);
2147 return check_condition_result
;
2150 arr
[0] = offset
- 1;
2152 arr
[0] = ((offset
- 2) >> 8) & 0xff;
2153 arr
[1] = (offset
- 2) & 0xff;
2155 return fill_from_dev_buffer(scp
, arr
, min(alloc_len
, offset
));
2158 #define SDEBUG_MAX_MSELECT_SZ 512
2161 resp_mode_select(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
2163 int pf
, sp
, ps
, md_len
, bd_len
, off
, spf
, pg_len
;
2164 int param_len
, res
, mpage
;
2165 unsigned char arr
[SDEBUG_MAX_MSELECT_SZ
];
2166 unsigned char *cmd
= scp
->cmnd
;
2167 int mselect6
= (MODE_SELECT
== cmd
[0]);
2169 memset(arr
, 0, sizeof(arr
));
2172 param_len
= mselect6
? cmd
[4] : ((cmd
[7] << 8) + cmd
[8]);
2173 if ((0 == pf
) || sp
|| (param_len
> SDEBUG_MAX_MSELECT_SZ
)) {
2174 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, mselect6
? 4 : 7, -1);
2175 return check_condition_result
;
2177 res
= fetch_to_dev_buffer(scp
, arr
, param_len
);
2179 return (DID_ERROR
<< 16);
2180 else if ((res
< param_len
) &&
2181 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
2182 sdev_printk(KERN_INFO
, scp
->device
,
2183 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2184 __func__
, param_len
, res
);
2185 md_len
= mselect6
? (arr
[0] + 1) : ((arr
[0] << 8) + arr
[1] + 2);
2186 bd_len
= mselect6
? arr
[3] : ((arr
[6] << 8) + arr
[7]);
2188 mk_sense_invalid_fld(scp
, SDEB_IN_DATA
, 0, -1);
2189 return check_condition_result
;
2191 off
= bd_len
+ (mselect6
? 4 : 8);
2192 mpage
= arr
[off
] & 0x3f;
2193 ps
= !!(arr
[off
] & 0x80);
2195 mk_sense_invalid_fld(scp
, SDEB_IN_DATA
, off
, 7);
2196 return check_condition_result
;
2198 spf
= !!(arr
[off
] & 0x40);
2199 pg_len
= spf
? ((arr
[off
+ 2] << 8) + arr
[off
+ 3] + 4) :
2201 if ((pg_len
+ off
) > param_len
) {
2202 mk_sense_buffer(scp
, ILLEGAL_REQUEST
,
2203 PARAMETER_LIST_LENGTH_ERR
, 0);
2204 return check_condition_result
;
2207 case 0x8: /* Caching Mode page */
2208 if (caching_pg
[1] == arr
[off
+ 1]) {
2209 memcpy(caching_pg
+ 2, arr
+ off
+ 2,
2210 sizeof(caching_pg
) - 2);
2211 goto set_mode_changed_ua
;
2214 case 0xa: /* Control Mode page */
2215 if (ctrl_m_pg
[1] == arr
[off
+ 1]) {
2216 memcpy(ctrl_m_pg
+ 2, arr
+ off
+ 2,
2217 sizeof(ctrl_m_pg
) - 2);
2218 scsi_debug_dsense
= !!(ctrl_m_pg
[2] & 0x4);
2219 goto set_mode_changed_ua
;
2222 case 0x1c: /* Informational Exceptions Mode page */
2223 if (iec_m_pg
[1] == arr
[off
+ 1]) {
2224 memcpy(iec_m_pg
+ 2, arr
+ off
+ 2,
2225 sizeof(iec_m_pg
) - 2);
2226 goto set_mode_changed_ua
;
2232 mk_sense_invalid_fld(scp
, SDEB_IN_DATA
, off
, 5);
2233 return check_condition_result
;
2234 set_mode_changed_ua
:
2235 set_bit(SDEBUG_UA_MODE_CHANGED
, devip
->uas_bm
);
static int resp_temp_l_pg(unsigned char * arr)
{	/* Temperature log page: current (38 C) and reference (65 C) */
	unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
				     0x0, 0x1, 0x3, 0x2, 0x0, 65,
		};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2249 static int resp_ie_l_pg(unsigned char * arr
)
2251 unsigned char ie_l_pg
[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2254 memcpy(arr
, ie_l_pg
, sizeof(ie_l_pg
));
2255 if (iec_m_pg
[2] & 0x4) { /* TEST bit set */
2256 arr
[4] = THRESHOLD_EXCEEDED
;
2259 return sizeof(ie_l_pg
);
2262 #define SDEBUG_MAX_LSENSE_SZ 512
2264 static int resp_log_sense(struct scsi_cmnd
* scp
,
2265 struct sdebug_dev_info
* devip
)
2267 int ppc
, sp
, pcontrol
, pcode
, subpcode
, alloc_len
, len
, n
;
2268 unsigned char arr
[SDEBUG_MAX_LSENSE_SZ
];
2269 unsigned char *cmd
= scp
->cmnd
;
2271 memset(arr
, 0, sizeof(arr
));
2275 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, ppc
? 1 : 0);
2276 return check_condition_result
;
2278 pcontrol
= (cmd
[2] & 0xc0) >> 6;
2279 pcode
= cmd
[2] & 0x3f;
2280 subpcode
= cmd
[3] & 0xff;
2281 alloc_len
= (cmd
[7] << 8) + cmd
[8];
2283 if (0 == subpcode
) {
2285 case 0x0: /* Supported log pages log page */
2287 arr
[n
++] = 0x0; /* this page */
2288 arr
[n
++] = 0xd; /* Temperature */
2289 arr
[n
++] = 0x2f; /* Informational exceptions */
2292 case 0xd: /* Temperature log page */
2293 arr
[3] = resp_temp_l_pg(arr
+ 4);
2295 case 0x2f: /* Informational exceptions log page */
2296 arr
[3] = resp_ie_l_pg(arr
+ 4);
2299 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 5);
2300 return check_condition_result
;
2302 } else if (0xff == subpcode
) {
2306 case 0x0: /* Supported log pages and subpages log page */
2309 arr
[n
++] = 0x0; /* 0,0 page */
2311 arr
[n
++] = 0xff; /* this page */
2313 arr
[n
++] = 0x0; /* Temperature */
2315 arr
[n
++] = 0x0; /* Informational exceptions */
2318 case 0xd: /* Temperature subpages */
2321 arr
[n
++] = 0x0; /* Temperature */
2324 case 0x2f: /* Informational exceptions subpages */
2327 arr
[n
++] = 0x0; /* Informational exceptions */
2331 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 2, 5);
2332 return check_condition_result
;
2335 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 3, -1);
2336 return check_condition_result
;
2338 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
2339 return fill_from_dev_buffer(scp
, arr
,
2340 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
2343 static int check_device_access_params(struct scsi_cmnd
*scp
,
2344 unsigned long long lba
, unsigned int num
)
2346 if (lba
+ num
> sdebug_capacity
) {
2347 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
2348 return check_condition_result
;
2350 /* transfer length excessive (tie in to block limits VPD page) */
2351 if (num
> sdebug_store_sectors
) {
2352 /* needs work to find which cdb byte 'num' comes from */
2353 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
2354 return check_condition_result
;
2359 /* Returns number of bytes copied or -1 if error. */
2361 do_device_access(struct scsi_cmnd
*scmd
, u64 lba
, u32 num
, bool do_write
)
2364 u64 block
, rest
= 0;
2365 struct scsi_data_buffer
*sdb
;
2366 enum dma_data_direction dir
;
2369 sdb
= scsi_out(scmd
);
2370 dir
= DMA_TO_DEVICE
;
2372 sdb
= scsi_in(scmd
);
2373 dir
= DMA_FROM_DEVICE
;
2378 if (!(scsi_bidi_cmnd(scmd
) || scmd
->sc_data_direction
== dir
))
2381 block
= do_div(lba
, sdebug_store_sectors
);
2382 if (block
+ num
> sdebug_store_sectors
)
2383 rest
= block
+ num
- sdebug_store_sectors
;
2385 ret
= sg_copy_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
2386 fake_storep
+ (block
* scsi_debug_sector_size
),
2387 (num
- rest
) * scsi_debug_sector_size
, 0, do_write
);
2388 if (ret
!= (num
- rest
) * scsi_debug_sector_size
)
2392 ret
+= sg_copy_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
2393 fake_storep
, rest
* scsi_debug_sector_size
,
2394 (num
- rest
) * scsi_debug_sector_size
, do_write
);
2400 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2401 * arr into fake_store(lba,num) and return true. If comparison fails then
2404 comp_write_worker(u64 lba
, u32 num
, const u8
*arr
)
2407 u64 block
, rest
= 0;
2408 u32 store_blks
= sdebug_store_sectors
;
2409 u32 lb_size
= scsi_debug_sector_size
;
2411 block
= do_div(lba
, store_blks
);
2412 if (block
+ num
> store_blks
)
2413 rest
= block
+ num
- store_blks
;
2415 res
= !memcmp(fake_storep
+ (block
* lb_size
), arr
,
2416 (num
- rest
) * lb_size
);
2420 res
= memcmp(fake_storep
, arr
+ ((num
- rest
) * lb_size
),
2424 arr
+= num
* lb_size
;
2425 memcpy(fake_storep
+ (block
* lb_size
), arr
, (num
- rest
) * lb_size
);
2427 memcpy(fake_storep
, arr
+ ((num
- rest
) * lb_size
),
2432 static __be16
dif_compute_csum(const void *buf
, int len
)
2436 if (scsi_debug_guard
)
2437 csum
= (__force __be16
)ip_compute_csum(buf
, len
);
2439 csum
= cpu_to_be16(crc_t10dif(buf
, len
));
2444 static int dif_verify(struct sd_dif_tuple
*sdt
, const void *data
,
2445 sector_t sector
, u32 ei_lba
)
2447 __be16 csum
= dif_compute_csum(data
, scsi_debug_sector_size
);
2449 if (sdt
->guard_tag
!= csum
) {
2450 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2451 (unsigned long)sector
,
2452 be16_to_cpu(sdt
->guard_tag
),
2456 if (scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
&&
2457 be32_to_cpu(sdt
->ref_tag
) != (sector
& 0xffffffff)) {
2458 pr_err("REF check failed on sector %lu\n",
2459 (unsigned long)sector
);
2462 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
2463 be32_to_cpu(sdt
->ref_tag
) != ei_lba
) {
2464 pr_err("REF check failed on sector %lu\n",
2465 (unsigned long)sector
);
2471 static void dif_copy_prot(struct scsi_cmnd
*SCpnt
, sector_t sector
,
2472 unsigned int sectors
, bool read
)
2476 const void *dif_store_end
= dif_storep
+ sdebug_store_sectors
;
2477 struct sg_mapping_iter miter
;
2479 /* Bytes of protection data to copy into sgl */
2480 resid
= sectors
* sizeof(*dif_storep
);
2482 sg_miter_start(&miter
, scsi_prot_sglist(SCpnt
),
2483 scsi_prot_sg_count(SCpnt
), SG_MITER_ATOMIC
|
2484 (read
? SG_MITER_TO_SG
: SG_MITER_FROM_SG
));
2486 while (sg_miter_next(&miter
) && resid
> 0) {
2487 size_t len
= min(miter
.length
, resid
);
2488 void *start
= dif_store(sector
);
2491 if (dif_store_end
< start
+ len
)
2492 rest
= start
+ len
- dif_store_end
;
2497 memcpy(paddr
, start
, len
- rest
);
2499 memcpy(start
, paddr
, len
- rest
);
2503 memcpy(paddr
+ len
- rest
, dif_storep
, rest
);
2505 memcpy(dif_storep
, paddr
+ len
- rest
, rest
);
2508 sector
+= len
/ sizeof(*dif_storep
);
2511 sg_miter_stop(&miter
);
2514 static int prot_verify_read(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
2515 unsigned int sectors
, u32 ei_lba
)
2518 struct sd_dif_tuple
*sdt
;
2521 for (i
= 0; i
< sectors
; i
++, ei_lba
++) {
2524 sector
= start_sec
+ i
;
2525 sdt
= dif_store(sector
);
2527 if (sdt
->app_tag
== cpu_to_be16(0xffff))
2530 ret
= dif_verify(sdt
, fake_store(sector
), sector
, ei_lba
);
2537 dif_copy_prot(SCpnt
, start_sec
, sectors
, true);
2544 resp_read_dt0(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
2546 u8
*cmd
= scp
->cmnd
;
2550 unsigned long iflags
;
2557 lba
= get_unaligned_be64(cmd
+ 2);
2558 num
= get_unaligned_be32(cmd
+ 10);
2563 lba
= get_unaligned_be32(cmd
+ 2);
2564 num
= get_unaligned_be16(cmd
+ 7);
2569 lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
2570 (u32
)(cmd
[1] & 0x1f) << 16;
2571 num
= (0 == cmd
[4]) ? 256 : cmd
[4];
2576 lba
= get_unaligned_be32(cmd
+ 2);
2577 num
= get_unaligned_be32(cmd
+ 6);
2580 case XDWRITEREAD_10
:
2582 lba
= get_unaligned_be32(cmd
+ 2);
2583 num
= get_unaligned_be16(cmd
+ 7);
2586 default: /* assume READ(32) */
2587 lba
= get_unaligned_be64(cmd
+ 12);
2588 ei_lba
= get_unaligned_be32(cmd
+ 20);
2589 num
= get_unaligned_be32(cmd
+ 28);
2594 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
2596 mk_sense_invalid_opcode(scp
);
2597 return check_condition_result
;
2599 if ((scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
||
2600 scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
2601 (cmd
[1] & 0xe0) == 0)
2602 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected RD "
2605 if (sdebug_any_injecting_opt
) {
2606 struct sdebug_scmd_extra_t
*ep
= scsi_cmd_priv(scp
);
2612 /* inline check_device_access_params() */
2613 if (lba
+ num
> sdebug_capacity
) {
2614 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
2615 return check_condition_result
;
2617 /* transfer length excessive (tie in to block limits VPD page) */
2618 if (num
> sdebug_store_sectors
) {
2619 /* needs work to find which cdb byte 'num' comes from */
2620 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
2621 return check_condition_result
;
2624 if ((SCSI_DEBUG_OPT_MEDIUM_ERR
& scsi_debug_opts
) &&
2625 (lba
<= (OPT_MEDIUM_ERR_ADDR
+ OPT_MEDIUM_ERR_NUM
- 1)) &&
2626 ((lba
+ num
) > OPT_MEDIUM_ERR_ADDR
)) {
2627 /* claim unrecoverable read error */
2628 mk_sense_buffer(scp
, MEDIUM_ERROR
, UNRECOVERED_READ_ERR
, 0);
2629 /* set info field and valid bit for fixed descriptor */
2630 if (0x70 == (scp
->sense_buffer
[0] & 0x7f)) {
2631 scp
->sense_buffer
[0] |= 0x80; /* Valid bit */
2632 ret
= (lba
< OPT_MEDIUM_ERR_ADDR
)
2633 ? OPT_MEDIUM_ERR_ADDR
: (int)lba
;
2634 put_unaligned_be32(ret
, scp
->sense_buffer
+ 3);
2636 scsi_set_resid(scp
, scsi_bufflen(scp
));
2637 return check_condition_result
;
2640 read_lock_irqsave(&atomic_rw
, iflags
);
2643 if (scsi_debug_dix
&& scsi_prot_sg_count(scp
)) {
2644 int prot_ret
= prot_verify_read(scp
, lba
, num
, ei_lba
);
2647 read_unlock_irqrestore(&atomic_rw
, iflags
);
2648 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, prot_ret
);
2649 return illegal_condition_result
;
2653 ret
= do_device_access(scp
, lba
, num
, false);
2654 read_unlock_irqrestore(&atomic_rw
, iflags
);
2656 return DID_ERROR
<< 16;
2658 scsi_in(scp
)->resid
= scsi_bufflen(scp
) - ret
;
2660 if (sdebug_any_injecting_opt
) {
2661 struct sdebug_scmd_extra_t
*ep
= scsi_cmd_priv(scp
);
2663 if (ep
->inj_recovered
) {
2664 mk_sense_buffer(scp
, RECOVERED_ERROR
,
2665 THRESHOLD_EXCEEDED
, 0);
2666 return check_condition_result
;
2667 } else if (ep
->inj_transport
) {
2668 mk_sense_buffer(scp
, ABORTED_COMMAND
,
2669 TRANSPORT_PROBLEM
, ACK_NAK_TO
);
2670 return check_condition_result
;
2671 } else if (ep
->inj_dif
) {
2672 /* Logical block guard check failed */
2673 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 1);
2674 return illegal_condition_result
;
2675 } else if (ep
->inj_dix
) {
2676 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 1);
2677 return illegal_condition_result
;
/* Hex/ASCII dump of a sector to the kernel log, 16 bytes per line;
 * printable characters are shown literally. */
static void dump_sector(unsigned char *buf, int len)
{
	int i, j, n;

	pr_err(">>> Sector Dump <<<\n");
	for (i = 0 ; i < len ; i += 16) {
		char b[128];

		for (j = 0, n = 0; j < 16; j++) {
			unsigned char c = buf[i+j];

			if (c >= 0x20 && c < 0x7e)
				n += scnprintf(b + n, sizeof(b) - n,
					       " %c ", buf[i+j]);
			else
				n += scnprintf(b + n, sizeof(b) - n,
					       "%02x ", buf[i+j]);
		}
		pr_err("%04d: %s\n", i, b);
	}
}
2705 static int prot_verify_write(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
2706 unsigned int sectors
, u32 ei_lba
)
2709 struct sd_dif_tuple
*sdt
;
2711 sector_t sector
= start_sec
;
2714 struct sg_mapping_iter diter
;
2715 struct sg_mapping_iter piter
;
2717 BUG_ON(scsi_sg_count(SCpnt
) == 0);
2718 BUG_ON(scsi_prot_sg_count(SCpnt
) == 0);
2720 sg_miter_start(&piter
, scsi_prot_sglist(SCpnt
),
2721 scsi_prot_sg_count(SCpnt
),
2722 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
2723 sg_miter_start(&diter
, scsi_sglist(SCpnt
), scsi_sg_count(SCpnt
),
2724 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
2726 /* For each protection page */
2727 while (sg_miter_next(&piter
)) {
2729 if (WARN_ON(!sg_miter_next(&diter
))) {
2734 for (ppage_offset
= 0; ppage_offset
< piter
.length
;
2735 ppage_offset
+= sizeof(struct sd_dif_tuple
)) {
2736 /* If we're at the end of the current
2737 * data page advance to the next one
2739 if (dpage_offset
>= diter
.length
) {
2740 if (WARN_ON(!sg_miter_next(&diter
))) {
2747 sdt
= piter
.addr
+ ppage_offset
;
2748 daddr
= diter
.addr
+ dpage_offset
;
2750 ret
= dif_verify(sdt
, daddr
, sector
, ei_lba
);
2752 dump_sector(daddr
, scsi_debug_sector_size
);
2758 dpage_offset
+= scsi_debug_sector_size
;
2760 diter
.consumed
= dpage_offset
;
2761 sg_miter_stop(&diter
);
2763 sg_miter_stop(&piter
);
2765 dif_copy_prot(SCpnt
, start_sec
, sectors
, false);
2772 sg_miter_stop(&diter
);
2773 sg_miter_stop(&piter
);
2777 static unsigned long lba_to_map_index(sector_t lba
)
2779 if (scsi_debug_unmap_alignment
) {
2780 lba
+= scsi_debug_unmap_granularity
-
2781 scsi_debug_unmap_alignment
;
2783 do_div(lba
, scsi_debug_unmap_granularity
);
2788 static sector_t
map_index_to_lba(unsigned long index
)
2790 sector_t lba
= index
* scsi_debug_unmap_granularity
;
2792 if (scsi_debug_unmap_alignment
) {
2793 lba
-= scsi_debug_unmap_granularity
-
2794 scsi_debug_unmap_alignment
;
2800 static unsigned int map_state(sector_t lba
, unsigned int *num
)
2803 unsigned int mapped
;
2804 unsigned long index
;
2807 index
= lba_to_map_index(lba
);
2808 mapped
= test_bit(index
, map_storep
);
2811 next
= find_next_zero_bit(map_storep
, map_size
, index
);
2813 next
= find_next_bit(map_storep
, map_size
, index
);
2815 end
= min_t(sector_t
, sdebug_store_sectors
, map_index_to_lba(next
));
2821 static void map_region(sector_t lba
, unsigned int len
)
2823 sector_t end
= lba
+ len
;
2826 unsigned long index
= lba_to_map_index(lba
);
2828 if (index
< map_size
)
2829 set_bit(index
, map_storep
);
2831 lba
= map_index_to_lba(index
+ 1);
2835 static void unmap_region(sector_t lba
, unsigned int len
)
2837 sector_t end
= lba
+ len
;
2840 unsigned long index
= lba_to_map_index(lba
);
2842 if (lba
== map_index_to_lba(index
) &&
2843 lba
+ scsi_debug_unmap_granularity
<= end
&&
2845 clear_bit(index
, map_storep
);
2846 if (scsi_debug_lbprz
) {
2847 memset(fake_storep
+
2848 lba
* scsi_debug_sector_size
, 0,
2849 scsi_debug_sector_size
*
2850 scsi_debug_unmap_granularity
);
2853 memset(dif_storep
+ lba
, 0xff,
2854 sizeof(*dif_storep
) *
2855 scsi_debug_unmap_granularity
);
2858 lba
= map_index_to_lba(index
+ 1);
2863 resp_write_dt0(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
2865 u8
*cmd
= scp
->cmnd
;
2869 unsigned long iflags
;
2876 lba
= get_unaligned_be64(cmd
+ 2);
2877 num
= get_unaligned_be32(cmd
+ 10);
2882 lba
= get_unaligned_be32(cmd
+ 2);
2883 num
= get_unaligned_be16(cmd
+ 7);
2888 lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
2889 (u32
)(cmd
[1] & 0x1f) << 16;
2890 num
= (0 == cmd
[4]) ? 256 : cmd
[4];
2895 lba
= get_unaligned_be32(cmd
+ 2);
2896 num
= get_unaligned_be32(cmd
+ 6);
2899 case 0x53: /* XDWRITEREAD(10) */
2901 lba
= get_unaligned_be32(cmd
+ 2);
2902 num
= get_unaligned_be16(cmd
+ 7);
2905 default: /* assume WRITE(32) */
2906 lba
= get_unaligned_be64(cmd
+ 12);
2907 ei_lba
= get_unaligned_be32(cmd
+ 20);
2908 num
= get_unaligned_be32(cmd
+ 28);
2913 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
2915 mk_sense_invalid_opcode(scp
);
2916 return check_condition_result
;
2918 if ((scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
||
2919 scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
2920 (cmd
[1] & 0xe0) == 0)
2921 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected WR "
2925 /* inline check_device_access_params() */
2926 if (lba
+ num
> sdebug_capacity
) {
2927 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
2928 return check_condition_result
;
2930 /* transfer length excessive (tie in to block limits VPD page) */
2931 if (num
> sdebug_store_sectors
) {
2932 /* needs work to find which cdb byte 'num' comes from */
2933 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
2934 return check_condition_result
;
2937 write_lock_irqsave(&atomic_rw
, iflags
);
2940 if (scsi_debug_dix
&& scsi_prot_sg_count(scp
)) {
2941 int prot_ret
= prot_verify_write(scp
, lba
, num
, ei_lba
);
2944 write_unlock_irqrestore(&atomic_rw
, iflags
);
2945 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, prot_ret
);
2946 return illegal_condition_result
;
2950 ret
= do_device_access(scp
, lba
, num
, true);
2951 if (scsi_debug_lbp())
2952 map_region(lba
, num
);
2953 write_unlock_irqrestore(&atomic_rw
, iflags
);
2955 return (DID_ERROR
<< 16);
2956 else if ((ret
< (num
* scsi_debug_sector_size
)) &&
2957 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
2958 sdev_printk(KERN_INFO
, scp
->device
,
2959 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2960 my_name
, num
* scsi_debug_sector_size
, ret
);
2962 if (sdebug_any_injecting_opt
) {
2963 struct sdebug_scmd_extra_t
*ep
= scsi_cmd_priv(scp
);
2965 if (ep
->inj_recovered
) {
2966 mk_sense_buffer(scp
, RECOVERED_ERROR
,
2967 THRESHOLD_EXCEEDED
, 0);
2968 return check_condition_result
;
2969 } else if (ep
->inj_dif
) {
2970 /* Logical block guard check failed */
2971 mk_sense_buffer(scp
, ABORTED_COMMAND
, 0x10, 1);
2972 return illegal_condition_result
;
2973 } else if (ep
->inj_dix
) {
2974 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, 0x10, 1);
2975 return illegal_condition_result
;
2982 resp_write_same(struct scsi_cmnd
*scp
, u64 lba
, u32 num
, u32 ei_lba
,
2983 bool unmap
, bool ndob
)
2985 unsigned long iflags
;
2986 unsigned long long i
;
2989 ret
= check_device_access_params(scp
, lba
, num
);
2993 write_lock_irqsave(&atomic_rw
, iflags
);
2995 if (unmap
&& scsi_debug_lbp()) {
2996 unmap_region(lba
, num
);
3000 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3002 memset(fake_storep
+ (lba
* scsi_debug_sector_size
), 0,
3003 scsi_debug_sector_size
);
3006 ret
= fetch_to_dev_buffer(scp
, fake_storep
+
3007 (lba
* scsi_debug_sector_size
),
3008 scsi_debug_sector_size
);
3011 write_unlock_irqrestore(&atomic_rw
, iflags
);
3012 return (DID_ERROR
<< 16);
3013 } else if ((ret
< (num
* scsi_debug_sector_size
)) &&
3014 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
3015 sdev_printk(KERN_INFO
, scp
->device
,
3016 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
3017 my_name
, "write same",
3018 num
* scsi_debug_sector_size
, ret
);
3020 /* Copy first sector to remaining blocks */
3021 for (i
= 1 ; i
< num
; i
++)
3022 memcpy(fake_storep
+ ((lba
+ i
) * scsi_debug_sector_size
),
3023 fake_storep
+ (lba
* scsi_debug_sector_size
),
3024 scsi_debug_sector_size
);
3026 if (scsi_debug_lbp())
3027 map_region(lba
, num
);
3029 write_unlock_irqrestore(&atomic_rw
, iflags
);
3035 resp_write_same_10(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3037 u8
*cmd
= scp
->cmnd
;
3044 if (scsi_debug_lbpws10
== 0) {
3045 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 3);
3046 return check_condition_result
;
3050 lba
= get_unaligned_be32(cmd
+ 2);
3051 num
= get_unaligned_be16(cmd
+ 7);
3052 if (num
> scsi_debug_write_same_length
) {
3053 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 7, -1);
3054 return check_condition_result
;
3056 return resp_write_same(scp
, lba
, num
, ei_lba
, unmap
, false);
3060 resp_write_same_16(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3062 u8
*cmd
= scp
->cmnd
;
3069 if (cmd
[1] & 0x8) { /* UNMAP */
3070 if (scsi_debug_lbpws
== 0) {
3071 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 3);
3072 return check_condition_result
;
3076 if (cmd
[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3078 lba
= get_unaligned_be64(cmd
+ 2);
3079 num
= get_unaligned_be32(cmd
+ 10);
3080 if (num
> scsi_debug_write_same_length
) {
3081 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 10, -1);
3082 return check_condition_result
;
3084 return resp_write_same(scp
, lba
, num
, ei_lba
, unmap
, ndob
);
3087 /* Note the mode field is in the same position as the (lower) service action
3088 * field. For the Report supported operation codes command, SPC-4 suggests
3089 * each mode of this command should be reported separately; for future. */
3091 resp_write_buffer(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3093 u8
*cmd
= scp
->cmnd
;
3094 struct scsi_device
*sdp
= scp
->device
;
3095 struct sdebug_dev_info
*dp
;
3098 mode
= cmd
[1] & 0x1f;
3100 case 0x4: /* download microcode (MC) and activate (ACT) */
3101 /* set UAs on this device only */
3102 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3103 set_bit(SDEBUG_UA_MICROCODE_CHANGED
, devip
->uas_bm
);
3105 case 0x5: /* download MC, save and ACT */
3106 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
, devip
->uas_bm
);
3108 case 0x6: /* download MC with offsets and ACT */
3109 /* set UAs on most devices (LUs) in this target */
3110 list_for_each_entry(dp
,
3111 &devip
->sdbg_host
->dev_info_list
,
3113 if (dp
->target
== sdp
->id
) {
3114 set_bit(SDEBUG_UA_BUS_RESET
, dp
->uas_bm
);
3116 set_bit(SDEBUG_UA_MICROCODE_CHANGED
,
3120 case 0x7: /* download MC with offsets, save, and ACT */
3121 /* set UA on all devices (LUs) in this target */
3122 list_for_each_entry(dp
,
3123 &devip
->sdbg_host
->dev_info_list
,
3125 if (dp
->target
== sdp
->id
)
3126 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
,
3130 /* do nothing for this command for other mode values */
3137 resp_comp_write(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3139 u8
*cmd
= scp
->cmnd
;
3141 u8
*fake_storep_hold
;
3144 u32 lb_size
= scsi_debug_sector_size
;
3146 unsigned long iflags
;
3150 lba
= get_unaligned_be64(cmd
+ 2);
3151 num
= cmd
[13]; /* 1 to a maximum of 255 logical blocks */
3153 return 0; /* degenerate case, not an error */
3154 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
3156 mk_sense_invalid_opcode(scp
);
3157 return check_condition_result
;
3159 if ((scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
||
3160 scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
3161 (cmd
[1] & 0xe0) == 0)
3162 sdev_printk(KERN_ERR
, scp
->device
, "Unprotected WR "
3165 /* inline check_device_access_params() */
3166 if (lba
+ num
> sdebug_capacity
) {
3167 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, LBA_OUT_OF_RANGE
, 0);
3168 return check_condition_result
;
3170 /* transfer length excessive (tie in to block limits VPD page) */
3171 if (num
> sdebug_store_sectors
) {
3172 /* needs work to find which cdb byte 'num' comes from */
3173 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
3174 return check_condition_result
;
3177 arr
= kzalloc(dnum
* lb_size
, GFP_ATOMIC
);
3179 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3181 return check_condition_result
;
3184 write_lock_irqsave(&atomic_rw
, iflags
);
3186 /* trick do_device_access() to fetch both compare and write buffers
3187 * from data-in into arr. Safe (atomic) since write_lock held. */
3188 fake_storep_hold
= fake_storep
;
3190 ret
= do_device_access(scp
, 0, dnum
, true);
3191 fake_storep
= fake_storep_hold
;
3193 retval
= DID_ERROR
<< 16;
3195 } else if ((ret
< (dnum
* lb_size
)) &&
3196 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
3197 sdev_printk(KERN_INFO
, scp
->device
, "%s: compare_write: cdb "
3198 "indicated=%u, IO sent=%d bytes\n", my_name
,
3199 dnum
* lb_size
, ret
);
3200 if (!comp_write_worker(lba
, num
, arr
)) {
3201 mk_sense_buffer(scp
, MISCOMPARE
, MISCOMPARE_VERIFY_ASC
, 0);
3202 retval
= check_condition_result
;
3205 if (scsi_debug_lbp())
3206 map_region(lba
, num
);
3208 write_unlock_irqrestore(&atomic_rw
, iflags
);
3213 struct unmap_block_desc
{
3220 resp_unmap(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3223 struct unmap_block_desc
*desc
;
3224 unsigned int i
, payload_len
, descriptors
;
3226 unsigned long iflags
;
3229 if (!scsi_debug_lbp())
3230 return 0; /* fib and say its done */
3231 payload_len
= get_unaligned_be16(scp
->cmnd
+ 7);
3232 BUG_ON(scsi_bufflen(scp
) != payload_len
);
3234 descriptors
= (payload_len
- 8) / 16;
3235 if (descriptors
> scsi_debug_unmap_max_desc
) {
3236 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 7, -1);
3237 return check_condition_result
;
3240 buf
= kmalloc(scsi_bufflen(scp
), GFP_ATOMIC
);
3242 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3244 return check_condition_result
;
3247 scsi_sg_copy_to_buffer(scp
, buf
, scsi_bufflen(scp
));
3249 BUG_ON(get_unaligned_be16(&buf
[0]) != payload_len
- 2);
3250 BUG_ON(get_unaligned_be16(&buf
[2]) != descriptors
* 16);
3252 desc
= (void *)&buf
[8];
3254 write_lock_irqsave(&atomic_rw
, iflags
);
3256 for (i
= 0 ; i
< descriptors
; i
++) {
3257 unsigned long long lba
= get_unaligned_be64(&desc
[i
].lba
);
3258 unsigned int num
= get_unaligned_be32(&desc
[i
].blocks
);
3260 ret
= check_device_access_params(scp
, lba
, num
);
3264 unmap_region(lba
, num
);
3270 write_unlock_irqrestore(&atomic_rw
, iflags
);
3276 #define SDEBUG_GET_LBA_STATUS_LEN 32
3279 resp_get_lba_status(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3281 u8
*cmd
= scp
->cmnd
;
3283 u32 alloc_len
, mapped
, num
;
3284 u8 arr
[SDEBUG_GET_LBA_STATUS_LEN
];
3287 lba
= get_unaligned_be64(cmd
+ 2);
3288 alloc_len
= get_unaligned_be32(cmd
+ 10);
3293 ret
= check_device_access_params(scp
, lba
, 1);
3297 if (scsi_debug_lbp())
3298 mapped
= map_state(lba
, &num
);
3301 /* following just in case virtual_gb changed */
3302 sdebug_capacity
= get_sdebug_capacity();
3303 if (sdebug_capacity
- lba
<= 0xffffffff)
3304 num
= sdebug_capacity
- lba
;
3309 memset(arr
, 0, SDEBUG_GET_LBA_STATUS_LEN
);
3310 put_unaligned_be32(20, arr
); /* Parameter Data Length */
3311 put_unaligned_be64(lba
, arr
+ 8); /* LBA */
3312 put_unaligned_be32(num
, arr
+ 16); /* Number of blocks */
3313 arr
[20] = !mapped
; /* prov_stat=0: mapped; 1: dealloc */
3315 return fill_from_dev_buffer(scp
, arr
, SDEBUG_GET_LBA_STATUS_LEN
);
3318 #define SDEBUG_RLUN_ARR_SZ 256
3320 static int resp_report_luns(struct scsi_cmnd
* scp
,
3321 struct sdebug_dev_info
* devip
)
3323 unsigned int alloc_len
;
3324 int lun_cnt
, i
, upper
, num
, n
, want_wlun
, shortish
;
3326 unsigned char *cmd
= scp
->cmnd
;
3327 int select_report
= (int)cmd
[2];
3328 struct scsi_lun
*one_lun
;
3329 unsigned char arr
[SDEBUG_RLUN_ARR_SZ
];
3330 unsigned char * max_addr
;
3332 clear_luns_changed_on_target(devip
);
3333 alloc_len
= cmd
[9] + (cmd
[8] << 8) + (cmd
[7] << 16) + (cmd
[6] << 24);
3334 shortish
= (alloc_len
< 4);
3335 if (shortish
|| (select_report
> 2)) {
3336 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, shortish
? 6 : 2, -1);
3337 return check_condition_result
;
3339 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
3340 memset(arr
, 0, SDEBUG_RLUN_ARR_SZ
);
3341 lun_cnt
= scsi_debug_max_luns
;
3342 if (1 == select_report
)
3344 else if (scsi_debug_no_lun_0
&& (lun_cnt
> 0))
3346 want_wlun
= (select_report
> 0) ? 1 : 0;
3347 num
= lun_cnt
+ want_wlun
;
3348 arr
[2] = ((sizeof(struct scsi_lun
) * num
) >> 8) & 0xff;
3349 arr
[3] = (sizeof(struct scsi_lun
) * num
) & 0xff;
3350 n
= min((int)((SDEBUG_RLUN_ARR_SZ
- 8) /
3351 sizeof(struct scsi_lun
)), num
);
3356 one_lun
= (struct scsi_lun
*) &arr
[8];
3357 max_addr
= arr
+ SDEBUG_RLUN_ARR_SZ
;
3358 for (i
= 0, lun
= (scsi_debug_no_lun_0
? 1 : 0);
3359 ((i
< lun_cnt
) && ((unsigned char *)(one_lun
+ i
) < max_addr
));
3361 upper
= (lun
>> 8) & 0x3f;
3363 one_lun
[i
].scsi_lun
[0] =
3364 (upper
| (SAM2_LUN_ADDRESS_METHOD
<< 6));
3365 one_lun
[i
].scsi_lun
[1] = lun
& 0xff;
3368 one_lun
[i
].scsi_lun
[0] = (SCSI_W_LUN_REPORT_LUNS
>> 8) & 0xff;
3369 one_lun
[i
].scsi_lun
[1] = SCSI_W_LUN_REPORT_LUNS
& 0xff;
3372 alloc_len
= (unsigned char *)(one_lun
+ i
) - arr
;
3373 return fill_from_dev_buffer(scp
, arr
,
3374 min((int)alloc_len
, SDEBUG_RLUN_ARR_SZ
));
3377 static int resp_xdwriteread(struct scsi_cmnd
*scp
, unsigned long long lba
,
3378 unsigned int num
, struct sdebug_dev_info
*devip
)
3381 unsigned char *kaddr
, *buf
;
3382 unsigned int offset
;
3383 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
3384 struct sg_mapping_iter miter
;
3386 /* better not to use temporary buffer. */
3387 buf
= kmalloc(scsi_bufflen(scp
), GFP_ATOMIC
);
3389 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3391 return check_condition_result
;
3394 scsi_sg_copy_to_buffer(scp
, buf
, scsi_bufflen(scp
));
3397 sg_miter_start(&miter
, sdb
->table
.sgl
, sdb
->table
.nents
,
3398 SG_MITER_ATOMIC
| SG_MITER_TO_SG
);
3400 while (sg_miter_next(&miter
)) {
3402 for (j
= 0; j
< miter
.length
; j
++)
3403 *(kaddr
+ j
) ^= *(buf
+ offset
+ j
);
3405 offset
+= miter
.length
;
3407 sg_miter_stop(&miter
);
3414 resp_xdwriteread_10(struct scsi_cmnd
*scp
, struct sdebug_dev_info
*devip
)
3416 u8
*cmd
= scp
->cmnd
;
3421 if (!scsi_bidi_cmnd(scp
)) {
3422 mk_sense_buffer(scp
, ILLEGAL_REQUEST
, INSUFF_RES_ASC
,
3424 return check_condition_result
;
3426 errsts
= resp_read_dt0(scp
, devip
);
3429 if (!(cmd
[1] & 0x4)) { /* DISABLE_WRITE is not set */
3430 errsts
= resp_write_dt0(scp
, devip
);
3434 lba
= get_unaligned_be32(cmd
+ 2);
3435 num
= get_unaligned_be16(cmd
+ 7);
3436 return resp_xdwriteread(scp
, lba
, num
, devip
);
3439 /* When timer or tasklet goes off this function is called. */
3440 static void sdebug_q_cmd_complete(unsigned long indx
)
3444 unsigned long iflags
;
3445 struct sdebug_queued_cmd
*sqcp
;
3446 struct scsi_cmnd
*scp
;
3447 struct sdebug_dev_info
*devip
;
3449 atomic_inc(&sdebug_completions
);
3451 if ((qa_indx
< 0) || (qa_indx
>= SCSI_DEBUG_CANQUEUE
)) {
3452 pr_err("wild qa_indx=%d\n", qa_indx
);
3455 spin_lock_irqsave(&queued_arr_lock
, iflags
);
3456 sqcp
= &queued_arr
[qa_indx
];
3459 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3460 pr_err("scp is NULL\n");
3463 devip
= (struct sdebug_dev_info
*)scp
->device
->hostdata
;
3465 atomic_dec(&devip
->num_in_q
);
3467 pr_err("devip=NULL\n");
3468 if (atomic_read(&retired_max_queue
) > 0)
3471 sqcp
->a_cmnd
= NULL
;
3472 if (!test_and_clear_bit(qa_indx
, queued_in_use_bm
)) {
3473 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3474 pr_err("Unexpected completion\n");
3478 if (unlikely(retiring
)) { /* user has reduced max_queue */
3481 retval
= atomic_read(&retired_max_queue
);
3482 if (qa_indx
>= retval
) {
3483 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3484 pr_err("index %d too large\n", retval
);
3487 k
= find_last_bit(queued_in_use_bm
, retval
);
3488 if ((k
< scsi_debug_max_queue
) || (k
== retval
))
3489 atomic_set(&retired_max_queue
, 0);
3491 atomic_set(&retired_max_queue
, k
+ 1);
3493 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3494 scp
->scsi_done(scp
); /* callback to mid level */
3497 /* When high resolution timer goes off this function is called. */
3498 static enum hrtimer_restart
3499 sdebug_q_cmd_hrt_complete(struct hrtimer
*timer
)
3503 unsigned long iflags
;
3504 struct sdebug_hrtimer
*sd_hrtp
= (struct sdebug_hrtimer
*)timer
;
3505 struct sdebug_queued_cmd
*sqcp
;
3506 struct scsi_cmnd
*scp
;
3507 struct sdebug_dev_info
*devip
;
3509 atomic_inc(&sdebug_completions
);
3510 qa_indx
= sd_hrtp
->qa_indx
;
3511 if ((qa_indx
< 0) || (qa_indx
>= SCSI_DEBUG_CANQUEUE
)) {
3512 pr_err("wild qa_indx=%d\n", qa_indx
);
3515 spin_lock_irqsave(&queued_arr_lock
, iflags
);
3516 sqcp
= &queued_arr
[qa_indx
];
3519 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3520 pr_err("scp is NULL\n");
3523 devip
= (struct sdebug_dev_info
*)scp
->device
->hostdata
;
3525 atomic_dec(&devip
->num_in_q
);
3527 pr_err("devip=NULL\n");
3528 if (atomic_read(&retired_max_queue
) > 0)
3531 sqcp
->a_cmnd
= NULL
;
3532 if (!test_and_clear_bit(qa_indx
, queued_in_use_bm
)) {
3533 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3534 pr_err("Unexpected completion\n");
3538 if (unlikely(retiring
)) { /* user has reduced max_queue */
3541 retval
= atomic_read(&retired_max_queue
);
3542 if (qa_indx
>= retval
) {
3543 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3544 pr_err("index %d too large\n", retval
);
3547 k
= find_last_bit(queued_in_use_bm
, retval
);
3548 if ((k
< scsi_debug_max_queue
) || (k
== retval
))
3549 atomic_set(&retired_max_queue
, 0);
3551 atomic_set(&retired_max_queue
, k
+ 1);
3553 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3554 scp
->scsi_done(scp
); /* callback to mid level */
3556 return HRTIMER_NORESTART
;
3559 static struct sdebug_dev_info
*
3560 sdebug_device_create(struct sdebug_host_info
*sdbg_host
, gfp_t flags
)
3562 struct sdebug_dev_info
*devip
;
3564 devip
= kzalloc(sizeof(*devip
), flags
);
3566 devip
->sdbg_host
= sdbg_host
;
3567 list_add_tail(&devip
->dev_list
, &sdbg_host
->dev_info_list
);
3572 static struct sdebug_dev_info
* devInfoReg(struct scsi_device
* sdev
)
3574 struct sdebug_host_info
* sdbg_host
;
3575 struct sdebug_dev_info
* open_devip
= NULL
;
3576 struct sdebug_dev_info
* devip
=
3577 (struct sdebug_dev_info
*)sdev
->hostdata
;
3581 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(sdev
->host
);
3583 pr_err("Host info NULL\n");
3586 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
, dev_list
) {
3587 if ((devip
->used
) && (devip
->channel
== sdev
->channel
) &&
3588 (devip
->target
== sdev
->id
) &&
3589 (devip
->lun
== sdev
->lun
))
3592 if ((!devip
->used
) && (!open_devip
))
3596 if (!open_devip
) { /* try and make a new one */
3597 open_devip
= sdebug_device_create(sdbg_host
, GFP_ATOMIC
);
3599 pr_err("out of memory at line %d\n", __LINE__
);
3604 open_devip
->channel
= sdev
->channel
;
3605 open_devip
->target
= sdev
->id
;
3606 open_devip
->lun
= sdev
->lun
;
3607 open_devip
->sdbg_host
= sdbg_host
;
3608 atomic_set(&open_devip
->num_in_q
, 0);
3609 set_bit(SDEBUG_UA_POR
, open_devip
->uas_bm
);
3610 open_devip
->used
= true;
3614 static int scsi_debug_slave_alloc(struct scsi_device
*sdp
)
3616 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
3617 pr_info("slave_alloc <%u %u %u %llu>\n",
3618 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
3619 queue_flag_set_unlocked(QUEUE_FLAG_BIDI
, sdp
->request_queue
);
3623 static int scsi_debug_slave_configure(struct scsi_device
*sdp
)
3625 struct sdebug_dev_info
*devip
;
3627 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
3628 pr_info("slave_configure <%u %u %u %llu>\n",
3629 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
3630 if (sdp
->host
->max_cmd_len
!= SCSI_DEBUG_MAX_CMD_LEN
)
3631 sdp
->host
->max_cmd_len
= SCSI_DEBUG_MAX_CMD_LEN
;
3632 devip
= devInfoReg(sdp
);
3634 return 1; /* no resources, will be marked offline */
3635 sdp
->hostdata
= devip
;
3636 blk_queue_max_segment_size(sdp
->request_queue
, -1U);
3637 if (scsi_debug_no_uld
)
3638 sdp
->no_uld_attach
= 1;
3642 static void scsi_debug_slave_destroy(struct scsi_device
*sdp
)
3644 struct sdebug_dev_info
*devip
=
3645 (struct sdebug_dev_info
*)sdp
->hostdata
;
3647 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
3648 pr_info("slave_destroy <%u %u %u %llu>\n",
3649 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
3651 /* make this slot available for re-use */
3652 devip
->used
= false;
3653 sdp
->hostdata
= NULL
;
3657 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
3658 static int stop_queued_cmnd(struct scsi_cmnd
*cmnd
)
3660 unsigned long iflags
;
3661 int k
, qmax
, r_qmax
;
3662 struct sdebug_queued_cmd
*sqcp
;
3663 struct sdebug_dev_info
*devip
;
3665 spin_lock_irqsave(&queued_arr_lock
, iflags
);
3666 qmax
= scsi_debug_max_queue
;
3667 r_qmax
= atomic_read(&retired_max_queue
);
3670 for (k
= 0; k
< qmax
; ++k
) {
3671 if (test_bit(k
, queued_in_use_bm
)) {
3672 sqcp
= &queued_arr
[k
];
3673 if (cmnd
== sqcp
->a_cmnd
) {
3674 devip
= (struct sdebug_dev_info
*)
3675 cmnd
->device
->hostdata
;
3677 atomic_dec(&devip
->num_in_q
);
3678 sqcp
->a_cmnd
= NULL
;
3679 spin_unlock_irqrestore(&queued_arr_lock
,
3681 if (scsi_debug_ndelay
> 0) {
3684 &sqcp
->sd_hrtp
->hrt
);
3685 } else if (scsi_debug_delay
> 0) {
3686 if (sqcp
->cmnd_timerp
)
3689 } else if (scsi_debug_delay
< 0) {
3691 tasklet_kill(sqcp
->tletp
);
3693 clear_bit(k
, queued_in_use_bm
);
3698 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3702 /* Deletes (stops) timers or tasklets of all queued commands */
3703 static void stop_all_queued(void)
3705 unsigned long iflags
;
3707 struct sdebug_queued_cmd
*sqcp
;
3708 struct sdebug_dev_info
*devip
;
3710 spin_lock_irqsave(&queued_arr_lock
, iflags
);
3711 for (k
= 0; k
< SCSI_DEBUG_CANQUEUE
; ++k
) {
3712 if (test_bit(k
, queued_in_use_bm
)) {
3713 sqcp
= &queued_arr
[k
];
3715 devip
= (struct sdebug_dev_info
*)
3716 sqcp
->a_cmnd
->device
->hostdata
;
3718 atomic_dec(&devip
->num_in_q
);
3719 sqcp
->a_cmnd
= NULL
;
3720 spin_unlock_irqrestore(&queued_arr_lock
,
3722 if (scsi_debug_ndelay
> 0) {
3725 &sqcp
->sd_hrtp
->hrt
);
3726 } else if (scsi_debug_delay
> 0) {
3727 if (sqcp
->cmnd_timerp
)
3730 } else if (scsi_debug_delay
< 0) {
3732 tasklet_kill(sqcp
->tletp
);
3734 clear_bit(k
, queued_in_use_bm
);
3735 spin_lock_irqsave(&queued_arr_lock
, iflags
);
3739 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3742 /* Free queued command memory on heap */
3743 static void free_all_queued(void)
3745 unsigned long iflags
;
3747 struct sdebug_queued_cmd
*sqcp
;
3749 spin_lock_irqsave(&queued_arr_lock
, iflags
);
3750 for (k
= 0; k
< SCSI_DEBUG_CANQUEUE
; ++k
) {
3751 sqcp
= &queued_arr
[k
];
3752 kfree(sqcp
->cmnd_timerp
);
3753 sqcp
->cmnd_timerp
= NULL
;
3756 kfree(sqcp
->sd_hrtp
);
3757 sqcp
->sd_hrtp
= NULL
;
3759 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3762 static int scsi_debug_abort(struct scsi_cmnd
*SCpnt
)
3766 if (SCpnt
->device
&&
3767 (SCSI_DEBUG_OPT_ALL_NOISE
& scsi_debug_opts
))
3768 sdev_printk(KERN_INFO
, SCpnt
->device
, "%s\n",
3770 stop_queued_cmnd(SCpnt
);
3775 static int scsi_debug_device_reset(struct scsi_cmnd
* SCpnt
)
3777 struct sdebug_dev_info
* devip
;
3780 if (SCpnt
&& SCpnt
->device
) {
3781 struct scsi_device
*sdp
= SCpnt
->device
;
3783 if (SCSI_DEBUG_OPT_ALL_NOISE
& scsi_debug_opts
)
3784 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
3785 devip
= devInfoReg(sdp
);
3787 set_bit(SDEBUG_UA_POR
, devip
->uas_bm
);
3792 static int scsi_debug_target_reset(struct scsi_cmnd
*SCpnt
)
3794 struct sdebug_host_info
*sdbg_host
;
3795 struct sdebug_dev_info
*devip
;
3796 struct scsi_device
*sdp
;
3797 struct Scsi_Host
*hp
;
3800 ++num_target_resets
;
3803 sdp
= SCpnt
->device
;
3806 if (SCSI_DEBUG_OPT_ALL_NOISE
& scsi_debug_opts
)
3807 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
3811 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(hp
);
3813 list_for_each_entry(devip
,
3814 &sdbg_host
->dev_info_list
,
3816 if (devip
->target
== sdp
->id
) {
3817 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3821 if (SCSI_DEBUG_OPT_RESET_NOISE
& scsi_debug_opts
)
3822 sdev_printk(KERN_INFO
, sdp
,
3823 "%s: %d device(s) found in target\n", __func__
, k
);
3828 static int scsi_debug_bus_reset(struct scsi_cmnd
* SCpnt
)
3830 struct sdebug_host_info
*sdbg_host
;
3831 struct sdebug_dev_info
*devip
;
3832 struct scsi_device
* sdp
;
3833 struct Scsi_Host
* hp
;
3837 if (!(SCpnt
&& SCpnt
->device
))
3839 sdp
= SCpnt
->device
;
3840 if (SCSI_DEBUG_OPT_ALL_NOISE
& scsi_debug_opts
)
3841 sdev_printk(KERN_INFO
, sdp
, "%s\n", __func__
);
3844 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(hp
);
3846 list_for_each_entry(devip
,
3847 &sdbg_host
->dev_info_list
,
3849 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3854 if (SCSI_DEBUG_OPT_RESET_NOISE
& scsi_debug_opts
)
3855 sdev_printk(KERN_INFO
, sdp
,
3856 "%s: %d device(s) found in host\n", __func__
, k
);
3861 static int scsi_debug_host_reset(struct scsi_cmnd
* SCpnt
)
3863 struct sdebug_host_info
* sdbg_host
;
3864 struct sdebug_dev_info
*devip
;
3868 if ((SCpnt
->device
) && (SCSI_DEBUG_OPT_ALL_NOISE
& scsi_debug_opts
))
3869 sdev_printk(KERN_INFO
, SCpnt
->device
, "%s\n", __func__
);
3870 spin_lock(&sdebug_host_list_lock
);
3871 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
3872 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
,
3874 set_bit(SDEBUG_UA_BUS_RESET
, devip
->uas_bm
);
3878 spin_unlock(&sdebug_host_list_lock
);
3880 if (SCSI_DEBUG_OPT_RESET_NOISE
& scsi_debug_opts
)
3881 sdev_printk(KERN_INFO
, SCpnt
->device
,
3882 "%s: %d device(s) found\n", __func__
, k
);
3886 static void __init
sdebug_build_parts(unsigned char *ramp
,
3887 unsigned long store_size
)
3889 struct partition
* pp
;
3890 int starts
[SDEBUG_MAX_PARTS
+ 2];
3891 int sectors_per_part
, num_sectors
, k
;
3892 int heads_by_sects
, start_sec
, end_sec
;
3894 /* assume partition table already zeroed */
3895 if ((scsi_debug_num_parts
< 1) || (store_size
< 1048576))
3897 if (scsi_debug_num_parts
> SDEBUG_MAX_PARTS
) {
3898 scsi_debug_num_parts
= SDEBUG_MAX_PARTS
;
3899 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS
);
3901 num_sectors
= (int)sdebug_store_sectors
;
3902 sectors_per_part
= (num_sectors
- sdebug_sectors_per
)
3903 / scsi_debug_num_parts
;
3904 heads_by_sects
= sdebug_heads
* sdebug_sectors_per
;
3905 starts
[0] = sdebug_sectors_per
;
3906 for (k
= 1; k
< scsi_debug_num_parts
; ++k
)
3907 starts
[k
] = ((k
* sectors_per_part
) / heads_by_sects
)
3909 starts
[scsi_debug_num_parts
] = num_sectors
;
3910 starts
[scsi_debug_num_parts
+ 1] = 0;
3912 ramp
[510] = 0x55; /* magic partition markings */
3914 pp
= (struct partition
*)(ramp
+ 0x1be);
3915 for (k
= 0; starts
[k
+ 1]; ++k
, ++pp
) {
3916 start_sec
= starts
[k
];
3917 end_sec
= starts
[k
+ 1] - 1;
3920 pp
->cyl
= start_sec
/ heads_by_sects
;
3921 pp
->head
= (start_sec
- (pp
->cyl
* heads_by_sects
))
3922 / sdebug_sectors_per
;
3923 pp
->sector
= (start_sec
% sdebug_sectors_per
) + 1;
3925 pp
->end_cyl
= end_sec
/ heads_by_sects
;
3926 pp
->end_head
= (end_sec
- (pp
->end_cyl
* heads_by_sects
))
3927 / sdebug_sectors_per
;
3928 pp
->end_sector
= (end_sec
% sdebug_sectors_per
) + 1;
3930 pp
->start_sect
= cpu_to_le32(start_sec
);
3931 pp
->nr_sects
= cpu_to_le32(end_sec
- start_sec
+ 1);
3932 pp
->sys_ind
= 0x83; /* plain Linux partition */
3937 schedule_resp(struct scsi_cmnd
*cmnd
, struct sdebug_dev_info
*devip
,
3938 int scsi_result
, int delta_jiff
)
3940 unsigned long iflags
;
3941 int k
, num_in_q
, qdepth
, inject
;
3942 struct sdebug_queued_cmd
*sqcp
= NULL
;
3943 struct scsi_device
*sdp
;
3945 /* this should never happen */
3947 return SCSI_MLQUEUE_HOST_BUSY
;
3949 if (NULL
== devip
) {
3950 pr_warn("called devip == NULL\n");
3951 /* no particularly good error to report back */
3952 return SCSI_MLQUEUE_HOST_BUSY
;
3957 if ((scsi_result
) && (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
3958 sdev_printk(KERN_INFO
, sdp
, "%s: non-zero result=0x%x\n",
3959 __func__
, scsi_result
);
3960 if (delta_jiff
== 0)
3961 goto respond_in_thread
;
3963 /* schedule the response at a later time if resources permit */
3964 spin_lock_irqsave(&queued_arr_lock
, iflags
);
3965 num_in_q
= atomic_read(&devip
->num_in_q
);
3966 qdepth
= cmnd
->device
->queue_depth
;
3968 if ((qdepth
> 0) && (num_in_q
>= qdepth
)) {
3970 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3971 goto respond_in_thread
;
3973 scsi_result
= device_qfull_result
;
3974 } else if ((scsi_debug_every_nth
!= 0) &&
3975 (SCSI_DEBUG_OPT_RARE_TSF
& scsi_debug_opts
) &&
3976 (scsi_result
== 0)) {
3977 if ((num_in_q
== (qdepth
- 1)) &&
3978 (atomic_inc_return(&sdebug_a_tsf
) >=
3979 abs(scsi_debug_every_nth
))) {
3980 atomic_set(&sdebug_a_tsf
, 0);
3982 scsi_result
= device_qfull_result
;
3986 k
= find_first_zero_bit(queued_in_use_bm
, scsi_debug_max_queue
);
3987 if (k
>= scsi_debug_max_queue
) {
3988 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
3990 goto respond_in_thread
;
3991 else if (SCSI_DEBUG_OPT_ALL_TSF
& scsi_debug_opts
)
3992 scsi_result
= device_qfull_result
;
3993 if (SCSI_DEBUG_OPT_Q_NOISE
& scsi_debug_opts
)
3994 sdev_printk(KERN_INFO
, sdp
,
3995 "%s: max_queue=%d exceeded, %s\n",
3996 __func__
, scsi_debug_max_queue
,
3997 (scsi_result
? "status: TASK SET FULL" :
3998 "report: host busy"));
4000 goto respond_in_thread
;
4002 return SCSI_MLQUEUE_HOST_BUSY
;
4004 __set_bit(k
, queued_in_use_bm
);
4005 atomic_inc(&devip
->num_in_q
);
4006 sqcp
= &queued_arr
[k
];
4007 sqcp
->a_cmnd
= cmnd
;
4008 cmnd
->result
= scsi_result
;
4009 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
4010 if (delta_jiff
> 0) {
4011 if (NULL
== sqcp
->cmnd_timerp
) {
4012 sqcp
->cmnd_timerp
= kmalloc(sizeof(struct timer_list
),
4014 if (NULL
== sqcp
->cmnd_timerp
)
4015 return SCSI_MLQUEUE_HOST_BUSY
;
4016 init_timer(sqcp
->cmnd_timerp
);
4018 sqcp
->cmnd_timerp
->function
= sdebug_q_cmd_complete
;
4019 sqcp
->cmnd_timerp
->data
= k
;
4020 sqcp
->cmnd_timerp
->expires
= get_jiffies_64() + delta_jiff
;
4021 add_timer(sqcp
->cmnd_timerp
);
4022 } else if (scsi_debug_ndelay
> 0) {
4023 ktime_t kt
= ktime_set(0, scsi_debug_ndelay
);
4024 struct sdebug_hrtimer
*sd_hp
= sqcp
->sd_hrtp
;
4026 if (NULL
== sd_hp
) {
4027 sd_hp
= kmalloc(sizeof(*sd_hp
), GFP_ATOMIC
);
4029 return SCSI_MLQUEUE_HOST_BUSY
;
4030 sqcp
->sd_hrtp
= sd_hp
;
4031 hrtimer_init(&sd_hp
->hrt
, CLOCK_MONOTONIC
,
4033 sd_hp
->hrt
.function
= sdebug_q_cmd_hrt_complete
;
4036 hrtimer_start(&sd_hp
->hrt
, kt
, HRTIMER_MODE_REL
);
4037 } else { /* delay < 0 */
4038 if (NULL
== sqcp
->tletp
) {
4039 sqcp
->tletp
= kmalloc(sizeof(*sqcp
->tletp
),
4041 if (NULL
== sqcp
->tletp
)
4042 return SCSI_MLQUEUE_HOST_BUSY
;
4043 tasklet_init(sqcp
->tletp
,
4044 sdebug_q_cmd_complete
, k
);
4046 if (-1 == delta_jiff
)
4047 tasklet_hi_schedule(sqcp
->tletp
);
4049 tasklet_schedule(sqcp
->tletp
);
4051 if ((SCSI_DEBUG_OPT_Q_NOISE
& scsi_debug_opts
) &&
4052 (scsi_result
== device_qfull_result
))
4053 sdev_printk(KERN_INFO
, sdp
,
4054 "%s: num_in_q=%d +1, %s%s\n", __func__
,
4055 num_in_q
, (inject
? "<inject> " : ""),
4056 "status: TASK SET FULL");
4059 respond_in_thread
: /* call back to mid-layer using invocation thread */
4060 cmnd
->result
= scsi_result
;
4061 cmnd
->scsi_done(cmnd
);
4065 /* Note: The following macros create attribute files in the
4066 /sys/module/scsi_debug/parameters directory. Unfortunately this
4067 driver is unaware of a change and cannot trigger auxiliary actions
4068 as it can when the corresponding attribute in the
4069 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4071 module_param_named(add_host
, scsi_debug_add_host
, int, S_IRUGO
| S_IWUSR
);
4072 module_param_named(ato
, scsi_debug_ato
, int, S_IRUGO
);
4073 module_param_named(clustering
, scsi_debug_clustering
, bool, S_IRUGO
| S_IWUSR
);
4074 module_param_named(delay
, scsi_debug_delay
, int, S_IRUGO
| S_IWUSR
);
4075 module_param_named(dev_size_mb
, scsi_debug_dev_size_mb
, int, S_IRUGO
);
4076 module_param_named(dif
, scsi_debug_dif
, int, S_IRUGO
);
4077 module_param_named(dix
, scsi_debug_dix
, int, S_IRUGO
);
4078 module_param_named(dsense
, scsi_debug_dsense
, int, S_IRUGO
| S_IWUSR
);
4079 module_param_named(every_nth
, scsi_debug_every_nth
, int, S_IRUGO
| S_IWUSR
);
4080 module_param_named(fake_rw
, scsi_debug_fake_rw
, int, S_IRUGO
| S_IWUSR
);
4081 module_param_named(guard
, scsi_debug_guard
, uint
, S_IRUGO
);
4082 module_param_named(host_lock
, scsi_debug_host_lock
, bool, S_IRUGO
| S_IWUSR
);
4083 module_param_named(lbpu
, scsi_debug_lbpu
, int, S_IRUGO
);
4084 module_param_named(lbpws
, scsi_debug_lbpws
, int, S_IRUGO
);
4085 module_param_named(lbpws10
, scsi_debug_lbpws10
, int, S_IRUGO
);
4086 module_param_named(lbprz
, scsi_debug_lbprz
, int, S_IRUGO
);
4087 module_param_named(lowest_aligned
, scsi_debug_lowest_aligned
, int, S_IRUGO
);
4088 module_param_named(max_luns
, scsi_debug_max_luns
, int, S_IRUGO
| S_IWUSR
);
4089 module_param_named(max_queue
, scsi_debug_max_queue
, int, S_IRUGO
| S_IWUSR
);
4090 module_param_named(ndelay
, scsi_debug_ndelay
, int, S_IRUGO
| S_IWUSR
);
4091 module_param_named(no_lun_0
, scsi_debug_no_lun_0
, int, S_IRUGO
| S_IWUSR
);
4092 module_param_named(no_uld
, scsi_debug_no_uld
, int, S_IRUGO
);
4093 module_param_named(num_parts
, scsi_debug_num_parts
, int, S_IRUGO
);
4094 module_param_named(num_tgts
, scsi_debug_num_tgts
, int, S_IRUGO
| S_IWUSR
);
4095 module_param_named(opt_blks
, scsi_debug_opt_blks
, int, S_IRUGO
);
4096 module_param_named(opts
, scsi_debug_opts
, int, S_IRUGO
| S_IWUSR
);
4097 module_param_named(physblk_exp
, scsi_debug_physblk_exp
, int, S_IRUGO
);
4098 module_param_named(ptype
, scsi_debug_ptype
, int, S_IRUGO
| S_IWUSR
);
4099 module_param_named(removable
, scsi_debug_removable
, bool, S_IRUGO
| S_IWUSR
);
4100 module_param_named(scsi_level
, scsi_debug_scsi_level
, int, S_IRUGO
);
4101 module_param_named(sector_size
, scsi_debug_sector_size
, int, S_IRUGO
);
4102 module_param_named(strict
, scsi_debug_strict
, bool, S_IRUGO
| S_IWUSR
);
4103 module_param_named(unmap_alignment
, scsi_debug_unmap_alignment
, int, S_IRUGO
);
4104 module_param_named(unmap_granularity
, scsi_debug_unmap_granularity
, int, S_IRUGO
);
4105 module_param_named(unmap_max_blocks
, scsi_debug_unmap_max_blocks
, int, S_IRUGO
);
4106 module_param_named(unmap_max_desc
, scsi_debug_unmap_max_desc
, int, S_IRUGO
);
4107 module_param_named(virtual_gb
, scsi_debug_virtual_gb
, int, S_IRUGO
| S_IWUSR
);
4108 module_param_named(vpd_use_hostno
, scsi_debug_vpd_use_hostno
, int,
4110 module_param_named(write_same_length
, scsi_debug_write_same_length
, int,
4113 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4114 MODULE_DESCRIPTION("SCSI debug adapter driver");
4115 MODULE_LICENSE("GPL");
4116 MODULE_VERSION(SCSI_DEBUG_VERSION
);
4118 MODULE_PARM_DESC(add_host
, "0..127 hosts allowed(def=1)");
4119 MODULE_PARM_DESC(ato
, "application tag ownership: 0=disk 1=host (def=1)");
4120 MODULE_PARM_DESC(clustering
, "when set enables larger transfers (def=0)");
4121 MODULE_PARM_DESC(delay
, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4122 MODULE_PARM_DESC(dev_size_mb
, "size in MiB of ram shared by devs(def=8)");
4123 MODULE_PARM_DESC(dif
, "data integrity field type: 0-3 (def=0)");
4124 MODULE_PARM_DESC(dix
, "data integrity extensions mask (def=0)");
4125 MODULE_PARM_DESC(dsense
, "use descriptor sense format(def=0 -> fixed)");
4126 MODULE_PARM_DESC(every_nth
, "timeout every nth command(def=0)");
4127 MODULE_PARM_DESC(fake_rw
, "fake reads/writes instead of copying (def=0)");
4128 MODULE_PARM_DESC(guard
, "protection checksum: 0=crc, 1=ip (def=0)");
4129 MODULE_PARM_DESC(host_lock
, "use host_lock around all commands (def=0)");
4130 MODULE_PARM_DESC(lbpu
, "enable LBP, support UNMAP command (def=0)");
4131 MODULE_PARM_DESC(lbpws
, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4132 MODULE_PARM_DESC(lbpws10
, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4133 MODULE_PARM_DESC(lbprz
, "unmapped blocks return 0 on read (def=1)");
4134 MODULE_PARM_DESC(lowest_aligned
, "lowest aligned lba (def=0)");
4135 MODULE_PARM_DESC(max_luns
, "number of LUNs per target to simulate(def=1)");
4136 MODULE_PARM_DESC(max_queue
, "max number of queued commands (1 to max(def))");
4137 MODULE_PARM_DESC(ndelay
, "response delay in nanoseconds (def=0 -> ignore)");
4138 MODULE_PARM_DESC(no_lun_0
, "no LU number 0 (def=0 -> have lun 0)");
4139 MODULE_PARM_DESC(no_uld
, "stop ULD (e.g. sd driver) attaching (def=0))");
4140 MODULE_PARM_DESC(num_parts
, "number of partitions(def=0)");
4141 MODULE_PARM_DESC(num_tgts
, "number of targets per host to simulate(def=1)");
4142 MODULE_PARM_DESC(opt_blks
, "optimal transfer length in block (def=64)");
4143 MODULE_PARM_DESC(opts
, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4144 MODULE_PARM_DESC(physblk_exp
, "physical block exponent (def=0)");
4145 MODULE_PARM_DESC(ptype
, "SCSI peripheral type(def=0[disk])");
4146 MODULE_PARM_DESC(removable
, "claim to have removable media (def=0)");
4147 MODULE_PARM_DESC(scsi_level
, "SCSI level to simulate(def=6[SPC-4])");
4148 MODULE_PARM_DESC(sector_size
, "logical block size in bytes (def=512)");
4149 MODULE_PARM_DESC(strict
, "stricter checks: reserved field in cdb (def=0)");
4150 MODULE_PARM_DESC(unmap_alignment
, "lowest aligned thin provisioning lba (def=0)");
4151 MODULE_PARM_DESC(unmap_granularity
, "thin provisioning granularity in blocks (def=1)");
4152 MODULE_PARM_DESC(unmap_max_blocks
, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4153 MODULE_PARM_DESC(unmap_max_desc
, "max # of ranges that can be unmapped in one cmd (def=256)");
4154 MODULE_PARM_DESC(virtual_gb
, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4155 MODULE_PARM_DESC(vpd_use_hostno
, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4156 MODULE_PARM_DESC(write_same_length
, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4158 static char sdebug_info
[256];
4160 static const char * scsi_debug_info(struct Scsi_Host
* shp
)
4162 sprintf(sdebug_info
, "scsi_debug, version %s [%s], "
4163 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION
,
4164 scsi_debug_version_date
, scsi_debug_dev_size_mb
,
4169 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4170 static int scsi_debug_write_info(struct Scsi_Host
*host
, char *buffer
, int length
)
4174 int minLen
= length
> 15 ? 15 : length
;
4176 if (!capable(CAP_SYS_ADMIN
) || !capable(CAP_SYS_RAWIO
))
4178 memcpy(arr
, buffer
, minLen
);
4180 if (1 != sscanf(arr
, "%d", &opts
))
4182 scsi_debug_opts
= opts
;
4183 if (scsi_debug_every_nth
!= 0)
4184 atomic_set(&sdebug_cmnd_count
, 0);
4188 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4189 * same for each scsi_debug host (if more than one). Some of the counters
4190 * output are not atomics so might be inaccurate in a busy system. */
4191 static int scsi_debug_show_info(struct seq_file
*m
, struct Scsi_Host
*host
)
4196 if (scsi_debug_every_nth
> 0)
4197 snprintf(b
, sizeof(b
), " (curr:%d)",
4198 ((SCSI_DEBUG_OPT_RARE_TSF
& scsi_debug_opts
) ?
4199 atomic_read(&sdebug_a_tsf
) :
4200 atomic_read(&sdebug_cmnd_count
)));
4204 seq_printf(m
, "scsi_debug adapter driver, version %s [%s]\n"
4205 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4207 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4208 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4209 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4210 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4211 "usec_in_jiffy=%lu\n",
4212 SCSI_DEBUG_VERSION
, scsi_debug_version_date
,
4213 scsi_debug_num_tgts
, scsi_debug_dev_size_mb
, scsi_debug_opts
,
4214 scsi_debug_every_nth
, b
, scsi_debug_delay
, scsi_debug_ndelay
,
4215 scsi_debug_max_luns
, atomic_read(&sdebug_completions
),
4216 scsi_debug_sector_size
, sdebug_cylinders_per
, sdebug_heads
,
4217 sdebug_sectors_per
, num_aborts
, num_dev_resets
,
4218 num_target_resets
, num_bus_resets
, num_host_resets
,
4219 dix_reads
, dix_writes
, dif_errors
, TICK_NSEC
/ 1000);
4221 f
= find_first_bit(queued_in_use_bm
, scsi_debug_max_queue
);
4222 if (f
!= scsi_debug_max_queue
) {
4223 l
= find_last_bit(queued_in_use_bm
, scsi_debug_max_queue
);
4224 seq_printf(m
, " %s BUSY: first,last bits set: %d,%d\n",
4225 "queued_in_use_bm", f
, l
);
4230 static ssize_t
delay_show(struct device_driver
*ddp
, char *buf
)
4232 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_delay
);
4234 /* Returns -EBUSY if delay is being changed and commands are queued */
4235 static ssize_t
delay_store(struct device_driver
*ddp
, const char *buf
,
4240 if ((count
> 0) && (1 == sscanf(buf
, "%d", &delay
))) {
4242 if (scsi_debug_delay
!= delay
) {
4243 unsigned long iflags
;
4246 spin_lock_irqsave(&queued_arr_lock
, iflags
);
4247 k
= find_first_bit(queued_in_use_bm
,
4248 scsi_debug_max_queue
);
4249 if (k
!= scsi_debug_max_queue
)
4250 res
= -EBUSY
; /* have queued commands */
4252 scsi_debug_delay
= delay
;
4253 scsi_debug_ndelay
= 0;
4255 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
4261 static DRIVER_ATTR_RW(delay
);
4263 static ssize_t
ndelay_show(struct device_driver
*ddp
, char *buf
)
4265 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_ndelay
);
4267 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4268 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4269 static ssize_t
ndelay_store(struct device_driver
*ddp
, const char *buf
,
4272 unsigned long iflags
;
4275 if ((count
> 0) && (1 == sscanf(buf
, "%d", &ndelay
)) &&
4276 (ndelay
>= 0) && (ndelay
< 1000000000)) {
4278 if (scsi_debug_ndelay
!= ndelay
) {
4279 spin_lock_irqsave(&queued_arr_lock
, iflags
);
4280 k
= find_first_bit(queued_in_use_bm
,
4281 scsi_debug_max_queue
);
4282 if (k
!= scsi_debug_max_queue
)
4283 res
= -EBUSY
; /* have queued commands */
4285 scsi_debug_ndelay
= ndelay
;
4286 scsi_debug_delay
= ndelay
? DELAY_OVERRIDDEN
4289 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
4295 static DRIVER_ATTR_RW(ndelay
);
4297 static ssize_t
opts_show(struct device_driver
*ddp
, char *buf
)
4299 return scnprintf(buf
, PAGE_SIZE
, "0x%x\n", scsi_debug_opts
);
4302 static ssize_t
opts_store(struct device_driver
*ddp
, const char *buf
,
4308 if (1 == sscanf(buf
, "%10s", work
)) {
4309 if (0 == strncasecmp(work
,"0x", 2)) {
4310 if (1 == sscanf(&work
[2], "%x", &opts
))
4313 if (1 == sscanf(work
, "%d", &opts
))
4319 scsi_debug_opts
= opts
;
4320 if (SCSI_DEBUG_OPT_RECOVERED_ERR
& opts
)
4321 sdebug_any_injecting_opt
= true;
4322 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR
& opts
)
4323 sdebug_any_injecting_opt
= true;
4324 else if (SCSI_DEBUG_OPT_DIF_ERR
& opts
)
4325 sdebug_any_injecting_opt
= true;
4326 else if (SCSI_DEBUG_OPT_DIX_ERR
& opts
)
4327 sdebug_any_injecting_opt
= true;
4328 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER
& opts
)
4329 sdebug_any_injecting_opt
= true;
4330 atomic_set(&sdebug_cmnd_count
, 0);
4331 atomic_set(&sdebug_a_tsf
, 0);
4334 static DRIVER_ATTR_RW(opts
);
4336 static ssize_t
ptype_show(struct device_driver
*ddp
, char *buf
)
4338 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_ptype
);
4340 static ssize_t
ptype_store(struct device_driver
*ddp
, const char *buf
,
4345 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4346 scsi_debug_ptype
= n
;
4351 static DRIVER_ATTR_RW(ptype
);
4353 static ssize_t
dsense_show(struct device_driver
*ddp
, char *buf
)
4355 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dsense
);
4357 static ssize_t
dsense_store(struct device_driver
*ddp
, const char *buf
,
4362 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4363 scsi_debug_dsense
= n
;
4368 static DRIVER_ATTR_RW(dsense
);
4370 static ssize_t
fake_rw_show(struct device_driver
*ddp
, char *buf
)
4372 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_fake_rw
);
4374 static ssize_t
fake_rw_store(struct device_driver
*ddp
, const char *buf
,
4379 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4381 scsi_debug_fake_rw
= (scsi_debug_fake_rw
> 0);
4382 if (scsi_debug_fake_rw
!= n
) {
4383 if ((0 == n
) && (NULL
== fake_storep
)) {
4385 (unsigned long)scsi_debug_dev_size_mb
*
4388 fake_storep
= vmalloc(sz
);
4389 if (NULL
== fake_storep
) {
4390 pr_err("out of memory, 9\n");
4393 memset(fake_storep
, 0, sz
);
4395 scsi_debug_fake_rw
= n
;
4401 static DRIVER_ATTR_RW(fake_rw
);
4403 static ssize_t
no_lun_0_show(struct device_driver
*ddp
, char *buf
)
4405 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_no_lun_0
);
4407 static ssize_t
no_lun_0_store(struct device_driver
*ddp
, const char *buf
,
4412 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4413 scsi_debug_no_lun_0
= n
;
4418 static DRIVER_ATTR_RW(no_lun_0
);
4420 static ssize_t
num_tgts_show(struct device_driver
*ddp
, char *buf
)
4422 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_num_tgts
);
4424 static ssize_t
num_tgts_store(struct device_driver
*ddp
, const char *buf
,
4429 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4430 scsi_debug_num_tgts
= n
;
4431 sdebug_max_tgts_luns();
4436 static DRIVER_ATTR_RW(num_tgts
);
4438 static ssize_t
dev_size_mb_show(struct device_driver
*ddp
, char *buf
)
4440 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dev_size_mb
);
4442 static DRIVER_ATTR_RO(dev_size_mb
);
4444 static ssize_t
num_parts_show(struct device_driver
*ddp
, char *buf
)
4446 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_num_parts
);
4448 static DRIVER_ATTR_RO(num_parts
);
4450 static ssize_t
every_nth_show(struct device_driver
*ddp
, char *buf
)
4452 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_every_nth
);
4454 static ssize_t
every_nth_store(struct device_driver
*ddp
, const char *buf
,
4459 if ((count
> 0) && (1 == sscanf(buf
, "%d", &nth
))) {
4460 scsi_debug_every_nth
= nth
;
4461 atomic_set(&sdebug_cmnd_count
, 0);
4466 static DRIVER_ATTR_RW(every_nth
);
4468 static ssize_t
max_luns_show(struct device_driver
*ddp
, char *buf
)
4470 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_max_luns
);
4472 static ssize_t
max_luns_store(struct device_driver
*ddp
, const char *buf
,
4478 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4479 changed
= (scsi_debug_max_luns
!= n
);
4480 scsi_debug_max_luns
= n
;
4481 sdebug_max_tgts_luns();
4482 if (changed
&& (scsi_debug_scsi_level
>= 5)) { /* >= SPC-3 */
4483 struct sdebug_host_info
*sdhp
;
4484 struct sdebug_dev_info
*dp
;
4486 spin_lock(&sdebug_host_list_lock
);
4487 list_for_each_entry(sdhp
, &sdebug_host_list
,
4489 list_for_each_entry(dp
, &sdhp
->dev_info_list
,
4491 set_bit(SDEBUG_UA_LUNS_CHANGED
,
4495 spin_unlock(&sdebug_host_list_lock
);
4501 static DRIVER_ATTR_RW(max_luns
);
4503 static ssize_t
max_queue_show(struct device_driver
*ddp
, char *buf
)
4505 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_max_queue
);
4507 /* N.B. max_queue can be changed while there are queued commands. In flight
4508 * commands beyond the new max_queue will be completed. */
4509 static ssize_t
max_queue_store(struct device_driver
*ddp
, const char *buf
,
4512 unsigned long iflags
;
4515 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
> 0) &&
4516 (n
<= SCSI_DEBUG_CANQUEUE
)) {
4517 spin_lock_irqsave(&queued_arr_lock
, iflags
);
4518 k
= find_last_bit(queued_in_use_bm
, SCSI_DEBUG_CANQUEUE
);
4519 scsi_debug_max_queue
= n
;
4520 if (SCSI_DEBUG_CANQUEUE
== k
)
4521 atomic_set(&retired_max_queue
, 0);
4523 atomic_set(&retired_max_queue
, k
+ 1);
4525 atomic_set(&retired_max_queue
, 0);
4526 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
4531 static DRIVER_ATTR_RW(max_queue
);
4533 static ssize_t
no_uld_show(struct device_driver
*ddp
, char *buf
)
4535 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_no_uld
);
4537 static DRIVER_ATTR_RO(no_uld
);
4539 static ssize_t
scsi_level_show(struct device_driver
*ddp
, char *buf
)
4541 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_scsi_level
);
4543 static DRIVER_ATTR_RO(scsi_level
);
4545 static ssize_t
virtual_gb_show(struct device_driver
*ddp
, char *buf
)
4547 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_virtual_gb
);
4549 static ssize_t
virtual_gb_store(struct device_driver
*ddp
, const char *buf
,
4555 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4556 changed
= (scsi_debug_virtual_gb
!= n
);
4557 scsi_debug_virtual_gb
= n
;
4558 sdebug_capacity
= get_sdebug_capacity();
4560 struct sdebug_host_info
*sdhp
;
4561 struct sdebug_dev_info
*dp
;
4563 spin_lock(&sdebug_host_list_lock
);
4564 list_for_each_entry(sdhp
, &sdebug_host_list
,
4566 list_for_each_entry(dp
, &sdhp
->dev_info_list
,
4568 set_bit(SDEBUG_UA_CAPACITY_CHANGED
,
4572 spin_unlock(&sdebug_host_list_lock
);
4578 static DRIVER_ATTR_RW(virtual_gb
);
4580 static ssize_t
add_host_show(struct device_driver
*ddp
, char *buf
)
4582 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_add_host
);
4585 static ssize_t
add_host_store(struct device_driver
*ddp
, const char *buf
,
4590 if (sscanf(buf
, "%d", &delta_hosts
) != 1)
4592 if (delta_hosts
> 0) {
4594 sdebug_add_adapter();
4595 } while (--delta_hosts
);
4596 } else if (delta_hosts
< 0) {
4598 sdebug_remove_adapter();
4599 } while (++delta_hosts
);
4603 static DRIVER_ATTR_RW(add_host
);
4605 static ssize_t
vpd_use_hostno_show(struct device_driver
*ddp
, char *buf
)
4607 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_vpd_use_hostno
);
4609 static ssize_t
vpd_use_hostno_store(struct device_driver
*ddp
, const char *buf
,
4614 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4615 scsi_debug_vpd_use_hostno
= n
;
4620 static DRIVER_ATTR_RW(vpd_use_hostno
);
4622 static ssize_t
sector_size_show(struct device_driver
*ddp
, char *buf
)
4624 return scnprintf(buf
, PAGE_SIZE
, "%u\n", scsi_debug_sector_size
);
4626 static DRIVER_ATTR_RO(sector_size
);
4628 static ssize_t
dix_show(struct device_driver
*ddp
, char *buf
)
4630 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dix
);
4632 static DRIVER_ATTR_RO(dix
);
4634 static ssize_t
dif_show(struct device_driver
*ddp
, char *buf
)
4636 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dif
);
4638 static DRIVER_ATTR_RO(dif
);
4640 static ssize_t
guard_show(struct device_driver
*ddp
, char *buf
)
4642 return scnprintf(buf
, PAGE_SIZE
, "%u\n", scsi_debug_guard
);
4644 static DRIVER_ATTR_RO(guard
);
4646 static ssize_t
ato_show(struct device_driver
*ddp
, char *buf
)
4648 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_ato
);
4650 static DRIVER_ATTR_RO(ato
);
4652 static ssize_t
map_show(struct device_driver
*ddp
, char *buf
)
4656 if (!scsi_debug_lbp())
4657 return scnprintf(buf
, PAGE_SIZE
, "0-%u\n",
4658 sdebug_store_sectors
);
4660 count
= scnprintf(buf
, PAGE_SIZE
- 1, "%*pbl",
4661 (int)map_size
, map_storep
);
4662 buf
[count
++] = '\n';
4667 static DRIVER_ATTR_RO(map
);
4669 static ssize_t
removable_show(struct device_driver
*ddp
, char *buf
)
4671 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_removable
? 1 : 0);
4673 static ssize_t
removable_store(struct device_driver
*ddp
, const char *buf
,
4678 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4679 scsi_debug_removable
= (n
> 0);
4684 static DRIVER_ATTR_RW(removable
);
4686 static ssize_t
host_lock_show(struct device_driver
*ddp
, char *buf
)
4688 return scnprintf(buf
, PAGE_SIZE
, "%d\n", !!scsi_debug_host_lock
);
4690 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4691 static ssize_t
host_lock_store(struct device_driver
*ddp
, const char *buf
,
4696 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4697 bool new_host_lock
= (n
> 0);
4700 if (new_host_lock
!= scsi_debug_host_lock
) {
4701 unsigned long iflags
;
4704 spin_lock_irqsave(&queued_arr_lock
, iflags
);
4705 k
= find_first_bit(queued_in_use_bm
,
4706 scsi_debug_max_queue
);
4707 if (k
!= scsi_debug_max_queue
)
4708 res
= -EBUSY
; /* have queued commands */
4710 scsi_debug_host_lock
= new_host_lock
;
4711 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
4717 static DRIVER_ATTR_RW(host_lock
);
4719 static ssize_t
strict_show(struct device_driver
*ddp
, char *buf
)
4721 return scnprintf(buf
, PAGE_SIZE
, "%d\n", !!scsi_debug_strict
);
4723 static ssize_t
strict_store(struct device_driver
*ddp
, const char *buf
,
4728 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
4729 scsi_debug_strict
= (n
> 0);
4734 static DRIVER_ATTR_RW(strict
);
4737 /* Note: The following array creates attribute files in the
4738 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4739 files (over those found in the /sys/module/scsi_debug/parameters
4740 directory) is that auxiliary actions can be triggered when an attribute
4741 is changed. For example see: sdebug_add_host_store() above.
4744 static struct attribute
*sdebug_drv_attrs
[] = {
4745 &driver_attr_delay
.attr
,
4746 &driver_attr_opts
.attr
,
4747 &driver_attr_ptype
.attr
,
4748 &driver_attr_dsense
.attr
,
4749 &driver_attr_fake_rw
.attr
,
4750 &driver_attr_no_lun_0
.attr
,
4751 &driver_attr_num_tgts
.attr
,
4752 &driver_attr_dev_size_mb
.attr
,
4753 &driver_attr_num_parts
.attr
,
4754 &driver_attr_every_nth
.attr
,
4755 &driver_attr_max_luns
.attr
,
4756 &driver_attr_max_queue
.attr
,
4757 &driver_attr_no_uld
.attr
,
4758 &driver_attr_scsi_level
.attr
,
4759 &driver_attr_virtual_gb
.attr
,
4760 &driver_attr_add_host
.attr
,
4761 &driver_attr_vpd_use_hostno
.attr
,
4762 &driver_attr_sector_size
.attr
,
4763 &driver_attr_dix
.attr
,
4764 &driver_attr_dif
.attr
,
4765 &driver_attr_guard
.attr
,
4766 &driver_attr_ato
.attr
,
4767 &driver_attr_map
.attr
,
4768 &driver_attr_removable
.attr
,
4769 &driver_attr_host_lock
.attr
,
4770 &driver_attr_ndelay
.attr
,
4771 &driver_attr_strict
.attr
,
4774 ATTRIBUTE_GROUPS(sdebug_drv
);
/* Parent device of all simulated adapters (root of the pseudo bus). */
static struct device *pseudo_primary;
4778 static int __init
scsi_debug_init(void)
4785 atomic_set(&sdebug_cmnd_count
, 0);
4786 atomic_set(&sdebug_completions
, 0);
4787 atomic_set(&retired_max_queue
, 0);
4789 if (scsi_debug_ndelay
>= 1000000000) {
4790 pr_warn("ndelay must be less than 1 second, ignored\n");
4791 scsi_debug_ndelay
= 0;
4792 } else if (scsi_debug_ndelay
> 0)
4793 scsi_debug_delay
= DELAY_OVERRIDDEN
;
4795 switch (scsi_debug_sector_size
) {
4802 pr_err("invalid sector_size %d\n", scsi_debug_sector_size
);
4806 switch (scsi_debug_dif
) {
4808 case SD_DIF_TYPE0_PROTECTION
:
4809 case SD_DIF_TYPE1_PROTECTION
:
4810 case SD_DIF_TYPE2_PROTECTION
:
4811 case SD_DIF_TYPE3_PROTECTION
:
4815 pr_err("dif must be 0, 1, 2 or 3\n");
4819 if (scsi_debug_guard
> 1) {
4820 pr_err("guard must be 0 or 1\n");
4824 if (scsi_debug_ato
> 1) {
4825 pr_err("ato must be 0 or 1\n");
4829 if (scsi_debug_physblk_exp
> 15) {
4830 pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp
);
4834 if (scsi_debug_lowest_aligned
> 0x3fff) {
4835 pr_err("lowest_aligned too big: %u\n",
4836 scsi_debug_lowest_aligned
);
4840 if (scsi_debug_dev_size_mb
< 1)
4841 scsi_debug_dev_size_mb
= 1; /* force minimum 1 MB ramdisk */
4842 sz
= (unsigned long)scsi_debug_dev_size_mb
* 1048576;
4843 sdebug_store_sectors
= sz
/ scsi_debug_sector_size
;
4844 sdebug_capacity
= get_sdebug_capacity();
4846 /* play around with geometry, don't waste too much on track 0 */
4848 sdebug_sectors_per
= 32;
4849 if (scsi_debug_dev_size_mb
>= 16)
4851 else if (scsi_debug_dev_size_mb
>= 256)
4853 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
4854 (sdebug_sectors_per
* sdebug_heads
);
4855 if (sdebug_cylinders_per
>= 1024) {
4856 /* other LLDs do this; implies >= 1GB ram disk ... */
4858 sdebug_sectors_per
= 63;
4859 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
4860 (sdebug_sectors_per
* sdebug_heads
);
4863 if (0 == scsi_debug_fake_rw
) {
4864 fake_storep
= vmalloc(sz
);
4865 if (NULL
== fake_storep
) {
4866 pr_err("out of memory, 1\n");
4869 memset(fake_storep
, 0, sz
);
4870 if (scsi_debug_num_parts
> 0)
4871 sdebug_build_parts(fake_storep
, sz
);
4874 if (scsi_debug_dix
) {
4877 dif_size
= sdebug_store_sectors
* sizeof(struct sd_dif_tuple
);
4878 dif_storep
= vmalloc(dif_size
);
4880 pr_err("dif_storep %u bytes @ %p\n", dif_size
, dif_storep
);
4882 if (dif_storep
== NULL
) {
4883 pr_err("out of mem. (DIX)\n");
4888 memset(dif_storep
, 0xff, dif_size
);
4891 /* Logical Block Provisioning */
4892 if (scsi_debug_lbp()) {
4893 scsi_debug_unmap_max_blocks
=
4894 clamp(scsi_debug_unmap_max_blocks
, 0U, 0xffffffffU
);
4896 scsi_debug_unmap_max_desc
=
4897 clamp(scsi_debug_unmap_max_desc
, 0U, 256U);
4899 scsi_debug_unmap_granularity
=
4900 clamp(scsi_debug_unmap_granularity
, 1U, 0xffffffffU
);
4902 if (scsi_debug_unmap_alignment
&&
4903 scsi_debug_unmap_granularity
<=
4904 scsi_debug_unmap_alignment
) {
4905 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
4909 map_size
= lba_to_map_index(sdebug_store_sectors
- 1) + 1;
4910 map_storep
= vmalloc(BITS_TO_LONGS(map_size
) * sizeof(long));
4912 pr_info("%lu provisioning blocks\n", map_size
);
4914 if (map_storep
== NULL
) {
4915 pr_err("out of mem. (MAP)\n");
4920 bitmap_zero(map_storep
, map_size
);
4922 /* Map first 1KB for partition table */
4923 if (scsi_debug_num_parts
)
4927 pseudo_primary
= root_device_register("pseudo_0");
4928 if (IS_ERR(pseudo_primary
)) {
4929 pr_warn("root_device_register() error\n");
4930 ret
= PTR_ERR(pseudo_primary
);
4933 ret
= bus_register(&pseudo_lld_bus
);
4935 pr_warn("bus_register error: %d\n", ret
);
4938 ret
= driver_register(&sdebug_driverfs_driver
);
4940 pr_warn("driver_register error: %d\n", ret
);
4944 host_to_add
= scsi_debug_add_host
;
4945 scsi_debug_add_host
= 0;
4947 for (k
= 0; k
< host_to_add
; k
++) {
4948 if (sdebug_add_adapter()) {
4949 pr_err("sdebug_add_adapter failed k=%d\n", k
);
4954 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
4955 pr_info("built %d host(s)\n", scsi_debug_add_host
);
4960 bus_unregister(&pseudo_lld_bus
);
4962 root_device_unregister(pseudo_primary
);
4971 static void __exit
scsi_debug_exit(void)
4973 int k
= scsi_debug_add_host
;
4978 sdebug_remove_adapter();
4979 driver_unregister(&sdebug_driverfs_driver
);
4980 bus_unregister(&pseudo_lld_bus
);
4981 root_device_unregister(pseudo_primary
);
4987 device_initcall(scsi_debug_init
);
4988 module_exit(scsi_debug_exit
);
/* Device-model release callback: frees the adapter's host info. */
static void sdebug_release_adapter(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
4998 static int sdebug_add_adapter(void)
5000 int k
, devs_per_host
;
5002 struct sdebug_host_info
*sdbg_host
;
5003 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
5005 sdbg_host
= kzalloc(sizeof(*sdbg_host
),GFP_KERNEL
);
5006 if (NULL
== sdbg_host
) {
5007 pr_err("out of memory at line %d\n", __LINE__
);
5011 INIT_LIST_HEAD(&sdbg_host
->dev_info_list
);
5013 devs_per_host
= scsi_debug_num_tgts
* scsi_debug_max_luns
;
5014 for (k
= 0; k
< devs_per_host
; k
++) {
5015 sdbg_devinfo
= sdebug_device_create(sdbg_host
, GFP_KERNEL
);
5016 if (!sdbg_devinfo
) {
5017 pr_err("out of memory at line %d\n", __LINE__
);
5023 spin_lock(&sdebug_host_list_lock
);
5024 list_add_tail(&sdbg_host
->host_list
, &sdebug_host_list
);
5025 spin_unlock(&sdebug_host_list_lock
);
5027 sdbg_host
->dev
.bus
= &pseudo_lld_bus
;
5028 sdbg_host
->dev
.parent
= pseudo_primary
;
5029 sdbg_host
->dev
.release
= &sdebug_release_adapter
;
5030 dev_set_name(&sdbg_host
->dev
, "adapter%d", scsi_debug_add_host
);
5032 error
= device_register(&sdbg_host
->dev
);
5037 ++scsi_debug_add_host
;
5041 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
5043 list_del(&sdbg_devinfo
->dev_list
);
5044 kfree(sdbg_devinfo
);
5051 static void sdebug_remove_adapter(void)
5053 struct sdebug_host_info
* sdbg_host
= NULL
;
5055 spin_lock(&sdebug_host_list_lock
);
5056 if (!list_empty(&sdebug_host_list
)) {
5057 sdbg_host
= list_entry(sdebug_host_list
.prev
,
5058 struct sdebug_host_info
, host_list
);
5059 list_del(&sdbg_host
->host_list
);
5061 spin_unlock(&sdebug_host_list_lock
);
5066 device_unregister(&sdbg_host
->dev
);
5067 --scsi_debug_add_host
;
5071 sdebug_change_qdepth(struct scsi_device
*sdev
, int qdepth
)
5074 unsigned long iflags
;
5075 struct sdebug_dev_info
*devip
;
5077 spin_lock_irqsave(&queued_arr_lock
, iflags
);
5078 devip
= (struct sdebug_dev_info
*)sdev
->hostdata
;
5079 if (NULL
== devip
) {
5080 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
5083 num_in_q
= atomic_read(&devip
->num_in_q
);
5084 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
5088 /* allow to exceed max host queued_arr elements for testing */
5089 if (qdepth
> SCSI_DEBUG_CANQUEUE
+ 10)
5090 qdepth
= SCSI_DEBUG_CANQUEUE
+ 10;
5091 scsi_change_queue_depth(sdev
, qdepth
);
5093 if (SCSI_DEBUG_OPT_Q_NOISE
& scsi_debug_opts
) {
5094 sdev_printk(KERN_INFO
, sdev
,
5095 "%s: qdepth=%d, num_in_q=%d\n",
5096 __func__
, qdepth
, num_in_q
);
5098 return sdev
->queue_depth
;
5102 check_inject(struct scsi_cmnd
*scp
)
5104 struct sdebug_scmd_extra_t
*ep
= scsi_cmd_priv(scp
);
5106 memset(ep
, 0, sizeof(struct sdebug_scmd_extra_t
));
5108 if (atomic_inc_return(&sdebug_cmnd_count
) >=
5109 abs(scsi_debug_every_nth
)) {
5110 atomic_set(&sdebug_cmnd_count
, 0);
5111 if (scsi_debug_every_nth
< -1)
5112 scsi_debug_every_nth
= -1;
5113 if (SCSI_DEBUG_OPT_TIMEOUT
& scsi_debug_opts
)
5114 return 1; /* ignore command causing timeout */
5115 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT
& scsi_debug_opts
&&
5116 scsi_medium_access_command(scp
))
5117 return 1; /* time out reads and writes */
5118 if (sdebug_any_injecting_opt
) {
5119 int opts
= scsi_debug_opts
;
5121 if (SCSI_DEBUG_OPT_RECOVERED_ERR
& opts
)
5122 ep
->inj_recovered
= true;
5123 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR
& opts
)
5124 ep
->inj_transport
= true;
5125 else if (SCSI_DEBUG_OPT_DIF_ERR
& opts
)
5127 else if (SCSI_DEBUG_OPT_DIX_ERR
& opts
)
5129 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER
& opts
)
5130 ep
->inj_short
= true;
5137 scsi_debug_queuecommand(struct scsi_cmnd
*scp
)
5140 struct scsi_device
*sdp
= scp
->device
;
5141 const struct opcode_info_t
*oip
;
5142 const struct opcode_info_t
*r_oip
;
5143 struct sdebug_dev_info
*devip
;
5144 u8
*cmd
= scp
->cmnd
;
5145 int (*r_pfp
)(struct scsi_cmnd
*, struct sdebug_dev_info
*);
5148 int errsts_no_connect
= DID_NO_CONNECT
<< 16;
5153 bool debug
= !!(SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
);
5155 scsi_set_resid(scp
, 0);
5156 if (debug
&& !(SCSI_DEBUG_OPT_NO_CDB_NOISE
& scsi_debug_opts
)) {
5161 sb
= (int)sizeof(b
);
5163 strcpy(b
, "too long, over 32 bytes");
5165 for (k
= 0, n
= 0; k
< len
&& n
< sb
; ++k
)
5166 n
+= scnprintf(b
+ n
, sb
- n
, "%02x ",
5169 sdev_printk(KERN_INFO
, sdp
, "%s: cmd %s\n", my_name
, b
);
5171 has_wlun_rl
= (sdp
->lun
== SCSI_W_LUN_REPORT_LUNS
);
5172 if ((sdp
->lun
>= scsi_debug_max_luns
) && !has_wlun_rl
)
5173 return schedule_resp(scp
, NULL
, errsts_no_connect
, 0);
5175 sdeb_i
= opcode_ind_arr
[opcode
]; /* fully mapped */
5176 oip
= &opcode_info_arr
[sdeb_i
]; /* safe if table consistent */
5177 devip
= (struct sdebug_dev_info
*)sdp
->hostdata
;
5179 devip
= devInfoReg(sdp
);
5181 return schedule_resp(scp
, NULL
, errsts_no_connect
, 0);
5183 na
= oip
->num_attached
;
5185 if (na
) { /* multiple commands with this opcode */
5187 if (FF_SA
& r_oip
->flags
) {
5188 if (F_SA_LOW
& oip
->flags
)
5191 sa
= get_unaligned_be16(cmd
+ 8);
5192 for (k
= 0; k
<= na
; oip
= r_oip
->arrp
+ k
++) {
5193 if (opcode
== oip
->opcode
&& sa
== oip
->sa
)
5196 } else { /* since no service action only check opcode */
5197 for (k
= 0; k
<= na
; oip
= r_oip
->arrp
+ k
++) {
5198 if (opcode
== oip
->opcode
)
5203 if (F_SA_LOW
& r_oip
->flags
)
5204 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 1, 4);
5205 else if (F_SA_HIGH
& r_oip
->flags
)
5206 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, 8, 7);
5208 mk_sense_invalid_opcode(scp
);
5211 } /* else (when na==0) we assume the oip is a match */
5213 if (F_INV_OP
& flags
) {
5214 mk_sense_invalid_opcode(scp
);
5217 if (has_wlun_rl
&& !(F_RL_WLUN_OK
& flags
)) {
5219 sdev_printk(KERN_INFO
, sdp
, "scsi_debug: Opcode: "
5220 "0x%x not supported for wlun\n", opcode
);
5221 mk_sense_invalid_opcode(scp
);
5224 if (scsi_debug_strict
) { /* check cdb against mask */
5228 for (k
= 1; k
< oip
->len_mask
[0] && k
< 16; ++k
) {
5229 rem
= ~oip
->len_mask
[k
] & cmd
[k
];
5231 for (j
= 7; j
>= 0; --j
, rem
<<= 1) {
5235 mk_sense_invalid_fld(scp
, SDEB_IN_CDB
, k
, j
);
5240 if (!(F_SKIP_UA
& flags
) &&
5241 SDEBUG_NUM_UAS
!= find_first_bit(devip
->uas_bm
, SDEBUG_NUM_UAS
)) {
5242 errsts
= check_readiness(scp
, UAS_ONLY
, devip
);
5246 if ((F_M_ACCESS
& flags
) && devip
->stopped
) {
5247 mk_sense_buffer(scp
, NOT_READY
, LOGICAL_UNIT_NOT_READY
, 0x2);
5249 sdev_printk(KERN_INFO
, sdp
, "%s reports: Not ready: "
5250 "%s\n", my_name
, "initializing command "
5252 errsts
= check_condition_result
;
5255 if (scsi_debug_fake_rw
&& (F_FAKE_RW
& flags
))
5257 if (scsi_debug_every_nth
) {
5258 if (check_inject(scp
))
5259 return 0; /* ignore command: make trouble */
5261 if (oip
->pfp
) /* if this command has a resp_* function, call it */
5262 errsts
= oip
->pfp(scp
, devip
);
5263 else if (r_pfp
) /* if leaf function ptr NULL, try the root's */
5264 errsts
= r_pfp(scp
, devip
);
5267 return schedule_resp(scp
, devip
, errsts
,
5268 ((F_DELAY_OVERR
& flags
) ? 0 : scsi_debug_delay
));
5270 return schedule_resp(scp
, devip
, check_condition_result
, 0);
5274 sdebug_queuecommand_lock_or_not(struct Scsi_Host
*shost
, struct scsi_cmnd
*cmd
)
5276 if (scsi_debug_host_lock
) {
5277 unsigned long iflags
;
5280 spin_lock_irqsave(shost
->host_lock
, iflags
);
5281 rc
= scsi_debug_queuecommand(cmd
);
5282 spin_unlock_irqrestore(shost
->host_lock
, iflags
);
5285 return scsi_debug_queuecommand(cmd
);
5288 static struct scsi_host_template sdebug_driver_template
= {
5289 .show_info
= scsi_debug_show_info
,
5290 .write_info
= scsi_debug_write_info
,
5291 .proc_name
= sdebug_proc_name
,
5292 .name
= "SCSI DEBUG",
5293 .info
= scsi_debug_info
,
5294 .slave_alloc
= scsi_debug_slave_alloc
,
5295 .slave_configure
= scsi_debug_slave_configure
,
5296 .slave_destroy
= scsi_debug_slave_destroy
,
5297 .ioctl
= scsi_debug_ioctl
,
5298 .queuecommand
= sdebug_queuecommand_lock_or_not
,
5299 .change_queue_depth
= sdebug_change_qdepth
,
5300 .eh_abort_handler
= scsi_debug_abort
,
5301 .eh_device_reset_handler
= scsi_debug_device_reset
,
5302 .eh_target_reset_handler
= scsi_debug_target_reset
,
5303 .eh_bus_reset_handler
= scsi_debug_bus_reset
,
5304 .eh_host_reset_handler
= scsi_debug_host_reset
,
5305 .can_queue
= SCSI_DEBUG_CANQUEUE
,
5307 .sg_tablesize
= SCSI_MAX_SG_CHAIN_SEGMENTS
,
5308 .cmd_per_lun
= DEF_CMD_PER_LUN
,
5310 .use_clustering
= DISABLE_CLUSTERING
,
5311 .module
= THIS_MODULE
,
5312 .track_queue_depth
= 1,
5313 .cmd_size
= sizeof(struct sdebug_scmd_extra_t
),
5316 static int sdebug_driver_probe(struct device
* dev
)
5320 struct sdebug_host_info
*sdbg_host
;
5321 struct Scsi_Host
*hpnt
;
5324 sdbg_host
= to_sdebug_host(dev
);
5326 sdebug_driver_template
.can_queue
= scsi_debug_max_queue
;
5327 if (scsi_debug_clustering
)
5328 sdebug_driver_template
.use_clustering
= ENABLE_CLUSTERING
;
5329 hpnt
= scsi_host_alloc(&sdebug_driver_template
, sizeof(sdbg_host
));
5331 pr_err("scsi_host_alloc failed\n");
5336 sdbg_host
->shost
= hpnt
;
5337 *((struct sdebug_host_info
**)hpnt
->hostdata
) = sdbg_host
;
5338 if ((hpnt
->this_id
>= 0) && (scsi_debug_num_tgts
> hpnt
->this_id
))
5339 hpnt
->max_id
= scsi_debug_num_tgts
+ 1;
5341 hpnt
->max_id
= scsi_debug_num_tgts
;
5342 /* = scsi_debug_max_luns; */
5343 hpnt
->max_lun
= SCSI_W_LUN_REPORT_LUNS
+ 1;
5347 switch (scsi_debug_dif
) {
5349 case SD_DIF_TYPE1_PROTECTION
:
5350 host_prot
= SHOST_DIF_TYPE1_PROTECTION
;
5352 host_prot
|= SHOST_DIX_TYPE1_PROTECTION
;
5355 case SD_DIF_TYPE2_PROTECTION
:
5356 host_prot
= SHOST_DIF_TYPE2_PROTECTION
;
5358 host_prot
|= SHOST_DIX_TYPE2_PROTECTION
;
5361 case SD_DIF_TYPE3_PROTECTION
:
5362 host_prot
= SHOST_DIF_TYPE3_PROTECTION
;
5364 host_prot
|= SHOST_DIX_TYPE3_PROTECTION
;
5369 host_prot
|= SHOST_DIX_TYPE0_PROTECTION
;
5373 scsi_host_set_prot(hpnt
, host_prot
);
5375 pr_info("host protection%s%s%s%s%s%s%s\n",
5376 (host_prot
& SHOST_DIF_TYPE1_PROTECTION
) ? " DIF1" : "",
5377 (host_prot
& SHOST_DIF_TYPE2_PROTECTION
) ? " DIF2" : "",
5378 (host_prot
& SHOST_DIF_TYPE3_PROTECTION
) ? " DIF3" : "",
5379 (host_prot
& SHOST_DIX_TYPE0_PROTECTION
) ? " DIX0" : "",
5380 (host_prot
& SHOST_DIX_TYPE1_PROTECTION
) ? " DIX1" : "",
5381 (host_prot
& SHOST_DIX_TYPE2_PROTECTION
) ? " DIX2" : "",
5382 (host_prot
& SHOST_DIX_TYPE3_PROTECTION
) ? " DIX3" : "");
5384 if (scsi_debug_guard
== 1)
5385 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_IP
);
5387 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_CRC
);
5389 opts
= scsi_debug_opts
;
5390 if (SCSI_DEBUG_OPT_RECOVERED_ERR
& opts
)
5391 sdebug_any_injecting_opt
= true;
5392 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR
& opts
)
5393 sdebug_any_injecting_opt
= true;
5394 else if (SCSI_DEBUG_OPT_DIF_ERR
& opts
)
5395 sdebug_any_injecting_opt
= true;
5396 else if (SCSI_DEBUG_OPT_DIX_ERR
& opts
)
5397 sdebug_any_injecting_opt
= true;
5398 else if (SCSI_DEBUG_OPT_SHORT_TRANSFER
& opts
)
5399 sdebug_any_injecting_opt
= true;
5401 error
= scsi_add_host(hpnt
, &sdbg_host
->dev
);
5403 pr_err("scsi_add_host failed\n");
5405 scsi_host_put(hpnt
);
5407 scsi_scan_host(hpnt
);
5412 static int sdebug_driver_remove(struct device
* dev
)
5414 struct sdebug_host_info
*sdbg_host
;
5415 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
5417 sdbg_host
= to_sdebug_host(dev
);
5420 pr_err("Unable to locate host info\n");
5424 scsi_remove_host(sdbg_host
->shost
);
5426 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
5428 list_del(&sdbg_devinfo
->dev_list
);
5429 kfree(sdbg_devinfo
);
5432 scsi_host_put(sdbg_host
->shost
);
/* Pseudo-bus match callback: every device matches every driver. */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
5442 static struct bus_type pseudo_lld_bus
= {
5444 .match
= pseudo_lld_bus_match
,
5445 .probe
= sdebug_driver_probe
,
5446 .remove
= sdebug_driver_remove
,
5447 .drv_groups
= sdebug_drv_groups
,