/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 * Copyright (C) 1992 Eric Youngdale
 * Simulate a host adapter with 2 disks attached. Do a lot of checking
 * to make sure that we are not getting blocks mixed up, and PANIC if
 * anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2017 Douglas Gilbert
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * For documentation see http://sg.danny.cz/sg/sdebug26.html
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0187"	/* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20171202";

#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define LBA_OUT_OF_RANGE 0x21
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
#define BUS_RESET_ASCQ 0x2	/* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1	/* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1	/* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
#define DEF_MAX_LUNS 1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1		/* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
#define DEF_DIX 0
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
#define DEF_NDELAY 0		/* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7	/* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
#define DEF_STATISTICS false
#define DEF_SUBMIT_QUEUES 1
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999

#define SDEBUG_LUN_0_VAL 0
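
/*
 * Illustrative example (editor's note, not part of the original source):
 * most of the DEF_* values above back module parameters of the same name
 * (minus the DEF_ prefix), so a larger simulated setup can be requested at
 * load time, e.g.:
 *     modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 */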
/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE		1
#define SDEBUG_OPT_MEDIUM_ERR		2
#define SDEBUG_OPT_TIMEOUT		4
#define SDEBUG_OPT_RECOVERED_ERR	8
#define SDEBUG_OPT_TRANSPORT_ERR	16
#define SDEBUG_OPT_DIF_ERR		32
#define SDEBUG_OPT_DIX_ERR		64
#define SDEBUG_OPT_MAC_TIMEOUT		128
#define SDEBUG_OPT_SHORT_TRANSFER	0x100
#define SDEBUG_OPT_Q_NOISE		0x200
#define SDEBUG_OPT_ALL_TSF		0x400
#define SDEBUG_OPT_RARE_TSF		0x800
#define SDEBUG_OPT_N_WCE		0x1000
#define SDEBUG_OPT_RESET_NOISE		0x2000
#define SDEBUG_OPT_NO_CDB_NOISE		0x4000
#define SDEBUG_OPT_HOST_BUSY		0x8000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
			      SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
				  SDEBUG_OPT_TRANSPORT_ERR | \
				  SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
				  SDEBUG_OPT_SHORT_TRANSFER | \
				  SDEBUG_OPT_HOST_BUSY)
/* When "every_nth" > 0 then modulo "every_nth" commands:
 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 * - a RECOVERED_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_RECOVERED_ERR is set.
 * - a TRANSPORT_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 *
 * When "every_nth" < 0 then after "- every_nth" commands:
 * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
 * - a RECOVERED_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_RECOVERED_ERR is set.
 * - a TRANSPORT_ERROR is simulated on successful read and write
 *   commands if SDEBUG_OPT_TRANSPORT_ERR is set.
 * This will continue on every subsequent command until some other action
 * occurs (e.g. the user writing a new value (other than -1 or 1) to
 * every_nth via sysfs).
 */
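
/*
 * Worked example (editor's illustration): loading with every_nth=100 and
 * opts=4 (SDEBUG_OPT_TIMEOUT) makes every 100th command go unanswered,
 * which is a convenient way to exercise the SCSI error handler.
 */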
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0		/* Power on, reset, or bus device reset */
#define SDEBUG_UA_BUS_RESET 1
#define SDEBUG_UA_MODE_CHANGED 2
#define SDEBUG_UA_CAPACITY_CHANGED 3
#define SDEBUG_UA_LUNS_CHANGED 4
#define SDEBUG_UA_MICROCODE_CHANGED 5	/* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234	/* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10		/* number of consecutive medium errs */

/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 * or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE .
 */
#define SDEBUG_CANQUEUE_WORDS 3	/* a WORD is bits in a long */
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255
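
/* For example (editor's note), on a typical 64-bit build BITS_PER_LONG is
 * 64, so SDEBUG_CANQUEUE works out to 3 * 64 = 192 queued commands per
 * submit queue. */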
#define F_D_IN			1
#define F_D_OUT			2
#define F_D_OUT_MAYBE		4	/* WRITE SAME, NDOB bit */
#define F_D_UNKN		8
#define F_RL_WLUN_OK		0x10
#define F_SKIP_UA		0x20
#define F_DELAY_OVERR		0x40
#define F_SA_LOW		0x80	/* cdb byte 1, bits 4 to 0 */
#define F_SA_HIGH		0x100	/* as used by variable length cdbs */
#define F_INV_OP		0x200
#define F_FAKE_RW		0x400
#define F_M_ACCESS		0x800	/* media access */

#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_MAX_CMD_LEN 32
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;
	struct sdebug_host_info *sdbg_host;
	unsigned long uas_bm[1];
	atomic_t num_in_q;
	atomic_t stopped;
	bool used;
};

struct sdebug_host_info {
	struct list_head host_list;
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2};
struct sdebug_defer {
	struct hrtimer hrt;
	struct execute_work ew;
	int sqa_idx;	/* index of sdebug_queue array */
	int qc_idx;	/* index of sdebug_queued_cmd array within sqa_idx */
	int issuing_cpu;
	bool init_hrt;
	bool init_wq;
	enum sdeb_defer_type defer_t;
};

struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 */
	struct sdebug_defer *sd_dp;
	struct scsi_cmnd *a_cmnd;
	unsigned int inj_recovered:1;
	unsigned int inj_transport:1;
	unsigned int inj_dif:1;
	unsigned int inj_dix:1;
	unsigned int inj_short:1;
	unsigned int inj_host_busy:1;
};

struct sdebug_queue {
	struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
	unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
	spinlock_t qc_lock;
	atomic_t blocked;	/* to temporarily stop more being queued */
};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* 10 only */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_XDWRITEREAD = 25,	/* 10 only */
	SDEB_I_WRITE_BUFFER = 26,
	SDEB_I_WRITE_SAME = 27,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 28,		/* 10 only */
	SDEB_I_COMP_WRITE = 29,
	SDEB_I_LAST_ELEMENT = 30,	/* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	    SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	    0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
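
/*
 * Example lookup (editor's illustration): READ(10) has opcode 0x28, so
 * opcode_ind_arr[0x28] is SDEB_I_READ, which selects the READ entry in
 * opcode_info_arr[] below; the preferred READ(16) sits in that entry and
 * the 10-byte variant lives in the read_iarr[] overflow array.
 */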
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,	/* MODE SENSE(6) */
	    {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,	/* MODE SELECT(6) */
	    {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};

static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
};

static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};

static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} }, /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};

static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* WRITE SAME(16) */
};

static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,	/* RESERVE(6) */
	    {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};

static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,	/* RELEASE(6) */
	    {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 0 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* unknown opcodes */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,	/* INQUIRY */
	    {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
	    {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },				/* REPORT LUNS */
	{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
	    {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
	    {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
	{ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,	/* MODE SENSE(10) */
	    resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
		0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,	/* MODE SELECT(10) */
	    resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
		0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
	{0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,	/* LOG SENSE */
	    {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
	     0, 0, 0} },
	{0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
	    {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
	     0, 0} },
	{ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
	    resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
	    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
	{ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
	    resp_write_dt0, write_iarr,		/* WRITE(16) */
		{16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
	{0, 0x1b, 0, 0, resp_start_stop, NULL,	/* START STOP UNIT */
	    {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
	    resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
		{16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
	{0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
		   0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
	{ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
	    resp_report_tgtpgs,	/* MAINT IN, REPORT TARGET PORT GROUPS */
		maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
				0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* MAINT OUT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
	     0, 0, 0, 0, 0, 0} },
	{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
	    resp_read_dt0, vl_iarr,	/* VARIABLE LENGTH, READ(32) */
	    {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
	     0xff, 0xff} },
	{ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
	    NULL, reserve_iarr,	/* RESERVE(10) <no response function> */
	    {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
	{ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
	    NULL, release_iarr,	/* RELEASE(10) <no response function> */
	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
	     0} },
/* 20 */
	{0, 0x1e, 0, 0, NULL, NULL,	/* ALLOW REMOVAL */
	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1, 0, 0, resp_start_stop, NULL,	/* REWIND ?? */
	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,	/* ATA_PT */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x1d, 0, F_D_OUT, NULL, NULL,	/* SEND DIAGNOSTIC */
	    {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL,	/* UNMAP */
	    {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
	{0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
	    NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },		/* XDWRITEREAD(10) */
	{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },			/* WRITE_BUFFER */
	{ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
	    resp_write_same_10, write_same_iarr,	/* WRITE SAME(10) */
		{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
		 0, 0, 0, 0, 0} },
	{0, 0x35, 0, F_DELAY_OVERR | FF_MEDIA_IO, NULL, NULL, /* SYNC_CACHE */
	    {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
	    {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
	     0, 0xff, 0x3f, 0xc7} },		/* COMPARE AND WRITE */

/* 30 */
	{0xff, 0, 0, 0, NULL, NULL,		/* terminating element */
	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
static int sdebug_every_nth = DEF_EVERY_NTH;
static int sdebug_fake_rw = DEF_FAKE_RW;
static unsigned int sdebug_guard = DEF_GUARD;
static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE;	/* per submit queue */
static atomic_t retired_max_queue;	/* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
static int sdebug_num_parts = DEF_NUM_PARTS;
static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int sdebug_opt_blks = DEF_OPT_BLKS;
static int sdebug_opts = DEF_OPTS;
static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
static int sdebug_scsi_level = DEF_SCSI_LEVEL;
static int sdebug_sector_size = DEF_SECTOR_SIZE;
static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static unsigned int sdebug_lbpu = DEF_LBPU;
static unsigned int sdebug_lbpws = DEF_LBPWS;
static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
static unsigned int sdebug_lbprz = DEF_LBPRZ;
static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_mq_active;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

static unsigned char *fake_storep;	/* ramdisk storage */
static struct t10_pi_tuple *dif_storep;	/* protection info */
static void *map_storep;		/* provisioning map */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static int submit_queues = DEF_SUBMIT_QUEUES;	/* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr;  /* ptr to array of submit queues */

static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
static struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver = {
	.name		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;

static const int device_qfull_result =
	(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{
	return 0 == sdebug_fake_rw &&
		(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
static void *fake_store(unsigned long long lba)
{
	lba = do_div(lba, sdebug_store_sectors);

	return fake_storep + lba * sdebug_sector_size;
}
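
/* Note (editor's): fake_store() takes the LBA modulo sdebug_store_sectors,
 * so a virtual_gb capacity larger than the RAM store simply aliases back
 * onto the same backing pages rather than allocating more memory. */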
static struct t10_pi_tuple *dif_store(sector_t sector)
{
	sector = sector_div(sector, sdebug_store_sectors);

	return dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (sdebug_num_tgts > hpnt->this_id))
			hpnt->max_id = sdebug_num_tgts + 1;
		else
			hpnt->max_id = sdebug_num_tgts;
		/* sdebug_max_luns; */
		hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
	}
	spin_unlock(&sdebug_host_list_lock);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{
	unsigned char *sbuff;
	u8 sks[4];
	int sl, asc;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
	memset(sks, 0, sizeof(sks));
	sks[0] = 0x80;		/* SKSV: sense-key specific field valid */
	if (c_d)
		sks[0] |= 0x40;	/* C/D: error is in the CDB, not the data */
	if (in_bit >= 0) {
		sks[0] |= 0x8;	/* BPV: bit pointer valid */
		sks[0] |= 0x7 & in_bit;
	}
	put_unaligned_be16(in_byte, sks + 1);
	if (sdebug_dsense) {
		sl = sbuff[7] + 8;
		sbuff[7] = sl;
		sbuff[sl] = 0x2;
		sbuff[sl + 1] = 0x6;
		memcpy(sbuff + sl + 4, sks, 3);
	} else
		memcpy(sbuff + 15, sks, 3);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
			    my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
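
/*
 * Example (editor's illustration): for an unsupported EVPD page code,
 * resp_inquiry() below calls mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1),
 * which yields ILLEGAL REQUEST with asc INVALID_FIELD_IN_CDB (0x24) and a
 * sense-key specific field pointer naming CDB byte 2.
 */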
static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = scp->sense_buffer;
	if (!sbuff) {
		sdev_printk(KERN_ERR, scp->device,
			    "%s: sense_buffer is NULL\n", __func__);
		return;
	}
	memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);

	scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);

	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			    "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
			    my_name, key, asc, asq);
}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
	mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	if (sdebug_verbose) {
		if (0x1261 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: BLKFLSBUF [0x1261]\n", __func__);
		else if (0x5331 == cmd)
			sdev_printk(KERN_INFO, dev,
				    "%s: CDROM_GET_CAPABILITY [0x5331]\n",
				    __func__);
		else
			sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
				    __func__, cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static void config_cdb_len(struct scsi_device *sdev)
{
	switch (sdebug_cdb_len) {
	case 6:	/* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		break;
	case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = true;
		break;
	case 16:
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	case 32: /* No knobs to suggest this so same as 16 for now */
		sdev->use_10_for_rw = false;
		sdev->use_16_for_rw = true;
		sdev->use_10_for_ms = true;
		break;
	default:
		pr_warn("unexpected cdb_len=%d, force to 10\n",
			sdebug_cdb_len);
		sdev->use_10_for_rw = true;
		sdev->use_16_for_rw = false;
		sdev->use_10_for_ms = false;
		sdebug_cdb_len = 10;
		break;
	}
}
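
/* For example (editor's note), loading with cdb_len=16 steers the SCSI
 * mid-level toward READ(16)/WRITE(16) plus 10-byte MODE SENSE/SELECT, per
 * the case labels above; all_config_cdb_len() below re-applies the choice
 * to every attached device when the value is changed at run time. */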
static void all_config_cdb_len(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		shost = sdbg_host->shost;
		shost_for_each_device(sdev, shost) {
			config_cdb_len(sdev);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
	struct sdebug_host_info *sdhp;
	struct sdebug_dev_info *dp;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
		list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
			if ((devip->sdbg_host == dp->sdbg_host) &&
			    (devip->target == dp->target))
				clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
		}
	}
	spin_unlock(&sdebug_host_list_lock);
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int k;

	k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
	if (k != SDEBUG_NUM_UAS) {
		const char *cp = NULL;

		switch (k) {
		case SDEBUG_UA_POR:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					POWER_ON_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "power on reset";
			break;
		case SDEBUG_UA_BUS_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
					BUS_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "bus reset";
			break;
		case SDEBUG_UA_MODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					MODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "mode parameters changed";
			break;
		case SDEBUG_UA_CAPACITY_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
					CAPACITY_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "capacity data changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed";
			break;
		case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					MICROCODE_CHANGED_WO_RESET_ASCQ);
			if (sdebug_verbose)
				cp = "microcode has been changed without reset";
			break;
		case SDEBUG_UA_LUNS_CHANGED:
			/*
			 * SPC-3 behavior is to report a UNIT ATTENTION with
			 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
			 * on the target, until a REPORT LUNS command is
			 * received. SPC-4 behavior is to report it only once.
			 * NOTE: sdebug_scsi_level does not use the same
			 * values as struct scsi_device->scsi_level.
			 */
			if (sdebug_scsi_level >= 6)	/* SPC-4 and above */
				clear_luns_changed_on_target(devip);
			mk_sense_buffer(scp, UNIT_ATTENTION,
					TARGET_CHANGED_ASC,
					LUNS_CHANGED_ASCQ);
			if (sdebug_verbose)
				cp = "reported luns data has changed";
			break;
		default:
			pr_warn("unexpected unit attention code=%d\n", k);
			if (sdebug_verbose)
				cp = "unknown";
			break;
		}
		clear_bit(k, devip->uas_bm);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				    "%s reports: Unit attention: %s\n",
				    my_name, cp);
		return check_condition_result;
	}
	return 0;
}
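
/* Example flow (editor's note): after a bus reset sets SDEBUG_UA_BUS_RESET
 * in uas_bm, the next command on that device returns CHECK CONDITION with
 * UNIT ATTENTION sense, asc/ascq 0x29/0x2 (UA_RESET_ASC/BUS_RESET_ASCQ),
 * and the bit is then cleared so the UA is reported only once. */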
/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{
	int act_len, n;
	struct scsi_data_buffer *sdb = scsi_in(scp);
	off_t skip = off_dst;

	if (sdb->length <= off_dst)
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
				       arr, arr_len, skip);
	pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
		 __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
	n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
	sdb->resid = min(sdb->resid, n);
	return 0;
}
/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static char sdebug_inq_vendor_id[9] = "Linux   ";	/* 8 chars, space padded */
static char sdebug_inq_product_id[17] = "scsi_debug      ";	/* 16 chars */
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22, 0x22, 0x22, 0x0, 0xbb, 0x0,
    0x22, 0x22, 0x22, 0x0, 0xbb, 0x1,
    0x22, 0x22, 0x22, 0x0, 0xbb, 0x2,
};

/* Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;
	const char *na1 = "https://www.kernel.org/config";
	const char *na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}
/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-3 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-3 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	put_unaligned_be64(naa3_comp_a + port_b, arr + num);
	num += 8;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0, 0, 0, 0,
'l', 'i', 'n', 'u', 'x', ' ', ' ', ' ',
'S', 'A', 'T', ' ', 's', 'c', 's', 'i', '_', 'd', 'e', 'b', 'u', 'g', ' ', ' ',
'1', '2', '3', '4',
0x34, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0xec, 0, 0, 0,
0x5a, 0xc, 0xff, 0x3f, 0x37, 0xc8, 0x10, 0, 0, 0, 0, 0, 0x3f, 0, 0, 0,
0, 0, 0, 0, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x58, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0, 0, 0, 0x40, 0x4, 0, 0x2e, 0x33,
0x38, 0x31, 0x20, 0x20, 0x20, 0x20, 0x54, 0x53, 0x38, 0x33, 0x30, 0x30, 0x33, 0x31,
0x53, 0x41,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x10, 0x80,
0, 0, 0, 0x2f, 0, 0, 0, 0x2, 0, 0x2, 0x7, 0, 0xff, 0xff, 0x1, 0,
0x3f, 0, 0xc1, 0xff, 0x3e, 0, 0x10, 0x1, 0xb0, 0xf8, 0x50, 0x9, 0, 0, 0x7, 0,
0x3, 0, 0x78, 0, 0x78, 0, 0xf0, 0, 0x78, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0x2, 0, 0, 0, 0, 0, 0, 0,
0x7e, 0, 0x1b, 0, 0x6b, 0x34, 0x1, 0x7d, 0x3, 0x40, 0x69, 0x34, 0x1, 0x3c, 0x3, 0x40,
0x7f, 0x40, 0, 0, 0, 0, 0xfe, 0xfe, 0, 0, 0, 0, 0, 0xfe, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0xb0, 0xf8, 0x50, 0x9, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x1, 0, 0xb0, 0xf8, 0x50, 0x9, 0xb0, 0xf8, 0x50, 0x9, 0x20, 0x20, 0x2, 0, 0xb6, 0x42,
0, 0x80, 0x8a, 0, 0x6, 0x3c, 0xa, 0x3c, 0xff, 0xff, 0xc6, 0x7, 0, 0x1, 0, 0x8,
0xf0, 0xf, 0, 0x10, 0x2, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0x6, 0xfe,
0, 0, 0x2, 0, 0x50, 0, 0x8a, 0, 0x4f, 0x95, 0, 0, 0x21, 0, 0xb, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xa5, 0x51,
};

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0, 0, 0, 4, 0, 0, 0x4, 0, 0, 0, 0, 64,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
1292 static int inquiry_vpd_b0(unsigned char *arr)
1294 unsigned int gran;
1296 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1298 /* Optimal transfer length granularity */
1299 if (sdebug_opt_xferlen_exp != 0 &&
1300 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1301 gran = 1 << sdebug_opt_xferlen_exp;
1302 else
1303 gran = 1 << sdebug_physblk_exp;
1304 put_unaligned_be16(gran, arr + 2);
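	/* Worked example (editor's note): physblk_exp=3 (4 KiB physical
	 * blocks behind 512-byte logical blocks) with opt_xferlen_exp=0
	 * gives gran = 1 << 3 = 8 logical blocks. */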
	/* Maximum Transfer Length */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */

	return sizeof(vpdb0_data);
}
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{
	memset(arr, 0, 0x4);
	arr[0] = 0;	/* threshold exponent */
	if (sdebug_lbpu)
		arr[1] = 1 << 7;
	if (sdebug_lbpws)
		arr[1] |= 1 << 6;
	if (sdebug_lbpws10)
		arr[1] |= 1 << 5;
	if (sdebug_lbprz && scsi_debug_lbp())
		arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
	/* anc_sup=0; dp=0 (no provisioning group descriptor) */
	/* minimum_percentage=0; provisioning_type=0 (unknown) */
	/* threshold_percentage=0 */
	return 0x4;
}
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	int alloc_len, n, ret;
	bool have_wlun, is_disk;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (!arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	have_wlun = scsi_is_wlun(scp->device->lun);
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/* sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk) {	  /* SBC only */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				arr[n++] = 0xb2;  /* Logical Block Prov */
			}
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;   /* no protection stuff */
			arr[5] = 0x7;   /* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	/* protocol specific lu */
			arr[10] = 0x82;	/* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];	/* sanity */
			n = inquiry_vpd_89(&arr[4]);
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b1(&arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[1] = cmd[2];	/* sanity */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
					   min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
				   min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
1523 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1524 0, 0, 0x0, 0x0};
1526 static int resp_requests(struct scsi_cmnd * scp,
1527 struct sdebug_dev_info * devip)
1529 unsigned char * sbuff;
1530 unsigned char *cmd = scp->cmnd;
1531 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1532 bool dsense;
1533 int len = 18;
1535 memset(arr, 0, sizeof(arr));
1536 dsense = !!(cmd[1] & 1);
1537 sbuff = scp->sense_buffer;
1538 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1539 if (dsense) {
1540 arr[0] = 0x72;
1541 arr[1] = 0x0; /* NO_SENSE in sense_key */
1542 arr[2] = THRESHOLD_EXCEEDED;
1543 arr[3] = 0xff; /* TEST set and MRIE==6 */
1544 len = 8;
1545 } else {
1546 arr[0] = 0x70;
1547 arr[2] = 0x0; /* NO_SENSE in sense_key */
1548 arr[7] = 0xa; /* 18 byte sense buffer */
1549 arr[12] = THRESHOLD_EXCEEDED;
1550 arr[13] = 0xff; /* TEST set and MRIE==6 */
1552 } else {
1553 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1554 if (arr[0] >= 0x70 && dsense == sdebug_dsense)
1555 ; /* have sense and formats match */
1556 else if (arr[0] <= 0x70) {
1557 if (dsense) {
1558 memset(arr, 0, 8);
1559 arr[0] = 0x72;
1560 len = 8;
1561 } else {
1562 memset(arr, 0, 18);
1563 arr[0] = 0x70;
1564 arr[7] = 0xa;
1565 }
1566 } else if (dsense) {
1567 memset(arr, 0, 8);
1568 arr[0] = 0x72;
1569 arr[1] = sbuff[2]; /* sense key */
1570 arr[2] = sbuff[12]; /* asc */
1571 arr[3] = sbuff[13]; /* ascq */
1572 len = 8;
1573 } else {
1574 memset(arr, 0, 18);
1575 arr[0] = 0x70;
1576 arr[2] = sbuff[1]; /* sense key */
1577 arr[7] = 0xa;
1578 arr[12] = sbuff[2]; /* asc */
1579 arr[13] = sbuff[3]; /* ascq */
1580 }
1581 }
1583 mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1584 return fill_from_dev_buffer(scp, arr, len);
1585 }
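/*
 * Sense data layouts produced above (this driver appends no extra
 * descriptors after the descriptor format header):
 *   fixed (0x70):      byte 2 = sense key, byte 7 = additional length
 *                      (0xa), byte 12 = ASC, byte 13 = ASCQ; 18 bytes
 *   descriptor (0x72): byte 1 = sense key, byte 2 = ASC, byte 3 = ASCQ;
 *                      8 byte header only
 */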
1587 static int resp_start_stop(struct scsi_cmnd * scp,
1588 struct sdebug_dev_info * devip)
1589 {
1590 unsigned char *cmd = scp->cmnd;
1591 int power_cond, stop;
1593 power_cond = (cmd[4] & 0xf0) >> 4;
1594 if (power_cond) {
1595 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1596 return check_condition_result;
1597 }
1598 stop = !(cmd[4] & 1);
1599 atomic_xchg(&devip->stopped, stop);
1600 return 0;
1601 }
1603 static sector_t get_sdebug_capacity(void)
1604 {
1605 static const unsigned int gibibyte = 1073741824;
1607 if (sdebug_virtual_gb > 0)
1608 return (sector_t)sdebug_virtual_gb *
1609 (gibibyte / sdebug_sector_size);
1610 else
1611 return sdebug_store_sectors;
1612 }
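/*
 * Worked example: "modprobe scsi_debug virtual_gb=4" with the default
 * 512 byte sectors reports 4 * (1073741824 / 512) = 8388608 sectors,
 * regardless of how many sectors the fake store actually holds;
 * do_device_access() below wraps accesses beyond the store.
 */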
1614 #define SDEBUG_READCAP_ARR_SZ 8
1615 static int resp_readcap(struct scsi_cmnd * scp,
1616 struct sdebug_dev_info * devip)
1617 {
1618 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1619 unsigned int capac;
1621 /* following just in case virtual_gb changed */
1622 sdebug_capacity = get_sdebug_capacity();
1623 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1624 if (sdebug_capacity < 0xffffffff) {
1625 capac = (unsigned int)sdebug_capacity - 1;
1626 put_unaligned_be32(capac, arr + 0);
1627 } else
1628 put_unaligned_be32(0xffffffff, arr + 0);
1629 put_unaligned_be16(sdebug_sector_size, arr + 6);
1630 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1631 }
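/*
 * Per SBC, READ CAPACITY(10) returns the LBA of the last logical
 * block, so a capacity of N sectors is reported as N - 1; the value
 * 0xffffffff tells the initiator to retry with READ CAPACITY(16).
 */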
1633 #define SDEBUG_READCAP16_ARR_SZ 32
1634 static int resp_readcap16(struct scsi_cmnd * scp,
1635 struct sdebug_dev_info * devip)
1636 {
1637 unsigned char *cmd = scp->cmnd;
1638 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1639 int alloc_len;
1641 alloc_len = get_unaligned_be32(cmd + 10);
1642 /* following just in case virtual_gb changed */
1643 sdebug_capacity = get_sdebug_capacity();
1644 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1645 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1646 put_unaligned_be32(sdebug_sector_size, arr + 8);
1647 arr[13] = sdebug_physblk_exp & 0xf;
1648 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1650 if (scsi_debug_lbp()) {
1651 arr[14] |= 0x80; /* LBPME */
1652 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1653 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1654 * in the wider field maps to 0 in this field.
1655 */
1656 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1657 arr[14] |= 0x40;
1658 }
1660 arr[15] = sdebug_lowest_aligned & 0xff;
1662 if (have_dif_prot) {
1663 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1664 arr[12] |= 1; /* PROT_EN */
1665 }
1667 return fill_from_dev_buffer(scp, arr,
1668 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1669 }
1671 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1673 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1674 struct sdebug_dev_info * devip)
1675 {
1676 unsigned char *cmd = scp->cmnd;
1677 unsigned char * arr;
1678 int host_no = devip->sdbg_host->shost->host_no;
1679 int n, ret, alen, rlen;
1680 int port_group_a, port_group_b, port_a, port_b;
1682 alen = get_unaligned_be32(cmd + 6);
1683 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1684 if (!arr)
1685 return DID_REQUEUE << 16;
1686 /*
1687 * EVPD page 0x88 states we have two ports, one
1688 * real and a fake port with no device connected.
1689 * So we create two port groups with one port each
1690 * and set the group with port B to unavailable.
1691 */
1692 port_a = 0x1; /* relative port A */
1693 port_b = 0x2; /* relative port B */
1694 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1695 (devip->channel & 0x7f);
1696 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1697 (devip->channel & 0x7f) + 0x80;
1699 /*
1700 * The asymmetric access state is cycled according to the host_id.
1701 */
1702 n = 4;
1703 if (sdebug_vpd_use_hostno == 0) {
1704 arr[n++] = host_no % 3; /* Asymm access state */
1705 arr[n++] = 0x0F; /* claim: all states are supported */
1706 } else {
1707 arr[n++] = 0x0; /* Active/Optimized path */
1708 arr[n++] = 0x01; /* only support active/optimized paths */
1709 }
1710 put_unaligned_be16(port_group_a, arr + n);
1711 n += 2;
1712 arr[n++] = 0; /* Reserved */
1713 arr[n++] = 0; /* Status code */
1714 arr[n++] = 0; /* Vendor unique */
1715 arr[n++] = 0x1; /* One port per group */
1716 arr[n++] = 0; /* Reserved */
1717 arr[n++] = 0; /* Reserved */
1718 put_unaligned_be16(port_a, arr + n);
1719 n += 2;
1720 arr[n++] = 3; /* Port unavailable */
1721 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1722 put_unaligned_be16(port_group_b, arr + n);
1723 n += 2;
1724 arr[n++] = 0; /* Reserved */
1725 arr[n++] = 0; /* Status code */
1726 arr[n++] = 0; /* Vendor unique */
1727 arr[n++] = 0x1; /* One port per group */
1728 arr[n++] = 0; /* Reserved */
1729 arr[n++] = 0; /* Reserved */
1730 put_unaligned_be16(port_b, arr + n);
1731 n += 2;
1733 rlen = n - 4;
1734 put_unaligned_be32(rlen, arr + 0);
1736 /*
1737 * Return the smallest value of either
1738 * - The allocated length
1739 * - The constructed response length
1740 * - The maximum array size
1741 */
1742 rlen = min(alen, n);
1743 ret = fill_from_dev_buffer(scp, arr,
1744 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1745 kfree(arr);
1746 return ret;
1747 }
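/*
 * Response sketch: a 4 byte returned-data-length header followed, per
 * port group, by an 8 byte descriptor (asymmetric access state,
 * supported states, port group id, status, port count) and one 4 byte
 * entry per relative target port, as built in arr above.
 */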
1749 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1750 struct sdebug_dev_info *devip)
1751 {
1752 bool rctd;
1753 u8 reporting_opts, req_opcode, sdeb_i, supp;
1754 u16 req_sa, u;
1755 u32 alloc_len, a_len;
1756 int k, offset, len, errsts, count, bump, na;
1757 const struct opcode_info_t *oip;
1758 const struct opcode_info_t *r_oip;
1759 u8 *arr;
1760 u8 *cmd = scp->cmnd;
1762 rctd = !!(cmd[2] & 0x80);
1763 reporting_opts = cmd[2] & 0x7;
1764 req_opcode = cmd[3];
1765 req_sa = get_unaligned_be16(cmd + 4);
1766 alloc_len = get_unaligned_be32(cmd + 6);
1767 if (alloc_len < 4 || alloc_len > 0xffff) {
1768 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1769 return check_condition_result;
1770 }
1771 if (alloc_len > 8192)
1772 a_len = 8192;
1773 else
1774 a_len = alloc_len;
1775 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1776 if (NULL == arr) {
1777 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1778 INSUFF_RES_ASCQ);
1779 return check_condition_result;
1780 }
1781 switch (reporting_opts) {
1782 case 0: /* all commands */
1783 /* count number of commands */
1784 for (count = 0, oip = opcode_info_arr;
1785 oip->num_attached != 0xff; ++oip) {
1786 if (F_INV_OP & oip->flags)
1787 continue;
1788 count += (oip->num_attached + 1);
1789 }
1790 bump = rctd ? 20 : 8;
1791 put_unaligned_be32(count * bump, arr);
1792 for (offset = 4, oip = opcode_info_arr;
1793 oip->num_attached != 0xff && offset < a_len; ++oip) {
1794 if (F_INV_OP & oip->flags)
1795 continue;
1796 na = oip->num_attached;
1797 arr[offset] = oip->opcode;
1798 put_unaligned_be16(oip->sa, arr + offset + 2);
1799 if (rctd)
1800 arr[offset + 5] |= 0x2;
1801 if (FF_SA & oip->flags)
1802 arr[offset + 5] |= 0x1;
1803 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1804 if (rctd)
1805 put_unaligned_be16(0xa, arr + offset + 8);
1806 r_oip = oip;
1807 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1808 if (F_INV_OP & oip->flags)
1809 continue;
1810 offset += bump;
1811 arr[offset] = oip->opcode;
1812 put_unaligned_be16(oip->sa, arr + offset + 2);
1813 if (rctd)
1814 arr[offset + 5] |= 0x2;
1815 if (FF_SA & oip->flags)
1816 arr[offset + 5] |= 0x1;
1817 put_unaligned_be16(oip->len_mask[0],
1818 arr + offset + 6);
1819 if (rctd)
1820 put_unaligned_be16(0xa,
1821 arr + offset + 8);
1822 }
1823 oip = r_oip;
1824 offset += bump;
1825 }
1826 break;
1827 case 1: /* one command: opcode only */
1828 case 2: /* one command: opcode plus service action */
1829 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1830 sdeb_i = opcode_ind_arr[req_opcode];
1831 oip = &opcode_info_arr[sdeb_i];
1832 if (F_INV_OP & oip->flags) {
1833 supp = 1;
1834 offset = 4;
1835 } else {
1836 if (1 == reporting_opts) {
1837 if (FF_SA & oip->flags) {
1838 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1839 2, 2);
1840 kfree(arr);
1841 return check_condition_result;
1842 }
1843 req_sa = 0;
1844 } else if (2 == reporting_opts &&
1845 0 == (FF_SA & oip->flags)) {
1846 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
1847 kfree(arr);
1848 return check_condition_result;
1849 }
1850 if (0 == (FF_SA & oip->flags) &&
1851 req_opcode == oip->opcode)
1852 supp = 3;
1853 else if (0 == (FF_SA & oip->flags)) {
1854 na = oip->num_attached;
1855 for (k = 0, oip = oip->arrp; k < na;
1856 ++k, ++oip) {
1857 if (req_opcode == oip->opcode)
1858 break;
1859 }
1860 supp = (k >= na) ? 1 : 3;
1861 } else if (req_sa != oip->sa) {
1862 na = oip->num_attached;
1863 for (k = 0, oip = oip->arrp; k < na;
1864 ++k, ++oip) {
1865 if (req_sa == oip->sa)
1866 break;
1867 }
1868 supp = (k >= na) ? 1 : 3;
1869 } else
1870 supp = 3;
1871 if (3 == supp) {
1872 u = oip->len_mask[0];
1873 put_unaligned_be16(u, arr + 2);
1874 arr[4] = oip->opcode;
1875 for (k = 1; k < u; ++k)
1876 arr[4 + k] = (k < 16) ?
1877 oip->len_mask[k] : 0xff;
1878 offset = 4 + u;
1879 } else
1880 offset = 4;
1881 }
1882 arr[1] = (rctd ? 0x80 : 0) | supp;
1883 if (rctd) {
1884 put_unaligned_be16(0xa, arr + offset);
1885 offset += 12;
1886 }
1887 break;
1888 default:
1889 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1890 kfree(arr);
1891 return check_condition_result;
1892 }
1893 offset = (offset < a_len) ? offset : a_len;
1894 len = (offset < alloc_len) ? offset : alloc_len;
1895 errsts = fill_from_dev_buffer(scp, arr, len);
1896 kfree(arr);
1897 return errsts;
1898 }
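/*
 * Descriptor sizing: with reporting_opts 0 each command gets an 8 byte
 * all_commands descriptor, or 20 bytes when RCTD is set because a
 * 12 byte command timeouts descriptor follows (hence bump above). The
 * one-command forms return a usage-data header plus the cdb usage
 * bitmap taken from the opcode's len_mask.
 */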
1900 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
1901 struct sdebug_dev_info *devip)
1902 {
1903 bool repd;
1904 u32 alloc_len, len;
1905 u8 arr[16];
1906 u8 *cmd = scp->cmnd;
1908 memset(arr, 0, sizeof(arr));
1909 repd = !!(cmd[2] & 0x80);
1910 alloc_len = get_unaligned_be32(cmd + 6);
1911 if (alloc_len < 4) {
1912 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1913 return check_condition_result;
1914 }
1915 arr[0] = 0xc8; /* ATS | ATSS | LURS */
1916 arr[1] = 0x1; /* ITNRS */
1917 if (repd) {
1918 arr[3] = 0xc;
1919 len = 16;
1920 } else
1921 len = 4;
1923 len = (len < alloc_len) ? len : alloc_len;
1924 return fill_from_dev_buffer(scp, arr, len);
1925 }
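/*
 * arr[0] = 0xc8 claims ABORT TASK (bit 7), ABORT TASK SET (bit 6) and
 * LOGICAL UNIT RESET (bit 3); arr[1] = 0x1 claims I_T NEXUS RESET.
 */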
1927 /* <<Following mode page info copied from ST318451LW>> */
1929 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1930 { /* Read-Write Error Recovery page for mode_sense */
1931 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1932 5, 0, 0xff, 0xff};
1934 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1935 if (1 == pcontrol)
1936 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1937 return sizeof(err_recov_pg);
1938 }
1940 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1941 { /* Disconnect-Reconnect page for mode_sense */
1942 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1943 0, 0, 0, 0, 0, 0, 0, 0};
1945 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1946 if (1 == pcontrol)
1947 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1948 return sizeof(disconnect_pg);
1949 }
1951 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1952 { /* Format device page for mode_sense */
1953 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1954 0, 0, 0, 0, 0, 0, 0, 0,
1955 0, 0, 0, 0, 0x40, 0, 0, 0};
1957 memcpy(p, format_pg, sizeof(format_pg));
1958 put_unaligned_be16(sdebug_sectors_per, p + 10);
1959 put_unaligned_be16(sdebug_sector_size, p + 12);
1960 if (sdebug_removable)
1961 p[20] |= 0x20; /* should agree with INQUIRY */
1962 if (1 == pcontrol)
1963 memset(p + 2, 0, sizeof(format_pg) - 2);
1964 return sizeof(format_pg);
1965 }
1967 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1968 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
1969 0, 0, 0, 0};
1971 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1972 { /* Caching page for mode_sense */
1973 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1974 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1975 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1976 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1978 if (SDEBUG_OPT_N_WCE & sdebug_opts)
1979 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
1980 memcpy(p, caching_pg, sizeof(caching_pg));
1981 if (1 == pcontrol)
1982 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1983 else if (2 == pcontrol)
1984 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1985 return sizeof(caching_pg);
1986 }
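/*
 * Convention shared by the resp_*_pg() helpers: pcontrol 0 returns
 * current values, 1 returns the changeable-bits mask and 2 the
 * defaults; pcontrol 3 (saved values) was already rejected with
 * SAVING_PARAMS_UNSUP by resp_mode_sense().
 */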
1988 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1989 0, 0, 0x2, 0x4b};
1991 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1992 { /* Control mode page for mode_sense */
1993 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1994 0, 0, 0, 0};
1995 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1996 0, 0, 0x2, 0x4b};
1998 if (sdebug_dsense)
1999 ctrl_m_pg[2] |= 0x4;
2000 else
2001 ctrl_m_pg[2] &= ~0x4;
2003 if (sdebug_ato)
2004 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2006 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2007 if (1 == pcontrol)
2008 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2009 else if (2 == pcontrol)
2010 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2011 return sizeof(ctrl_m_pg);
2012 }
2015 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
2016 { /* Informational Exceptions control mode page for mode_sense */
2017 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2018 0, 0, 0x0, 0x0};
2019 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2020 0, 0, 0x0, 0x0};
2022 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2023 if (1 == pcontrol)
2024 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2025 else if (2 == pcontrol)
2026 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2027 return sizeof(iec_m_pg);
2028 }
2030 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
2031 { /* SAS SSP mode page - short format for mode_sense */
2032 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2033 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2035 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2036 if (1 == pcontrol)
2037 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2038 return sizeof(sas_sf_m_pg);
2039 }
2042 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
2043 int target_dev_id)
2044 { /* SAS phy control and discover mode page for mode_sense */
2045 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2046 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2047 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2048 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2049 0x2, 0, 0, 0, 0, 0, 0, 0,
2050 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2051 0, 0, 0, 0, 0, 0, 0, 0,
2052 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2053 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2054 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2055 0x3, 0, 0, 0, 0, 0, 0, 0,
2056 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2057 0, 0, 0, 0, 0, 0, 0, 0,
2058 };
2059 int port_a, port_b;
2061 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2062 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2063 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2064 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2065 port_a = target_dev_id + 1;
2066 port_b = port_a + 1;
2067 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2068 put_unaligned_be32(port_a, p + 20);
2069 put_unaligned_be32(port_b, p + 48 + 20);
2070 if (1 == pcontrol)
2071 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2072 return sizeof(sas_pcd_m_pg);
2073 }
2075 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
2076 { /* SAS SSP shared protocol specific port mode subpage */
2077 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2078 0, 0, 0, 0, 0, 0, 0, 0,
2079 };
2081 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2082 if (1 == pcontrol)
2083 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2084 return sizeof(sas_sha_m_pg);
2085 }
2087 #define SDEBUG_MAX_MSENSE_SZ 256
2089 static int resp_mode_sense(struct scsi_cmnd *scp,
2090 struct sdebug_dev_info *devip)
2091 {
2092 int pcontrol, pcode, subpcode, bd_len;
2093 unsigned char dev_spec;
2094 int alloc_len, offset, len, target_dev_id;
2095 int target = scp->device->id;
2096 unsigned char * ap;
2097 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2098 unsigned char *cmd = scp->cmnd;
2099 bool dbd, llbaa, msense_6, is_disk, bad_pcode;
2101 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2102 pcontrol = (cmd[2] & 0xc0) >> 6;
2103 pcode = cmd[2] & 0x3f;
2104 subpcode = cmd[3];
2105 msense_6 = (MODE_SENSE == cmd[0]);
2106 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2107 is_disk = (sdebug_ptype == TYPE_DISK);
2108 if (is_disk && !dbd)
2109 bd_len = llbaa ? 16 : 8;
2110 else
2111 bd_len = 0;
2112 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2113 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2114 if (0x3 == pcontrol) { /* Saving values not supported */
2115 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2116 return check_condition_result;
2117 }
2118 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2119 (devip->target * 1000) - 3;
2120 /* for disks set DPOFUA bit and clear write protect (WP) bit */
2121 if (is_disk)
2122 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2123 else
2124 dev_spec = 0x0;
2125 if (msense_6) {
2126 arr[2] = dev_spec;
2127 arr[3] = bd_len;
2128 offset = 4;
2129 } else {
2130 arr[3] = dev_spec;
2131 if (16 == bd_len)
2132 arr[4] = 0x1; /* set LONGLBA bit */
2133 arr[7] = bd_len; /* assume 255 or less */
2134 offset = 8;
2135 }
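/*
 * Mode parameter header: MODE SENSE(6) uses a 4 byte header (length,
 * medium type, device-specific parameter, block descriptor length);
 * MODE SENSE(10) uses 8 bytes with 2 byte length fields and the
 * LONGLBA flag selecting 16 byte block descriptors.
 */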
2136 ap = arr + offset;
2137 if ((bd_len > 0) && (!sdebug_capacity))
2138 sdebug_capacity = get_sdebug_capacity();
2140 if (8 == bd_len) {
2141 if (sdebug_capacity > 0xfffffffe)
2142 put_unaligned_be32(0xffffffff, ap + 0);
2143 else
2144 put_unaligned_be32(sdebug_capacity, ap + 0);
2145 put_unaligned_be16(sdebug_sector_size, ap + 6);
2146 offset += bd_len;
2147 ap = arr + offset;
2148 } else if (16 == bd_len) {
2149 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2150 put_unaligned_be32(sdebug_sector_size, ap + 12);
2151 offset += bd_len;
2152 ap = arr + offset;
2153 }
2155 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2156 /* TODO: Control Extension page */
2157 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2158 return check_condition_result;
2159 }
2160 bad_pcode = false;
2162 switch (pcode) {
2163 case 0x1: /* Read-Write error recovery page, direct access */
2164 len = resp_err_recov_pg(ap, pcontrol, target);
2165 offset += len;
2166 break;
2167 case 0x2: /* Disconnect-Reconnect page, all devices */
2168 len = resp_disconnect_pg(ap, pcontrol, target);
2169 offset += len;
2170 break;
2171 case 0x3: /* Format device page, direct access */
2172 if (is_disk) {
2173 len = resp_format_pg(ap, pcontrol, target);
2174 offset += len;
2175 } else
2176 bad_pcode = true;
2177 break;
2178 case 0x8: /* Caching page, direct access */
2179 if (is_disk) {
2180 len = resp_caching_pg(ap, pcontrol, target);
2181 offset += len;
2182 } else
2183 bad_pcode = true;
2184 break;
2185 case 0xa: /* Control Mode page, all devices */
2186 len = resp_ctrl_m_pg(ap, pcontrol, target);
2187 offset += len;
2188 break;
2189 case 0x19: /* if spc==1 then sas phy, control+discover */
2190 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2191 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2192 return check_condition_result;
2193 }
2194 len = 0;
2195 if ((0x0 == subpcode) || (0xff == subpcode))
2196 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2197 if ((0x1 == subpcode) || (0xff == subpcode))
2198 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2199 target_dev_id);
2200 if ((0x2 == subpcode) || (0xff == subpcode))
2201 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2202 offset += len;
2203 break;
2204 case 0x1c: /* Informational Exceptions Mode page, all devices */
2205 len = resp_iec_m_pg(ap, pcontrol, target);
2206 offset += len;
2207 break;
2208 case 0x3f: /* Read all Mode pages */
2209 if ((0 == subpcode) || (0xff == subpcode)) {
2210 len = resp_err_recov_pg(ap, pcontrol, target);
2211 len += resp_disconnect_pg(ap + len, pcontrol, target);
2212 if (is_disk) {
2213 len += resp_format_pg(ap + len, pcontrol,
2214 target);
2215 len += resp_caching_pg(ap + len, pcontrol,
2216 target);
2217 }
2218 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2219 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2220 if (0xff == subpcode) {
2221 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2222 target, target_dev_id);
2223 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2224 }
2225 len += resp_iec_m_pg(ap + len, pcontrol, target);
2226 offset += len;
2227 } else {
2228 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2229 return check_condition_result;
2230 }
2231 break;
2232 default:
2233 bad_pcode = true;
2234 break;
2235 }
2236 if (bad_pcode) {
2237 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2238 return check_condition_result;
2239 }
2240 if (msense_6)
2241 arr[0] = offset - 1;
2242 else
2243 put_unaligned_be16((offset - 2), arr + 0);
2244 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2245 }
2247 #define SDEBUG_MAX_MSELECT_SZ 512
2249 static int resp_mode_select(struct scsi_cmnd *scp,
2250 struct sdebug_dev_info *devip)
2251 {
2252 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2253 int param_len, res, mpage;
2254 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2255 unsigned char *cmd = scp->cmnd;
2256 int mselect6 = (MODE_SELECT == cmd[0]);
2258 memset(arr, 0, sizeof(arr));
2259 pf = cmd[1] & 0x10;
2260 sp = cmd[1] & 0x1;
2261 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2262 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2263 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2264 return check_condition_result;
2265 }
2266 res = fetch_to_dev_buffer(scp, arr, param_len);
2267 if (-1 == res)
2268 return DID_ERROR << 16;
2269 else if (sdebug_verbose && (res < param_len))
2270 sdev_printk(KERN_INFO, scp->device,
2271 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2272 __func__, param_len, res);
2273 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2274 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2275 if (md_len > 2) {
2276 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2277 return check_condition_result;
2278 }
2279 off = bd_len + (mselect6 ? 4 : 8);
2280 mpage = arr[off] & 0x3f;
2281 ps = !!(arr[off] & 0x80);
2282 if (ps) {
2283 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2284 return check_condition_result;
2285 }
2286 spf = !!(arr[off] & 0x40);
2287 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2288 (arr[off + 1] + 2);
2289 if ((pg_len + off) > param_len) {
2290 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2291 PARAMETER_LIST_LENGTH_ERR, 0);
2292 return check_condition_result;
2293 }
2294 switch (mpage) {
2295 case 0x8: /* Caching Mode page */
2296 if (caching_pg[1] == arr[off + 1]) {
2297 memcpy(caching_pg + 2, arr + off + 2,
2298 sizeof(caching_pg) - 2);
2299 goto set_mode_changed_ua;
2300 }
2301 break;
2302 case 0xa: /* Control Mode page */
2303 if (ctrl_m_pg[1] == arr[off + 1]) {
2304 memcpy(ctrl_m_pg + 2, arr + off + 2,
2305 sizeof(ctrl_m_pg) - 2);
2306 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2307 goto set_mode_changed_ua;
2308 }
2309 break;
2310 case 0x1c: /* Informational Exceptions Mode page */
2311 if (iec_m_pg[1] == arr[off + 1]) {
2312 memcpy(iec_m_pg + 2, arr + off + 2,
2313 sizeof(iec_m_pg) - 2);
2314 goto set_mode_changed_ua;
2315 }
2316 break;
2317 default:
2318 break;
2319 }
2320 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2321 return check_condition_result;
2322 set_mode_changed_ua:
2323 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2324 return 0;
2325 }
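/*
 * Only the caching, control and informational exceptions mode pages
 * are changeable above; any other page code falls through to an
 * invalid-field-in-parameter-list sense pointing at the page header.
 */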
2327 static int resp_temp_l_pg(unsigned char * arr)
2328 {
2329 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2330 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2331 };
2333 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2334 return sizeof(temp_l_pg);
2335 }
2337 static int resp_ie_l_pg(unsigned char * arr)
2338 {
2339 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2340 };
2342 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2343 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2344 arr[4] = THRESHOLD_EXCEEDED;
2345 arr[5] = 0xff;
2346 }
2347 return sizeof(ie_l_pg);
2348 }
2350 #define SDEBUG_MAX_LSENSE_SZ 512
2352 static int resp_log_sense(struct scsi_cmnd *scp,
2353 struct sdebug_dev_info *devip)
2354 {
2355 int ppc, sp, pcode, subpcode, alloc_len, len, n;
2356 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2357 unsigned char *cmd = scp->cmnd;
2359 memset(arr, 0, sizeof(arr));
2360 ppc = cmd[1] & 0x2;
2361 sp = cmd[1] & 0x1;
2362 if (ppc || sp) {
2363 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2364 return check_condition_result;
2365 }
2366 pcode = cmd[2] & 0x3f;
2367 subpcode = cmd[3] & 0xff;
2368 alloc_len = get_unaligned_be16(cmd + 7);
2369 arr[0] = pcode;
2370 if (0 == subpcode) {
2371 switch (pcode) {
2372 case 0x0: /* Supported log pages log page */
2373 n = 4;
2374 arr[n++] = 0x0; /* this page */
2375 arr[n++] = 0xd; /* Temperature */
2376 arr[n++] = 0x2f; /* Informational exceptions */
2377 arr[3] = n - 4;
2378 break;
2379 case 0xd: /* Temperature log page */
2380 arr[3] = resp_temp_l_pg(arr + 4);
2381 break;
2382 case 0x2f: /* Informational exceptions log page */
2383 arr[3] = resp_ie_l_pg(arr + 4);
2384 break;
2385 default:
2386 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2387 return check_condition_result;
2388 }
2389 } else if (0xff == subpcode) {
2390 arr[0] |= 0x40;
2391 arr[1] = subpcode;
2392 switch (pcode) {
2393 case 0x0: /* Supported log pages and subpages log page */
2394 n = 4;
2395 arr[n++] = 0x0;
2396 arr[n++] = 0x0; /* 0,0 page */
2397 arr[n++] = 0x0;
2398 arr[n++] = 0xff; /* this page */
2399 arr[n++] = 0xd;
2400 arr[n++] = 0x0; /* Temperature */
2401 arr[n++] = 0x2f;
2402 arr[n++] = 0x0; /* Informational exceptions */
2403 arr[3] = n - 4;
2404 break;
2405 case 0xd: /* Temperature subpages */
2406 n = 4;
2407 arr[n++] = 0xd;
2408 arr[n++] = 0x0; /* Temperature */
2409 arr[3] = n - 4;
2410 break;
2411 case 0x2f: /* Informational exceptions subpages */
2412 n = 4;
2413 arr[n++] = 0x2f;
2414 arr[n++] = 0x0; /* Informational exceptions */
2415 arr[3] = n - 4;
2416 break;
2417 default:
2418 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2419 return check_condition_result;
2420 }
2421 } else {
2422 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2423 return check_condition_result;
2424 }
2425 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
2426 return fill_from_dev_buffer(scp, arr,
2427 min(len, SDEBUG_MAX_LSENSE_SZ));
2428 }
2430 static int check_device_access_params(struct scsi_cmnd *scp,
2431 unsigned long long lba, unsigned int num)
2432 {
2433 if (lba + num > sdebug_capacity) {
2434 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2435 return check_condition_result;
2436 }
2437 /* transfer length excessive (tie in to block limits VPD page) */
2438 if (num > sdebug_store_sectors) {
2439 /* needs work to find which cdb byte 'num' comes from */
2440 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2441 return check_condition_result;
2442 }
2443 return 0;
2444 }
2446 /* Returns number of bytes copied or -1 if error. */
2447 static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2448 u32 num, bool do_write)
2449 {
2450 int ret;
2451 u64 block, rest = 0;
2452 struct scsi_data_buffer *sdb;
2453 enum dma_data_direction dir;
2455 if (do_write) {
2456 sdb = scsi_out(scmd);
2457 dir = DMA_TO_DEVICE;
2458 } else {
2459 sdb = scsi_in(scmd);
2460 dir = DMA_FROM_DEVICE;
2461 }
2463 if (!sdb->length)
2464 return 0;
2465 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2466 return -1;
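/* do_div() divides lba in place and returns the remainder, i.e. the
 * starting block within the fake store; accesses that run off the end
 * of the store wrap around to block 0 (the "rest" handling below). */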
2468 block = do_div(lba, sdebug_store_sectors);
2469 if (block + num > sdebug_store_sectors)
2470 rest = block + num - sdebug_store_sectors;
2472 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2473 fake_storep + (block * sdebug_sector_size),
2474 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2475 if (ret != (num - rest) * sdebug_sector_size)
2476 return ret;
2478 if (rest) {
2479 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2480 fake_storep, rest * sdebug_sector_size,
2481 sg_skip + ((num - rest) * sdebug_sector_size),
2482 do_write);
2483 }
2485 return ret;
2486 }
2488 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2489 * arr into fake_store(lba,num) and return true. If comparison fails then
2490 * return false. */
2491 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2492 {
2493 bool res;
2494 u64 block, rest = 0;
2495 u32 store_blks = sdebug_store_sectors;
2496 u32 lb_size = sdebug_sector_size;
2498 block = do_div(lba, store_blks);
2499 if (block + num > store_blks)
2500 rest = block + num - store_blks;
2502 res = !memcmp(fake_storep + (block * lb_size), arr,
2503 (num - rest) * lb_size);
2504 if (!res)
2505 return res;
2506 if (rest)
2507 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2508 rest * lb_size);
2509 if (!res)
2510 return res;
2511 arr += num * lb_size;
2512 memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2513 if (rest)
2514 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2515 rest * lb_size);
2516 return res;
2517 }
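/*
 * The caller passes 2 * num blocks in arr: the first num blocks are
 * the verify (compare) data and the top half is the data to be
 * written once the comparison succeeds, matching the COMPARE AND
 * WRITE data-out buffer layout.
 */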
2519 static __be16 dif_compute_csum(const void *buf, int len)
2520 {
2521 __be16 csum;
2523 if (sdebug_guard)
2524 csum = (__force __be16)ip_compute_csum(buf, len);
2525 else
2526 csum = cpu_to_be16(crc_t10dif(buf, len));
2528 return csum;
2529 }
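/*
 * The guard tag is an IP checksum when the sdebug_guard module
 * parameter is non-zero, otherwise the T10-mandated CRC16; this has
 * to agree with the integrity profile the driver registers, or every
 * protected transfer would fail the GUARD check.
 */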
2531 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2532 sector_t sector, u32 ei_lba)
2533 {
2534 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
2536 if (sdt->guard_tag != csum) {
2537 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2538 (unsigned long)sector,
2539 be16_to_cpu(sdt->guard_tag),
2540 be16_to_cpu(csum));
2541 return 0x01;
2542 }
2543 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2544 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2545 pr_err("REF check failed on sector %lu\n",
2546 (unsigned long)sector);
2547 return 0x03;
2548 }
2549 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2550 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2551 pr_err("REF check failed on sector %lu\n",
2552 (unsigned long)sector);
2553 return 0x03;
2554 }
2555 return 0;
2556 }
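/*
 * The non-zero returns above become the ASCQ of sense code 0x10:
 * 0x01 = logical block guard check failed, 0x03 = reference tag check
 * failed (type 1 compares against the low 32 bits of the LBA, type 2
 * against the expected initial LBA taken from the cdb).
 */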
2558 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2559 unsigned int sectors, bool read)
2560 {
2561 size_t resid;
2562 void *paddr;
2563 const void *dif_store_end = dif_storep + sdebug_store_sectors;
2564 struct sg_mapping_iter miter;
2566 /* Bytes of protection data to copy into sgl */
2567 resid = sectors * sizeof(*dif_storep);
2569 sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2570 scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2571 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2573 while (sg_miter_next(&miter) && resid > 0) {
2574 size_t len = min(miter.length, resid);
2575 void *start = dif_store(sector);
2576 size_t rest = 0;
2578 if (dif_store_end < start + len)
2579 rest = start + len - dif_store_end;
2581 paddr = miter.addr;
2583 if (read)
2584 memcpy(paddr, start, len - rest);
2585 else
2586 memcpy(start, paddr, len - rest);
2588 if (rest) {
2589 if (read)
2590 memcpy(paddr + len - rest, dif_storep, rest);
2591 else
2592 memcpy(dif_storep, paddr + len - rest, rest);
2593 }
2595 sector += len / sizeof(*dif_storep);
2596 resid -= len;
2597 }
2598 sg_miter_stop(&miter);
2599 }
2601 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2602 unsigned int sectors, u32 ei_lba)
2603 {
2604 unsigned int i;
2605 struct t10_pi_tuple *sdt;
2606 sector_t sector;
2608 for (i = 0; i < sectors; i++, ei_lba++) {
2609 int ret;
2611 sector = start_sec + i;
2612 sdt = dif_store(sector);
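/* a t10-pi application tag of 0xffff escapes (disables) checking of
 * this block */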
2614 if (sdt->app_tag == cpu_to_be16(0xffff))
2615 continue;
2617 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2618 if (ret) {
2619 dif_errors++;
2620 return ret;
2621 }
2622 }
2624 dif_copy_prot(SCpnt, start_sec, sectors, true);
2625 dix_reads++;
2627 return 0;
2628 }
2630 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2631 {
2632 u8 *cmd = scp->cmnd;
2633 struct sdebug_queued_cmd *sqcp;
2634 u64 lba;
2635 u32 num;
2636 u32 ei_lba;
2637 unsigned long iflags;
2638 int ret;
2639 bool check_prot;
2641 switch (cmd[0]) {
2642 case READ_16:
2643 ei_lba = 0;
2644 lba = get_unaligned_be64(cmd + 2);
2645 num = get_unaligned_be32(cmd + 10);
2646 check_prot = true;
2647 break;
2648 case READ_10:
2649 ei_lba = 0;
2650 lba = get_unaligned_be32(cmd + 2);
2651 num = get_unaligned_be16(cmd + 7);
2652 check_prot = true;
2653 break;
2654 case READ_6:
2655 ei_lba = 0;
2656 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2657 (u32)(cmd[1] & 0x1f) << 16;
2658 num = (0 == cmd[4]) ? 256 : cmd[4];
2659 check_prot = true;
2660 break;
2661 case READ_12:
2662 ei_lba = 0;
2663 lba = get_unaligned_be32(cmd + 2);
2664 num = get_unaligned_be32(cmd + 6);
2665 check_prot = true;
2666 break;
2667 case XDWRITEREAD_10:
2668 ei_lba = 0;
2669 lba = get_unaligned_be32(cmd + 2);
2670 num = get_unaligned_be16(cmd + 7);
2671 check_prot = false;
2672 break;
2673 default: /* assume READ(32) */
2674 lba = get_unaligned_be64(cmd + 12);
2675 ei_lba = get_unaligned_be32(cmd + 20);
2676 num = get_unaligned_be32(cmd + 28);
2677 check_prot = false;
2678 break;
2679 }
2680 if (unlikely(have_dif_prot && check_prot)) {
2681 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2682 (cmd[1] & 0xe0)) {
2683 mk_sense_invalid_opcode(scp);
2684 return check_condition_result;
2685 }
2686 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2687 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2688 (cmd[1] & 0xe0) == 0)
2689 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2690 "to DIF device\n");
2692 if (unlikely(sdebug_any_injecting_opt)) {
2693 sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
2695 if (sqcp) {
2696 if (sqcp->inj_short)
2697 num /= 2;
2698 }
2699 } else
2700 sqcp = NULL;
2702 /* inline check_device_access_params() */
2703 if (unlikely(lba + num > sdebug_capacity)) {
2704 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2705 return check_condition_result;
2706 }
2707 /* transfer length excessive (tie in to block limits VPD page) */
2708 if (unlikely(num > sdebug_store_sectors)) {
2709 /* needs work to find which cdb byte 'num' comes from */
2710 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2711 return check_condition_result;
2712 }
2714 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
2715 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2716 ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
2717 /* claim unrecoverable read error */
2718 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2719 /* set info field and valid bit for fixed descriptor */
2720 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2721 scp->sense_buffer[0] |= 0x80; /* Valid bit */
2722 ret = (lba < OPT_MEDIUM_ERR_ADDR)
2723 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2724 put_unaligned_be32(ret, scp->sense_buffer + 3);
2725 }
2726 scsi_set_resid(scp, scsi_bufflen(scp));
2727 return check_condition_result;
2728 }
2730 read_lock_irqsave(&atomic_rw, iflags);
2732 /* DIX + T10 DIF */
2733 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
2734 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2736 if (prot_ret) {
2737 read_unlock_irqrestore(&atomic_rw, iflags);
2738 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2739 return illegal_condition_result;
2740 }
2741 }
2743 ret = do_device_access(scp, 0, lba, num, false);
2744 read_unlock_irqrestore(&atomic_rw, iflags);
2745 if (unlikely(ret == -1))
2746 return DID_ERROR << 16;
2748 scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2750 if (unlikely(sqcp)) {
2751 if (sqcp->inj_recovered) {
2752 mk_sense_buffer(scp, RECOVERED_ERROR,
2753 THRESHOLD_EXCEEDED, 0);
2754 return check_condition_result;
2755 } else if (sqcp->inj_transport) {
2756 mk_sense_buffer(scp, ABORTED_COMMAND,
2757 TRANSPORT_PROBLEM, ACK_NAK_TO);
2758 return check_condition_result;
2759 } else if (sqcp->inj_dif) {
2760 /* Logical block guard check failed */
2761 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2762 return illegal_condition_result;
2763 } else if (sqcp->inj_dix) {
2764 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2765 return illegal_condition_result;
2766 }
2767 }
2768 return 0;
2769 }
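/*
 * With SDEBUG_OPT_MEDIUM_ERR set, any read overlapping the window at
 * OPT_MEDIUM_ERR_ADDR reports an unrecovered read error; for fixed
 * format sense the failing LBA is placed in the information field and
 * the valid bit set, as done above.
 */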
2771 static void dump_sector(unsigned char *buf, int len)
2772 {
2773 int i, j, n;
2775 pr_err(">>> Sector Dump <<<\n");
2776 for (i = 0 ; i < len ; i += 16) {
2777 char b[128];
2779 for (j = 0, n = 0; j < 16; j++) {
2780 unsigned char c = buf[i+j];
2782 if (c >= 0x20 && c < 0x7e)
2783 n += scnprintf(b + n, sizeof(b) - n,
2784 " %c ", buf[i+j]);
2785 else
2786 n += scnprintf(b + n, sizeof(b) - n,
2787 "%02x ", buf[i+j]);
2789 pr_err("%04d: %s\n", i, b);
2793 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2794 unsigned int sectors, u32 ei_lba)
2795 {
2796 int ret;
2797 struct t10_pi_tuple *sdt;
2798 void *daddr;
2799 sector_t sector = start_sec;
2800 int ppage_offset;
2801 int dpage_offset;
2802 struct sg_mapping_iter diter;
2803 struct sg_mapping_iter piter;
2805 BUG_ON(scsi_sg_count(SCpnt) == 0);
2806 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2808 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2809 scsi_prot_sg_count(SCpnt),
2810 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2811 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2812 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2814 /* For each protection page */
2815 while (sg_miter_next(&piter)) {
2816 dpage_offset = 0;
2817 if (WARN_ON(!sg_miter_next(&diter))) {
2818 ret = 0x01;
2819 goto out;
2820 }
2822 for (ppage_offset = 0; ppage_offset < piter.length;
2823 ppage_offset += sizeof(struct t10_pi_tuple)) {
2824 /* If we're at the end of the current
2825 * data page advance to the next one.
2826 */
2827 if (dpage_offset >= diter.length) {
2828 if (WARN_ON(!sg_miter_next(&diter))) {
2829 ret = 0x01;
2830 goto out;
2831 }
2832 dpage_offset = 0;
2833 }
2835 sdt = piter.addr + ppage_offset;
2836 daddr = diter.addr + dpage_offset;
2838 ret = dif_verify(sdt, daddr, sector, ei_lba);
2839 if (ret) {
2840 dump_sector(daddr, sdebug_sector_size);
2841 goto out;
2842 }
2844 sector++;
2845 ei_lba++;
2846 dpage_offset += sdebug_sector_size;
2847 }
2848 diter.consumed = dpage_offset;
2849 sg_miter_stop(&diter);
2850 }
2851 sg_miter_stop(&piter);
2853 dif_copy_prot(SCpnt, start_sec, sectors, false);
2854 dix_writes++;
2856 return 0;
2858 out:
2859 dif_errors++;
2860 sg_miter_stop(&diter);
2861 sg_miter_stop(&piter);
2862 return ret;
2863 }
2865 static unsigned long lba_to_map_index(sector_t lba)
2866 {
2867 if (sdebug_unmap_alignment)
2868 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
2869 sector_div(lba, sdebug_unmap_granularity);
2870 return lba;
2871 }
2873 static sector_t map_index_to_lba(unsigned long index)
2874 {
2875 sector_t lba = index * sdebug_unmap_granularity;
2877 if (sdebug_unmap_alignment)
2878 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
2879 return lba;
2880 }
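/*
 * Worked example: with sdebug_unmap_granularity=4 and
 * sdebug_unmap_alignment=1 the provisioning bitmap chunks cover
 * LBA 0 (index 0, the short leading chunk), LBAs 1-4 (index 1),
 * LBAs 5-8 (index 2), ...; map_index_to_lba(2) = 2 * 4 - (4 - 1) = 5.
 */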
2882 static unsigned int map_state(sector_t lba, unsigned int *num)
2883 {
2884 sector_t end;
2885 unsigned int mapped;
2886 unsigned long index;
2887 unsigned long next;
2889 index = lba_to_map_index(lba);
2890 mapped = test_bit(index, map_storep);
2892 if (mapped)
2893 next = find_next_zero_bit(map_storep, map_size, index);
2894 else
2895 next = find_next_bit(map_storep, map_size, index);
2897 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2898 *num = end - lba;
2899 return mapped;
2900 }
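/*
 * map_state() reports whether lba is mapped and sets *num to the
 * length of the run of following blocks in the same state, which is
 * exactly the shape of a GET LBA STATUS descriptor.
 */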
2902 static void map_region(sector_t lba, unsigned int len)
2903 {
2904 sector_t end = lba + len;
2906 while (lba < end) {
2907 unsigned long index = lba_to_map_index(lba);
2909 if (index < map_size)
2910 set_bit(index, map_storep);
2912 lba = map_index_to_lba(index + 1);
2913 }
2914 }
2916 static void unmap_region(sector_t lba, unsigned int len)
2917 {
2918 sector_t end = lba + len;
2920 while (lba < end) {
2921 unsigned long index = lba_to_map_index(lba);
2923 if (lba == map_index_to_lba(index) &&
2924 lba + sdebug_unmap_granularity <= end &&
2925 index < map_size) {
2926 clear_bit(index, map_storep);
2927 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
2928 memset(fake_storep +
2929 lba * sdebug_sector_size,
2930 (sdebug_lbprz & 1) ? 0 : 0xff,
2931 sdebug_sector_size *
2932 sdebug_unmap_granularity);
2933 }
2934 if (dif_storep) {
2935 memset(dif_storep + lba, 0xff,
2936 sizeof(*dif_storep) *
2937 sdebug_unmap_granularity);
2938 }
2939 }
2940 lba = map_index_to_lba(index + 1);
2941 }
2942 }
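/*
 * Only whole, granularity-aligned chunks are deallocated; partial
 * chunks stay mapped. When sdebug_lbprz is odd the freed blocks read
 * back as zeroes, otherwise as 0xff bytes, matching the lbprz value
 * advertised via READ CAPACITY(16) and the LB Provisioning VPD page.
 */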
2944 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2945 {
2946 u8 *cmd = scp->cmnd;
2947 u64 lba;
2948 u32 num;
2949 u32 ei_lba;
2950 unsigned long iflags;
2951 int ret;
2952 bool check_prot;
2954 switch (cmd[0]) {
2955 case WRITE_16:
2956 ei_lba = 0;
2957 lba = get_unaligned_be64(cmd + 2);
2958 num = get_unaligned_be32(cmd + 10);
2959 check_prot = true;
2960 break;
2961 case WRITE_10:
2962 ei_lba = 0;
2963 lba = get_unaligned_be32(cmd + 2);
2964 num = get_unaligned_be16(cmd + 7);
2965 check_prot = true;
2966 break;
2967 case WRITE_6:
2968 ei_lba = 0;
2969 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2970 (u32)(cmd[1] & 0x1f) << 16;
2971 num = (0 == cmd[4]) ? 256 : cmd[4];
2972 check_prot = true;
2973 break;
2974 case WRITE_12:
2975 ei_lba = 0;
2976 lba = get_unaligned_be32(cmd + 2);
2977 num = get_unaligned_be32(cmd + 6);
2978 check_prot = true;
2979 break;
2980 case 0x53: /* XDWRITEREAD(10) */
2981 ei_lba = 0;
2982 lba = get_unaligned_be32(cmd + 2);
2983 num = get_unaligned_be16(cmd + 7);
2984 check_prot = false;
2985 break;
2986 default: /* assume WRITE(32) */
2987 lba = get_unaligned_be64(cmd + 12);
2988 ei_lba = get_unaligned_be32(cmd + 20);
2989 num = get_unaligned_be32(cmd + 28);
2990 check_prot = false;
2991 break;
2992 }
2993 if (unlikely(have_dif_prot && check_prot)) {
2994 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2995 (cmd[1] & 0xe0)) {
2996 mk_sense_invalid_opcode(scp);
2997 return check_condition_result;
2998 }
2999 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3000 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3001 (cmd[1] & 0xe0) == 0)
3002 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3003 "to DIF device\n");
3006 /* inline check_device_access_params() */
3007 if (unlikely(lba + num > sdebug_capacity)) {
3008 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3009 return check_condition_result;
3010 }
3011 /* transfer length excessive (tie in to block limits VPD page) */
3012 if (unlikely(num > sdebug_store_sectors)) {
3013 /* needs work to find which cdb byte 'num' comes from */
3014 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3015 return check_condition_result;
3016 }
3018 write_lock_irqsave(&atomic_rw, iflags);
3020 /* DIX + T10 DIF */
3021 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3022 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
3024 if (prot_ret) {
3025 write_unlock_irqrestore(&atomic_rw, iflags);
3026 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
3027 return illegal_condition_result;
3028 }
3029 }
3031 ret = do_device_access(scp, 0, lba, num, true);
3032 if (unlikely(scsi_debug_lbp()))
3033 map_region(lba, num);
3034 write_unlock_irqrestore(&atomic_rw, iflags);
3035 if (unlikely(-1 == ret))
3036 return DID_ERROR << 16;
3037 else if (unlikely(sdebug_verbose &&
3038 (ret < (num * sdebug_sector_size))))
3039 sdev_printk(KERN_INFO, scp->device,
3040 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3041 my_name, num * sdebug_sector_size, ret);
3043 if (unlikely(sdebug_any_injecting_opt)) {
3044 struct sdebug_queued_cmd *sqcp =
3045 (struct sdebug_queued_cmd *)scp->host_scribble;
3047 if (sqcp) {
3048 if (sqcp->inj_recovered) {
3049 mk_sense_buffer(scp, RECOVERED_ERROR,
3050 THRESHOLD_EXCEEDED, 0);
3051 return check_condition_result;
3052 } else if (sqcp->inj_dif) {
3053 /* Logical block guard check failed */
3054 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3055 return illegal_condition_result;
3056 } else if (sqcp->inj_dix) {
3057 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3058 return illegal_condition_result;
3059 }
3060 }
3061 }
3062 return 0;
3063 }
3065 /*
3066 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3067 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3068 */
3069 static int resp_write_scat(struct scsi_cmnd *scp,
3070 struct sdebug_dev_info *devip)
3071 {
3072 u8 *cmd = scp->cmnd;
3073 u8 *lrdp = NULL;
3074 u8 *up;
3075 u8 wrprotect;
3076 u16 lbdof, num_lrd, k;
3077 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3078 u32 lb_size = sdebug_sector_size;
3079 u32 ei_lba;
3080 u64 lba;
3081 unsigned long iflags;
3082 int ret, res;
3083 bool is_16;
3084 static const u32 lrd_size = 32; /* LB range descriptor size; the parameter list header is also 32 bytes */
3086 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3087 is_16 = false;
3088 wrprotect = (cmd[10] >> 5) & 0x7;
3089 lbdof = get_unaligned_be16(cmd + 12);
3090 num_lrd = get_unaligned_be16(cmd + 16);
3091 bt_len = get_unaligned_be32(cmd + 28);
3092 } else { /* that leaves WRITE SCATTERED(16) */
3093 is_16 = true;
3094 wrprotect = (cmd[2] >> 5) & 0x7;
3095 lbdof = get_unaligned_be16(cmd + 4);
3096 num_lrd = get_unaligned_be16(cmd + 8);
3097 bt_len = get_unaligned_be32(cmd + 10);
3098 if (unlikely(have_dif_prot)) {
3099 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3100 wrprotect) {
3101 mk_sense_invalid_opcode(scp);
3102 return illegal_condition_result;
3103 }
3104 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3105 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3106 wrprotect == 0)
3107 sdev_printk(KERN_ERR, scp->device,
3108 "Unprotected WR to DIF device\n");
3111 if ((num_lrd == 0) || (bt_len == 0))
3112 return 0; /* T10 says these do-nothings are not errors */
3113 if (lbdof == 0) {
3114 if (sdebug_verbose)
3115 sdev_printk(KERN_INFO, scp->device,
3116 "%s: %s: LB Data Offset field bad\n",
3117 my_name, __func__);
3118 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3119 return illegal_condition_result;
3120 }
3121 lbdof_blen = lbdof * lb_size;
3122 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3123 if (sdebug_verbose)
3124 sdev_printk(KERN_INFO, scp->device,
3125 "%s: %s: LBA range descriptors don't fit\n",
3126 my_name, __func__);
3127 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3128 return illegal_condition_result;
3129 }
3130 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3131 if (lrdp == NULL)
3132 return SCSI_MLQUEUE_HOST_BUSY;
3133 if (sdebug_verbose)
3134 sdev_printk(KERN_INFO, scp->device,
3135 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3136 my_name, __func__, lbdof_blen);
3137 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3138 if (res == -1) {
3139 ret = DID_ERROR << 16;
3140 goto err_out;
3141 }
3143 write_lock_irqsave(&atomic_rw, iflags);
3144 sg_off = lbdof_blen;
3145 /* Spec says the Buffer Transfer Length field counts logical blocks in the data-out buffer */
3146 cum_lb = 0;
3147 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3148 lba = get_unaligned_be64(up + 0);
3149 num = get_unaligned_be32(up + 8);
3150 if (sdebug_verbose)
3151 sdev_printk(KERN_INFO, scp->device,
3152 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3153 my_name, __func__, k, lba, num, sg_off);
3154 if (num == 0)
3155 continue;
3156 ret = check_device_access_params(scp, lba, num);
3157 if (ret)
3158 goto err_out_unlock;
3159 num_by = num * lb_size;
3160 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3162 if ((cum_lb + num) > bt_len) {
3163 if (sdebug_verbose)
3164 sdev_printk(KERN_INFO, scp->device,
3165 "%s: %s: sum of blocks > data provided\n",
3166 my_name, __func__);
3167 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3168 0);
3169 ret = illegal_condition_result;
3170 goto err_out_unlock;
3171 }
3173 /* DIX + T10 DIF */
3174 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3175 int prot_ret = prot_verify_write(scp, lba, num,
3176 ei_lba);
3178 if (prot_ret) {
3179 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3180 prot_ret);
3181 ret = illegal_condition_result;
3182 goto err_out_unlock;
3183 }
3184 }
3186 ret = do_device_access(scp, sg_off, lba, num, true);
3187 if (unlikely(scsi_debug_lbp()))
3188 map_region(lba, num);
3189 if (unlikely(-1 == ret)) {
3190 ret = DID_ERROR << 16;
3191 goto err_out_unlock;
3192 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3193 sdev_printk(KERN_INFO, scp->device,
3194 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3195 my_name, num_by, ret);
3197 if (unlikely(sdebug_any_injecting_opt)) {
3198 struct sdebug_queued_cmd *sqcp =
3199 (struct sdebug_queued_cmd *)scp->host_scribble;
3201 if (sqcp) {
3202 if (sqcp->inj_recovered) {
3203 mk_sense_buffer(scp, RECOVERED_ERROR,
3204 THRESHOLD_EXCEEDED, 0);
3205 ret = illegal_condition_result;
3206 goto err_out_unlock;
3207 } else if (sqcp->inj_dif) {
3208 /* Logical block guard check failed */
3209 mk_sense_buffer(scp, ABORTED_COMMAND,
3210 0x10, 1);
3211 ret = illegal_condition_result;
3212 goto err_out_unlock;
3213 } else if (sqcp->inj_dix) {
3214 mk_sense_buffer(scp, ILLEGAL_REQUEST,
3215 0x10, 1);
3216 ret = illegal_condition_result;
3217 goto err_out_unlock;
3218 }
3219 }
3220 }
3221 sg_off += num_by;
3222 cum_lb += num;
3223 }
3224 ret = 0;
3225 err_out_unlock:
3226 write_unlock_irqrestore(&atomic_rw, iflags);
3227 err_out:
3228 kfree(lrdp);
3229 return ret;
3230 }
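/*
 * WRITE SCATTERED data-out layout: lbdof logical blocks holding the
 * 32 byte parameter list header plus the 32 byte LBA range
 * descriptors (8 byte LBA, 4 byte block count, and for the 32 byte
 * cdb form the expected initial LBA at offset 12), followed by the
 * write data for each descriptor in order; sg_off tracks that data.
 */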
3232 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3233 u32 ei_lba, bool unmap, bool ndob)
3234 {
3235 unsigned long iflags;
3236 unsigned long long i;
3237 int ret;
3238 u64 lba_off;
3240 ret = check_device_access_params(scp, lba, num);
3241 if (ret)
3242 return ret;
3244 write_lock_irqsave(&atomic_rw, iflags);
3246 if (unmap && scsi_debug_lbp()) {
3247 unmap_region(lba, num);
3248 goto out;
3249 }
3251 lba_off = lba * sdebug_sector_size;
3252 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3253 if (ndob) {
3254 memset(fake_storep + lba_off, 0, sdebug_sector_size);
3255 ret = 0;
3256 } else
3257 ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
3258 sdebug_sector_size);
3260 if (-1 == ret) {
3261 write_unlock_irqrestore(&atomic_rw, iflags);
3262 return DID_ERROR << 16;
3263 } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
3264 sdev_printk(KERN_INFO, scp->device,
3265 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3266 my_name, "write same",
3267 sdebug_sector_size, ret);
3269 /* Copy first sector to remaining blocks */
3270 for (i = 1 ; i < num ; i++)
3271 memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
3272 fake_storep + lba_off,
3273 sdebug_sector_size);
3275 if (scsi_debug_lbp())
3276 map_region(lba, num);
3277 out:
3278 write_unlock_irqrestore(&atomic_rw, iflags);
3280 return 0;
3281 }
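/*
 * Behaviour sketch: with the UNMAP bit and thin provisioning enabled
 * the range is simply deallocated; otherwise one logical block is
 * fetched from the data-out buffer (or zeroed when NDOB is set) and
 * replicated across the remaining num - 1 blocks.
 */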
3283 static int resp_write_same_10(struct scsi_cmnd *scp,
3284 struct sdebug_dev_info *devip)
3285 {
3286 u8 *cmd = scp->cmnd;
3287 u32 lba;
3288 u16 num;
3289 u32 ei_lba = 0;
3290 bool unmap = false;
3292 if (cmd[1] & 0x8) {
3293 if (sdebug_lbpws10 == 0) {
3294 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3295 return check_condition_result;
3296 } else
3297 unmap = true;
3298 }
3299 lba = get_unaligned_be32(cmd + 2);
3300 num = get_unaligned_be16(cmd + 7);
3301 if (num > sdebug_write_same_length) {
3302 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3303 return check_condition_result;
3304 }
3305 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3306 }
3308 static int resp_write_same_16(struct scsi_cmnd *scp,
3309 struct sdebug_dev_info *devip)
3310 {
3311 u8 *cmd = scp->cmnd;
3312 u64 lba;
3313 u32 num;
3314 u32 ei_lba = 0;
3315 bool unmap = false;
3316 bool ndob = false;
3318 if (cmd[1] & 0x8) { /* UNMAP */
3319 if (sdebug_lbpws == 0) {
3320 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3321 return check_condition_result;
3322 } else
3323 unmap = true;
3324 }
3325 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3326 ndob = true;
3327 lba = get_unaligned_be64(cmd + 2);
3328 num = get_unaligned_be32(cmd + 10);
3329 if (num > sdebug_write_same_length) {
3330 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3331 return check_condition_result;
3332 }
3333 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3334 }
3336 /* Note the mode field is in the same position as the (lower) service action
3337 * field. For the Report supported operation codes command, SPC-4 suggests
3338 * each mode of this command should be reported separately; that is left for the future. */
3339 static int resp_write_buffer(struct scsi_cmnd *scp,
3340 struct sdebug_dev_info *devip)
3341 {
3342 u8 *cmd = scp->cmnd;
3343 struct scsi_device *sdp = scp->device;
3344 struct sdebug_dev_info *dp;
3345 u8 mode;
3347 mode = cmd[1] & 0x1f;
3348 switch (mode) {
3349 case 0x4: /* download microcode (MC) and activate (ACT) */
3350 /* set UAs on this device only */
3351 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3352 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3353 break;
3354 case 0x5: /* download MC, save and ACT */
3355 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3356 break;
3357 case 0x6: /* download MC with offsets and ACT */
3358 /* set UAs on most devices (LUs) in this target */
3359 list_for_each_entry(dp,
3360 &devip->sdbg_host->dev_info_list,
3361 dev_list)
3362 if (dp->target == sdp->id) {
3363 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3364 if (devip != dp)
3365 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3366 dp->uas_bm);
3367 }
3368 break;
3369 case 0x7: /* download MC with offsets, save, and ACT */
3370 /* set UA on all devices (LUs) in this target */
3371 list_for_each_entry(dp,
3372 &devip->sdbg_host->dev_info_list,
3373 dev_list)
3374 if (dp->target == sdp->id)
3375 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3376 dp->uas_bm);
3377 break;
3378 default:
3379 /* do nothing for this command for other mode values */
3380 break;
3381 }
3382 return 0;
3383 }
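/*
 * The download-microcode modes differ only in which unit attentions
 * they raise: modes 4 and 5 touch the addressed LU only, while the
 * "with offsets" modes 6 and 7 sweep every LU on the target,
 * modelling a firmware activation visible to all logical units.
 */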
3385 static int resp_comp_write(struct scsi_cmnd *scp,
3386 struct sdebug_dev_info *devip)
3387 {
3388 u8 *cmd = scp->cmnd;
3389 u8 *arr;
3390 u8 *fake_storep_hold;
3391 u64 lba;
3392 u32 dnum;
3393 u32 lb_size = sdebug_sector_size;
3394 u8 num;
3395 unsigned long iflags;
3396 int ret;
3397 int retval = 0;
3399 lba = get_unaligned_be64(cmd + 2);
3400 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3401 if (0 == num)
3402 return 0; /* degenerate case, not an error */
3403 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3404 (cmd[1] & 0xe0)) {
3405 mk_sense_invalid_opcode(scp);
3406 return check_condition_result;
3407 }
3408 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3409 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3410 (cmd[1] & 0xe0) == 0)
3411 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3412 "to DIF device\n");
3414 /* inline check_device_access_params() */
3415 if (lba + num > sdebug_capacity) {
3416 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3417 return check_condition_result;
3418 }
3419 /* transfer length excessive (tie in to block limits VPD page) */
3420 if (num > sdebug_store_sectors) {
3421 /* needs work to find which cdb byte 'num' comes from */
3422 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3423 return check_condition_result;
3424 }
3425 dnum = 2 * num;
3426 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3427 if (NULL == arr) {
3428 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3429 INSUFF_RES_ASCQ);
3430 return check_condition_result;
3431 }
3433 write_lock_irqsave(&atomic_rw, iflags);
3435 /* trick do_device_access() to fetch both compare and write buffers
3436 * from data-in into arr. Safe (atomic) since write_lock held. */
3437 fake_storep_hold = fake_storep;
3438 fake_storep = arr;
3439 ret = do_device_access(scp, 0, 0, dnum, true);
3440 fake_storep = fake_storep_hold;
3441 if (ret == -1) {
3442 retval = DID_ERROR << 16;
3443 goto cleanup;
3444 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3445 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3446 "indicated=%u, IO sent=%d bytes\n", my_name,
3447 dnum * lb_size, ret);
3448 if (!comp_write_worker(lba, num, arr)) {
3449 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3450 retval = check_condition_result;
3451 goto cleanup;
3452 }
3453 if (scsi_debug_lbp())
3454 map_region(lba, num);
3455 cleanup:
3456 write_unlock_irqrestore(&atomic_rw, iflags);
3457 kfree(arr);
3458 return retval;
3459 }
3461 struct unmap_block_desc {
3462 __be64 lba;
3463 __be32 blocks;
3464 __be32 __reserved;
3465 };
3467 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3468 {
3469 unsigned char *buf;
3470 struct unmap_block_desc *desc;
3471 unsigned int i, payload_len, descriptors;
3472 int ret;
3473 unsigned long iflags;
3476 if (!scsi_debug_lbp())
3477 return 0; /* fib and say its done */
3478 payload_len = get_unaligned_be16(scp->cmnd + 7);
3479 BUG_ON(scsi_bufflen(scp) != payload_len);
3481 descriptors = (payload_len - 8) / 16;
3482 if (descriptors > sdebug_unmap_max_desc) {
3483 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3484 return check_condition_result;
3485 }
3487 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3488 if (!buf) {
3489 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3490 INSUFF_RES_ASCQ);
3491 return check_condition_result;
3492 }
3494 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3496 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3497 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3499 desc = (void *)&buf[8];
3501 write_lock_irqsave(&atomic_rw, iflags);
3503 for (i = 0 ; i < descriptors ; i++) {
3504 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3505 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3507 ret = check_device_access_params(scp, lba, num);
3508 if (ret)
3509 goto out;
3511 unmap_region(lba, num);
3514 ret = 0;
3516 out:
3517 write_unlock_irqrestore(&atomic_rw, iflags);
3518 kfree(buf);
3520 return ret;
3523 #define SDEBUG_GET_LBA_STATUS_LEN 32
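/* The GET LBA STATUS response built below is an 8 byte header (parameter
 * data length = 20) plus a single 16 byte LBA status descriptor: LBA,
 * number of blocks and a provisioning status byte (0 -> mapped,
 * 1 -> deallocated), so a fixed 32 byte buffer is sufficient.
 */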
3525 static int resp_get_lba_status(struct scsi_cmnd *scp,
3526 struct sdebug_dev_info *devip)
3528 u8 *cmd = scp->cmnd;
3529 u64 lba;
3530 u32 alloc_len, mapped, num;
3531 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3532 int ret;
3534 lba = get_unaligned_be64(cmd + 2);
3535 alloc_len = get_unaligned_be32(cmd + 10);
3537 if (alloc_len < 24)
3538 return 0;
3540 ret = check_device_access_params(scp, lba, 1);
3541 if (ret)
3542 return ret;
3544 if (scsi_debug_lbp())
3545 mapped = map_state(lba, &num);
3546 else {
3547 mapped = 1;
3548 /* following just in case virtual_gb changed */
3549 sdebug_capacity = get_sdebug_capacity();
3550 if (sdebug_capacity - lba <= 0xffffffff)
3551 num = sdebug_capacity - lba;
3552 else
3553 num = 0xffffffff;
3556 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3557 put_unaligned_be32(20, arr); /* Parameter Data Length */
3558 put_unaligned_be64(lba, arr + 8); /* LBA */
3559 put_unaligned_be32(num, arr + 16); /* Number of blocks */
3560 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
3562 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3565 #define RL_BUCKET_ELEMS 8
3567 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
3568 * (W-LUN), the normal Linux scanning logic does not associate it with a
3569 * device (e.g. /dev/sg7). The following magic will make that association:
3570 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
3571 * where <n> is a host number. If there are multiple targets in a host then
3572 * the above will associate a W-LUN to each target. To only get a W-LUN
3573 * for target 2, then use "echo '- 2 49409' > scan" .
3575 static int resp_report_luns(struct scsi_cmnd *scp,
3576 struct sdebug_dev_info *devip)
3578 unsigned char *cmd = scp->cmnd;
3579 unsigned int alloc_len;
3580 unsigned char select_report;
3581 u64 lun;
3582 struct scsi_lun *lun_p;
3583 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
3584 unsigned int lun_cnt; /* normal LUN count (max: 256) */
3585 unsigned int wlun_cnt; /* report luns W-LUN count */
3586 unsigned int tlun_cnt; /* total LUN count */
3587 unsigned int rlen; /* response length (in bytes) */
3588 int k, j, n, res;
3589 unsigned int off_rsp = 0;
3590 const int sz_lun = sizeof(struct scsi_lun);
3592 clear_luns_changed_on_target(devip);
3594 select_report = cmd[2];
3595 alloc_len = get_unaligned_be32(cmd + 6);
3597 if (alloc_len < 4) {
3598 pr_err("alloc len too small %d\n", alloc_len);
3599 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
3600 return check_condition_result;
3603 switch (select_report) {
3604 case 0: /* all LUNs apart from W-LUNs */
3605 lun_cnt = sdebug_max_luns;
3606 wlun_cnt = 0;
3607 break;
3608 case 1: /* only W-LUNs */
3609 lun_cnt = 0;
3610 wlun_cnt = 1;
3611 break;
3612 case 2: /* all LUNs */
3613 lun_cnt = sdebug_max_luns;
3614 wlun_cnt = 1;
3615 break;
3616 case 0x10: /* only administrative LUs */
3617 case 0x11: /* see SPC-5 */
3618 case 0x12: /* only subsidiary LUs owned by referenced LU */
3619 default:
3620 pr_debug("select report invalid %d\n", select_report);
3621 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
3622 return check_condition_result;
3625 if (sdebug_no_lun_0 && (lun_cnt > 0))
3626 --lun_cnt;
3628 tlun_cnt = lun_cnt + wlun_cnt;
3629 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
3630 scsi_set_resid(scp, scsi_bufflen(scp));
3631 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
3632 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
3634 /* loops rely on sizeof response header same as sizeof lun (both 8) */
3635 lun = sdebug_no_lun_0 ? 1 : 0;
3636 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
3637 memset(arr, 0, sizeof(arr));
3638 lun_p = (struct scsi_lun *)&arr[0];
3639 if (k == 0) {
3640 put_unaligned_be32(rlen, &arr[0]);
3641 ++lun_p;
3642 j = 1;
3644 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
3645 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
3646 break;
3647 int_to_scsilun(lun++, lun_p);
3649 if (j < RL_BUCKET_ELEMS)
3650 break;
3651 n = j * sz_lun;
3652 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
3653 if (res)
3654 return res;
3655 off_rsp += n;
3657 if (wlun_cnt) {
3658 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
3659 ++j;
3661 if (j > 0)
3662 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
3663 return res;
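/* XDWRITEREAD is a bidirectional command: resp_xdwriteread_10() further
 * below first reads the old data into the data-in buffer (and normally
 * also writes the data-out buffer), then this helper XORs the data-out
 * payload into the data-in buffer via an sg_miter walk, so the initiator
 * receives old-data XOR new-data.
 */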
3666 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3667 unsigned int num, struct sdebug_dev_info *devip)
3669 int j;
3670 unsigned char *kaddr, *buf;
3671 unsigned int offset;
3672 struct scsi_data_buffer *sdb = scsi_in(scp);
3673 struct sg_mapping_iter miter;
3675 /* would be better to avoid this temporary buffer */
3676 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3677 if (!buf) {
3678 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3679 INSUFF_RES_ASCQ);
3680 return check_condition_result;
3683 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3685 offset = 0;
3686 sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3687 SG_MITER_ATOMIC | SG_MITER_TO_SG);
3689 while (sg_miter_next(&miter)) {
3690 kaddr = miter.addr;
3691 for (j = 0; j < miter.length; j++)
3692 *(kaddr + j) ^= *(buf + offset + j);
3694 offset += miter.length;
3696 sg_miter_stop(&miter);
3697 kfree(buf);
3699 return 0;
3702 static int resp_xdwriteread_10(struct scsi_cmnd *scp,
3703 struct sdebug_dev_info *devip)
3705 u8 *cmd = scp->cmnd;
3706 u64 lba;
3707 u32 num;
3708 int errsts;
3710 if (!scsi_bidi_cmnd(scp)) {
3711 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3712 INSUFF_RES_ASCQ);
3713 return check_condition_result;
3715 errsts = resp_read_dt0(scp, devip);
3716 if (errsts)
3717 return errsts;
3718 if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */
3719 errsts = resp_write_dt0(scp, devip);
3720 if (errsts)
3721 return errsts;
3723 lba = get_unaligned_be32(cmd + 2);
3724 num = get_unaligned_be16(cmd + 7);
3725 return resp_xdwriteread(scp, lba, num, devip);
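/* Select the submit queue for a command: with multi-queue active the
 * hardware queue number is recovered from the request's unique tag and
 * used to index sdebug_q_arr; otherwise everything funnels through the
 * first (only) queue.
 */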
3728 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
3730 struct sdebug_queue *sqp = sdebug_q_arr;
3732 if (sdebug_mq_active) {
3733 u32 tag = blk_mq_unique_tag(cmnd->request);
3734 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
3736 if (unlikely(hwq >= submit_queues)) {
3737 pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
3738 hwq %= submit_queues;
3740 pr_debug("tag=%u, hwq=%d\n", tag, hwq);
3741 return sqp + hwq;
3742 } else
3743 return sqp;
3746 /* Queued (deferred) command completions converge here. */
3747 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
3749 int qc_idx;
3750 int retiring = 0;
3751 unsigned long iflags;
3752 struct sdebug_queue *sqp;
3753 struct sdebug_queued_cmd *sqcp;
3754 struct scsi_cmnd *scp;
3755 struct sdebug_dev_info *devip;
3757 sd_dp->defer_t = SDEB_DEFER_NONE;
3758 qc_idx = sd_dp->qc_idx;
3759 sqp = sdebug_q_arr + sd_dp->sqa_idx;
3760 if (sdebug_statistics) {
3761 atomic_inc(&sdebug_completions);
3762 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
3763 atomic_inc(&sdebug_miss_cpus);
3765 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
3766 pr_err("wild qc_idx=%d\n", qc_idx);
3767 return;
3769 spin_lock_irqsave(&sqp->qc_lock, iflags);
3770 sqcp = &sqp->qc_arr[qc_idx];
3771 scp = sqcp->a_cmnd;
3772 if (unlikely(scp == NULL)) {
3773 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3774 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
3775 sd_dp->sqa_idx, qc_idx);
3776 return;
3778 devip = (struct sdebug_dev_info *)scp->device->hostdata;
3779 if (likely(devip))
3780 atomic_dec(&devip->num_in_q);
3781 else
3782 pr_err("devip=NULL\n");
3783 if (unlikely(atomic_read(&retired_max_queue) > 0))
3784 retiring = 1;
3786 sqcp->a_cmnd = NULL;
3787 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
3788 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3789 pr_err("Unexpected completion\n");
3790 return;
3793 if (unlikely(retiring)) { /* user has reduced max_queue */
3794 int k, retval;
3796 retval = atomic_read(&retired_max_queue);
3797 if (qc_idx >= retval) {
3798 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3799 pr_err("index %d too large\n", retval);
3800 return;
3802 k = find_last_bit(sqp->in_use_bm, retval);
3803 if ((k < sdebug_max_queue) || (k == retval))
3804 atomic_set(&retired_max_queue, 0);
3805 else
3806 atomic_set(&retired_max_queue, k + 1);
3808 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3809 scp->scsi_done(scp); /* callback to mid level */
3812 /* When high resolution timer goes off this function is called. */
3813 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3815 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
3816 hrt);
3817 sdebug_q_cmd_complete(sd_dp);
3818 return HRTIMER_NORESTART;
3821 /* When work queue schedules work, it calls this function. */
3822 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
3824 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
3825 ew.work);
3826 sdebug_q_cmd_complete(sd_dp);
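/* LU name (UUID) policy: uuid_ctl=1 generates a fresh UUID per logical
 * unit, while uuid_ctl=2 lazily generates one UUID and shares it across
 * all logical units via the two statics below.
 */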
3829 static bool got_shared_uuid;
3830 static uuid_t shared_uuid;
3832 static struct sdebug_dev_info *sdebug_device_create(
3833 struct sdebug_host_info *sdbg_host, gfp_t flags)
3835 struct sdebug_dev_info *devip;
3837 devip = kzalloc(sizeof(*devip), flags);
3838 if (devip) {
3839 if (sdebug_uuid_ctl == 1)
3840 uuid_gen(&devip->lu_name);
3841 else if (sdebug_uuid_ctl == 2) {
3842 if (got_shared_uuid)
3843 devip->lu_name = shared_uuid;
3844 else {
3845 uuid_gen(&shared_uuid);
3846 got_shared_uuid = true;
3847 devip->lu_name = shared_uuid;
3850 devip->sdbg_host = sdbg_host;
3851 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3853 return devip;
3856 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
3858 struct sdebug_host_info *sdbg_host;
3859 struct sdebug_dev_info *open_devip = NULL;
3860 struct sdebug_dev_info *devip;
3862 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3863 if (!sdbg_host) {
3864 pr_err("Host info NULL\n");
3865 return NULL;
3867 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3868 if ((devip->used) && (devip->channel == sdev->channel) &&
3869 (devip->target == sdev->id) &&
3870 (devip->lun == sdev->lun))
3871 return devip;
3872 else {
3873 if ((!devip->used) && (!open_devip))
3874 open_devip = devip;
3877 if (!open_devip) { /* try and make a new one */
3878 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3879 if (!open_devip) {
3880 pr_err("out of memory at line %d\n", __LINE__);
3881 return NULL;
3885 open_devip->channel = sdev->channel;
3886 open_devip->target = sdev->id;
3887 open_devip->lun = sdev->lun;
3888 open_devip->sdbg_host = sdbg_host;
3889 atomic_set(&open_devip->num_in_q, 0);
3890 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3891 open_devip->used = true;
3892 return open_devip;
3895 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3897 if (sdebug_verbose)
3898 pr_info("slave_alloc <%u %u %u %llu>\n",
3899 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3900 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3901 return 0;
3904 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3906 struct sdebug_dev_info *devip =
3907 (struct sdebug_dev_info *)sdp->hostdata;
3909 if (sdebug_verbose)
3910 pr_info("slave_configure <%u %u %u %llu>\n",
3911 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3912 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
3913 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
3914 if (devip == NULL) {
3915 devip = find_build_dev_info(sdp);
3916 if (devip == NULL)
3917 return 1; /* no resources, will be marked offline */
3919 sdp->hostdata = devip;
3920 blk_queue_max_segment_size(sdp->request_queue, -1U);
3921 if (sdebug_no_uld)
3922 sdp->no_uld_attach = 1;
3923 config_cdb_len(sdp);
3924 return 0;
3927 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3929 struct sdebug_dev_info *devip =
3930 (struct sdebug_dev_info *)sdp->hostdata;
3932 if (sdebug_verbose)
3933 pr_info("slave_destroy <%u %u %u %llu>\n",
3934 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3935 if (devip) {
3936 /* make this slot available for re-use */
3937 devip->used = false;
3938 sdp->hostdata = NULL;
3942 static void stop_qc_helper(struct sdebug_defer *sd_dp,
3943 enum sdeb_defer_type defer_t)
3945 if (!sd_dp)
3946 return;
3947 if (defer_t == SDEB_DEFER_HRT)
3948 hrtimer_cancel(&sd_dp->hrt);
3949 else if (defer_t == SDEB_DEFER_WQ)
3950 cancel_work_sync(&sd_dp->ew.work);
3953 /* If @cmnd is found, deletes its timer or work queue and returns true;
3954 * else returns false */
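/* Note that qc_lock is dropped before stop_qc_helper() is called:
 * hrtimer_cancel() and cancel_work_sync() may wait for
 * sdebug_q_cmd_complete(), which takes the same lock.
 */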
3955 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
3957 unsigned long iflags;
3958 int j, k, qmax, r_qmax;
3959 enum sdeb_defer_type l_defer_t;
3960 struct sdebug_queue *sqp;
3961 struct sdebug_queued_cmd *sqcp;
3962 struct sdebug_dev_info *devip;
3963 struct sdebug_defer *sd_dp;
3965 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
3966 spin_lock_irqsave(&sqp->qc_lock, iflags);
3967 qmax = sdebug_max_queue;
3968 r_qmax = atomic_read(&retired_max_queue);
3969 if (r_qmax > qmax)
3970 qmax = r_qmax;
3971 for (k = 0; k < qmax; ++k) {
3972 if (test_bit(k, sqp->in_use_bm)) {
3973 sqcp = &sqp->qc_arr[k];
3974 if (cmnd != sqcp->a_cmnd)
3975 continue;
3976 /* found */
3977 devip = (struct sdebug_dev_info *)
3978 cmnd->device->hostdata;
3979 if (devip)
3980 atomic_dec(&devip->num_in_q);
3981 sqcp->a_cmnd = NULL;
3982 sd_dp = sqcp->sd_dp;
3983 if (sd_dp) {
3984 l_defer_t = sd_dp->defer_t;
3985 sd_dp->defer_t = SDEB_DEFER_NONE;
3986 } else
3987 l_defer_t = SDEB_DEFER_NONE;
3988 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3989 stop_qc_helper(sd_dp, l_defer_t);
3990 clear_bit(k, sqp->in_use_bm);
3991 return true;
3994 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
3996 return false;
3999 /* Deletes (stops) timers or work queues of all queued commands */
4000 static void stop_all_queued(void)
4002 unsigned long iflags;
4003 int j, k;
4004 enum sdeb_defer_type l_defer_t;
4005 struct sdebug_queue *sqp;
4006 struct sdebug_queued_cmd *sqcp;
4007 struct sdebug_dev_info *devip;
4008 struct sdebug_defer *sd_dp;
4010 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4011 spin_lock_irqsave(&sqp->qc_lock, iflags);
4012 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4013 if (test_bit(k, sqp->in_use_bm)) {
4014 sqcp = &sqp->qc_arr[k];
4015 if (sqcp->a_cmnd == NULL)
4016 continue;
4017 devip = (struct sdebug_dev_info *)
4018 sqcp->a_cmnd->device->hostdata;
4019 if (devip)
4020 atomic_dec(&devip->num_in_q);
4021 sqcp->a_cmnd = NULL;
4022 sd_dp = sqcp->sd_dp;
4023 if (sd_dp) {
4024 l_defer_t = sd_dp->defer_t;
4025 sd_dp->defer_t = SDEB_DEFER_NONE;
4026 } else
4027 l_defer_t = SDEB_DEFER_NONE;
4028 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4029 stop_qc_helper(sd_dp, l_defer_t);
4030 clear_bit(k, sqp->in_use_bm);
4031 spin_lock_irqsave(&sqp->qc_lock, iflags);
4034 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4038 /* Free queued command memory on heap */
4039 static void free_all_queued(void)
4041 int j, k;
4042 struct sdebug_queue *sqp;
4043 struct sdebug_queued_cmd *sqcp;
4045 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4046 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
4047 sqcp = &sqp->qc_arr[k];
4048 kfree(sqcp->sd_dp);
4049 sqcp->sd_dp = NULL;
4054 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
4056 bool ok;
4058 ++num_aborts;
4059 if (SCpnt) {
4060 ok = stop_queued_cmnd(SCpnt);
4061 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4062 sdev_printk(KERN_INFO, SCpnt->device,
4063 "%s: command%s found\n", __func__,
4064 ok ? "" : " not");
4066 return SUCCESS;
4069 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
4071 ++num_dev_resets;
4072 if (SCpnt && SCpnt->device) {
4073 struct scsi_device *sdp = SCpnt->device;
4074 struct sdebug_dev_info *devip =
4075 (struct sdebug_dev_info *)sdp->hostdata;
4077 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4078 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4079 if (devip)
4080 set_bit(SDEBUG_UA_POR, devip->uas_bm);
4082 return SUCCESS;
4085 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
4087 struct sdebug_host_info *sdbg_host;
4088 struct sdebug_dev_info *devip;
4089 struct scsi_device *sdp;
4090 struct Scsi_Host *hp;
4091 int k = 0;
4093 ++num_target_resets;
4094 if (!SCpnt)
4095 goto lie;
4096 sdp = SCpnt->device;
4097 if (!sdp)
4098 goto lie;
4099 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4100 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4101 hp = sdp->host;
4102 if (!hp)
4103 goto lie;
4104 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4105 if (sdbg_host) {
4106 list_for_each_entry(devip,
4107 &sdbg_host->dev_info_list,
4108 dev_list)
4109 if (devip->target == sdp->id) {
4110 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4111 ++k;
4114 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4115 sdev_printk(KERN_INFO, sdp,
4116 "%s: %d device(s) found in target\n", __func__, k);
4117 lie:
4118 return SUCCESS;
4121 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
4123 struct sdebug_host_info *sdbg_host;
4124 struct sdebug_dev_info *devip;
4125 struct scsi_device *sdp;
4126 struct Scsi_Host *hp;
4127 int k = 0;
4129 ++num_bus_resets;
4130 if (!(SCpnt && SCpnt->device))
4131 goto lie;
4132 sdp = SCpnt->device;
4133 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
4134 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
4135 hp = sdp->host;
4136 if (hp) {
4137 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
4138 if (sdbg_host) {
4139 list_for_each_entry(devip,
4140 &sdbg_host->dev_info_list,
4141 dev_list) {
4142 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4143 ++k;
4147 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4148 sdev_printk(KERN_INFO, sdp,
4149 "%s: %d device(s) found in host\n", __func__, k);
4150 lie:
4151 return SUCCESS;
4154 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
4156 struct sdebug_host_info * sdbg_host;
4157 struct sdebug_dev_info *devip;
4158 int k = 0;
4160 ++num_host_resets;
4161 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
4162 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
4163 spin_lock(&sdebug_host_list_lock);
4164 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
4165 list_for_each_entry(devip, &sdbg_host->dev_info_list,
4166 dev_list) {
4167 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4168 ++k;
4171 spin_unlock(&sdebug_host_list_lock);
4172 stop_all_queued();
4173 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
4174 sdev_printk(KERN_INFO, SCpnt->device,
4175 "%s: %d device(s) found\n", __func__, k);
4176 return SUCCESS;
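/* The reset handlers above unconditionally report SUCCESS (two of them
 * via the aptly named "lie" labels): there is no real hardware to reset,
 * so they just raise unit attention conditions on the affected devices.
 */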
4179 static void __init sdebug_build_parts(unsigned char *ramp,
4180 unsigned long store_size)
4182 struct partition * pp;
4183 int starts[SDEBUG_MAX_PARTS + 2];
4184 int sectors_per_part, num_sectors, k;
4185 int heads_by_sects, start_sec, end_sec;
4187 /* assume partition table already zeroed */
4188 if ((sdebug_num_parts < 1) || (store_size < 1048576))
4189 return;
4190 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
4191 sdebug_num_parts = SDEBUG_MAX_PARTS;
4192 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
4194 num_sectors = (int)sdebug_store_sectors;
4195 sectors_per_part = (num_sectors - sdebug_sectors_per)
4196 / sdebug_num_parts;
4197 heads_by_sects = sdebug_heads * sdebug_sectors_per;
4198 starts[0] = sdebug_sectors_per;
4199 for (k = 1; k < sdebug_num_parts; ++k)
4200 starts[k] = ((k * sectors_per_part) / heads_by_sects)
4201 * heads_by_sects;
4202 starts[sdebug_num_parts] = num_sectors;
4203 starts[sdebug_num_parts + 1] = 0;
4205 ramp[510] = 0x55; /* magic partition markings */
4206 ramp[511] = 0xAA;
4207 pp = (struct partition *)(ramp + 0x1be);
4208 for (k = 0; starts[k + 1]; ++k, ++pp) {
4209 start_sec = starts[k];
4210 end_sec = starts[k + 1] - 1;
4211 pp->boot_ind = 0;
4213 pp->cyl = start_sec / heads_by_sects;
4214 pp->head = (start_sec - (pp->cyl * heads_by_sects))
4215 / sdebug_sectors_per;
4216 pp->sector = (start_sec % sdebug_sectors_per) + 1;
4218 pp->end_cyl = end_sec / heads_by_sects;
4219 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
4220 / sdebug_sectors_per;
4221 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
4223 pp->start_sect = cpu_to_le32(start_sec);
4224 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
4225 pp->sys_ind = 0x83; /* plain Linux partition */
4229 static void block_unblock_all_queues(bool block)
4231 int j;
4232 struct sdebug_queue *sqp;
4234 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
4235 atomic_set(&sqp->blocked, (int)block);
4238 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
4239 * commands will be processed normally before triggers occur.
4240 */
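/* Worked example (illustrative numbers): with every_nth=100 and a current
 * count of 437, the count is rounded down to 400, so the trigger in
 * setup_inject() will not fire again until the count reaches the next
 * multiple, 500.
 */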
4241 static void tweak_cmnd_count(void)
4243 int count, modulo;
4245 modulo = abs(sdebug_every_nth);
4246 if (modulo < 2)
4247 return;
4248 block_unblock_all_queues(true);
4249 count = atomic_read(&sdebug_cmnd_count);
4250 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
4251 block_unblock_all_queues(false);
4254 static void clear_queue_stats(void)
4256 atomic_set(&sdebug_cmnd_count, 0);
4257 atomic_set(&sdebug_completions, 0);
4258 atomic_set(&sdebug_miss_cpus, 0);
4259 atomic_set(&sdebug_a_tsf, 0);
4262 static void setup_inject(struct sdebug_queue *sqp,
4263 struct sdebug_queued_cmd *sqcp)
4265 if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
4266 return;
4267 sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
4268 sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
4269 sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
4270 sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
4271 sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
4272 sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
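/* Each inj_* flag above mirrors one SDEBUG_OPT_* bit. Because of the
 * early return, the flags are only (re)armed on every abs(every_nth)-th
 * command and keep their previous values in between.
 */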
4275 /* Complete the processing of the thread that queued a SCSI command to this
4276 * driver. It either completes the command by calling cmnd_done() or
4277 * schedules a hr timer or work queue then returns 0. Returns
4278 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
4279 */
4280 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
4281 int scsi_result, int delta_jiff, int ndelay)
4283 unsigned long iflags;
4284 int k, num_in_q, qdepth, inject;
4285 struct sdebug_queue *sqp;
4286 struct sdebug_queued_cmd *sqcp;
4287 struct scsi_device *sdp;
4288 struct sdebug_defer *sd_dp;
4290 if (unlikely(devip == NULL)) {
4291 if (scsi_result == 0)
4292 scsi_result = DID_NO_CONNECT << 16;
4293 goto respond_in_thread;
4295 sdp = cmnd->device;
4297 if (unlikely(sdebug_verbose && scsi_result))
4298 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
4299 __func__, scsi_result);
4300 if (delta_jiff == 0)
4301 goto respond_in_thread;
4303 /* schedule the response at a later time if resources permit */
4304 sqp = get_queue(cmnd);
4305 spin_lock_irqsave(&sqp->qc_lock, iflags);
4306 if (unlikely(atomic_read(&sqp->blocked))) {
4307 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4308 return SCSI_MLQUEUE_HOST_BUSY;
4310 num_in_q = atomic_read(&devip->num_in_q);
4311 qdepth = cmnd->device->queue_depth;
4312 inject = 0;
4313 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
4314 if (scsi_result) {
4315 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4316 goto respond_in_thread;
4317 } else
4318 scsi_result = device_qfull_result;
4319 } else if (unlikely(sdebug_every_nth &&
4320 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
4321 (scsi_result == 0))) {
4322 if ((num_in_q == (qdepth - 1)) &&
4323 (atomic_inc_return(&sdebug_a_tsf) >=
4324 abs(sdebug_every_nth))) {
4325 atomic_set(&sdebug_a_tsf, 0);
4326 inject = 1;
4327 scsi_result = device_qfull_result;
4331 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
4332 if (unlikely(k >= sdebug_max_queue)) {
4333 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4334 if (scsi_result)
4335 goto respond_in_thread;
4336 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
4337 scsi_result = device_qfull_result;
4338 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
4339 sdev_printk(KERN_INFO, sdp,
4340 "%s: max_queue=%d exceeded, %s\n",
4341 __func__, sdebug_max_queue,
4342 (scsi_result ? "status: TASK SET FULL" :
4343 "report: host busy"));
4344 if (scsi_result)
4345 goto respond_in_thread;
4346 else
4347 return SCSI_MLQUEUE_HOST_BUSY;
4349 __set_bit(k, sqp->in_use_bm);
4350 atomic_inc(&devip->num_in_q);
4351 sqcp = &sqp->qc_arr[k];
4352 sqcp->a_cmnd = cmnd;
4353 cmnd->host_scribble = (unsigned char *)sqcp;
4354 cmnd->result = scsi_result;
4355 sd_dp = sqcp->sd_dp;
4356 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4357 if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
4358 setup_inject(sqp, sqcp);
4359 if (sd_dp == NULL) {
4360 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
4361 if (sd_dp == NULL)
4362 return SCSI_MLQUEUE_HOST_BUSY;
4364 if (delta_jiff > 0 || ndelay > 0) {
4365 ktime_t kt;
4367 if (delta_jiff > 0) {
4368 kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
4369 } else
4370 kt = ndelay;
4371 if (!sd_dp->init_hrt) {
4372 sd_dp->init_hrt = true;
4373 sqcp->sd_dp = sd_dp;
4374 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
4375 HRTIMER_MODE_REL_PINNED);
4376 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
4377 sd_dp->sqa_idx = sqp - sdebug_q_arr;
4378 sd_dp->qc_idx = k;
4380 if (sdebug_statistics)
4381 sd_dp->issuing_cpu = raw_smp_processor_id();
4382 sd_dp->defer_t = SDEB_DEFER_HRT;
4383 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
4384 } else { /* jdelay < 0, use work queue */
4385 if (!sd_dp->init_wq) {
4386 sd_dp->init_wq = true;
4387 sqcp->sd_dp = sd_dp;
4388 sd_dp->sqa_idx = sqp - sdebug_q_arr;
4389 sd_dp->qc_idx = k;
4390 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
4392 if (sdebug_statistics)
4393 sd_dp->issuing_cpu = raw_smp_processor_id();
4394 sd_dp->defer_t = SDEB_DEFER_WQ;
4395 schedule_work(&sd_dp->ew.work);
4397 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
4398 (scsi_result == device_qfull_result)))
4399 sdev_printk(KERN_INFO, sdp,
4400 "%s: num_in_q=%d +1, %s%s\n", __func__,
4401 num_in_q, (inject ? "<inject> " : ""),
4402 "status: TASK SET FULL");
4403 return 0;
4405 respond_in_thread: /* call back to mid-layer using invocation thread */
4406 cmnd->result = scsi_result;
4407 cmnd->scsi_done(cmnd);
4408 return 0;
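/* In summary, schedule_resp() completes in the submitting thread when the
 * delay is zero (or on early errors), arms a pinned hrtimer when the
 * delay/ndelay is positive, and falls back to a work queue for the "tiny"
 * negative delay values.
 */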
4411 /* Note: The following macros create attribute files in the
4412 /sys/module/scsi_debug/parameters directory. Unfortunately this
4413 driver is unaware of a change and cannot trigger auxiliary actions
4414 as it can when the corresponding attribute in the
4415 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4416 */
4417 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
4418 module_param_named(ato, sdebug_ato, int, S_IRUGO);
4419 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
4420 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
4421 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
4422 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
4423 module_param_named(dif, sdebug_dif, int, S_IRUGO);
4424 module_param_named(dix, sdebug_dix, int, S_IRUGO);
4425 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
4426 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
4427 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
4428 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
4429 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
4430 module_param_string(inq_vendor, sdebug_inq_vendor_id,
4431 sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
4432 module_param_string(inq_product, sdebug_inq_product_id,
4433 sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
4434 module_param_string(inq_rev, sdebug_inq_product_rev,
4435 sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
4436 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
4437 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
4438 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
4439 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
4440 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
4441 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
4442 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
4443 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
4444 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
4445 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
4446 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
4447 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
4448 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
4449 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
4450 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
4451 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
4452 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
4453 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
4454 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
4455 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
4456 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
4457 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
4458 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
4459 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
4460 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
4461 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
4462 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
4463 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
4464 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
4465 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
4466 S_IRUGO | S_IWUSR);
4467 module_param_named(write_same_length, sdebug_write_same_length, int,
4468 S_IRUGO | S_IWUSR);
4470 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4471 MODULE_DESCRIPTION("SCSI debug adapter driver");
4472 MODULE_LICENSE("GPL");
4473 MODULE_VERSION(SDEBUG_VERSION);
4475 MODULE_PARM_DESC(add_host, "0..127 hosts allowed (def=1)");
4476 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4477 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
4478 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4479 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4480 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4481 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4482 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4483 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
4484 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
4485 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4486 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4487 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
4488 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
4489 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
4490 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
4491 SDEBUG_VERSION "\")");
4492 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4493 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4494 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4495 MODULE_PARM_DESC(lbprz,
4496 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
4497 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4498 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
4499 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4500 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4501 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4502 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4503 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
4504 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
4505 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
4506 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4507 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4508 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
4509 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
4510 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4511 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=7[SPC-5])");
4512 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4513 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
4514 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4515 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
4516 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4517 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4518 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4519 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4520 MODULE_PARM_DESC(uuid_ctl,
4521 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
4522 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4523 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4524 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
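/* An illustrative invocation (parameter values are arbitrary):
 *   modprobe scsi_debug dev_size_mb=256 max_luns=2 lbpu=1 sector_size=4096
 * creates the default single host and target with two logical units
 * backed by one shared 256 MiB ram store, 4096 byte logical blocks and
 * UNMAP support.
 */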
4526 #define SDEBUG_INFO_LEN 256
4527 static char sdebug_info[SDEBUG_INFO_LEN];
4529 static const char * scsi_debug_info(struct Scsi_Host * shp)
4531 int k;
4533 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
4534 my_name, SDEBUG_VERSION, sdebug_version_date);
4535 if (k >= (SDEBUG_INFO_LEN - 1))
4536 return sdebug_info;
4537 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
4538 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
4539 sdebug_dev_size_mb, sdebug_opts, submit_queues,
4540 "statistics", (int)sdebug_statistics);
4541 return sdebug_info;
4544 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4545 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
4546 int length)
4548 char arr[16];
4549 int opts;
4550 int minLen = length > 15 ? 15 : length;
4552 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4553 return -EACCES;
4554 memcpy(arr, buffer, minLen);
4555 arr[minLen] = '\0';
4556 if (1 != sscanf(arr, "%d", &opts))
4557 return -EINVAL;
4558 sdebug_opts = opts;
4559 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4560 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4561 if (sdebug_every_nth != 0)
4562 tweak_cmnd_count();
4563 return length;
4566 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4567 * same for each scsi_debug host (if more than one). Some of the counters
4568 * shown are not atomic, so they may be inaccurate on a busy system. */
4569 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4571 int f, j, l;
4572 struct sdebug_queue *sqp;
4574 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
4575 SDEBUG_VERSION, sdebug_version_date);
4576 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
4577 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
4578 sdebug_opts, sdebug_every_nth);
4579 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
4580 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
4581 sdebug_sector_size, "bytes");
4582 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
4583 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
4584 num_aborts);
4585 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
4586 num_dev_resets, num_target_resets, num_bus_resets,
4587 num_host_resets);
4588 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
4589 dix_reads, dix_writes, dif_errors);
4590 seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
4591 TICK_NSEC / 1000, "statistics", sdebug_statistics,
4592 sdebug_mq_active);
4593 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
4594 atomic_read(&sdebug_cmnd_count),
4595 atomic_read(&sdebug_completions),
4596 "miss_cpus", atomic_read(&sdebug_miss_cpus),
4597 atomic_read(&sdebug_a_tsf));
4599 seq_printf(m, "submit_queues=%d\n", submit_queues);
4600 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
4601 seq_printf(m, " queue %d:\n", j);
4602 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
4603 if (f != sdebug_max_queue) {
4604 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
4605 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
4606 "first,last bits", f, l);
4609 return 0;
4612 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4614 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
4616 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
4617 * of delay is jiffies.
4618 */
4619 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4620 size_t count)
4622 int jdelay, res;
4624 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
4625 res = count;
4626 if (sdebug_jdelay != jdelay) {
4627 int j, k;
4628 struct sdebug_queue *sqp;
4630 block_unblock_all_queues(true);
4631 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4632 ++j, ++sqp) {
4633 k = find_first_bit(sqp->in_use_bm,
4634 sdebug_max_queue);
4635 if (k != sdebug_max_queue) {
4636 res = -EBUSY; /* queued commands */
4637 break;
4640 if (res > 0) {
4641 sdebug_jdelay = jdelay;
4642 sdebug_ndelay = 0;
4644 block_unblock_all_queues(false);
4646 return res;
4648 return -EINVAL;
4650 static DRIVER_ATTR_RW(delay);
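/* Illustrative use of the attribute above:
 *   echo 5 > /sys/bus/pseudo/drivers/scsi_debug/delay
 * sets a 5 jiffy response delay; the store fails with EBUSY while any
 * command is still queued.
 */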
4652 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4654 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
4656 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4657 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
4658 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4659 size_t count)
4661 int ndelay, res;
4663 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4664 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
4665 res = count;
4666 if (sdebug_ndelay != ndelay) {
4667 int j, k;
4668 struct sdebug_queue *sqp;
4670 block_unblock_all_queues(true);
4671 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4672 ++j, ++sqp) {
4673 k = find_first_bit(sqp->in_use_bm,
4674 sdebug_max_queue);
4675 if (k != sdebug_max_queue) {
4676 res = -EBUSY; /* queued commands */
4677 break;
4680 if (res > 0) {
4681 sdebug_ndelay = ndelay;
4682 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
4683 : DEF_JDELAY;
4685 block_unblock_all_queues(false);
4687 return res;
4689 return -EINVAL;
4691 static DRIVER_ATTR_RW(ndelay);
4693 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4695 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
4698 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4699 size_t count)
4701 int opts;
4702 char work[20];
4704 if (sscanf(buf, "%10s", work) == 1) {
4705 if (strncasecmp(work, "0x", 2) == 0) {
4706 if (kstrtoint(work + 2, 16, &opts) == 0)
4707 goto opts_done;
4708 } else {
4709 if (kstrtoint(work, 10, &opts) == 0)
4710 goto opts_done;
4713 return -EINVAL;
4714 opts_done:
4715 sdebug_opts = opts;
4716 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
4717 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
4718 tweak_cmnd_count();
4719 return count;
4721 static DRIVER_ATTR_RW(opts);
4723 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4725 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
4727 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4728 size_t count)
4730 int n;
4732 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4733 sdebug_ptype = n;
4734 return count;
4736 return -EINVAL;
4738 static DRIVER_ATTR_RW(ptype);
4740 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4742 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
4744 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4745 size_t count)
4747 int n;
4749 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4750 sdebug_dsense = n;
4751 return count;
4753 return -EINVAL;
4755 static DRIVER_ATTR_RW(dsense);
4757 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4759 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
4761 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4762 size_t count)
4764 int n;
4766 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4767 n = (n > 0);
4768 sdebug_fake_rw = (sdebug_fake_rw > 0);
4769 if (sdebug_fake_rw != n) {
4770 if ((0 == n) && (NULL == fake_storep)) {
4771 unsigned long sz =
4772 (unsigned long)sdebug_dev_size_mb *
4773 1048576;
4775 fake_storep = vmalloc(sz);
4776 if (NULL == fake_storep) {
4777 pr_err("out of memory, 9\n");
4778 return -ENOMEM;
4780 memset(fake_storep, 0, sz);
4782 sdebug_fake_rw = n;
4784 return count;
4786 return -EINVAL;
4788 static DRIVER_ATTR_RW(fake_rw);
4790 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4792 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
4794 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4795 size_t count)
4797 int n;
4799 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4800 sdebug_no_lun_0 = n;
4801 return count;
4803 return -EINVAL;
4805 static DRIVER_ATTR_RW(no_lun_0);
4807 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4809 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
4811 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4812 size_t count)
4814 int n;
4816 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4817 sdebug_num_tgts = n;
4818 sdebug_max_tgts_luns();
4819 return count;
4821 return -EINVAL;
4823 static DRIVER_ATTR_RW(num_tgts);
4825 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4827 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
4829 static DRIVER_ATTR_RO(dev_size_mb);
4831 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4833 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
4835 static DRIVER_ATTR_RO(num_parts);
4837 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4839 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
4841 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4842 size_t count)
4844 int nth;
4846 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4847 sdebug_every_nth = nth;
4848 if (nth && !sdebug_statistics) {
4849 pr_info("every_nth needs statistics=1, set it\n");
4850 sdebug_statistics = true;
4852 tweak_cmnd_count();
4853 return count;
4855 return -EINVAL;
4857 static DRIVER_ATTR_RW(every_nth);
4859 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4861 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
4863 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4864 size_t count)
4866 int n;
4867 bool changed;
4869 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4870 if (n > 256) {
4871 pr_warn("max_luns can be no more than 256\n");
4872 return -EINVAL;
4874 changed = (sdebug_max_luns != n);
4875 sdebug_max_luns = n;
4876 sdebug_max_tgts_luns();
4877 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
4878 struct sdebug_host_info *sdhp;
4879 struct sdebug_dev_info *dp;
4881 spin_lock(&sdebug_host_list_lock);
4882 list_for_each_entry(sdhp, &sdebug_host_list,
4883 host_list) {
4884 list_for_each_entry(dp, &sdhp->dev_info_list,
4885 dev_list) {
4886 set_bit(SDEBUG_UA_LUNS_CHANGED,
4887 dp->uas_bm);
4890 spin_unlock(&sdebug_host_list_lock);
4892 return count;
4894 return -EINVAL;
4896 static DRIVER_ATTR_RW(max_luns);
4898 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4900 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
4902 /* N.B. max_queue can be changed while there are queued commands. In flight
4903 * commands beyond the new max_queue will be completed. */
4904 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4905 size_t count)
4907 int j, n, k, a;
4908 struct sdebug_queue *sqp;
4910 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4911 (n <= SDEBUG_CANQUEUE)) {
4912 block_unblock_all_queues(true);
4913 k = 0;
4914 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
4915 ++j, ++sqp) {
4916 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
4917 if (a > k)
4918 k = a;
4920 sdebug_max_queue = n;
4921 if (k == SDEBUG_CANQUEUE)
4922 atomic_set(&retired_max_queue, 0);
4923 else if (k >= n)
4924 atomic_set(&retired_max_queue, k + 1);
4925 else
4926 atomic_set(&retired_max_queue, 0);
4927 block_unblock_all_queues(false);
4928 return count;
4930 return -EINVAL;
4932 static DRIVER_ATTR_RW(max_queue);
4934 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4936 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
4938 static DRIVER_ATTR_RO(no_uld);
4940 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4942 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
4944 static DRIVER_ATTR_RO(scsi_level);
4946 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4948 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
4950 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4951 size_t count)
4953 int n;
4954 bool changed;
4956 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4957 changed = (sdebug_virtual_gb != n);
4958 sdebug_virtual_gb = n;
4959 sdebug_capacity = get_sdebug_capacity();
4960 if (changed) {
4961 struct sdebug_host_info *sdhp;
4962 struct sdebug_dev_info *dp;
4964 spin_lock(&sdebug_host_list_lock);
4965 list_for_each_entry(sdhp, &sdebug_host_list,
4966 host_list) {
4967 list_for_each_entry(dp, &sdhp->dev_info_list,
4968 dev_list) {
4969 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4970 dp->uas_bm);
4973 spin_unlock(&sdebug_host_list_lock);
4975 return count;
4977 return -EINVAL;
4979 static DRIVER_ATTR_RW(virtual_gb);
4981 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4983 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
4986 static int sdebug_add_adapter(void);
4987 static void sdebug_remove_adapter(void);
4989 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4990 size_t count)
4992 int delta_hosts;
4994 if (sscanf(buf, "%d", &delta_hosts) != 1)
4995 return -EINVAL;
4996 if (delta_hosts > 0) {
4997 do {
4998 sdebug_add_adapter();
4999 } while (--delta_hosts);
5000 } else if (delta_hosts < 0) {
5001 do {
5002 sdebug_remove_adapter();
5003 } while (++delta_hosts);
5005 return count;
5007 static DRIVER_ATTR_RW(add_host);
5009 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
5011 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
5013 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
5014 size_t count)
5016 int n;
5018 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5019 sdebug_vpd_use_hostno = n;
5020 return count;
5022 return -EINVAL;
5024 static DRIVER_ATTR_RW(vpd_use_hostno);
5026 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
5028 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
5030 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
5031 size_t count)
5033 int n;
5035 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
5036 if (n > 0)
5037 sdebug_statistics = true;
5038 else {
5039 clear_queue_stats();
5040 sdebug_statistics = false;
5042 return count;
5044 return -EINVAL;
5046 static DRIVER_ATTR_RW(statistics);
5048 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
5050 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
5052 static DRIVER_ATTR_RO(sector_size);
5054 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
5056 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
5058 static DRIVER_ATTR_RO(submit_queues);
5060 static ssize_t dix_show(struct device_driver *ddp, char *buf)
5062 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
5064 static DRIVER_ATTR_RO(dix);
5066 static ssize_t dif_show(struct device_driver *ddp, char *buf)
5068 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
5070 static DRIVER_ATTR_RO(dif);
5072 static ssize_t guard_show(struct device_driver *ddp, char *buf)
5074 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
5076 static DRIVER_ATTR_RO(guard);
5078 static ssize_t ato_show(struct device_driver *ddp, char *buf)
5080 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
5082 static DRIVER_ATTR_RO(ato);
5084 static ssize_t map_show(struct device_driver *ddp, char *buf)
5086 ssize_t count;
5088 if (!scsi_debug_lbp())
5089 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
5090 sdebug_store_sectors);
5092 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
5093 (int)map_size, map_storep);
5094 buf[count++] = '\n';
5095 buf[count] = '\0';
5097 return count;
5099 static DRIVER_ATTR_RO(map);
5101 static ssize_t removable_show(struct device_driver *ddp, char *buf)
5103 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
5105 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
5106 size_t count)
5108 int n;
5110 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5111 sdebug_removable = (n > 0);
5112 return count;
5114 return -EINVAL;
5116 static DRIVER_ATTR_RW(removable);
5118 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
5120 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
5122 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
5123 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
5124 size_t count)
5126 int n;
5128 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5129 sdebug_host_lock = (n > 0);
5130 return count;
5132 return -EINVAL;
5134 static DRIVER_ATTR_RW(host_lock);
5136 static ssize_t strict_show(struct device_driver *ddp, char *buf)
5138 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
5140 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
5141 size_t count)
5143 int n;
5145 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
5146 sdebug_strict = (n > 0);
5147 return count;
5149 return -EINVAL;
5151 static DRIVER_ATTR_RW(strict);
5153 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
5155 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
5157 static DRIVER_ATTR_RO(uuid_ctl);
5159 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
5161 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
5163 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
5164 size_t count)
5166 int ret, n;
5168 ret = kstrtoint(buf, 0, &n);
5169 if (ret)
5170 return ret;
5171 sdebug_cdb_len = n;
5172 all_config_cdb_len();
5173 return count;
5175 static DRIVER_ATTR_RW(cdb_len);
5178 /* Note: The following array creates attribute files in the
5179 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
5180 files (over those found in the /sys/module/scsi_debug/parameters
5181 directory) is that auxiliary actions can be triggered when an attribute
5182 is changed. For example see: add_host_store() above.
5183 */
5185 static struct attribute *sdebug_drv_attrs[] = {
5186 &driver_attr_delay.attr,
5187 &driver_attr_opts.attr,
5188 &driver_attr_ptype.attr,
5189 &driver_attr_dsense.attr,
5190 &driver_attr_fake_rw.attr,
5191 &driver_attr_no_lun_0.attr,
5192 &driver_attr_num_tgts.attr,
5193 &driver_attr_dev_size_mb.attr,
5194 &driver_attr_num_parts.attr,
5195 &driver_attr_every_nth.attr,
5196 &driver_attr_max_luns.attr,
5197 &driver_attr_max_queue.attr,
5198 &driver_attr_no_uld.attr,
5199 &driver_attr_scsi_level.attr,
5200 &driver_attr_virtual_gb.attr,
5201 &driver_attr_add_host.attr,
5202 &driver_attr_vpd_use_hostno.attr,
5203 &driver_attr_sector_size.attr,
5204 &driver_attr_statistics.attr,
5205 &driver_attr_submit_queues.attr,
5206 &driver_attr_dix.attr,
5207 &driver_attr_dif.attr,
5208 &driver_attr_guard.attr,
5209 &driver_attr_ato.attr,
5210 &driver_attr_map.attr,
5211 &driver_attr_removable.attr,
5212 &driver_attr_host_lock.attr,
5213 &driver_attr_ndelay.attr,
5214 &driver_attr_strict.attr,
5215 &driver_attr_uuid_ctl.attr,
5216 &driver_attr_cdb_len.attr,
5217 NULL,
5219 ATTRIBUTE_GROUPS(sdebug_drv);
5221 static struct device *pseudo_primary;
5223 static int __init scsi_debug_init(void)
5225 unsigned long sz;
5226 int host_to_add;
5227 int k;
5228 int ret;
5230 atomic_set(&retired_max_queue, 0);
5232 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
5233 pr_warn("ndelay must be less than 1 second, ignored\n");
5234 sdebug_ndelay = 0;
5235 } else if (sdebug_ndelay > 0)
5236 sdebug_jdelay = JDELAY_OVERRIDDEN;
5238 switch (sdebug_sector_size) {
5239 case 512:
5240 case 1024:
5241 case 2048:
5242 case 4096:
5243 break;
5244 default:
5245 pr_err("invalid sector_size %d\n", sdebug_sector_size);
5246 return -EINVAL;
5249 switch (sdebug_dif) {
5250 case T10_PI_TYPE0_PROTECTION:
5251 break;
5252 case T10_PI_TYPE1_PROTECTION:
5253 case T10_PI_TYPE2_PROTECTION:
5254 case T10_PI_TYPE3_PROTECTION:
5255 have_dif_prot = true;
5256 break;
5258 default:
5259 pr_err("dif must be 0, 1, 2 or 3\n");
5260 return -EINVAL;
5263 if (sdebug_guard > 1) {
5264 pr_err("guard must be 0 or 1\n");
5265 return -EINVAL;
5268 if (sdebug_ato > 1) {
5269 pr_err("ato must be 0 or 1\n");
5270 return -EINVAL;
5273 if (sdebug_physblk_exp > 15) {
5274 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
5275 return -EINVAL;
5277 if (sdebug_max_luns > 256) {
5278 pr_warn("max_luns can be no more than 256, use default\n");
5279 sdebug_max_luns = DEF_MAX_LUNS;
5282 if (sdebug_lowest_aligned > 0x3fff) {
5283 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
5284 return -EINVAL;
5287 if (submit_queues < 1) {
5288 pr_err("submit_queues must be 1 or more\n");
5289 return -EINVAL;
5291 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
5292 GFP_KERNEL);
5293 if (sdebug_q_arr == NULL)
5294 return -ENOMEM;
5295 for (k = 0; k < submit_queues; ++k)
5296 spin_lock_init(&sdebug_q_arr[k].qc_lock);
5298 if (sdebug_dev_size_mb < 1)
5299 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
5300 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
5301 sdebug_store_sectors = sz / sdebug_sector_size;
5302 sdebug_capacity = get_sdebug_capacity();
5304 /* play around with geometry, don't waste too much on track 0 */
5305 sdebug_heads = 8;
5306 sdebug_sectors_per = 32;
5307 if (sdebug_dev_size_mb >= 256)
5308 sdebug_heads = 64;
5309 else if (sdebug_dev_size_mb >= 16)
5310 sdebug_heads = 32;
5311 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5312 (sdebug_sectors_per * sdebug_heads);
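/* Worked example with the defaults (dev_size_mb=8, sector_size=512):
 * 8 MiB -> 16384 store sectors; heads stays 8 and sectors_per stays 32,
 * giving cylinders = 16384 / (32 * 8) = 64, well under the 1024 limit
 * checked next.
 */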
5313 if (sdebug_cylinders_per >= 1024) {
5314 /* other LLDs do this; implies >= 1GB ram disk ... */
5315 sdebug_heads = 255;
5316 sdebug_sectors_per = 63;
5317 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
5318 (sdebug_sectors_per * sdebug_heads);
5321 if (sdebug_fake_rw == 0) {
5322 fake_storep = vmalloc(sz);
5323 if (NULL == fake_storep) {
5324 pr_err("out of memory, 1\n");
5325 ret = -ENOMEM;
5326 goto free_q_arr;
5328 memset(fake_storep, 0, sz);
5329 if (sdebug_num_parts > 0)
5330 sdebug_build_parts(fake_storep, sz);
5333 if (sdebug_dix) {
5334 int dif_size;
5336 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
5337 dif_storep = vmalloc(dif_size);
5339 pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
5341 if (dif_storep == NULL) {
5342 pr_err("out of mem. (DIX)\n");
5343 ret = -ENOMEM;
5344 goto free_vm;
5345 }
5347 memset(dif_storep, 0xff, dif_size);
5348 }
5350 /* Logical Block Provisioning */
5351 if (scsi_debug_lbp()) {
5352 sdebug_unmap_max_blocks =
5353 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
5355 sdebug_unmap_max_desc =
5356 clamp(sdebug_unmap_max_desc, 0U, 256U);
5358 sdebug_unmap_granularity =
5359 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
5361 if (sdebug_unmap_alignment &&
5362 sdebug_unmap_granularity <=
5363 sdebug_unmap_alignment) {
5364 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
5365 ret = -EINVAL;
5366 goto free_vm;
5367 }
5369 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
5370 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
5372 pr_info("%lu provisioning blocks\n", map_size);
5374 if (map_storep == NULL) {
5375 pr_err("out of mem. (MAP)\n");
5376 ret = -ENOMEM;
5377 goto free_vm;
5378 }
5380 bitmap_zero(map_storep, map_size);
5382 /* Map first 1KB for partition table */
5383 if (sdebug_num_parts)
5384 map_region(0, 2);
5385 }
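/*
 * Sizing sketch (assuming lba_to_map_index(), defined earlier in this
 * file, groups unmap_granularity LBAs per provisioning bit): a 16384
 * sector store with unmap_granularity=1 needs map_size = 16384 bits,
 * i.e. BITS_TO_LONGS(16384) = 256 longs (2 KB) on a 64-bit build. The
 * map_region(0, 2) call above pre-maps the first two LBAs so a
 * partition table read is never treated as unmapped.
 */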
5387 pseudo_primary = root_device_register("pseudo_0");
5388 if (IS_ERR(pseudo_primary)) {
5389 pr_warn("root_device_register() error\n");
5390 ret = PTR_ERR(pseudo_primary);
5391 goto free_vm;
5392 }
5393 ret = bus_register(&pseudo_lld_bus);
5394 if (ret < 0) {
5395 pr_warn("bus_register error: %d\n", ret);
5396 goto dev_unreg;
5397 }
5398 ret = driver_register(&sdebug_driverfs_driver);
5399 if (ret < 0) {
5400 pr_warn("driver_register error: %d\n", ret);
5401 goto bus_unreg;
5402 }
5404 host_to_add = sdebug_add_host;
5405 sdebug_add_host = 0;
5407 for (k = 0; k < host_to_add; k++) {
5408 if (sdebug_add_adapter()) {
5409 pr_err("sdebug_add_adapter failed k=%d\n", k);
5410 break;
5411 }
5412 }
5414 if (sdebug_verbose)
5415 pr_info("built %d host(s)\n", sdebug_add_host);
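/*
 * Example of the fan-out driven by the loop above: loading with
 * "modprobe scsi_debug add_host=2 num_tgts=2 max_luns=2" builds two
 * pseudo hosts, each holding two targets of two LUNs, i.e. eight
 * simulated devices in total.
 */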
5417 return 0;
5419 bus_unreg:
5420 bus_unregister(&pseudo_lld_bus);
5421 dev_unreg:
5422 root_device_unregister(pseudo_primary);
5423 free_vm:
5424 vfree(map_storep);
5425 vfree(dif_storep);
5426 vfree(fake_storep);
5427 free_q_arr:
5428 kfree(sdebug_q_arr);
5429 return ret;
5430 }
5432 static void __exit scsi_debug_exit(void)
5433 {
5434 int k = sdebug_add_host;
5436 stop_all_queued();
5437 free_all_queued();
5438 for (; k; k--)
5439 sdebug_remove_adapter();
5440 driver_unregister(&sdebug_driverfs_driver);
5441 bus_unregister(&pseudo_lld_bus);
5442 root_device_unregister(pseudo_primary);
5444 vfree(map_storep);
5445 vfree(dif_storep);
5446 vfree(fake_storep);
5447 kfree(sdebug_q_arr);
5448 }
5450 device_initcall(scsi_debug_init);
5451 module_exit(scsi_debug_exit);
5453 static void sdebug_release_adapter(struct device * dev)
5454 {
5455 struct sdebug_host_info *sdbg_host;
5457 sdbg_host = to_sdebug_host(dev);
5458 kfree(sdbg_host);
5459 }
5461 static int sdebug_add_adapter(void)
5462 {
5463 int k, devs_per_host;
5464 int error = 0;
5465 struct sdebug_host_info *sdbg_host;
5466 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5468 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
5469 if (sdbg_host == NULL) {
5470 pr_err("out of memory at line %d\n", __LINE__);
5471 return -ENOMEM;
5472 }
5474 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5476 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
5477 for (k = 0; k < devs_per_host; k++) {
5478 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5479 if (!sdbg_devinfo) {
5480 pr_err("out of memory at line %d\n", __LINE__);
5481 error = -ENOMEM;
5482 goto clean;
5483 }
5484 }
5486 spin_lock(&sdebug_host_list_lock);
5487 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5488 spin_unlock(&sdebug_host_list_lock);
5490 sdbg_host->dev.bus = &pseudo_lld_bus;
5491 sdbg_host->dev.parent = pseudo_primary;
5492 sdbg_host->dev.release = &sdebug_release_adapter;
5493 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
5495 error = device_register(&sdbg_host->dev);
5497 if (error)
5498 goto clean;
5500 ++sdebug_add_host;
5501 return error;
5503 clean:
5504 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5505 dev_list) {
5506 list_del(&sdbg_devinfo->dev_list);
5507 kfree(sdbg_devinfo);
5508 }
5510 kfree(sdbg_host);
5511 return error;
5512 }
5514 static void sdebug_remove_adapter(void)
5515 {
5516 struct sdebug_host_info *sdbg_host = NULL;
5518 spin_lock(&sdebug_host_list_lock);
5519 if (!list_empty(&sdebug_host_list)) {
5520 sdbg_host = list_entry(sdebug_host_list.prev,
5521 struct sdebug_host_info, host_list);
5522 list_del(&sdbg_host->host_list);
5523 }
5524 spin_unlock(&sdebug_host_list_lock);
5526 if (!sdbg_host)
5527 return;
5529 device_unregister(&sdbg_host->dev);
5530 --sdebug_add_host;
5531 }
5533 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5534 {
5535 int num_in_q = 0;
5536 struct sdebug_dev_info *devip;
5538 block_unblock_all_queues(true);
5539 devip = (struct sdebug_dev_info *)sdev->hostdata;
5540 if (NULL == devip) {
5541 block_unblock_all_queues(false);
5542 return -ENODEV;
5543 }
5544 num_in_q = atomic_read(&devip->num_in_q);
5546 if (qdepth < 1)
5547 qdepth = 1;
5548 /* allow exceeding max host qc_arr elements for testing */
5549 if (qdepth > SDEBUG_CANQUEUE + 10)
5550 qdepth = SDEBUG_CANQUEUE + 10;
5551 scsi_change_queue_depth(sdev, qdepth);
5553 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
5554 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
5555 __func__, qdepth, num_in_q);
5556 }
5557 block_unblock_all_queues(false);
5558 return sdev->queue_depth;
5559 }
5561 static bool fake_timeout(struct scsi_cmnd *scp)
5562 {
5563 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
5564 if (sdebug_every_nth < -1)
5565 sdebug_every_nth = -1;
5566 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
5567 return true; /* ignore command causing timeout */
5568 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
5569 scsi_medium_access_command(scp))
5570 return true; /* time out reads and writes */
5571 }
5572 return false;
5573 }
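/*
 * Example (per the every_nth and opts semantics used above): with
 * every_nth=100 and SDEBUG_OPT_TIMEOUT set in opts, every 100th command
 * is accepted but never completed, which exercises the mid-layer abort
 * and reset escalation; every_nth values below -1 are clamped to -1.
 */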
5575 static bool fake_host_busy(struct scsi_cmnd *scp)
5576 {
5577 return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
5578 (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5579 }
5581 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
5582 struct scsi_cmnd *scp)
5583 {
5584 u8 sdeb_i;
5585 struct scsi_device *sdp = scp->device;
5586 const struct opcode_info_t *oip;
5587 const struct opcode_info_t *r_oip;
5588 struct sdebug_dev_info *devip;
5589 u8 *cmd = scp->cmnd;
5590 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5591 int k, na;
5592 int errsts = 0;
5593 u32 flags;
5594 u16 sa;
5595 u8 opcode = cmd[0];
5596 bool has_wlun_rl;
5598 scsi_set_resid(scp, 0);
5599 if (sdebug_statistics)
5600 atomic_inc(&sdebug_cmnd_count);
5601 if (unlikely(sdebug_verbose &&
5602 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
5603 char b[120];
5604 int n, len, sb;
5606 len = scp->cmd_len;
5607 sb = (int)sizeof(b);
5608 if (len > 32)
5609 strcpy(b, "too long, over 32 bytes");
5610 else {
5611 for (k = 0, n = 0; k < len && n < sb; ++k)
5612 n += scnprintf(b + n, sb - n, "%02x ",
5613 (u32)cmd[k]);
5614 }
5615 if (sdebug_mq_active)
5616 sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
5617 my_name, blk_mq_unique_tag(scp->request),
5618 b);
5619 else
5620 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
5621 b);
5622 }
5623 if (fake_host_busy(scp))
5624 return SCSI_MLQUEUE_HOST_BUSY;
5625 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
5626 if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
5627 goto err_out;
5629 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
5630 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
5631 devip = (struct sdebug_dev_info *)sdp->hostdata;
5632 if (unlikely(!devip)) {
5633 devip = find_build_dev_info(sdp);
5634 if (NULL == devip)
5635 goto err_out;
5636 }
5637 na = oip->num_attached;
5638 r_pfp = oip->pfp;
5639 if (na) { /* multiple commands with this opcode */
5640 r_oip = oip;
5641 if (FF_SA & r_oip->flags) {
5642 if (F_SA_LOW & oip->flags)
5643 sa = 0x1f & cmd[1];
5644 else
5645 sa = get_unaligned_be16(cmd + 8);
5646 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5647 if (opcode == oip->opcode && sa == oip->sa)
5648 break;
5649 }
5650 } else { /* since no service action, only check opcode */
5651 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5652 if (opcode == oip->opcode)
5653 break;
5654 }
5655 }
5656 if (k > na) {
5657 if (F_SA_LOW & r_oip->flags)
5658 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5659 else if (F_SA_HIGH & r_oip->flags)
5660 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5661 else
5662 mk_sense_invalid_opcode(scp);
5663 goto check_cond;
5664 }
5665 } /* else (when na==0) we assume the oip is a match */
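/*
 * Example of the two service-action encodings handled above: READ
 * CAPACITY(16) is SERVICE ACTION IN(16) (opcode 0x9e) with service
 * action 0x10 in the low 5 bits of cdb[1] (the F_SA_LOW case), while
 * variable-length CDBs such as READ(32) carry a 16-bit service action
 * in bytes 8-9 (the F_SA_HIGH case).
 */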
5666 flags = oip->flags;
5667 if (unlikely(F_INV_OP & flags)) {
5668 mk_sense_invalid_opcode(scp);
5669 goto check_cond;
5670 }
5671 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
5672 if (sdebug_verbose)
5673 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
5674 my_name, opcode, " supported for wlun");
5675 mk_sense_invalid_opcode(scp);
5676 goto check_cond;
5677 }
5678 if (unlikely(sdebug_strict)) { /* check cdb against mask */
5679 u8 rem;
5680 int j;
5682 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5683 rem = ~oip->len_mask[k] & cmd[k];
5684 if (rem) {
5685 for (j = 7; j >= 0; --j, rem <<= 1) {
5686 if (0x80 & rem)
5687 break;
5688 }
5689 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5690 goto check_cond;
5691 }
5692 }
5693 }
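/*
 * Worked example of the strict CDB check above: if len_mask[1] is 0x1f
 * (only the low 5 bits of byte 1 are meaningful) and cdb[1] is 0x20,
 * then rem = ~0x1f & 0x20 = 0x20; the inner loop stops at j=5, so the
 * sense data flags byte 1, bit 5 of the CDB as the invalid field.
 */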
5694 if (unlikely(!(F_SKIP_UA & flags) &&
5695 find_first_bit(devip->uas_bm,
5696 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
5697 errsts = make_ua(scp, devip);
5698 if (errsts)
5699 goto check_cond;
5700 }
5701 if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
5702 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5703 if (sdebug_verbose)
5704 sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5705 "%s\n", my_name, "initializing command "
5706 "required");
5707 errsts = check_condition_result;
5708 goto fini;
5709 }
5710 if (sdebug_fake_rw && (F_FAKE_RW & flags))
5711 goto fini;
5712 if (unlikely(sdebug_every_nth)) {
5713 if (fake_timeout(scp))
5714 return 0; /* ignore command: make trouble */
5715 }
5716 if (likely(oip->pfp))
5717 errsts = oip->pfp(scp, devip); /* calls a resp_* function */
5718 else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5719 errsts = r_pfp(scp, devip);
5721 fini:
5722 if (F_DELAY_OVERR & flags)
5723 return schedule_resp(scp, devip, errsts, 0, 0);
5724 else
5725 return schedule_resp(scp, devip, errsts, sdebug_jdelay,
5726 sdebug_ndelay);
5727 check_cond:
5728 return schedule_resp(scp, devip, check_condition_result, 0, 0);
5729 err_out:
5730 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0, 0);
5731 }
5733 static struct scsi_host_template sdebug_driver_template = {
5734 .show_info = scsi_debug_show_info,
5735 .write_info = scsi_debug_write_info,
5736 .proc_name = sdebug_proc_name,
5737 .name = "SCSI DEBUG",
5738 .info = scsi_debug_info,
5739 .slave_alloc = scsi_debug_slave_alloc,
5740 .slave_configure = scsi_debug_slave_configure,
5741 .slave_destroy = scsi_debug_slave_destroy,
5742 .ioctl = scsi_debug_ioctl,
5743 .queuecommand = scsi_debug_queuecommand,
5744 .change_queue_depth = sdebug_change_qdepth,
5745 .eh_abort_handler = scsi_debug_abort,
5746 .eh_device_reset_handler = scsi_debug_device_reset,
5747 .eh_target_reset_handler = scsi_debug_target_reset,
5748 .eh_bus_reset_handler = scsi_debug_bus_reset,
5749 .eh_host_reset_handler = scsi_debug_host_reset,
5750 .can_queue = SDEBUG_CANQUEUE,
5751 .this_id = 7,
5752 .sg_tablesize = SG_MAX_SEGMENTS,
5753 .cmd_per_lun = DEF_CMD_PER_LUN,
5754 .max_sectors = -1U,
5755 .use_clustering = DISABLE_CLUSTERING,
5756 .module = THIS_MODULE,
5757 .track_queue_depth = 1,
5758 };
5760 static int sdebug_driver_probe(struct device * dev)
5761 {
5762 int error = 0;
5763 struct sdebug_host_info *sdbg_host;
5764 struct Scsi_Host *hpnt;
5765 int hprot;
5767 sdbg_host = to_sdebug_host(dev);
5769 sdebug_driver_template.can_queue = sdebug_max_queue;
5770 if (sdebug_clustering)
5771 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5772 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5773 if (NULL == hpnt) {
5774 pr_err("scsi_host_alloc failed\n");
5775 error = -ENODEV;
5776 return error;
5777 }
5778 if (submit_queues > nr_cpu_ids) {
5779 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
5780 my_name, submit_queues, nr_cpu_ids);
5781 submit_queues = nr_cpu_ids;
5782 }
5783 /* Decide whether to tell scsi subsystem that we want mq */
5784 /* Following should give the same answer for each host */
5785 sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
5786 if (sdebug_mq_active)
5787 hpnt->nr_hw_queues = submit_queues;
5789 sdbg_host->shost = hpnt;
5790 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5791 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
5792 hpnt->max_id = sdebug_num_tgts + 1;
5793 else
5794 hpnt->max_id = sdebug_num_tgts;
5795 /* = sdebug_max_luns; */
5796 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
5798 hprot = 0;
5800 switch (sdebug_dif) {
5802 case T10_PI_TYPE1_PROTECTION:
5803 hprot = SHOST_DIF_TYPE1_PROTECTION;
5804 if (sdebug_dix)
5805 hprot |= SHOST_DIX_TYPE1_PROTECTION;
5806 break;
5808 case T10_PI_TYPE2_PROTECTION:
5809 hprot = SHOST_DIF_TYPE2_PROTECTION;
5810 if (sdebug_dix)
5811 hprot |= SHOST_DIX_TYPE2_PROTECTION;
5812 break;
5814 case T10_PI_TYPE3_PROTECTION:
5815 hprot = SHOST_DIF_TYPE3_PROTECTION;
5816 if (sdebug_dix)
5817 hprot |= SHOST_DIX_TYPE3_PROTECTION;
5818 break;
5820 default:
5821 if (sdebug_dix)
5822 hprot |= SHOST_DIX_TYPE0_PROTECTION;
5823 break;
5824 }
5826 scsi_host_set_prot(hpnt, hprot);
5828 if (have_dif_prot || sdebug_dix)
5829 pr_info("host protection%s%s%s%s%s%s%s\n",
5830 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5831 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5832 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5833 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5834 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5835 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5836 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5838 if (sdebug_guard == 1)
5839 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5840 else
5841 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
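/*
 * Example of the resulting protection setup: loading with dif=1 and
 * dix=1 yields hprot = SHOST_DIF_TYPE1_PROTECTION |
 * SHOST_DIX_TYPE1_PROTECTION, and guard=1 selects IP-checksum guard
 * tags in place of the default T10 CRC.
 */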
5843 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
5844 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
5845 if (sdebug_every_nth) /* need stats counters for every_nth */
5846 sdebug_statistics = true;
5847 error = scsi_add_host(hpnt, &sdbg_host->dev);
5848 if (error) {
5849 pr_err("scsi_add_host failed\n");
5850 error = -ENODEV;
5851 scsi_host_put(hpnt);
5852 } else
5853 scsi_scan_host(hpnt);
5855 return error;
5856 }
5858 static int sdebug_driver_remove(struct device * dev)
5859 {
5860 struct sdebug_host_info *sdbg_host;
5861 struct sdebug_dev_info *sdbg_devinfo, *tmp;
5863 sdbg_host = to_sdebug_host(dev);
5865 if (!sdbg_host) {
5866 pr_err("Unable to locate host info\n");
5867 return -ENODEV;
5868 }
5870 scsi_remove_host(sdbg_host->shost);
5872 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5873 dev_list) {
5874 list_del(&sdbg_devinfo->dev_list);
5875 kfree(sdbg_devinfo);
5876 }
5878 scsi_host_put(sdbg_host->shost);
5879 return 0;
5880 }
5882 static int pseudo_lld_bus_match(struct device *dev,
5883 struct device_driver *dev_driver)
5884 {
5885 return 1;
5886 }
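/*
 * The match callback above accepts every device on the pseudo bus, so
 * each adapter created by sdebug_add_adapter() binds to this one driver
 * and sdebug_driver_probe() runs for it.
 */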
5888 static struct bus_type pseudo_lld_bus = {
5889 .name = "pseudo",
5890 .match = pseudo_lld_bus_match,
5891 .probe = sdebug_driver_probe,
5892 .remove = sdebug_driver_remove,
5893 .drv_groups = sdebug_drv_groups,
5894 };