/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 *  This version is more generic, simulating a variable number of disks
 *  (or disk-like devices) sharing a common amount of RAM. To be more
 *  realistic, the simulated devices have the transport attributes of
 *  SAS disks.
 *
 *  For documentation see http://www.torque.net/sg/sdebug26.html
 *
 *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
 *   dpg: work for devfs large number of disks [20010809]
 *        forked for lk 2.5 series [20011216, 20020101]
 *        use vmalloc() more inquiry+mode_sense [20020302]
 *        add timers for delayed responses [20020721]
 *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
 *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
 *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
 *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
 */
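/*
 * Illustrative invocations of the options mentioned above (the numeric
 * values are arbitrary examples, not recommendations):
 *
 *     modprobe scsi_debug num_tgts=2 max_luns=4 dev_size_mb=16
 *
 * or, when the driver is built in, on the kernel command line:
 *
 *     scsi_debug.num_tgts=2 scsi_debug.max_luns=4
 */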
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "scsi_logging.h"
#define SCSI_DEBUG_VERSION "1.81"
static const char * scsi_debug_version_date = "20070104";
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define ADDR_OUT_OF_RANGE 0x21
#define INVALID_COMMAND_OPCODE 0x20
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define POWERON_RESET 0x29
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */

/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_DEV_SIZE_MB   8
#define DEF_EVERY_NTH   0
#define DEF_NUM_PARTS   0
#define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
#define DEF_NO_LUN_0   0
#define DEF_VIRTUAL_GB   0
#define DEF_FAKE_RW	0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_SECTOR_SIZE 512
#define DEF_PHYSBLK_EXP 0
#define DEF_LOWEST_ALIGNED 0
#define DEF_UNMAP_MAX_BLOCKS 0
#define DEF_UNMAP_MAX_DESC 0
#define DEF_UNMAP_GRANULARITY 0
#define DEF_UNMAP_ALIGNMENT 0
/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE   1
#define SCSI_DEBUG_OPT_MEDIUM_ERR   2
#define SCSI_DEBUG_OPT_TIMEOUT   4
#define SCSI_DEBUG_OPT_RECOVERED_ERR   8
#define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
#define SCSI_DEBUG_OPT_DIF_ERR   32
#define SCSI_DEBUG_OPT_DIX_ERR   64
/* When "every_nth" > 0 then modulo "every_nth" commands:
 *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 *   - a RECOVERED_ERROR is simulated on successful read and write
 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 *
 * When "every_nth" < 0 then after "-every_nth" commands:
 *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 *   - a RECOVERED_ERROR is simulated on successful read and write
 *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 *   - a TRANSPORT_ERROR is simulated on successful read and write
 *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 * This will continue until some other action occurs (e.g. the user
 * writing a new value (other than -1 or 1) to every_nth via sysfs).
 */
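/*
 * Example (illustrative, not a default): loading the module with
 * "opts=5 every_nth=100" sets SCSI_DEBUG_OPT_NOISE | SCSI_DEBUG_OPT_TIMEOUT
 * (1 + 4 = 5), so command traffic is logged and every 100th command gets
 * no response, which exercises the mid-layer's timeout/abort handling.
 */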
/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */

/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 * or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0
#define SAM2_WLUN_REPORT_LUNS 0xc101
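/*
 * 0xc101 is the SAM-2 encoding of the REPORT LUNS well known logical
 * unit: the upper byte (0xc1) selects the well-known-LUN address method
 * and the lower byte (0x01) names the REPORT LUNS w-lun itself.
 */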
static int scsi_debug_add_host = DEF_NUM_HOST;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_num_parts = DEF_NUM_PARTS;
static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int scsi_debug_opts = DEF_OPTS;
static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
static int scsi_debug_fake_rw = DEF_FAKE_RW;
static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
static int scsi_debug_dix = DEF_DIX;
static int scsi_debug_dif = DEF_DIF;
static int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;

static int scsi_debug_cmnd_count = 0;
#define DEV_READONLY(TGT)      (0)
#define DEV_REMOVEABLE(TGT)    (0)

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_SENSE_LEN 32

#define SCSI_DEBUG_CANQUEUE  255
#define SCSI_DEBUG_MAX_CMD_LEN 32
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
	unsigned int channel;
	struct sdebug_host_info *sdbg_host;
};

struct sdebug_host_info {
	struct list_head host_list;
	struct Scsi_Host *shost;
	struct list_head dev_info_list;
};

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

typedef void (* done_funct_t) (struct scsi_cmnd *);

struct sdebug_queued_cmd {
	struct timer_list cmnd_timer;
	done_funct_t done_funct;
	struct scsi_cmnd * a_cmnd;
};
static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
static unsigned char * fake_storep;	/* ramdisk storage */
static unsigned char *dif_storep;	/* protection info */
static void *map_storep;		/* provisioning map */

static unsigned long map_size;
static int num_aborts = 0;
static int num_dev_resets = 0;
static int num_bus_resets = 0;
static int num_host_resets = 0;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static DEFINE_SPINLOCK(queued_arr_lock);
static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = "scsi_debug";

static struct bus_type pseudo_lld_bus;
static inline sector_t dif_offset(sector_t sector)
{
	return sector << 3;
}
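/*
 * Each sector's worth of protection information is an 8 byte DIF tuple
 * (2 byte guard tag, 2 byte application tag, 4 byte reference tag),
 * hence the shift by 3 above and the "sectors * 8" sizing used when
 * copying out of dif_storep in prot_verify_read() below.
 */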
static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
		(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
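/*
 * Both result templates use the kernel's packed SCSI result layout,
 * driver_byte << 24 | host_byte << 16 | msg_byte << 8 | status_byte:
 * check_condition_result leaves the host byte as DID_OK, while
 * illegal_condition_result additionally reports DID_ABORT to the
 * mid-layer.
 */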
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,

static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void);

static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (scsi_debug_num_tgts > hpnt->this_id))
			hpnt->max_id = scsi_debug_num_tgts + 1;
		else
			hpnt->max_id = scsi_debug_num_tgts;
		/* scsi_debug_max_luns; */
		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
			    int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = devip->sense_buff;
	memset(sbuff, 0, SDEBUG_SENSE_LEN);

	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
		       "[0x%x,0x%x,0x%x]\n", key, asc, asq);
}
static void get_data_transfer_info(unsigned char *cmd,
				   unsigned long long *lba, unsigned int *num,
				   unsigned int *ei_lba)
{
	switch (*cmd) {
	case VARIABLE_LENGTH_CMD:
		*lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
			(u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
			(u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
			(u64)cmd[13] << 48 | (u64)cmd[12] << 56;

		*ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
			(u32)cmd[21] << 16 | (u32)cmd[20] << 24;

		*num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
			(u32)cmd[28] << 24;
		break;

	case WRITE_16:
	case READ_16:
		*lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
			(u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
			(u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
			(u64)cmd[3] << 48 | (u64)cmd[2] << 56;

		*num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
			(u32)cmd[10] << 24;
		break;
	case WRITE_12:
	case READ_12:
		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
			(u32)cmd[2] << 24;

		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
			(u32)cmd[6] << 24;
		break;
	case WRITE_10:
	case READ_10:
		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
			(u32)cmd[2] << 24;

		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
		break;
	case WRITE_6:
	case READ_6:
		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
			(u32)(cmd[1] & 0x1f) << 16;
		*num = (0 == cmd[4]) ? 256 : cmd[4];
		break;
	}
}
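/*
 * Worked example (hypothetical CDB): for READ(10) 28 00 00 00 10 00 00
 * 00 08 00 the code above extracts *lba = 0x1000 (4096) from bytes 2-5
 * and *num = 8 from bytes 7-8.  READ(6)/WRITE(6) keep the SBC quirk that
 * a transfer length byte of 0 means 256 blocks.
 */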
static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
		printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}
static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
			   struct sdebug_dev_info * devip)
{
	if (devip->reset) {
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Reporting Unit "
			       "attention: power on reset\n");
		mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
		return check_condition_result;
	}
	if ((0 == reset_only) && devip->stopped) {
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Reporting Not "
			       "ready: initializing command required\n");
		mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
				0x2);
		return check_condition_result;
	}
	return 0;
}
/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return (DID_ERROR << 16);

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	if (sdb->resid)
		sdb->resid -= act_len;
	else
		sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
/* Returns number of bytes fetched into 'arr' or -1 if error. */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}
static const char * inq_vendor_id = "Linux   ";
static const char * inq_product_id = "scsi_debug      ";
static const char * inq_product_rev = "0004";
static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
			   int target_dev_id, int dev_id_num,
			   const char * dev_id_str, int dev_id_str_len)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	if (dev_id_num >= 0) {
		/* NAA-5, Logical unit identifier (binary) */
		arr[num++] = 0x1;	/* binary (not necessarily sas) */
		arr[num++] = 0x3;	/* PIV=0, lu, naa */
		arr[num++] = 0x53;	/* naa-5 ieee company id=0x333333 (fake) */
		arr[num++] = (dev_id_num >> 24);
		arr[num++] = (dev_id_num >> 16) & 0xff;
		arr[num++] = (dev_id_num >> 8) & 0xff;
		arr[num++] = dev_id_num & 0xff;
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-5, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
	arr[num++] = (port_a >> 24);
	arr[num++] = (port_a >> 16) & 0xff;
	arr[num++] = (port_a >> 8) & 0xff;
	arr[num++] = port_a & 0xff;
	/* NAA-5, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = (port_group_id >> 8) & 0xff;
	arr[num++] = port_group_id & 0xff;
	/* NAA-5, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
	arr[num++] = (target_dev_id >> 24);
	arr[num++] = (target_dev_id >> 16) & 0xff;
	arr[num++] = (target_dev_id >> 8) & 0xff;
	arr[num++] = target_dev_id & 0xff;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	memcpy(arr + num, "naa.52222220", 12);
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	memset(arr + num, 0, 4);
	return num;
}
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

static int inquiry_evpd_84(unsigned char * arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

static int inquiry_evpd_85(unsigned char * arr)
{
	int num = 0;
	const char * na1 = "https://www.kernel.org/config";
	const char * na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);

	return num;
}
/* SCSI ports VPD page */
static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;

	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
	arr[num++] = (port_a >> 24);
	arr[num++] = (port_a >> 16) & 0xff;
	arr[num++] = (port_a >> 8) & 0xff;
	arr[num++] = port_a & 0xff;

	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x2;	/* relative port 2 (secondary) */
	memset(arr + num, 0, 6);
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (B) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x8;	/* length */
	arr[num++] = 0x52;	/* NAA-5, company_id=0x222222 (fake) */
	arr[num++] = (port_b >> 24);
	arr[num++] = (port_b >> 16) & 0xff;
	arr[num++] = (port_b >> 8) & 0xff;
	arr[num++] = port_b & 0xff;

	return num;
}
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};

static int inquiry_evpd_89(unsigned char * arr)
{
	memcpy(arr, vpd89_data, sizeof(vpd89_data));
	return sizeof(vpd89_data);
}
/* Block limits VPD page (SBC-3) */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};

static int inquiry_evpd_b0(unsigned char * arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
	gran = 1 << scsi_debug_physblk_exp;
	arr[2] = (gran >> 8) & 0xff;
	arr[3] = gran & 0xff;
	if (sdebug_store_sectors > 0x400) {
		arr[4] = (sdebug_store_sectors >> 24) & 0xff;
		arr[5] = (sdebug_store_sectors >> 16) & 0xff;
		arr[6] = (sdebug_store_sectors >> 8) & 0xff;
		arr[7] = sdebug_store_sectors & 0xff;
	}
	if (scsi_debug_unmap_max_desc) {
		unsigned int blocks = 0xffffffff;

		if (scsi_debug_unmap_max_blocks)
			blocks = scsi_debug_unmap_max_blocks;
		put_unaligned_be32(blocks, &arr[16]);
		put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
	}
	if (scsi_debug_unmap_alignment) {
		put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}
	if (scsi_debug_unmap_granularity) {
		put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
		return 0x3c; /* Mandatory page length for thin provisioning */
	}
	return sizeof(vpdb0_data);
}
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_evpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[3] = 5;	/* less than 1.8" */
	return 0x3c;
}

#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
static int resp_inquiry(struct scsi_cmnd * scp, int target,
			struct sdebug_dev_info * devip)
{
	unsigned char pq_pdt;
	unsigned char * arr;
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	int alloc_len, n, ret;

	alloc_len = (cmd[3] << 8) + cmd[4];
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	if (devip->wlun)
		pq_pdt = 0x1e;	/* present, wlun */
	else if (scsi_debug_no_lun_0 && (0 == devip->lun))
		pq_pdt = 0x7f;	/* not present, no device type */
	else
		pq_pdt = (scsi_debug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id, len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (0 == scsi_debug_vpd_use_hostno)
			host_no = 0;
		lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				(devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			arr[1] = cmd[2];	/*sanity */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			arr[n++] = 0x89;  /* ATA information */
			arr[n++] = 0xb0;  /* Block limits (SBC) */
			arr[n++] = 0xb1;  /* Block characteristics (SBC) */
			arr[3] = n - 4;	  /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[1] = cmd[2];	/*sanity */
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
						 target_dev_id, lu_id_num,
						 lu_id_str, len);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x3c;	/* number of following entries */
			if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (scsi_debug_dif)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			arr[5] = 0x7;	/* head of q, ordered + simple q's */
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	/* protocol specific lu */
			arr[10] = 0x82;	/* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
		} else if (0x89 == cmd[2]) { /* ATA information */
			arr[1] = cmd[2];	/*sanity */
			n = inquiry_evpd_89(&arr[4]);
		} else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_b0(&arr[4]);
		} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
			arr[1] = cmd[2];	/*sanity */
			arr[3] = inquiry_evpd_b1(&arr[4]);
		} else {
			/* Illegal request, invalid field in cdb */
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			kfree(arr);
			return check_condition_result;
		}
		len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
					   min(len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0;	/* Removable disk */
	arr[2] = scsi_debug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;
	arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
	if (0 == scsi_debug_vpd_use_hostno)
		arr[5] = 0x10; /* claim: implicit TGPS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], inq_vendor_id, 8);
	memcpy(&arr[16], inq_product_id, 16);
	memcpy(&arr[32], inq_product_rev, 4);
	/* version descriptors (2 bytes each) follow */
	arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
	arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
	n = 62;
	if (scsi_debug_ptype == 0) {
		arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
	} else if (scsi_debug_ptype == 1) {
		arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
	}
	arr[n++] = 0xc; arr[n++] = 0xf;  /* SAS-1.1 rev 10 */
	ret = fill_from_dev_buffer(scp, arr,
				   min(alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
* scp
,
895 struct sdebug_dev_info
* devip
)
897 unsigned char * sbuff
;
898 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
899 unsigned char arr
[SDEBUG_SENSE_LEN
];
903 memset(arr
, 0, sizeof(arr
));
904 if (devip
->reset
== 1)
905 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
906 want_dsense
= !!(cmd
[1] & 1) || scsi_debug_dsense
;
907 sbuff
= devip
->sense_buff
;
908 if ((iec_m_pg
[2] & 0x4) && (6 == (iec_m_pg
[3] & 0xf))) {
911 arr
[1] = 0x0; /* NO_SENSE in sense_key */
912 arr
[2] = THRESHOLD_EXCEEDED
;
913 arr
[3] = 0xff; /* TEST set and MRIE==6 */
916 arr
[2] = 0x0; /* NO_SENSE in sense_key */
917 arr
[7] = 0xa; /* 18 byte sense buffer */
918 arr
[12] = THRESHOLD_EXCEEDED
;
919 arr
[13] = 0xff; /* TEST set and MRIE==6 */
922 memcpy(arr
, sbuff
, SDEBUG_SENSE_LEN
);
923 if ((cmd
[1] & 1) && (! scsi_debug_dsense
)) {
924 /* DESC bit set and sense_buff in fixed format */
925 memset(arr
, 0, sizeof(arr
));
927 arr
[1] = sbuff
[2]; /* sense key */
928 arr
[2] = sbuff
[12]; /* asc */
929 arr
[3] = sbuff
[13]; /* ascq */
933 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
934 return fill_from_dev_buffer(scp
, arr
, len
);
static int resp_start_stop(struct scsi_cmnd * scp,
			   struct sdebug_dev_info * devip)
{
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	int power_cond, errsts, start;

	if ((errsts = check_readiness(scp, 1, devip)))
		return errsts;
	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		return check_condition_result;
	}
	start = cmd[4] & 1;
	if (start == devip->stopped)
		devip->stopped = !start;
	return 0;
}
static sector_t get_sdebug_capacity(void)
{
	if (scsi_debug_virtual_gb > 0)
		return (sector_t)scsi_debug_virtual_gb *
			(1073741824 / scsi_debug_sector_size);
	else
		return sdebug_store_sectors;
}
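/*
 * Example: virtual_gb=1 with the default 512 byte sectors reports
 * 1073741824 / 512 = 2097152 sectors, regardless of how much RAM backs
 * fake_storep; accesses beyond the real store wrap around it (see
 * do_device_access()).
 */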
#define SDEBUG_READCAP_ARR_SZ 8
static int resp_readcap(struct scsi_cmnd * scp,
			struct sdebug_dev_info * devip)
{
	unsigned char arr[SDEBUG_READCAP_ARR_SZ];
	unsigned int capac;
	int errsts;

	if ((errsts = check_readiness(scp, 1, devip)))
		return errsts;
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
	if (sdebug_capacity < 0xffffffff) {
		capac = (unsigned int)sdebug_capacity - 1;
		arr[0] = (capac >> 24);
		arr[1] = (capac >> 16) & 0xff;
		arr[2] = (capac >> 8) & 0xff;
		arr[3] = capac & 0xff;
	}
	arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
	arr[7] = scsi_debug_sector_size & 0xff;
	return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
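/*
 * READ CAPACITY(10) can only return a 32 bit last-LBA, which is why the
 * branch above checks sdebug_capacity against 0xffffffff; a larger
 * device is expected to report 0xffffffff so that the initiator retries
 * with READ CAPACITY(16), handled by resp_readcap16() below.
 */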
#define SDEBUG_READCAP16_ARR_SZ 32
static int resp_readcap16(struct scsi_cmnd * scp,
			  struct sdebug_dev_info * devip)
{
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
	unsigned long long capac;
	int errsts, k, alloc_len;

	if ((errsts = check_readiness(scp, 1, devip)))
		return errsts;
	alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
		     + cmd[13]);
	/* following just in case virtual_gb changed */
	sdebug_capacity = get_sdebug_capacity();
	memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
	capac = sdebug_capacity - 1;
	for (k = 0; k < 8; ++k, capac >>= 8)
		arr[7 - k] = capac & 0xff;
	arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
	arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
	arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
	arr[11] = scsi_debug_sector_size & 0xff;
	arr[13] = scsi_debug_physblk_exp & 0xf;
	arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;

	if (scsi_debug_unmap_granularity)
		arr[14] |= 0x80; /* TPE */

	arr[15] = scsi_debug_lowest_aligned & 0xff;

	if (scsi_debug_dif) {
		arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
		arr[12] |= 1; /* PROT_EN */
	}

	return fill_from_dev_buffer(scp, arr,
				    min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
}

#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1038 static int resp_report_tgtpgs(struct scsi_cmnd
* scp
,
1039 struct sdebug_dev_info
* devip
)
1041 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1042 unsigned char * arr
;
1043 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1044 int n
, ret
, alen
, rlen
;
1045 int port_group_a
, port_group_b
, port_a
, port_b
;
1047 alen
= ((cmd
[6] << 24) + (cmd
[7] << 16) + (cmd
[8] << 8)
1050 arr
= kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ
, GFP_ATOMIC
);
1052 return DID_REQUEUE
<< 16;
1054 * EVPD page 0x88 states we have two ports, one
1055 * real and a fake port with no device connected.
1056 * So we create two port groups with one port each
1057 * and set the group with port B to unavailable.
1059 port_a
= 0x1; /* relative port A */
1060 port_b
= 0x2; /* relative port B */
1061 port_group_a
= (((host_no
+ 1) & 0x7f) << 8) +
1062 (devip
->channel
& 0x7f);
1063 port_group_b
= (((host_no
+ 1) & 0x7f) << 8) +
1064 (devip
->channel
& 0x7f) + 0x80;
1067 * The asymmetric access state is cycled according to the host_id.
1070 if (0 == scsi_debug_vpd_use_hostno
) {
1071 arr
[n
++] = host_no
% 3; /* Asymm access state */
1072 arr
[n
++] = 0x0F; /* claim: all states are supported */
1074 arr
[n
++] = 0x0; /* Active/Optimized path */
1075 arr
[n
++] = 0x01; /* claim: only support active/optimized paths */
1077 arr
[n
++] = (port_group_a
>> 8) & 0xff;
1078 arr
[n
++] = port_group_a
& 0xff;
1079 arr
[n
++] = 0; /* Reserved */
1080 arr
[n
++] = 0; /* Status code */
1081 arr
[n
++] = 0; /* Vendor unique */
1082 arr
[n
++] = 0x1; /* One port per group */
1083 arr
[n
++] = 0; /* Reserved */
1084 arr
[n
++] = 0; /* Reserved */
1085 arr
[n
++] = (port_a
>> 8) & 0xff;
1086 arr
[n
++] = port_a
& 0xff;
1087 arr
[n
++] = 3; /* Port unavailable */
1088 arr
[n
++] = 0x08; /* claim: only unavailalbe paths are supported */
1089 arr
[n
++] = (port_group_b
>> 8) & 0xff;
1090 arr
[n
++] = port_group_b
& 0xff;
1091 arr
[n
++] = 0; /* Reserved */
1092 arr
[n
++] = 0; /* Status code */
1093 arr
[n
++] = 0; /* Vendor unique */
1094 arr
[n
++] = 0x1; /* One port per group */
1095 arr
[n
++] = 0; /* Reserved */
1096 arr
[n
++] = 0; /* Reserved */
1097 arr
[n
++] = (port_b
>> 8) & 0xff;
1098 arr
[n
++] = port_b
& 0xff;
1101 arr
[0] = (rlen
>> 24) & 0xff;
1102 arr
[1] = (rlen
>> 16) & 0xff;
1103 arr
[2] = (rlen
>> 8) & 0xff;
1104 arr
[3] = rlen
& 0xff;
1107 * Return the smallest value of either
1108 * - The allocated length
1109 * - The constructed command length
1110 * - The maximum array size
1113 ret
= fill_from_dev_buffer(scp
, arr
,
1114 min(rlen
, SDEBUG_MAX_TGTPGS_ARR_SZ
));
1119 /* <<Following mode page info copied from ST318451LW>> */
1121 static int resp_err_recov_pg(unsigned char * p
, int pcontrol
, int target
)
1122 { /* Read-Write Error Recovery page for mode_sense */
1123 unsigned char err_recov_pg
[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1126 memcpy(p
, err_recov_pg
, sizeof(err_recov_pg
));
1128 memset(p
+ 2, 0, sizeof(err_recov_pg
) - 2);
1129 return sizeof(err_recov_pg
);
1132 static int resp_disconnect_pg(unsigned char * p
, int pcontrol
, int target
)
1133 { /* Disconnect-Reconnect page for mode_sense */
1134 unsigned char disconnect_pg
[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1135 0, 0, 0, 0, 0, 0, 0, 0};
1137 memcpy(p
, disconnect_pg
, sizeof(disconnect_pg
));
1139 memset(p
+ 2, 0, sizeof(disconnect_pg
) - 2);
1140 return sizeof(disconnect_pg
);
1143 static int resp_format_pg(unsigned char * p
, int pcontrol
, int target
)
1144 { /* Format device page for mode_sense */
1145 unsigned char format_pg
[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1146 0, 0, 0, 0, 0, 0, 0, 0,
1147 0, 0, 0, 0, 0x40, 0, 0, 0};
1149 memcpy(p
, format_pg
, sizeof(format_pg
));
1150 p
[10] = (sdebug_sectors_per
>> 8) & 0xff;
1151 p
[11] = sdebug_sectors_per
& 0xff;
1152 p
[12] = (scsi_debug_sector_size
>> 8) & 0xff;
1153 p
[13] = scsi_debug_sector_size
& 0xff;
1154 if (DEV_REMOVEABLE(target
))
1155 p
[20] |= 0x20; /* should agree with INQUIRY */
1157 memset(p
+ 2, 0, sizeof(format_pg
) - 2);
1158 return sizeof(format_pg
);
1161 static int resp_caching_pg(unsigned char * p
, int pcontrol
, int target
)
1162 { /* Caching page for mode_sense */
1163 unsigned char caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1164 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1166 memcpy(p
, caching_pg
, sizeof(caching_pg
));
1168 memset(p
+ 2, 0, sizeof(caching_pg
) - 2);
1169 return sizeof(caching_pg
);
1172 static int resp_ctrl_m_pg(unsigned char * p
, int pcontrol
, int target
)
1173 { /* Control mode page for mode_sense */
1174 unsigned char ch_ctrl_m_pg
[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1176 unsigned char d_ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1179 if (scsi_debug_dsense
)
1180 ctrl_m_pg
[2] |= 0x4;
1182 ctrl_m_pg
[2] &= ~0x4;
1185 ctrl_m_pg
[5] |= 0x80; /* ATO=1 */
1187 memcpy(p
, ctrl_m_pg
, sizeof(ctrl_m_pg
));
1189 memcpy(p
+ 2, ch_ctrl_m_pg
, sizeof(ch_ctrl_m_pg
));
1190 else if (2 == pcontrol
)
1191 memcpy(p
, d_ctrl_m_pg
, sizeof(d_ctrl_m_pg
));
1192 return sizeof(ctrl_m_pg
);
1196 static int resp_iec_m_pg(unsigned char * p
, int pcontrol
, int target
)
1197 { /* Informational Exceptions control mode page for mode_sense */
1198 unsigned char ch_iec_m_pg
[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1200 unsigned char d_iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1203 memcpy(p
, iec_m_pg
, sizeof(iec_m_pg
));
1205 memcpy(p
+ 2, ch_iec_m_pg
, sizeof(ch_iec_m_pg
));
1206 else if (2 == pcontrol
)
1207 memcpy(p
, d_iec_m_pg
, sizeof(d_iec_m_pg
));
1208 return sizeof(iec_m_pg
);
1211 static int resp_sas_sf_m_pg(unsigned char * p
, int pcontrol
, int target
)
1212 { /* SAS SSP mode page - short format for mode_sense */
1213 unsigned char sas_sf_m_pg
[] = {0x19, 0x6,
1214 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1216 memcpy(p
, sas_sf_m_pg
, sizeof(sas_sf_m_pg
));
1218 memset(p
+ 2, 0, sizeof(sas_sf_m_pg
) - 2);
1219 return sizeof(sas_sf_m_pg
);
1223 static int resp_sas_pcd_m_spg(unsigned char * p
, int pcontrol
, int target
,
1225 { /* SAS phy control and discover mode page for mode_sense */
1226 unsigned char sas_pcd_m_pg
[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1227 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1228 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1229 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1230 0x2, 0, 0, 0, 0, 0, 0, 0,
1231 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1232 0, 0, 0, 0, 0, 0, 0, 0,
1233 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1234 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1235 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1236 0x3, 0, 0, 0, 0, 0, 0, 0,
1237 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1238 0, 0, 0, 0, 0, 0, 0, 0,
1242 port_a
= target_dev_id
+ 1;
1243 port_b
= port_a
+ 1;
1244 memcpy(p
, sas_pcd_m_pg
, sizeof(sas_pcd_m_pg
));
1245 p
[20] = (port_a
>> 24);
1246 p
[21] = (port_a
>> 16) & 0xff;
1247 p
[22] = (port_a
>> 8) & 0xff;
1248 p
[23] = port_a
& 0xff;
1249 p
[48 + 20] = (port_b
>> 24);
1250 p
[48 + 21] = (port_b
>> 16) & 0xff;
1251 p
[48 + 22] = (port_b
>> 8) & 0xff;
1252 p
[48 + 23] = port_b
& 0xff;
1254 memset(p
+ 4, 0, sizeof(sas_pcd_m_pg
) - 4);
1255 return sizeof(sas_pcd_m_pg
);
1258 static int resp_sas_sha_m_spg(unsigned char * p
, int pcontrol
)
1259 { /* SAS SSP shared protocol specific port mode subpage */
1260 unsigned char sas_sha_m_pg
[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1261 0, 0, 0, 0, 0, 0, 0, 0,
1264 memcpy(p
, sas_sha_m_pg
, sizeof(sas_sha_m_pg
));
1266 memset(p
+ 4, 0, sizeof(sas_sha_m_pg
) - 4);
1267 return sizeof(sas_sha_m_pg
);
1270 #define SDEBUG_MAX_MSENSE_SZ 256
1272 static int resp_mode_sense(struct scsi_cmnd
* scp
, int target
,
1273 struct sdebug_dev_info
* devip
)
1275 unsigned char dbd
, llbaa
;
1276 int pcontrol
, pcode
, subpcode
, bd_len
;
1277 unsigned char dev_spec
;
1278 int k
, alloc_len
, msense_6
, offset
, len
, errsts
, target_dev_id
;
1280 unsigned char arr
[SDEBUG_MAX_MSENSE_SZ
];
1281 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1283 if ((errsts
= check_readiness(scp
, 1, devip
)))
1285 dbd
= !!(cmd
[1] & 0x8);
1286 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1287 pcode
= cmd
[2] & 0x3f;
1289 msense_6
= (MODE_SENSE
== cmd
[0]);
1290 llbaa
= msense_6
? 0 : !!(cmd
[1] & 0x10);
1291 if ((0 == scsi_debug_ptype
) && (0 == dbd
))
1292 bd_len
= llbaa
? 16 : 8;
1295 alloc_len
= msense_6
? cmd
[4] : ((cmd
[7] << 8) | cmd
[8]);
1296 memset(arr
, 0, SDEBUG_MAX_MSENSE_SZ
);
1297 if (0x3 == pcontrol
) { /* Saving values not supported */
1298 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, SAVING_PARAMS_UNSUP
,
1300 return check_condition_result
;
1302 target_dev_id
= ((devip
->sdbg_host
->shost
->host_no
+ 1) * 2000) +
1303 (devip
->target
* 1000) - 3;
1304 /* set DPOFUA bit for disks */
1305 if (0 == scsi_debug_ptype
)
1306 dev_spec
= (DEV_READONLY(target
) ? 0x80 : 0x0) | 0x10;
1316 arr
[4] = 0x1; /* set LONGLBA bit */
1317 arr
[7] = bd_len
; /* assume 255 or less */
1321 if ((bd_len
> 0) && (!sdebug_capacity
))
1322 sdebug_capacity
= get_sdebug_capacity();
1325 if (sdebug_capacity
> 0xfffffffe) {
1331 ap
[0] = (sdebug_capacity
>> 24) & 0xff;
1332 ap
[1] = (sdebug_capacity
>> 16) & 0xff;
1333 ap
[2] = (sdebug_capacity
>> 8) & 0xff;
1334 ap
[3] = sdebug_capacity
& 0xff;
1336 ap
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
1337 ap
[7] = scsi_debug_sector_size
& 0xff;
1340 } else if (16 == bd_len
) {
1341 unsigned long long capac
= sdebug_capacity
;
1343 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1344 ap
[7 - k
] = capac
& 0xff;
1345 ap
[12] = (scsi_debug_sector_size
>> 24) & 0xff;
1346 ap
[13] = (scsi_debug_sector_size
>> 16) & 0xff;
1347 ap
[14] = (scsi_debug_sector_size
>> 8) & 0xff;
1348 ap
[15] = scsi_debug_sector_size
& 0xff;
1353 if ((subpcode
> 0x0) && (subpcode
< 0xff) && (0x19 != pcode
)) {
1354 /* TODO: Control Extension page */
1355 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1357 return check_condition_result
;
1360 case 0x1: /* Read-Write error recovery page, direct access */
1361 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1364 case 0x2: /* Disconnect-Reconnect page, all devices */
1365 len
= resp_disconnect_pg(ap
, pcontrol
, target
);
1368 case 0x3: /* Format device page, direct access */
1369 len
= resp_format_pg(ap
, pcontrol
, target
);
1372 case 0x8: /* Caching page, direct access */
1373 len
= resp_caching_pg(ap
, pcontrol
, target
);
1376 case 0xa: /* Control Mode page, all devices */
1377 len
= resp_ctrl_m_pg(ap
, pcontrol
, target
);
1380 case 0x19: /* if spc==1 then sas phy, control+discover */
1381 if ((subpcode
> 0x2) && (subpcode
< 0xff)) {
1382 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1383 INVALID_FIELD_IN_CDB
, 0);
1384 return check_condition_result
;
1387 if ((0x0 == subpcode
) || (0xff == subpcode
))
1388 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1389 if ((0x1 == subpcode
) || (0xff == subpcode
))
1390 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
, target
,
1392 if ((0x2 == subpcode
) || (0xff == subpcode
))
1393 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1396 case 0x1c: /* Informational Exceptions Mode page, all devices */
1397 len
= resp_iec_m_pg(ap
, pcontrol
, target
);
1400 case 0x3f: /* Read all Mode pages */
1401 if ((0 == subpcode
) || (0xff == subpcode
)) {
1402 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1403 len
+= resp_disconnect_pg(ap
+ len
, pcontrol
, target
);
1404 len
+= resp_format_pg(ap
+ len
, pcontrol
, target
);
1405 len
+= resp_caching_pg(ap
+ len
, pcontrol
, target
);
1406 len
+= resp_ctrl_m_pg(ap
+ len
, pcontrol
, target
);
1407 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1408 if (0xff == subpcode
) {
1409 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
,
1410 target
, target_dev_id
);
1411 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1413 len
+= resp_iec_m_pg(ap
+ len
, pcontrol
, target
);
1415 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1416 INVALID_FIELD_IN_CDB
, 0);
1417 return check_condition_result
;
1422 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1424 return check_condition_result
;
1427 arr
[0] = offset
- 1;
1429 arr
[0] = ((offset
- 2) >> 8) & 0xff;
1430 arr
[1] = (offset
- 2) & 0xff;
1432 return fill_from_dev_buffer(scp
, arr
, min(alloc_len
, offset
));
1435 #define SDEBUG_MAX_MSELECT_SZ 512
1437 static int resp_mode_select(struct scsi_cmnd
* scp
, int mselect6
,
1438 struct sdebug_dev_info
* devip
)
1440 int pf
, sp
, ps
, md_len
, bd_len
, off
, spf
, pg_len
;
1441 int param_len
, res
, errsts
, mpage
;
1442 unsigned char arr
[SDEBUG_MAX_MSELECT_SZ
];
1443 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1445 if ((errsts
= check_readiness(scp
, 1, devip
)))
1447 memset(arr
, 0, sizeof(arr
));
1450 param_len
= mselect6
? cmd
[4] : ((cmd
[7] << 8) + cmd
[8]);
1451 if ((0 == pf
) || sp
|| (param_len
> SDEBUG_MAX_MSELECT_SZ
)) {
1452 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1453 INVALID_FIELD_IN_CDB
, 0);
1454 return check_condition_result
;
1456 res
= fetch_to_dev_buffer(scp
, arr
, param_len
);
1458 return (DID_ERROR
<< 16);
1459 else if ((res
< param_len
) &&
1460 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
1461 printk(KERN_INFO
"scsi_debug: mode_select: cdb indicated=%d, "
1462 " IO sent=%d bytes\n", param_len
, res
);
1463 md_len
= mselect6
? (arr
[0] + 1) : ((arr
[0] << 8) + arr
[1] + 2);
1464 bd_len
= mselect6
? arr
[3] : ((arr
[6] << 8) + arr
[7]);
1466 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1467 INVALID_FIELD_IN_PARAM_LIST
, 0);
1468 return check_condition_result
;
1470 off
= bd_len
+ (mselect6
? 4 : 8);
1471 mpage
= arr
[off
] & 0x3f;
1472 ps
= !!(arr
[off
] & 0x80);
1474 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1475 INVALID_FIELD_IN_PARAM_LIST
, 0);
1476 return check_condition_result
;
1478 spf
= !!(arr
[off
] & 0x40);
1479 pg_len
= spf
? ((arr
[off
+ 2] << 8) + arr
[off
+ 3] + 4) :
1481 if ((pg_len
+ off
) > param_len
) {
1482 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1483 PARAMETER_LIST_LENGTH_ERR
, 0);
1484 return check_condition_result
;
1487 case 0xa: /* Control Mode page */
1488 if (ctrl_m_pg
[1] == arr
[off
+ 1]) {
1489 memcpy(ctrl_m_pg
+ 2, arr
+ off
+ 2,
1490 sizeof(ctrl_m_pg
) - 2);
1491 scsi_debug_dsense
= !!(ctrl_m_pg
[2] & 0x4);
1495 case 0x1c: /* Informational Exceptions Mode page */
1496 if (iec_m_pg
[1] == arr
[off
+ 1]) {
1497 memcpy(iec_m_pg
+ 2, arr
+ off
+ 2,
1498 sizeof(iec_m_pg
) - 2);
1505 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1506 INVALID_FIELD_IN_PARAM_LIST
, 0);
1507 return check_condition_result
;
1510 static int resp_temp_l_pg(unsigned char * arr
)
1512 unsigned char temp_l_pg
[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1513 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1516 memcpy(arr
, temp_l_pg
, sizeof(temp_l_pg
));
1517 return sizeof(temp_l_pg
);
1520 static int resp_ie_l_pg(unsigned char * arr
)
1522 unsigned char ie_l_pg
[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1525 memcpy(arr
, ie_l_pg
, sizeof(ie_l_pg
));
1526 if (iec_m_pg
[2] & 0x4) { /* TEST bit set */
1527 arr
[4] = THRESHOLD_EXCEEDED
;
1530 return sizeof(ie_l_pg
);
1533 #define SDEBUG_MAX_LSENSE_SZ 512
1535 static int resp_log_sense(struct scsi_cmnd
* scp
,
1536 struct sdebug_dev_info
* devip
)
1538 int ppc
, sp
, pcontrol
, pcode
, subpcode
, alloc_len
, errsts
, len
, n
;
1539 unsigned char arr
[SDEBUG_MAX_LSENSE_SZ
];
1540 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1542 if ((errsts
= check_readiness(scp
, 1, devip
)))
1544 memset(arr
, 0, sizeof(arr
));
1548 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1549 INVALID_FIELD_IN_CDB
, 0);
1550 return check_condition_result
;
1552 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1553 pcode
= cmd
[2] & 0x3f;
1554 subpcode
= cmd
[3] & 0xff;
1555 alloc_len
= (cmd
[7] << 8) + cmd
[8];
1557 if (0 == subpcode
) {
1559 case 0x0: /* Supported log pages log page */
1561 arr
[n
++] = 0x0; /* this page */
1562 arr
[n
++] = 0xd; /* Temperature */
1563 arr
[n
++] = 0x2f; /* Informational exceptions */
1566 case 0xd: /* Temperature log page */
1567 arr
[3] = resp_temp_l_pg(arr
+ 4);
1569 case 0x2f: /* Informational exceptions log page */
1570 arr
[3] = resp_ie_l_pg(arr
+ 4);
1573 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1574 INVALID_FIELD_IN_CDB
, 0);
1575 return check_condition_result
;
1577 } else if (0xff == subpcode
) {
1581 case 0x0: /* Supported log pages and subpages log page */
1584 arr
[n
++] = 0x0; /* 0,0 page */
1586 arr
[n
++] = 0xff; /* this page */
1588 arr
[n
++] = 0x0; /* Temperature */
1590 arr
[n
++] = 0x0; /* Informational exceptions */
1593 case 0xd: /* Temperature subpages */
1596 arr
[n
++] = 0x0; /* Temperature */
1599 case 0x2f: /* Informational exceptions subpages */
1602 arr
[n
++] = 0x0; /* Informational exceptions */
1606 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1607 INVALID_FIELD_IN_CDB
, 0);
1608 return check_condition_result
;
1611 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1612 INVALID_FIELD_IN_CDB
, 0);
1613 return check_condition_result
;
1615 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
1616 return fill_from_dev_buffer(scp
, arr
,
1617 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
static int check_device_access_params(struct sdebug_dev_info *devi,
				      unsigned long long lba, unsigned int num)
{
	if (lba + num > sdebug_capacity) {
		mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	/* transfer length excessive (tie in to block limits VPD page) */
	if (num > sdebug_store_sectors) {
		mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return check_condition_result;
	}
	return 0;
}
static int do_device_access(struct scsi_cmnd *scmd,
			    struct sdebug_dev_info *devi,
			    unsigned long long lba, unsigned int num, int write)
{
	int ret;
	unsigned int block, rest = 0;
	int (*func)(struct scsi_cmnd *, unsigned char *, int);

	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;

	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
		   (num - rest) * scsi_debug_sector_size);
	if (rest)
		ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);

	return ret;
}
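/*
 * Example of the wrap handling above (illustrative numbers): with a
 * store of 1000 sectors, an 8 sector request starting at block 996 is
 * split into 4 sectors at the end of fake_storep plus 4 sectors that
 * wrap back to offset 0; this is how the ramdisk emulates a device
 * larger than the allocated store.
 */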
1657 static int prot_verify_read(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
1658 unsigned int sectors
, u32 ei_lba
)
1660 unsigned int i
, resid
;
1661 struct scatterlist
*psgl
;
1662 struct sd_dif_tuple
*sdt
;
1664 sector_t tmp_sec
= start_sec
;
1667 start_sec
= do_div(tmp_sec
, sdebug_store_sectors
);
1669 sdt
= (struct sd_dif_tuple
*)(dif_storep
+ dif_offset(start_sec
));
1671 for (i
= 0 ; i
< sectors
; i
++) {
1674 if (sdt
[i
].app_tag
== 0xffff)
1677 sector
= start_sec
+ i
;
1679 switch (scsi_debug_guard
) {
1681 csum
= ip_compute_csum(fake_storep
+
1682 sector
* scsi_debug_sector_size
,
1683 scsi_debug_sector_size
);
1686 csum
= crc_t10dif(fake_storep
+
1687 sector
* scsi_debug_sector_size
,
1688 scsi_debug_sector_size
);
1689 csum
= cpu_to_be16(csum
);
1695 if (sdt
[i
].guard_tag
!= csum
) {
1696 printk(KERN_ERR
"%s: GUARD check failed on sector %lu" \
1697 " rcvd 0x%04x, data 0x%04x\n", __func__
,
1698 (unsigned long)sector
,
1699 be16_to_cpu(sdt
[i
].guard_tag
),
1705 if (scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
&&
1706 be32_to_cpu(sdt
[i
].ref_tag
) != (sector
& 0xffffffff)) {
1707 printk(KERN_ERR
"%s: REF check failed on sector %lu\n",
1708 __func__
, (unsigned long)sector
);
1713 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
1714 be32_to_cpu(sdt
[i
].ref_tag
) != ei_lba
) {
1715 printk(KERN_ERR
"%s: REF check failed on sector %lu\n",
1716 __func__
, (unsigned long)sector
);
1724 resid
= sectors
* 8; /* Bytes of protection data to copy into sgl */
1727 scsi_for_each_prot_sg(SCpnt
, psgl
, scsi_prot_sg_count(SCpnt
), i
) {
1728 int len
= min(psgl
->length
, resid
);
1730 paddr
= kmap_atomic(sg_page(psgl
), KM_IRQ0
) + psgl
->offset
;
1731 memcpy(paddr
, dif_storep
+ dif_offset(sector
), len
);
1734 if (sector
>= sdebug_store_sectors
) {
1737 sector
= do_div(tmp_sec
, sdebug_store_sectors
);
1740 kunmap_atomic(paddr
, KM_IRQ0
);
1748 static int resp_read(struct scsi_cmnd
*SCpnt
, unsigned long long lba
,
1749 unsigned int num
, struct sdebug_dev_info
*devip
,
1752 unsigned long iflags
;
1755 ret
= check_device_access_params(devip
, lba
, num
);
1759 if ((SCSI_DEBUG_OPT_MEDIUM_ERR
& scsi_debug_opts
) &&
1760 (lba
<= OPT_MEDIUM_ERR_ADDR
) &&
1761 ((lba
+ num
) > OPT_MEDIUM_ERR_ADDR
)) {
1762 /* claim unrecoverable read error */
1763 mk_sense_buffer(devip
, MEDIUM_ERROR
, UNRECOVERED_READ_ERR
,
1765 /* set info field and valid bit for fixed descriptor */
1766 if (0x70 == (devip
->sense_buff
[0] & 0x7f)) {
1767 devip
->sense_buff
[0] |= 0x80; /* Valid bit */
1768 ret
= OPT_MEDIUM_ERR_ADDR
;
1769 devip
->sense_buff
[3] = (ret
>> 24) & 0xff;
1770 devip
->sense_buff
[4] = (ret
>> 16) & 0xff;
1771 devip
->sense_buff
[5] = (ret
>> 8) & 0xff;
1772 devip
->sense_buff
[6] = ret
& 0xff;
1774 return check_condition_result
;
1778 if (scsi_debug_dix
&& scsi_prot_sg_count(SCpnt
)) {
1779 int prot_ret
= prot_verify_read(SCpnt
, lba
, num
, ei_lba
);
1782 mk_sense_buffer(devip
, ABORTED_COMMAND
, 0x10, prot_ret
);
1783 return illegal_condition_result
;
1787 read_lock_irqsave(&atomic_rw
, iflags
);
1788 ret
= do_device_access(SCpnt
, devip
, lba
, num
, 0);
1789 read_unlock_irqrestore(&atomic_rw
, iflags
);
1793 void dump_sector(unsigned char *buf
, int len
)
1797 printk(KERN_ERR
">>> Sector Dump <<<\n");
1799 for (i
= 0 ; i
< len
; i
+= 16) {
1800 printk(KERN_ERR
"%04d: ", i
);
1802 for (j
= 0 ; j
< 16 ; j
++) {
1803 unsigned char c
= buf
[i
+j
];
1804 if (c
>= 0x20 && c
< 0x7e)
1805 printk(" %c ", buf
[i
+j
]);
1807 printk("%02x ", buf
[i
+j
]);
1814 static int prot_verify_write(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
1815 unsigned int sectors
, u32 ei_lba
)
1818 struct sd_dif_tuple
*sdt
;
1819 struct scatterlist
*dsgl
= scsi_sglist(SCpnt
);
1820 struct scatterlist
*psgl
= scsi_prot_sglist(SCpnt
);
1821 void *daddr
, *paddr
;
1822 sector_t tmp_sec
= start_sec
;
1825 unsigned short csum
;
1827 sector
= do_div(tmp_sec
, sdebug_store_sectors
);
1829 BUG_ON(scsi_sg_count(SCpnt
) == 0);
1830 BUG_ON(scsi_prot_sg_count(SCpnt
) == 0);
1832 paddr
= kmap_atomic(sg_page(psgl
), KM_IRQ1
) + psgl
->offset
;
1835 /* For each data page */
1836 scsi_for_each_sg(SCpnt
, dsgl
, scsi_sg_count(SCpnt
), i
) {
1837 daddr
= kmap_atomic(sg_page(dsgl
), KM_IRQ0
) + dsgl
->offset
;
1839 /* For each sector-sized chunk in data page */
1840 for (j
= 0 ; j
< dsgl
->length
; j
+= scsi_debug_sector_size
) {
1842 /* If we're at the end of the current
1843 * protection page advance to the next one
1845 if (ppage_offset
>= psgl
->length
) {
1846 kunmap_atomic(paddr
, KM_IRQ1
);
1847 psgl
= sg_next(psgl
);
1848 BUG_ON(psgl
== NULL
);
1849 paddr
= kmap_atomic(sg_page(psgl
), KM_IRQ1
)
1854 sdt
= paddr
+ ppage_offset
;
1856 switch (scsi_debug_guard
) {
1858 csum
= ip_compute_csum(daddr
,
1859 scsi_debug_sector_size
);
1862 csum
= cpu_to_be16(crc_t10dif(daddr
,
1863 scsi_debug_sector_size
));
1871 if (sdt
->guard_tag
!= csum
) {
1873 "%s: GUARD check failed on sector %lu " \
1874 "rcvd 0x%04x, calculated 0x%04x\n",
1875 __func__
, (unsigned long)sector
,
1876 be16_to_cpu(sdt
->guard_tag
),
1879 dump_sector(daddr
, scsi_debug_sector_size
);
1883 if (scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
&&
1884 be32_to_cpu(sdt
->ref_tag
)
1885 != (start_sec
& 0xffffffff)) {
1887 "%s: REF check failed on sector %lu\n",
1888 __func__
, (unsigned long)sector
);
1890 dump_sector(daddr
, scsi_debug_sector_size
);
1894 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
1895 be32_to_cpu(sdt
->ref_tag
) != ei_lba
) {
1897 "%s: REF check failed on sector %lu\n",
1898 __func__
, (unsigned long)sector
);
1900 dump_sector(daddr
, scsi_debug_sector_size
);
1904 /* Would be great to copy this in bigger
1905 * chunks. However, for the sake of
1906 * correctness we need to verify each sector
1907 * before writing it to "stable" storage
1909 memcpy(dif_storep
+ dif_offset(sector
), sdt
, 8);
1913 if (sector
== sdebug_store_sectors
)
1914 sector
= 0; /* Force wrap */
1918 daddr
+= scsi_debug_sector_size
;
1919 ppage_offset
+= sizeof(struct sd_dif_tuple
);
1922 kunmap_atomic(daddr
, KM_IRQ0
);
1925 kunmap_atomic(paddr
, KM_IRQ1
);
1933 kunmap_atomic(daddr
, KM_IRQ0
);
1934 kunmap_atomic(paddr
, KM_IRQ1
);
1938 static unsigned int map_state(sector_t lba
, unsigned int *num
)
1940 unsigned int granularity
, alignment
, mapped
;
1941 sector_t block
, next
, end
;
1943 granularity
= scsi_debug_unmap_granularity
;
1944 alignment
= granularity
- scsi_debug_unmap_alignment
;
1945 block
= lba
+ alignment
;
1946 do_div(block
, granularity
);
1948 mapped
= test_bit(block
, map_storep
);
1951 next
= find_next_zero_bit(map_storep
, map_size
, block
);
1953 next
= find_next_bit(map_storep
, map_size
, block
);
1955 end
= next
* granularity
- scsi_debug_unmap_alignment
;
1961 static void map_region(sector_t lba
, unsigned int len
)
1963 unsigned int granularity
, alignment
;
1964 sector_t end
= lba
+ len
;
1966 granularity
= scsi_debug_unmap_granularity
;
1967 alignment
= granularity
- scsi_debug_unmap_alignment
;
1970 sector_t block
, rem
;
1972 block
= lba
+ alignment
;
1973 rem
= do_div(block
, granularity
);
1975 set_bit(block
, map_storep
);
1977 lba
+= granularity
- rem
;
1981 static void unmap_region(sector_t lba
, unsigned int len
)
1983 unsigned int granularity
, alignment
;
1984 sector_t end
= lba
+ len
;
1986 granularity
= scsi_debug_unmap_granularity
;
1987 alignment
= granularity
- scsi_debug_unmap_alignment
;
1990 sector_t block
, rem
;
1992 block
= lba
+ alignment
;
1993 rem
= do_div(block
, granularity
);
1995 if (rem
== 0 && lba
+ granularity
<= end
)
1996 clear_bit(block
, map_storep
);
1998 lba
+= granularity
- rem
;
static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
		      unsigned int num, struct sdebug_dev_info *devip,
		      u32 ei_lba)
{
	unsigned long iflags;
	int ret;

	ret = check_device_access_params(devip, lba, num);
	if (ret)
		return ret;

	/* DIX + T10 DIF */
	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);

		if (prot_ret) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	write_lock_irqsave(&atomic_rw, iflags);
	ret = do_device_access(SCpnt, devip, lba, num, 1);
	if (scsi_debug_unmap_granularity)
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (-1 == ret)
		return (DID_ERROR << 16);
	else if ((ret < (num * scsi_debug_sector_size)) &&
		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);

	return 0;
}
static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
			   unsigned int num, struct sdebug_dev_info *devip,
			   u32 ei_lba, unsigned int unmap)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;

	ret = check_device_access_params(devip, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_unmap_granularity) {
		unmap_region(lba, num);
		goto out;
	}

	/* Else fetch one logical block */
	ret = fetch_to_dev_buffer(scmd,
				  fake_storep + (lba * scsi_debug_sector_size),
				  scsi_debug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return (DID_ERROR << 16);
	} else if ((ret < (num * scsi_debug_sector_size)) &&
		   (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
		       fake_storep + (lba * scsi_debug_sector_size),
		       scsi_debug_sector_size);

	if (scsi_debug_unmap_granularity)
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
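
/*
 * UNMAP parameter data: an 8 byte header followed by 16 byte block
 * descriptors, each carrying a 64 bit starting LBA and a 32 bit block
 * count (see the get_unaligned_be64/be32 accesses below).
 */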
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};

static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	unsigned int i, payload_len, descriptors;
	int ret;

	ret = check_readiness(scmd, 1, devip);
	if (ret)
		return ret;

	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
	BUG_ON(scsi_bufflen(scmd) != payload_len);

	descriptors = (payload_len - 8) / 16;

	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf)
		return check_condition_result;

	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(devip, lba, num);
		if (ret)
			goto out;

		unmap_region(lba, num);
	}

	ret = 0;

out:
	kfree(buf);

	return ret;
}
#define SDEBUG_GET_LBA_STATUS_LEN 32

static int resp_get_lba_status(struct scsi_cmnd * scmd,
			       struct sdebug_dev_info * devip)
{
	unsigned long long lba;
	unsigned int alloc_len, mapped, num;
	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
	int ret;

	ret = check_readiness(scmd, 1, devip);
	if (ret)
		return ret;

	lba = get_unaligned_be64(&scmd->cmnd[2]);
	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);

	if (alloc_len < 24)
		return 0;

	ret = check_device_access_params(devip, lba, 1);
	if (ret)
		return ret;

	mapped = map_state(lba, &num);

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(16, &arr[0]);	/* Parameter Data Length */
	put_unaligned_be64(lba, &arr[8]);	/* LBA */
	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
	arr[20] = !mapped;		/* mapped = 0, unmapped = 1 */

	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
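
/*
 * REPORT LUNS: build a response listing lun 0 (or 1..n when no_lun_0 is
 * set) for this target, optionally appending the well known REPORT LUNS
 * w-lun, truncated to the SDEBUG_RLUN_ARR_SZ sized scratch buffer.
 */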
#define SDEBUG_RLUN_ARR_SZ 256

static int resp_report_luns(struct scsi_cmnd * scp,
			    struct sdebug_dev_info * devip)
{
	unsigned int alloc_len;
	int lun_cnt, i, upper, num, n, wlun, lun;
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	int select_report = (int)cmd[2];
	struct scsi_lun *one_lun;
	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
	unsigned char * max_addr;

	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
	if ((alloc_len < 4) || (select_report > 2)) {
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		return check_condition_result;
	}
	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
	lun_cnt = scsi_debug_max_luns;
	if (1 == select_report)
		lun_cnt = 0;
	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;
	wlun = (select_report > 0) ? 1 : 0;
	num = lun_cnt + wlun;
	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
			    sizeof(struct scsi_lun)), num);
	if (n < num) {
		wlun = 0;
		lun_cnt = n;
	}
	one_lun = (struct scsi_lun *) &arr[8];
	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
	     ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
	     i++, lun++) {
		upper = (lun >> 8) & 0x3f;
		if (upper)
			one_lun[i].scsi_lun[0] =
			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
		one_lun[i].scsi_lun[1] = lun & 0xff;
	}
	if (wlun) {
		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
		i++;
	}
	alloc_len = (unsigned char *)(one_lun + i) - arr;
	return fill_from_dev_buffer(scp, arr,
				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
}
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int i, j, ret = -1;
	unsigned char *kaddr, *buf;
	unsigned int offset;
	struct scatterlist *sg;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	/* better not to use temporary buffer. */
	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf)
		return ret;

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
		if (!kaddr)
			goto out;

		for (j = 0; j < sg->length; j++)
			*(kaddr + sg->offset + j) ^= *(buf + offset + j);

		offset += sg->length;
		kunmap_atomic(kaddr, KM_USER0);
	}
	ret = 0;
out:
	kfree(buf);

	return ret;
}
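
/*
 * Delayed completion path: commands scheduled with a positive delay are
 * parked in queued_arr[] with a kernel timer; when the timer fires this
 * handler posts the saved result and calls the mid level done callback.
 */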
/* When timer goes off this function is called. */
static void timer_intr_handler(unsigned long indx)
{
	struct sdebug_queued_cmd * sqcp;
	unsigned long iflags;

	if (indx >= SCSI_DEBUG_CANQUEUE) {
		printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
		       "large\n");
		return;
	}
	spin_lock_irqsave(&queued_arr_lock, iflags);
	sqcp = &queued_arr[(int)indx];
	if (! sqcp->in_use) {
		printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
		       "interrupt\n");
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		return;
	}
	sqcp->in_use = 0;
	if (sqcp->done_funct) {
		sqcp->a_cmnd->result = sqcp->scsi_result;
		sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
	}
	sqcp->done_funct = NULL;
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
static struct sdebug_dev_info *
sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		devip->sdbg_host = sdbg_host;
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}
static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
{
	struct sdebug_host_info * sdbg_host;
	struct sdebug_dev_info * open_devip = NULL;
	struct sdebug_dev_info * devip =
			(struct sdebug_dev_info *)sdev->hostdata;

	if (devip)
		return devip;
	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
	if (!sdbg_host) {
		printk(KERN_ERR "Host info NULL\n");
		return NULL;
	}
	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if ((devip->used) && (devip->channel == sdev->channel) &&
		    (devip->target == sdev->id) &&
		    (devip->lun == sdev->lun))
			return devip;
		else {
			if ((!devip->used) && (!open_devip))
				open_devip = devip;
		}
	}
	if (!open_devip) { /* try and make a new one */
		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
		if (!open_devip) {
			printk(KERN_ERR "%s: out of memory at line %d\n",
			       __func__, __LINE__);
			return NULL;
		}
	}

	open_devip->channel = sdev->channel;
	open_devip->target = sdev->id;
	open_devip->lun = sdev->lun;
	open_devip->sdbg_host = sdbg_host;
	open_devip->reset = 1;
	open_devip->used = 1;
	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
	if (scsi_debug_dsense)
		open_devip->sense_buff[0] = 0x72;
	else {
		open_devip->sense_buff[0] = 0x70;
		open_devip->sense_buff[7] = 0xa;
	}
	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;

	return open_devip;
}
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}

static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
	devip = devInfoReg(sdp);
	if (NULL == devip)
		return 1;	/* no resources, will be marked offline */
	sdp->hostdata = devip;
	if (sdp->host->cmd_per_lun)
		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
					sdp->host->cmd_per_lun);
	blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
	return 0;
}

static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
			(struct sdebug_dev_info *)sdp->hostdata;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (devip) {
		/* make this slot available for re-use */
		devip->used = 0;
		sdp->hostdata = NULL;
	}
}
/* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
		sqcp = &queued_arr[k];
		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
			del_timer_sync(&sqcp->cmnd_timer);
			sqcp->in_use = 0;
			sqcp->a_cmnd = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0;
}

/* Deletes (stops) timers of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
		sqcp = &queued_arr[k];
		if (sqcp->in_use && sqcp->a_cmnd) {
			del_timer_sync(&sqcp->cmnd_timer);
			sqcp->in_use = 0;
			sqcp->a_cmnd = NULL;
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: abort\n");
	++num_aborts;
	stop_queued_cmnd(SCpnt);
	return SUCCESS;
}

static int scsi_debug_biosparam(struct scsi_device *sdev,
		struct block_device * bdev, sector_t capacity, int *info)
{
	int res;
	unsigned char *buf;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: biosparam\n");
	buf = scsi_bios_ptable(bdev);
	if (buf) {
		res = scsi_partsize(buf, capacity,
				    &info[2], &info[0], &info[1]);
		kfree(buf);
		if (! res)
			return res;
	}
	info[0] = sdebug_heads;
	info[1] = sdebug_sectors_per;
	info[2] = sdebug_cylinders_per;
	return 0;
}
static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_dev_info * devip;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: device_reset\n");
	++num_dev_resets;
	if (SCpnt) {
		devip = devInfoReg(SCpnt->device);
		if (devip)
			devip->reset = 1;
	}
	return SUCCESS;
}

static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info * dev_info;
	struct scsi_device * sdp;
	struct Scsi_Host * hp;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: bus_reset\n");
	++num_bus_resets;
	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
		if (sdbg_host) {
			list_for_each_entry(dev_info,
					    &sdbg_host->dev_info_list,
					    dev_list)
				dev_info->reset = 1;
		}
	}
	return SUCCESS;
}

static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info * sdbg_host;
	struct sdebug_dev_info * dev_info;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: host_reset\n");
	++num_host_resets;
	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
				    dev_list)
			dev_info->reset = 1;
	}
	spin_unlock(&sdebug_host_list_lock);
	stop_all_queued();
	return SUCCESS;
}
/* Initializes timers in queued array */
static void __init init_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd * sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
		sqcp = &queued_arr[k];
		init_timer(&sqcp->cmnd_timer);
		sqcp->in_use = 0;
		sqcp->a_cmnd = NULL;
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
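
/*
 * Write a minimal DOS partition table into the ramdisk: up to
 * SDEBUG_MAX_PARTS primary entries of type 0x83, sized by splitting the
 * store evenly on cylinder boundaries, plus the 0x55 0xAA MBR signature.
 */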
static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
		return;
	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
		       "partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / scsi_debug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	for (k = 1; k < scsi_debug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[scsi_debug_num_parts] = num_sectors;
	starts[scsi_debug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;

		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = start_sec;
		pp->nr_sects = end_sec - start_sec + 1;
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
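
/*
 * schedule_resp() either completes the command immediately (delta_jiff
 * <= 0) by setting the result and calling done(), or queues it in
 * queued_arr[] with a timer so timer_intr_handler() completes it after
 * the requested number of jiffies.
 */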
static int schedule_resp(struct scsi_cmnd * cmnd,
			 struct sdebug_dev_info * devip,
			 done_funct_t done, int scsi_result, int delta_jiff)
{
	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
		if (scsi_result) {
			struct scsi_device * sdp = cmnd->device;

			printk(KERN_INFO "scsi_debug:    <%u %u %u %u> "
			       "non-zero result=0x%x\n", sdp->host->host_no,
			       sdp->channel, sdp->id, sdp->lun, scsi_result);
		}
	}
	if (cmnd && devip) {
		/* simulate autosense by this driver */
		if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
			memcpy(cmnd->sense_buffer, devip->sense_buff,
			       (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
			       SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
	}
	if (delta_jiff <= 0) {
		if (cmnd)
			cmnd->result = scsi_result;
		if (done)
			done(cmnd);
		return 0;
	} else {
		unsigned long iflags;
		int k;
		struct sdebug_queued_cmd * sqcp = NULL;

		spin_lock_irqsave(&queued_arr_lock, iflags);
		for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
			sqcp = &queued_arr[k];
			if (! sqcp->in_use)
				break;
		}
		if (k >= SCSI_DEBUG_CANQUEUE) {
			spin_unlock_irqrestore(&queued_arr_lock, iflags);
			printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
			return 1;	/* report busy to mid level */
		}
		sqcp->in_use = 1;
		sqcp->a_cmnd = cmnd;
		sqcp->scsi_result = scsi_result;
		sqcp->done_funct = done;
		sqcp->cmnd_timer.function = timer_intr_handler;
		sqcp->cmnd_timer.data = k;
		sqcp->cmnd_timer.expires = jiffies + delta_jiff;
		add_timer(&sqcp->cmnd_timer);
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		return 0;
	}
}
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SCSI_DEBUG_VERSION);

MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
static char sdebug_info[256];

static const char * scsi_debug_info(struct Scsi_Host * shp)
{
	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
		scsi_debug_version_date, scsi_debug_dev_size_mb,
		scsi_debug_opts);
	return sdebug_info;
}
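
/*
 * proc interface: writing a decimal number to the driver's /proc/scsi
 * entry updates scsi_debug_opts (and resets the every_nth command
 * counter); reading dumps the current parameters and the abort/reset/DIX
 * statistics kept by the driver.
 */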
/* scsi_debug_proc_info
 * Used if the driver currently has no own support for /proc/scsi
 */
static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
				int length, int inout)
{
	int len, pos, begin;
	int orig_length;

	orig_length = length;

	if (inout == 1) {
		char arr[16];
		int minLen = length > 15 ? 15 : length;

		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		memcpy(arr, buffer, minLen);
		arr[minLen] = '\0';
		if (1 != sscanf(arr, "%d", &pos))
			return -EINVAL;
		scsi_debug_opts = pos;
		if (scsi_debug_every_nth != 0)
			scsi_debug_cmnd_count = 0;
		return length;
	}
	begin = 0;
	pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
	    "%s [%s]\n"
	    "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
	    "every_nth=%d(curr:%d)\n"
	    "delay=%d, max_luns=%d, scsi_level=%d\n"
	    "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
	    "number of aborts=%d, device_reset=%d, bus_resets=%d, "
	    "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
	    SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
	    scsi_debug_cmnd_count, scsi_debug_delay,
	    scsi_debug_max_luns, scsi_debug_scsi_level,
	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
	    num_host_resets, dix_reads, dix_writes, dif_errors);
	if (pos < offset) {
		len = 0;
		begin = pos;
	}
	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if (len > length)
		len = length;
	return len;
}
static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
}

static ssize_t sdebug_delay_store(struct device_driver * ddp,
				  const char * buf, size_t count)
{
	int delay;
	char work[20];

	if (1 == sscanf(buf, "%10s", work)) {
		if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
			scsi_debug_delay = delay;
			return count;
		}
	}
	return -EINVAL;
}
DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
	    sdebug_delay_store);

static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
}

static ssize_t sdebug_opts_store(struct device_driver * ddp,
				 const char * buf, size_t count)
{
	int opts;
	char work[20];

	if (1 == sscanf(buf, "%10s", work)) {
		if (0 == strnicmp(work,"0x", 2)) {
			if (1 == sscanf(&work[2], "%x", &opts))
				goto opts_done;
		} else {
			if (1 == sscanf(work, "%d", &opts))
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	scsi_debug_opts = opts;
	scsi_debug_cmnd_count = 0;
	return count;
}
DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
	    sdebug_opts_store);

static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
}
static ssize_t sdebug_ptype_store(struct device_driver * ddp,
				  const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_ptype = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);

static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
}
static ssize_t sdebug_dsense_store(struct device_driver * ddp,
				   const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_dsense = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
	    sdebug_dsense_store);

static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
}
static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
				    const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
	    sdebug_fake_rw_store);

static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
}
static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
	    sdebug_no_lun_0_store);

static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
}
static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
	    sdebug_num_tgts_store);

static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
}
DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);

static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
}
DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
}
static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
				      const char * buf, size_t count)
{
	int nth;

	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
		scsi_debug_every_nth = nth;
		scsi_debug_cmnd_count = 0;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
	    sdebug_every_nth_store);

static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
}
static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_max_luns = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
	    sdebug_max_luns_store);

static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
}
DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);

static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
}
static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
				       const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_virtual_gb = n;

		sdebug_capacity = get_sdebug_capacity();

		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
	    sdebug_virtual_gb_store);
static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
}

static ssize_t sdebug_add_host_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			sdebug_add_adapter();
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_remove_adapter();
		} while (++delta_hosts);
	}
	return count;
}
DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
	    sdebug_add_host_store);

static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
					  char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
}
static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
					   const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
	    sdebug_vpd_use_hostno_store);

static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
}
DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);

static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
}
DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);

static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
}
DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);

static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
}
DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);

static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
}
DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);

static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	if (scsi_debug_unmap_granularity == 0)
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);

	buf[count++] = '\n';
	buf[count++] = 0;

	return count;
}
DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
/* Note: The following function creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: sdebug_add_host_store() above.
 */
static int do_create_driverfs_files(void)
{
	int ret;

	ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
	return ret;
}
static void do_remove_driverfs_files(void)
{
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
}
static void pseudo_0_release(struct device *dev)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
}

static struct device pseudo_primary = {
	.init_name	= "pseudo_0",
	.release	= pseudo_0_release,
};
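
/*
 * Module initialisation: validate the sector_size/dif/guard/ato and
 * provisioning parameters, size and allocate the shared ramdisk (plus
 * the optional DIF and provisioning-map stores), register the pseudo
 * device, bus and driver, then add the requested number of adapters.
 */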
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	switch (scsi_debug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
		       scsi_debug_sector_size);
		return -EINVAL;
	}

	switch (scsi_debug_dif) {

	case SD_DIF_TYPE0_PROTECTION:
	case SD_DIF_TYPE1_PROTECTION:
	case SD_DIF_TYPE2_PROTECTION:
	case SD_DIF_TYPE3_PROTECTION:
		break;

	default:
		printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (scsi_debug_guard > 1) {
		printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (scsi_debug_ato > 1) {
		printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (scsi_debug_physblk_exp > 15) {
		printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
		       scsi_debug_physblk_exp);
		return -EINVAL;
	}

	if (scsi_debug_lowest_aligned > 0x3fff) {
		printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
		       scsi_debug_lowest_aligned);
		return -EINVAL;
	}

	if (scsi_debug_dev_size_mb < 1)
		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / scsi_debug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (scsi_debug_dev_size_mb >= 16)
		sdebug_heads = 32;
	else if (scsi_debug_dev_size_mb >= 256)
		sdebug_heads = 64;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}

	fake_storep = vmalloc(sz);
	if (NULL == fake_storep) {
		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
		return -ENOMEM;
	}
	memset(fake_storep, 0, sz);
	if (scsi_debug_num_parts > 0)
		sdebug_build_parts(fake_storep, sz);

	if (scsi_debug_dif) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
		dif_storep = vmalloc(dif_size);

		printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
		       dif_size, dif_storep);

		if (dif_storep == NULL) {
			printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(dif_storep, 0xff, dif_size);
	}

	if (scsi_debug_unmap_granularity) {
		unsigned int map_bytes;

		if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
			printk(KERN_ERR
			       "%s: ERR: unmap_granularity < unmap_alignment\n",
			       __func__);
			return -EINVAL;
		}

		map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
		map_bytes = map_size >> 3;
		map_storep = vmalloc(map_bytes);

		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
		       map_size);

		if (map_storep == NULL) {
			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(map_storep, 0x0, map_bytes);

		/* Map first 1KB for partition table */
		if (scsi_debug_num_parts)
			map_region(0, 2);
	}

	ret = device_register(&pseudo_primary);
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
		       ret);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
		       ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
		       ret);
		goto bus_unreg;
	}
	ret = do_create_driverfs_files();
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
		       ret);
		goto del_files;
	}

	init_all_queued();

	host_to_add = scsi_debug_add_host;
	scsi_debug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			printk(KERN_ERR "scsi_debug_init: "
			       "sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
		printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
		       scsi_debug_add_host);
	}
	return 0;

del_files:
	do_remove_driverfs_files();
	driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	device_unregister(&pseudo_primary);
free_vm:
	if (map_storep)
		vfree(map_storep);
	if (dif_storep)
		vfree(dif_storep);
	vfree(fake_storep);

	return ret;
}
static void __exit scsi_debug_exit(void)
{
	int k = scsi_debug_add_host;

	stop_all_queued();
	for (; k; k--)
		sdebug_remove_adapter();
	do_remove_driverfs_files();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	device_unregister(&pseudo_primary);

	if (dif_storep)
		vfree(dif_storep);

	vfree(fake_storep);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
static int sdebug_add_adapter(void)
{
	int k, devs_per_host;
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
	if (NULL == sdbg_host) {
		printk(KERN_ERR "%s: out of memory at line %d\n",
		       __func__, __LINE__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo) {
			printk(KERN_ERR "%s: out of memory at line %d\n",
			       __func__, __LINE__);
			error = -ENOMEM;
			goto clean;
		}
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = &pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);

	error = device_register(&sdbg_host->dev);

	if (error)
		goto clean;

	++scsi_debug_add_host;
	return error;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	kfree(sdbg_host);
	return error;
}
static void sdebug_remove_adapter(void)
{
	struct sdebug_host_info * sdbg_host = NULL;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		list_del(&sdbg_host->host_list);
	}
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--scsi_debug_add_host;
}
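
/*
 * Main command entry point: look up (or create) the sdebug_dev_info for
 * the addressed LUN, optionally inject an error every scsi_debug_every_nth
 * commands according to scsi_debug_opts, dispatch on the CDB opcode and
 * hand the result to schedule_resp() for immediate or delayed completion.
 */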
int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
{
	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
	int len, k;
	unsigned int num;
	unsigned long long lba;
	u32 ei_lba;
	int errsts = 0;
	int target = SCpnt->device->id;
	struct sdebug_dev_info *devip = NULL;
	int inj_recovered = 0;
	int inj_transport = 0;
	int inj_dif = 0;
	int inj_dix = 0;
	int delay_override = 0;
	int unmap = 0;

	scsi_set_resid(SCpnt, 0);
	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
		printk(KERN_INFO "scsi_debug: cmd ");
		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
			printk("%02x ", (int)cmd[k]);
		printk("\n");
	}

	if (target == SCpnt->device->host->hostt->this_id) {
		printk(KERN_INFO "scsi_debug: initiator's id used as "
		       "target!\n");
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	}

	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	devip = devInfoReg(SCpnt->device);
	if (NULL == devip)
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);

	if ((scsi_debug_every_nth != 0) &&
	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
		scsi_debug_cmnd_count = 0;
		if (scsi_debug_every_nth < -1)
			scsi_debug_every_nth = -1;
		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
			return 0; /* ignore command causing timeout */
		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
			inj_recovered = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
			inj_transport = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
			inj_dif = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
			inj_dix = 1; /* to reads and writes below */
	}

	if (devip->wlun) {
		switch (*cmd) {
		case INQUIRY:
		case REQUEST_SENSE:
		case TEST_UNIT_READY:
		case REPORT_LUNS:
			break;  /* only allowable wlun commands */
		default:
			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
				       "not supported for wlun\n", *cmd);
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			return schedule_resp(SCpnt, devip, done, errsts,
					     0);
		}
	}

	switch (*cmd) {
	case INQUIRY:     /* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_inquiry(SCpnt, target, devip);
		break;
	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_requests(SCpnt, devip);
		break;
	case REZERO_UNIT:	/* actually this is REWIND for SSC */
	case START_STOP:
		errsts = resp_start_stop(SCpnt, devip);
		break;
	case ALLOW_MEDIUM_REMOVAL:
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
			       cmd[4] ? "inhibited" : "enabled");
		break;
	case SEND_DIAGNOSTIC:     /* mandatory */
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case TEST_UNIT_READY:     /* mandatory */
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case RESERVE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RESERVE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case READ_CAPACITY:
		errsts = resp_readcap(SCpnt, devip);
		break;
	case SERVICE_ACTION_IN:
		if (cmd[1] == SAI_READ_CAPACITY_16)
			errsts = resp_readcap16(SCpnt, devip);
		else if (cmd[1] == SAI_GET_LBA_STATUS) {

			if (scsi_debug_unmap_max_desc == 0) {
				mk_sense_buffer(devip, ILLEGAL_REQUEST,
						INVALID_COMMAND_OPCODE, 0);
				errsts = check_condition_result;
			} else
				errsts = resp_get_lba_status(SCpnt, devip);
		} else {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
		}
		break;
	case MAINTENANCE_IN:
		if (MI_REPORT_TARGET_PGS != cmd[1]) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}
		errsts = resp_report_tgtpgs(SCpnt, devip);
		break;
	case READ_16:
	case READ_12:
	case READ_10:
		/* READ{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case READ_6:
read:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_transport && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case REPORT_LUNS:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_report_luns(SCpnt, devip);
		break;
	case VERIFY:		/* 10 byte SBC-2 command */
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_16:
	case WRITE_12:
	case WRITE_10:
		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case WRITE_6:
write:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case WRITE_SAME_16:
		if (cmd[1] & 0x8)
			unmap = 1;
		/* fall through */
	case WRITE_SAME:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
		break;
	case UNMAP:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;

		if (scsi_debug_unmap_max_desc == 0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
		} else
			errsts = resp_unmap(SCpnt, devip);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		errsts = resp_mode_sense(SCpnt, target, devip);
		break;
	case MODE_SELECT:
		errsts = resp_mode_select(SCpnt, 1, devip);
		break;
	case MODE_SELECT_10:
		errsts = resp_mode_select(SCpnt, 0, devip);
		break;
	case LOG_SENSE:
		errsts = resp_log_sense(SCpnt, devip);
		break;
	case SYNCHRONIZE_CACHE:
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_BUFFER:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case XDWRITEREAD_10:
		if (!scsi_bidi_cmnd(SCpnt)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			errsts = check_condition_result;
			break;
		}

		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
		break;
	case VARIABLE_LENGTH_CMD:
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {

			if ((cmd[10] & 0xe0) == 0)
				printk(KERN_ERR
				       "Unprotected RD/WR to DIF device\n");

			if (cmd[9] == READ_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto read;
			}

			if (cmd[9] == WRITE_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto write;
			}
		}

		mk_sense_buffer(devip, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		errsts = check_condition_result;
		break;
	default:
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
			       "supported\n", *cmd);
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;	/* Unit attention takes precedence */
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
		errsts = check_condition_result;
		break;
	}
	return schedule_resp(SCpnt, devip, done, errsts,
			     (delay_override ? 0 : scsi_debug_delay));
}
static struct scsi_host_template sdebug_driver_template = {
	.proc_info =		scsi_debug_proc_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.bios_param =		scsi_debug_biosparam,
	.can_queue =		SCSI_DEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		256,
	.cmd_per_lun =		16,
	.max_sectors =		0xffff,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
};
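
/*
 * Bus probe: allocate a Scsi_Host for this pseudo adapter, size
 * max_id/max_lun from the module parameters, advertise the configured
 * DIF/DIX protection and guard type, then add and scan the host.
 */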
static int sdebug_driver_probe(struct device * dev)
{
	int error = 0;
	int host_prot;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	sdbg_host = to_sdebug_host(dev);

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		printk(KERN_ERR "%s: scsi_register failed\n", __func__);
		error = -ENODEV;
		return error;
	}

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
		hpnt->max_id = scsi_debug_num_tgts + 1;
	else
		hpnt->max_id = scsi_debug_num_tgts;
	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */

	host_prot = 0;

	switch (scsi_debug_dif) {

	case SD_DIF_TYPE1_PROTECTION:
		host_prot = SHOST_DIF_TYPE1_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		host_prot = SHOST_DIF_TYPE2_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		host_prot = SHOST_DIF_TYPE3_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, host_prot);

	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (scsi_debug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
static int sdebug_driver_remove(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		printk(KERN_ERR "%s: Unable to locate host info\n",
		       __func__);
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
};