/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 *  This version is more generic, simulating a variable number of disks
 *  (or disk-like devices) sharing a common amount of RAM. To be more
 *  realistic, the simulated devices have the transport attributes of
 *  SAS disks.
 *
 *  For documentation see http://www.torque.net/sg/sdebug26.html
 *
 *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
 *   dpg: work for devfs large number of disks [20010809]
 *        forked for lk 2.5 series [20011216, 20020101]
 *        use vmalloc() more inquiry+mode_sense [20020302]
 *        add timers for delayed responses [20020721]
 *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
 *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
 *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
 *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
 */
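
/*
 * For example (the values are only illustrative), the same parameters can be
 * given either on the kernel boot line or at module load time:
 *
 *     scsi_debug.num_tgts=2 scsi_debug.max_luns=4            (boot line)
 *     modprobe scsi_debug num_tgts=2 max_luns=4 dev_size_mb=16
 */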
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "scsi_logging.h"
#define SCSI_DEBUG_VERSION "1.81"
static const char * scsi_debug_version_date = "20070104";
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4
#define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a
#define INVALID_OPCODE 0x20
#define ADDR_OUT_OF_RANGE 0x21
#define INVALID_COMMAND_OPCODE 0x20
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define POWERON_RESET 0x29
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3

#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
/* Default values for driver parameters */
#define DEF_NUM_HOST   1
#define DEF_NUM_TGTS   1
#define DEF_MAX_LUNS   1
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_DEV_SIZE_MB   8
#define DEF_EVERY_NTH   0
#define DEF_NUM_PARTS   0
#define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
#define DEF_NO_LUN_0   0
#define DEF_VIRTUAL_GB   0
#define DEF_FAKE_RW	0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_SECTOR_SIZE 512
#define DEF_PHYSBLK_EXP 0
#define DEF_LOWEST_ALIGNED 0
#define DEF_UNMAP_MAX_BLOCKS 0
#define DEF_UNMAP_MAX_DESC 0
#define DEF_UNMAP_GRANULARITY 0
#define DEF_UNMAP_ALIGNMENT 0
/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE   1
#define SCSI_DEBUG_OPT_MEDIUM_ERR   2
#define SCSI_DEBUG_OPT_TIMEOUT   4
#define SCSI_DEBUG_OPT_RECOVERED_ERR   8
#define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
#define SCSI_DEBUG_OPT_DIF_ERR   32
#define SCSI_DEBUG_OPT_DIX_ERR   64
/* When "every_nth" > 0 then modulo "every_nth" commands:
 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 * - a RECOVERED_ERROR is simulated on successful read and write
 *   commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 * - a TRANSPORT_ERROR is simulated on successful read and write
 *   commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 *
 * When "every_nth" < 0 then after "- every_nth" commands:
 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
 * - a RECOVERED_ERROR is simulated on successful read and write
 *   commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
 * - a TRANSPORT_ERROR is simulated on successful read and write
 *   commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
 * This will continue until some other action occurs (e.g. the user
 * writing a new value (other than -1 or 1) to every_nth via sysfs).
 */
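
/* For example (illustrative values): "modprobe scsi_debug every_nth=100
 * opts=4" makes every 100th command appear to time out (opts=4 is
 * SCSI_DEBUG_OPT_TIMEOUT), while opts=8 (SCSI_DEBUG_OPT_RECOVERED_ERR) would
 * instead report a RECOVERED_ERROR on every 100th read or write. */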
/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
 * sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */

/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
 * or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0
#define SAM2_WLUN_REPORT_LUNS 0xc101
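/* 0xc101 follows the SAM-2 "well known logical unit" format: the upper bits
 * of the first byte select well-known-LUN addressing and the low byte (0x01)
 * names the REPORT LUNS w-lun itself. */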
static int scsi_debug_add_host = DEF_NUM_HOST;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_num_parts = DEF_NUM_PARTS;
static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
static int scsi_debug_opts = DEF_OPTS;
static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
static int scsi_debug_fake_rw = DEF_FAKE_RW;
static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
static int scsi_debug_dix = DEF_DIX;
static int scsi_debug_dif = DEF_DIF;
static int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;

static int scsi_debug_cmnd_count = 0;
#define DEV_READONLY(TGT)      (0)
#define DEV_REMOVEABLE(TGT)    (0)

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* old BIOS stuff, kernel may get rid of them but some mode sense pages
   may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

#define SDEBUG_MAX_PARTS 4

#define SDEBUG_SENSE_LEN 32

#define SCSI_DEBUG_CANQUEUE  255
#define SCSI_DEBUG_MAX_CMD_LEN 32
struct sdebug_dev_info {
	struct list_head dev_list;
	unsigned char sense_buff[SDEBUG_SENSE_LEN];	/* weak nexus */
	unsigned int channel;
	struct sdebug_host_info *sdbg_host;

struct sdebug_host_info {
	struct list_head host_list;
	struct Scsi_Host *shost;
	struct list_head dev_info_list;

#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

typedef void (* done_funct_t) (struct scsi_cmnd *);

struct sdebug_queued_cmd {
	struct timer_list cmnd_timer;
	done_funct_t done_funct;
	struct scsi_cmnd * a_cmnd;

static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
static unsigned char * fake_storep;	/* ramdisk storage */
static unsigned char *dif_storep;	/* protection info */
static void *map_storep;		/* provisioning map */

static unsigned long map_size;
static int num_aborts = 0;
static int num_dev_resets = 0;
static int num_bus_resets = 0;
static int num_host_resets = 0;
static int dix_writes;
static int dix_reads;
static int dif_errors;

static DEFINE_SPINLOCK(queued_arr_lock);
static DEFINE_RWLOCK(atomic_rw);

static char sdebug_proc_name[] = "scsi_debug";

static struct bus_type pseudo_lld_bus;
static inline sector_t dif_offset(sector_t sector)

static struct device_driver sdebug_driverfs_driver = {
	.name 		= sdebug_proc_name,
	.bus		= &pseudo_lld_bus,
};

static const int check_condition_result =
	(DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

static const int illegal_condition_result =
	(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,

static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,

static int sdebug_add_adapter(void);
static void sdebug_remove_adapter(void);
static void sdebug_max_tgts_luns(void)
{
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		hpnt = sdbg_host->shost;
		if ((hpnt->this_id >= 0) &&
		    (scsi_debug_num_tgts > hpnt->this_id))
			hpnt->max_id = scsi_debug_num_tgts + 1;
		else
			hpnt->max_id = scsi_debug_num_tgts;
		/* scsi_debug_max_luns; */
		hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
	}
	spin_unlock(&sdebug_host_list_lock);
}
static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
			    int asc, int asq)
{
	unsigned char *sbuff;

	sbuff = devip->sense_buff;
	memset(sbuff, 0, SDEBUG_SENSE_LEN);

	scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug:    [sense_key,asc,ascq]: "
		       "[0x%x,0x%x,0x%x]\n", key, asc, asq);
}
313 static void get_data_transfer_info(unsigned char *cmd
,
314 unsigned long long *lba
, unsigned int *num
,
320 case VARIABLE_LENGTH_CMD
:
321 *lba
= (u64
)cmd
[19] | (u64
)cmd
[18] << 8 |
322 (u64
)cmd
[17] << 16 | (u64
)cmd
[16] << 24 |
323 (u64
)cmd
[15] << 32 | (u64
)cmd
[14] << 40 |
324 (u64
)cmd
[13] << 48 | (u64
)cmd
[12] << 56;
326 *ei_lba
= (u32
)cmd
[23] | (u32
)cmd
[22] << 8 |
327 (u32
)cmd
[21] << 16 | (u32
)cmd
[20] << 24;
329 *num
= (u32
)cmd
[31] | (u32
)cmd
[30] << 8 | (u32
)cmd
[29] << 16 |
336 *lba
= (u64
)cmd
[9] | (u64
)cmd
[8] << 8 |
337 (u64
)cmd
[7] << 16 | (u64
)cmd
[6] << 24 |
338 (u64
)cmd
[5] << 32 | (u64
)cmd
[4] << 40 |
339 (u64
)cmd
[3] << 48 | (u64
)cmd
[2] << 56;
341 *num
= (u32
)cmd
[13] | (u32
)cmd
[12] << 8 | (u32
)cmd
[11] << 16 |
346 *lba
= (u32
)cmd
[5] | (u32
)cmd
[4] << 8 | (u32
)cmd
[3] << 16 |
349 *num
= (u32
)cmd
[9] | (u32
)cmd
[8] << 8 | (u32
)cmd
[7] << 16 |
356 *lba
= (u32
)cmd
[5] | (u32
)cmd
[4] << 8 | (u32
)cmd
[3] << 16 |
359 *num
= (u32
)cmd
[8] | (u32
)cmd
[7] << 8;
363 *lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
364 (u32
)(cmd
[1] & 0x1f) << 16;
365 *num
= (0 == cmd
[4]) ? 256 : cmd
[4];
372 static int scsi_debug_ioctl(struct scsi_device
*dev
, int cmd
, void __user
*arg
)
374 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) {
375 printk(KERN_INFO
"scsi_debug: ioctl: cmd=0x%x\n", cmd
);
378 /* return -ENOTTY; // correct return but upsets fdisk */
381 static int check_readiness(struct scsi_cmnd
* SCpnt
, int reset_only
,
382 struct sdebug_dev_info
* devip
)
385 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
386 printk(KERN_INFO
"scsi_debug: Reporting Unit "
387 "attention: power on reset\n");
389 mk_sense_buffer(devip
, UNIT_ATTENTION
, POWERON_RESET
, 0);
390 return check_condition_result
;
392 if ((0 == reset_only
) && devip
->stopped
) {
393 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
394 printk(KERN_INFO
"scsi_debug: Reporting Not "
395 "ready: initializing command required\n");
396 mk_sense_buffer(devip
, NOT_READY
, LOGICAL_UNIT_NOT_READY
,
398 return check_condition_result
;
403 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
404 static int fill_from_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
408 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
412 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_FROM_DEVICE
))
413 return (DID_ERROR
<< 16);
415 act_len
= sg_copy_from_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
418 sdb
->resid
-= act_len
;
420 sdb
->resid
= scsi_bufflen(scp
) - act_len
;
425 /* Returns number of bytes fetched into 'arr' or -1 if error. */
426 static int fetch_to_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
429 if (!scsi_bufflen(scp
))
431 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_TO_DEVICE
))
434 return scsi_sg_copy_to_buffer(scp
, arr
, arr_len
);
static const char * inq_vendor_id = "Linux   ";
static const char * inq_product_id = "scsi_debug      ";
static const char * inq_product_rev = "0004";
442 static int inquiry_evpd_83(unsigned char * arr
, int port_group_id
,
443 int target_dev_id
, int dev_id_num
,
444 const char * dev_id_str
,
450 port_a
= target_dev_id
+ 1;
451 /* T10 vendor identifier field format (faked) */
452 arr
[0] = 0x2; /* ASCII */
455 memcpy(&arr
[4], inq_vendor_id
, 8);
456 memcpy(&arr
[12], inq_product_id
, 16);
457 memcpy(&arr
[28], dev_id_str
, dev_id_str_len
);
458 num
= 8 + 16 + dev_id_str_len
;
461 if (dev_id_num
>= 0) {
462 /* NAA-5, Logical unit identifier (binary) */
463 arr
[num
++] = 0x1; /* binary (not necessarily sas) */
464 arr
[num
++] = 0x3; /* PIV=0, lu, naa */
467 arr
[num
++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
471 arr
[num
++] = (dev_id_num
>> 24);
472 arr
[num
++] = (dev_id_num
>> 16) & 0xff;
473 arr
[num
++] = (dev_id_num
>> 8) & 0xff;
474 arr
[num
++] = dev_id_num
& 0xff;
475 /* Target relative port number */
476 arr
[num
++] = 0x61; /* proto=sas, binary */
477 arr
[num
++] = 0x94; /* PIV=1, target port, rel port */
478 arr
[num
++] = 0x0; /* reserved */
479 arr
[num
++] = 0x4; /* length */
480 arr
[num
++] = 0x0; /* reserved */
481 arr
[num
++] = 0x0; /* reserved */
483 arr
[num
++] = 0x1; /* relative port A */
485 /* NAA-5, Target port identifier */
486 arr
[num
++] = 0x61; /* proto=sas, binary */
487 arr
[num
++] = 0x93; /* piv=1, target port, naa */
490 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
494 arr
[num
++] = (port_a
>> 24);
495 arr
[num
++] = (port_a
>> 16) & 0xff;
496 arr
[num
++] = (port_a
>> 8) & 0xff;
497 arr
[num
++] = port_a
& 0xff;
498 /* NAA-5, Target port group identifier */
499 arr
[num
++] = 0x61; /* proto=sas, binary */
500 arr
[num
++] = 0x95; /* piv=1, target port group id */
505 arr
[num
++] = (port_group_id
>> 8) & 0xff;
506 arr
[num
++] = port_group_id
& 0xff;
507 /* NAA-5, Target device identifier */
508 arr
[num
++] = 0x61; /* proto=sas, binary */
509 arr
[num
++] = 0xa3; /* piv=1, target device, naa */
512 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
516 arr
[num
++] = (target_dev_id
>> 24);
517 arr
[num
++] = (target_dev_id
>> 16) & 0xff;
518 arr
[num
++] = (target_dev_id
>> 8) & 0xff;
519 arr
[num
++] = target_dev_id
& 0xff;
520 /* SCSI name string: Target device identifier */
521 arr
[num
++] = 0x63; /* proto=sas, UTF-8 */
522 arr
[num
++] = 0xa8; /* piv=1, target device, SCSI name string */
525 memcpy(arr
+ num
, "naa.52222220", 12);
527 snprintf(b
, sizeof(b
), "%08X", target_dev_id
);
528 memcpy(arr
+ num
, b
, 8);
530 memset(arr
+ num
, 0, 4);
536 static unsigned char vpd84_data
[] = {
537 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
538 0x22,0x22,0x22,0x0,0xbb,0x1,
539 0x22,0x22,0x22,0x0,0xbb,0x2,
542 static int inquiry_evpd_84(unsigned char * arr
)
544 memcpy(arr
, vpd84_data
, sizeof(vpd84_data
));
545 return sizeof(vpd84_data
);
548 static int inquiry_evpd_85(unsigned char * arr
)
551 const char * na1
= "https://www.kernel.org/config";
552 const char * na2
= "http://www.kernel.org/log";
555 arr
[num
++] = 0x1; /* lu, storage config */
556 arr
[num
++] = 0x0; /* reserved */
	plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
563 memcpy(arr
+ num
, na1
, olen
);
564 memset(arr
+ num
+ olen
, 0, plen
- olen
);
567 arr
[num
++] = 0x4; /* lu, logging */
568 arr
[num
++] = 0x0; /* reserved */
573 plen
= ((plen
/ 4) + 1) * 4;
574 arr
[num
++] = plen
; /* length, null terminated, padded */
575 memcpy(arr
+ num
, na2
, olen
);
576 memset(arr
+ num
+ olen
, 0, plen
- olen
);
582 /* SCSI ports VPD page */
583 static int inquiry_evpd_88(unsigned char * arr
, int target_dev_id
)
588 port_a
= target_dev_id
+ 1;
590 arr
[num
++] = 0x0; /* reserved */
591 arr
[num
++] = 0x0; /* reserved */
593 arr
[num
++] = 0x1; /* relative port 1 (primary) */
594 memset(arr
+ num
, 0, 6);
597 arr
[num
++] = 12; /* length tp descriptor */
598 /* naa-5 target port identifier (A) */
599 arr
[num
++] = 0x61; /* proto=sas, binary */
600 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
601 arr
[num
++] = 0x0; /* reserved */
602 arr
[num
++] = 0x8; /* length */
603 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
607 arr
[num
++] = (port_a
>> 24);
608 arr
[num
++] = (port_a
>> 16) & 0xff;
609 arr
[num
++] = (port_a
>> 8) & 0xff;
610 arr
[num
++] = port_a
& 0xff;
612 arr
[num
++] = 0x0; /* reserved */
613 arr
[num
++] = 0x0; /* reserved */
615 arr
[num
++] = 0x2; /* relative port 2 (secondary) */
616 memset(arr
+ num
, 0, 6);
619 arr
[num
++] = 12; /* length tp descriptor */
620 /* naa-5 target port identifier (B) */
621 arr
[num
++] = 0x61; /* proto=sas, binary */
622 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
623 arr
[num
++] = 0x0; /* reserved */
624 arr
[num
++] = 0x8; /* length */
625 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
629 arr
[num
++] = (port_b
>> 24);
630 arr
[num
++] = (port_b
>> 16) & 0xff;
631 arr
[num
++] = (port_b
>> 8) & 0xff;
632 arr
[num
++] = port_b
& 0xff;
638 static unsigned char vpd89_data
[] = {
639 /* from 4th byte */ 0,0,0,0,
640 'l','i','n','u','x',' ',' ',' ',
641 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
643 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
645 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
646 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
647 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
648 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
650 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
652 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
654 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
655 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
656 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
657 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
658 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
659 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
660 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
661 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
662 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
663 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
664 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
665 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
666 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
667 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
668 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
669 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
670 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
671 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
672 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
673 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
674 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
675 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
676 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
677 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
678 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
679 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
682 static int inquiry_evpd_89(unsigned char * arr
)
684 memcpy(arr
, vpd89_data
, sizeof(vpd89_data
));
685 return sizeof(vpd89_data
);
689 /* Block limits VPD page (SBC-3) */
690 static unsigned char vpdb0_data
[] = {
691 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 static int inquiry_evpd_b0(unsigned char * arr
)
701 memcpy(arr
, vpdb0_data
, sizeof(vpdb0_data
));
702 gran
= 1 << scsi_debug_physblk_exp
;
703 arr
[2] = (gran
>> 8) & 0xff;
704 arr
[3] = gran
& 0xff;
705 if (sdebug_store_sectors
> 0x400) {
706 arr
[4] = (sdebug_store_sectors
>> 24) & 0xff;
707 arr
[5] = (sdebug_store_sectors
>> 16) & 0xff;
708 arr
[6] = (sdebug_store_sectors
>> 8) & 0xff;
709 arr
[7] = sdebug_store_sectors
& 0xff;
712 if (scsi_debug_unmap_max_desc
) {
715 if (scsi_debug_unmap_max_blocks
)
716 blocks
= scsi_debug_unmap_max_blocks
;
720 put_unaligned_be32(blocks
, &arr
[16]);
721 put_unaligned_be32(scsi_debug_unmap_max_desc
, &arr
[20]);
724 if (scsi_debug_unmap_alignment
) {
725 put_unaligned_be32(scsi_debug_unmap_alignment
, &arr
[28]);
726 arr
[28] |= 0x80; /* UGAVALID */
729 if (scsi_debug_unmap_granularity
) {
730 put_unaligned_be32(scsi_debug_unmap_granularity
, &arr
[24]);
731 return 0x3c; /* Mandatory page length for thin provisioning */
734 return sizeof(vpdb0_data
);
737 /* Block device characteristics VPD page (SBC-3) */
738 static int inquiry_evpd_b1(unsigned char *arr
)
740 memset(arr
, 0, 0x3c);
742 arr
[1] = 1; /* non rotating medium (e.g. solid state) */
744 arr
[3] = 5; /* less than 1.8" */
749 #define SDEBUG_LONG_INQ_SZ 96
750 #define SDEBUG_MAX_INQ_ARR_SZ 584
752 static int resp_inquiry(struct scsi_cmnd
* scp
, int target
,
753 struct sdebug_dev_info
* devip
)
755 unsigned char pq_pdt
;
757 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
758 int alloc_len
, n
, ret
;
760 alloc_len
= (cmd
[3] << 8) + cmd
[4];
761 arr
= kzalloc(SDEBUG_MAX_INQ_ARR_SZ
, GFP_ATOMIC
);
763 return DID_REQUEUE
<< 16;
765 pq_pdt
= 0x1e; /* present, wlun */
766 else if (scsi_debug_no_lun_0
&& (0 == devip
->lun
))
767 pq_pdt
= 0x7f; /* not present, no device type */
769 pq_pdt
= (scsi_debug_ptype
& 0x1f);
771 if (0x2 & cmd
[1]) { /* CMDDT bit set */
772 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
775 return check_condition_result
;
776 } else if (0x1 & cmd
[1]) { /* EVPD bit set */
777 int lu_id_num
, port_group_id
, target_dev_id
, len
;
779 int host_no
= devip
->sdbg_host
->shost
->host_no
;
781 port_group_id
= (((host_no
+ 1) & 0x7f) << 8) +
782 (devip
->channel
& 0x7f);
783 if (0 == scsi_debug_vpd_use_hostno
)
785 lu_id_num
= devip
->wlun
? -1 : (((host_no
+ 1) * 2000) +
786 (devip
->target
* 1000) + devip
->lun
);
787 target_dev_id
= ((host_no
+ 1) * 2000) +
788 (devip
->target
* 1000) - 3;
789 len
= scnprintf(lu_id_str
, 6, "%d", lu_id_num
);
790 if (0 == cmd
[2]) { /* supported vital product data pages */
791 arr
[1] = cmd
[2]; /*sanity */
793 arr
[n
++] = 0x0; /* this page */
794 arr
[n
++] = 0x80; /* unit serial number */
795 arr
[n
++] = 0x83; /* device identification */
796 arr
[n
++] = 0x84; /* software interface ident. */
797 arr
[n
++] = 0x85; /* management network addresses */
798 arr
[n
++] = 0x86; /* extended inquiry */
799 arr
[n
++] = 0x87; /* mode page policy */
800 arr
[n
++] = 0x88; /* SCSI ports */
801 arr
[n
++] = 0x89; /* ATA information */
802 arr
[n
++] = 0xb0; /* Block limits (SBC) */
803 arr
[n
++] = 0xb1; /* Block characteristics (SBC) */
804 arr
[3] = n
- 4; /* number of supported VPD pages */
805 } else if (0x80 == cmd
[2]) { /* unit serial number */
806 arr
[1] = cmd
[2]; /*sanity */
808 memcpy(&arr
[4], lu_id_str
, len
);
809 } else if (0x83 == cmd
[2]) { /* device identification */
810 arr
[1] = cmd
[2]; /*sanity */
811 arr
[3] = inquiry_evpd_83(&arr
[4], port_group_id
,
812 target_dev_id
, lu_id_num
,
814 } else if (0x84 == cmd
[2]) { /* Software interface ident. */
815 arr
[1] = cmd
[2]; /*sanity */
816 arr
[3] = inquiry_evpd_84(&arr
[4]);
817 } else if (0x85 == cmd
[2]) { /* Management network addresses */
818 arr
[1] = cmd
[2]; /*sanity */
819 arr
[3] = inquiry_evpd_85(&arr
[4]);
820 } else if (0x86 == cmd
[2]) { /* extended inquiry */
821 arr
[1] = cmd
[2]; /*sanity */
822 arr
[3] = 0x3c; /* number of following entries */
823 if (scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
)
824 arr
[4] = 0x4; /* SPT: GRD_CHK:1 */
825 else if (scsi_debug_dif
)
826 arr
[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
828 arr
[4] = 0x0; /* no protection stuff */
829 arr
[5] = 0x7; /* head of q, ordered + simple q's */
830 } else if (0x87 == cmd
[2]) { /* mode page policy */
831 arr
[1] = cmd
[2]; /*sanity */
832 arr
[3] = 0x8; /* number of following entries */
833 arr
[4] = 0x2; /* disconnect-reconnect mp */
834 arr
[6] = 0x80; /* mlus, shared */
835 arr
[8] = 0x18; /* protocol specific lu */
836 arr
[10] = 0x82; /* mlus, per initiator port */
837 } else if (0x88 == cmd
[2]) { /* SCSI Ports */
838 arr
[1] = cmd
[2]; /*sanity */
839 arr
[3] = inquiry_evpd_88(&arr
[4], target_dev_id
);
840 } else if (0x89 == cmd
[2]) { /* ATA information */
841 arr
[1] = cmd
[2]; /*sanity */
842 n
= inquiry_evpd_89(&arr
[4]);
845 } else if (0xb0 == cmd
[2]) { /* Block limits (SBC) */
846 arr
[1] = cmd
[2]; /*sanity */
847 arr
[3] = inquiry_evpd_b0(&arr
[4]);
848 } else if (0xb1 == cmd
[2]) { /* Block characteristics (SBC) */
849 arr
[1] = cmd
[2]; /*sanity */
850 arr
[3] = inquiry_evpd_b1(&arr
[4]);
852 /* Illegal request, invalid field in cdb */
853 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
854 INVALID_FIELD_IN_CDB
, 0);
856 return check_condition_result
;
858 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
859 ret
= fill_from_dev_buffer(scp
, arr
,
860 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
864 /* drops through here for a standard inquiry */
865 arr
[1] = DEV_REMOVEABLE(target
) ? 0x80 : 0; /* Removable disk */
866 arr
[2] = scsi_debug_scsi_level
;
867 arr
[3] = 2; /* response_data_format==2 */
868 arr
[4] = SDEBUG_LONG_INQ_SZ
- 5;
869 arr
[5] = scsi_debug_dif
? 1 : 0; /* PROTECT bit */
870 if (0 == scsi_debug_vpd_use_hostno
)
871 arr
[5] = 0x10; /* claim: implicit TGPS */
872 arr
[6] = 0x10; /* claim: MultiP */
873 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
874 arr
[7] = 0xa; /* claim: LINKED + CMDQUE */
875 memcpy(&arr
[8], inq_vendor_id
, 8);
876 memcpy(&arr
[16], inq_product_id
, 16);
877 memcpy(&arr
[32], inq_product_rev
, 4);
878 /* version descriptors (2 bytes each) follow */
879 arr
[58] = 0x0; arr
[59] = 0x77; /* SAM-3 ANSI */
880 arr
[60] = 0x3; arr
[61] = 0x14; /* SPC-3 ANSI */
882 if (scsi_debug_ptype
== 0) {
883 arr
[n
++] = 0x3; arr
[n
++] = 0x3d; /* SBC-2 ANSI */
884 } else if (scsi_debug_ptype
== 1) {
885 arr
[n
++] = 0x3; arr
[n
++] = 0x60; /* SSC-2 no version */
887 arr
[n
++] = 0xc; arr
[n
++] = 0xf; /* SAS-1.1 rev 10 */
888 ret
= fill_from_dev_buffer(scp
, arr
,
889 min(alloc_len
, SDEBUG_LONG_INQ_SZ
));
894 static int resp_requests(struct scsi_cmnd
* scp
,
895 struct sdebug_dev_info
* devip
)
897 unsigned char * sbuff
;
898 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
899 unsigned char arr
[SDEBUG_SENSE_LEN
];
903 memset(arr
, 0, sizeof(arr
));
904 if (devip
->reset
== 1)
905 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
906 want_dsense
= !!(cmd
[1] & 1) || scsi_debug_dsense
;
907 sbuff
= devip
->sense_buff
;
908 if ((iec_m_pg
[2] & 0x4) && (6 == (iec_m_pg
[3] & 0xf))) {
911 arr
[1] = 0x0; /* NO_SENSE in sense_key */
912 arr
[2] = THRESHOLD_EXCEEDED
;
913 arr
[3] = 0xff; /* TEST set and MRIE==6 */
916 arr
[2] = 0x0; /* NO_SENSE in sense_key */
917 arr
[7] = 0xa; /* 18 byte sense buffer */
918 arr
[12] = THRESHOLD_EXCEEDED
;
919 arr
[13] = 0xff; /* TEST set and MRIE==6 */
922 memcpy(arr
, sbuff
, SDEBUG_SENSE_LEN
);
923 if ((cmd
[1] & 1) && (! scsi_debug_dsense
)) {
924 /* DESC bit set and sense_buff in fixed format */
925 memset(arr
, 0, sizeof(arr
));
927 arr
[1] = sbuff
[2]; /* sense key */
928 arr
[2] = sbuff
[12]; /* asc */
929 arr
[3] = sbuff
[13]; /* ascq */
933 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
934 return fill_from_dev_buffer(scp
, arr
, len
);
937 static int resp_start_stop(struct scsi_cmnd
* scp
,
938 struct sdebug_dev_info
* devip
)
940 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
941 int power_cond
, errsts
, start
;
943 if ((errsts
= check_readiness(scp
, 1, devip
)))
945 power_cond
= (cmd
[4] & 0xf0) >> 4;
947 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
949 return check_condition_result
;
952 if (start
== devip
->stopped
)
953 devip
->stopped
= !start
;
957 static sector_t
get_sdebug_capacity(void)
959 if (scsi_debug_virtual_gb
> 0)
960 return 2048 * 1024 * (sector_t
)scsi_debug_virtual_gb
;
962 return sdebug_store_sectors
;
965 #define SDEBUG_READCAP_ARR_SZ 8
966 static int resp_readcap(struct scsi_cmnd
* scp
,
967 struct sdebug_dev_info
* devip
)
969 unsigned char arr
[SDEBUG_READCAP_ARR_SZ
];
973 if ((errsts
= check_readiness(scp
, 1, devip
)))
975 /* following just in case virtual_gb changed */
976 sdebug_capacity
= get_sdebug_capacity();
977 memset(arr
, 0, SDEBUG_READCAP_ARR_SZ
);
978 if (sdebug_capacity
< 0xffffffff) {
979 capac
= (unsigned int)sdebug_capacity
- 1;
980 arr
[0] = (capac
>> 24);
981 arr
[1] = (capac
>> 16) & 0xff;
982 arr
[2] = (capac
>> 8) & 0xff;
983 arr
[3] = capac
& 0xff;
990 arr
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
991 arr
[7] = scsi_debug_sector_size
& 0xff;
992 return fill_from_dev_buffer(scp
, arr
, SDEBUG_READCAP_ARR_SZ
);
995 #define SDEBUG_READCAP16_ARR_SZ 32
996 static int resp_readcap16(struct scsi_cmnd
* scp
,
997 struct sdebug_dev_info
* devip
)
999 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1000 unsigned char arr
[SDEBUG_READCAP16_ARR_SZ
];
1001 unsigned long long capac
;
1002 int errsts
, k
, alloc_len
;
1004 if ((errsts
= check_readiness(scp
, 1, devip
)))
1006 alloc_len
= ((cmd
[10] << 24) + (cmd
[11] << 16) + (cmd
[12] << 8)
1008 /* following just in case virtual_gb changed */
1009 sdebug_capacity
= get_sdebug_capacity();
1010 memset(arr
, 0, SDEBUG_READCAP16_ARR_SZ
);
1011 capac
= sdebug_capacity
- 1;
1012 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1013 arr
[7 - k
] = capac
& 0xff;
1014 arr
[8] = (scsi_debug_sector_size
>> 24) & 0xff;
1015 arr
[9] = (scsi_debug_sector_size
>> 16) & 0xff;
1016 arr
[10] = (scsi_debug_sector_size
>> 8) & 0xff;
1017 arr
[11] = scsi_debug_sector_size
& 0xff;
1018 arr
[13] = scsi_debug_physblk_exp
& 0xf;
1019 arr
[14] = (scsi_debug_lowest_aligned
>> 8) & 0x3f;
1021 if (scsi_debug_unmap_granularity
)
1022 arr
[14] |= 0x80; /* TPE */
1024 arr
[15] = scsi_debug_lowest_aligned
& 0xff;
1026 if (scsi_debug_dif
) {
1027 arr
[12] = (scsi_debug_dif
- 1) << 1; /* P_TYPE */
1028 arr
[12] |= 1; /* PROT_EN */
1031 return fill_from_dev_buffer(scp
, arr
,
1032 min(alloc_len
, SDEBUG_READCAP16_ARR_SZ
));
1035 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1037 static int resp_report_tgtpgs(struct scsi_cmnd
* scp
,
1038 struct sdebug_dev_info
* devip
)
1040 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1041 unsigned char * arr
;
1042 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1043 int n
, ret
, alen
, rlen
;
1044 int port_group_a
, port_group_b
, port_a
, port_b
;
1046 alen
= ((cmd
[6] << 24) + (cmd
[7] << 16) + (cmd
[8] << 8)
1049 arr
= kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ
, GFP_ATOMIC
);
1051 return DID_REQUEUE
<< 16;
1053 * EVPD page 0x88 states we have two ports, one
1054 * real and a fake port with no device connected.
1055 * So we create two port groups with one port each
1056 * and set the group with port B to unavailable.
1058 port_a
= 0x1; /* relative port A */
1059 port_b
= 0x2; /* relative port B */
1060 port_group_a
= (((host_no
+ 1) & 0x7f) << 8) +
1061 (devip
->channel
& 0x7f);
1062 port_group_b
= (((host_no
+ 1) & 0x7f) << 8) +
1063 (devip
->channel
& 0x7f) + 0x80;
1066 * The asymmetric access state is cycled according to the host_id.
1069 if (0 == scsi_debug_vpd_use_hostno
) {
1070 arr
[n
++] = host_no
% 3; /* Asymm access state */
1071 arr
[n
++] = 0x0F; /* claim: all states are supported */
1073 arr
[n
++] = 0x0; /* Active/Optimized path */
1074 arr
[n
++] = 0x01; /* claim: only support active/optimized paths */
1076 arr
[n
++] = (port_group_a
>> 8) & 0xff;
1077 arr
[n
++] = port_group_a
& 0xff;
1078 arr
[n
++] = 0; /* Reserved */
1079 arr
[n
++] = 0; /* Status code */
1080 arr
[n
++] = 0; /* Vendor unique */
1081 arr
[n
++] = 0x1; /* One port per group */
1082 arr
[n
++] = 0; /* Reserved */
1083 arr
[n
++] = 0; /* Reserved */
1084 arr
[n
++] = (port_a
>> 8) & 0xff;
1085 arr
[n
++] = port_a
& 0xff;
1086 arr
[n
++] = 3; /* Port unavailable */
1087 arr
[n
++] = 0x08; /* claim: only unavailalbe paths are supported */
1088 arr
[n
++] = (port_group_b
>> 8) & 0xff;
1089 arr
[n
++] = port_group_b
& 0xff;
1090 arr
[n
++] = 0; /* Reserved */
1091 arr
[n
++] = 0; /* Status code */
1092 arr
[n
++] = 0; /* Vendor unique */
1093 arr
[n
++] = 0x1; /* One port per group */
1094 arr
[n
++] = 0; /* Reserved */
1095 arr
[n
++] = 0; /* Reserved */
1096 arr
[n
++] = (port_b
>> 8) & 0xff;
1097 arr
[n
++] = port_b
& 0xff;
1100 arr
[0] = (rlen
>> 24) & 0xff;
1101 arr
[1] = (rlen
>> 16) & 0xff;
1102 arr
[2] = (rlen
>> 8) & 0xff;
1103 arr
[3] = rlen
& 0xff;
1106 * Return the smallest value of either
1107 * - The allocated length
1108 * - The constructed command length
1109 * - The maximum array size
1112 ret
= fill_from_dev_buffer(scp
, arr
,
1113 min(rlen
, SDEBUG_MAX_TGTPGS_ARR_SZ
));
1118 /* <<Following mode page info copied from ST318451LW>> */
1120 static int resp_err_recov_pg(unsigned char * p
, int pcontrol
, int target
)
1121 { /* Read-Write Error Recovery page for mode_sense */
1122 unsigned char err_recov_pg
[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1125 memcpy(p
, err_recov_pg
, sizeof(err_recov_pg
));
1127 memset(p
+ 2, 0, sizeof(err_recov_pg
) - 2);
1128 return sizeof(err_recov_pg
);
1131 static int resp_disconnect_pg(unsigned char * p
, int pcontrol
, int target
)
1132 { /* Disconnect-Reconnect page for mode_sense */
1133 unsigned char disconnect_pg
[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1134 0, 0, 0, 0, 0, 0, 0, 0};
1136 memcpy(p
, disconnect_pg
, sizeof(disconnect_pg
));
1138 memset(p
+ 2, 0, sizeof(disconnect_pg
) - 2);
1139 return sizeof(disconnect_pg
);
1142 static int resp_format_pg(unsigned char * p
, int pcontrol
, int target
)
1143 { /* Format device page for mode_sense */
1144 unsigned char format_pg
[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1145 0, 0, 0, 0, 0, 0, 0, 0,
1146 0, 0, 0, 0, 0x40, 0, 0, 0};
1148 memcpy(p
, format_pg
, sizeof(format_pg
));
1149 p
[10] = (sdebug_sectors_per
>> 8) & 0xff;
1150 p
[11] = sdebug_sectors_per
& 0xff;
1151 p
[12] = (scsi_debug_sector_size
>> 8) & 0xff;
1152 p
[13] = scsi_debug_sector_size
& 0xff;
1153 if (DEV_REMOVEABLE(target
))
1154 p
[20] |= 0x20; /* should agree with INQUIRY */
1156 memset(p
+ 2, 0, sizeof(format_pg
) - 2);
1157 return sizeof(format_pg
);
1160 static int resp_caching_pg(unsigned char * p
, int pcontrol
, int target
)
1161 { /* Caching page for mode_sense */
1162 unsigned char caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1163 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1165 memcpy(p
, caching_pg
, sizeof(caching_pg
));
1167 memset(p
+ 2, 0, sizeof(caching_pg
) - 2);
1168 return sizeof(caching_pg
);
1171 static int resp_ctrl_m_pg(unsigned char * p
, int pcontrol
, int target
)
1172 { /* Control mode page for mode_sense */
1173 unsigned char ch_ctrl_m_pg
[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1175 unsigned char d_ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1178 if (scsi_debug_dsense
)
1179 ctrl_m_pg
[2] |= 0x4;
1181 ctrl_m_pg
[2] &= ~0x4;
1184 ctrl_m_pg
[5] |= 0x80; /* ATO=1 */
1186 memcpy(p
, ctrl_m_pg
, sizeof(ctrl_m_pg
));
1188 memcpy(p
+ 2, ch_ctrl_m_pg
, sizeof(ch_ctrl_m_pg
));
1189 else if (2 == pcontrol
)
1190 memcpy(p
, d_ctrl_m_pg
, sizeof(d_ctrl_m_pg
));
1191 return sizeof(ctrl_m_pg
);
1195 static int resp_iec_m_pg(unsigned char * p
, int pcontrol
, int target
)
1196 { /* Informational Exceptions control mode page for mode_sense */
1197 unsigned char ch_iec_m_pg
[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1199 unsigned char d_iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1202 memcpy(p
, iec_m_pg
, sizeof(iec_m_pg
));
1204 memcpy(p
+ 2, ch_iec_m_pg
, sizeof(ch_iec_m_pg
));
1205 else if (2 == pcontrol
)
1206 memcpy(p
, d_iec_m_pg
, sizeof(d_iec_m_pg
));
1207 return sizeof(iec_m_pg
);
1210 static int resp_sas_sf_m_pg(unsigned char * p
, int pcontrol
, int target
)
1211 { /* SAS SSP mode page - short format for mode_sense */
1212 unsigned char sas_sf_m_pg
[] = {0x19, 0x6,
1213 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1215 memcpy(p
, sas_sf_m_pg
, sizeof(sas_sf_m_pg
));
1217 memset(p
+ 2, 0, sizeof(sas_sf_m_pg
) - 2);
1218 return sizeof(sas_sf_m_pg
);
1222 static int resp_sas_pcd_m_spg(unsigned char * p
, int pcontrol
, int target
,
1224 { /* SAS phy control and discover mode page for mode_sense */
1225 unsigned char sas_pcd_m_pg
[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1226 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1227 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1228 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1229 0x2, 0, 0, 0, 0, 0, 0, 0,
1230 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1231 0, 0, 0, 0, 0, 0, 0, 0,
1232 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1233 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1234 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1235 0x3, 0, 0, 0, 0, 0, 0, 0,
1236 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1237 0, 0, 0, 0, 0, 0, 0, 0,
1241 port_a
= target_dev_id
+ 1;
1242 port_b
= port_a
+ 1;
1243 memcpy(p
, sas_pcd_m_pg
, sizeof(sas_pcd_m_pg
));
1244 p
[20] = (port_a
>> 24);
1245 p
[21] = (port_a
>> 16) & 0xff;
1246 p
[22] = (port_a
>> 8) & 0xff;
1247 p
[23] = port_a
& 0xff;
1248 p
[48 + 20] = (port_b
>> 24);
1249 p
[48 + 21] = (port_b
>> 16) & 0xff;
1250 p
[48 + 22] = (port_b
>> 8) & 0xff;
1251 p
[48 + 23] = port_b
& 0xff;
1253 memset(p
+ 4, 0, sizeof(sas_pcd_m_pg
) - 4);
1254 return sizeof(sas_pcd_m_pg
);
1257 static int resp_sas_sha_m_spg(unsigned char * p
, int pcontrol
)
1258 { /* SAS SSP shared protocol specific port mode subpage */
1259 unsigned char sas_sha_m_pg
[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1260 0, 0, 0, 0, 0, 0, 0, 0,
1263 memcpy(p
, sas_sha_m_pg
, sizeof(sas_sha_m_pg
));
1265 memset(p
+ 4, 0, sizeof(sas_sha_m_pg
) - 4);
1266 return sizeof(sas_sha_m_pg
);
1269 #define SDEBUG_MAX_MSENSE_SZ 256
1271 static int resp_mode_sense(struct scsi_cmnd
* scp
, int target
,
1272 struct sdebug_dev_info
* devip
)
1274 unsigned char dbd
, llbaa
;
1275 int pcontrol
, pcode
, subpcode
, bd_len
;
1276 unsigned char dev_spec
;
1277 int k
, alloc_len
, msense_6
, offset
, len
, errsts
, target_dev_id
;
1279 unsigned char arr
[SDEBUG_MAX_MSENSE_SZ
];
1280 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1282 if ((errsts
= check_readiness(scp
, 1, devip
)))
1284 dbd
= !!(cmd
[1] & 0x8);
1285 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1286 pcode
= cmd
[2] & 0x3f;
1288 msense_6
= (MODE_SENSE
== cmd
[0]);
1289 llbaa
= msense_6
? 0 : !!(cmd
[1] & 0x10);
1290 if ((0 == scsi_debug_ptype
) && (0 == dbd
))
1291 bd_len
= llbaa
? 16 : 8;
1294 alloc_len
= msense_6
? cmd
[4] : ((cmd
[7] << 8) | cmd
[8]);
1295 memset(arr
, 0, SDEBUG_MAX_MSENSE_SZ
);
1296 if (0x3 == pcontrol
) { /* Saving values not supported */
1297 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, SAVING_PARAMS_UNSUP
,
1299 return check_condition_result
;
1301 target_dev_id
= ((devip
->sdbg_host
->shost
->host_no
+ 1) * 2000) +
1302 (devip
->target
* 1000) - 3;
1303 /* set DPOFUA bit for disks */
1304 if (0 == scsi_debug_ptype
)
1305 dev_spec
= (DEV_READONLY(target
) ? 0x80 : 0x0) | 0x10;
1315 arr
[4] = 0x1; /* set LONGLBA bit */
1316 arr
[7] = bd_len
; /* assume 255 or less */
1320 if ((bd_len
> 0) && (!sdebug_capacity
))
1321 sdebug_capacity
= get_sdebug_capacity();
1324 if (sdebug_capacity
> 0xfffffffe) {
1330 ap
[0] = (sdebug_capacity
>> 24) & 0xff;
1331 ap
[1] = (sdebug_capacity
>> 16) & 0xff;
1332 ap
[2] = (sdebug_capacity
>> 8) & 0xff;
1333 ap
[3] = sdebug_capacity
& 0xff;
1335 ap
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
1336 ap
[7] = scsi_debug_sector_size
& 0xff;
1339 } else if (16 == bd_len
) {
1340 unsigned long long capac
= sdebug_capacity
;
1342 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1343 ap
[7 - k
] = capac
& 0xff;
1344 ap
[12] = (scsi_debug_sector_size
>> 24) & 0xff;
1345 ap
[13] = (scsi_debug_sector_size
>> 16) & 0xff;
1346 ap
[14] = (scsi_debug_sector_size
>> 8) & 0xff;
1347 ap
[15] = scsi_debug_sector_size
& 0xff;
1352 if ((subpcode
> 0x0) && (subpcode
< 0xff) && (0x19 != pcode
)) {
1353 /* TODO: Control Extension page */
1354 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1356 return check_condition_result
;
1359 case 0x1: /* Read-Write error recovery page, direct access */
1360 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1363 case 0x2: /* Disconnect-Reconnect page, all devices */
1364 len
= resp_disconnect_pg(ap
, pcontrol
, target
);
1367 case 0x3: /* Format device page, direct access */
1368 len
= resp_format_pg(ap
, pcontrol
, target
);
1371 case 0x8: /* Caching page, direct access */
1372 len
= resp_caching_pg(ap
, pcontrol
, target
);
1375 case 0xa: /* Control Mode page, all devices */
1376 len
= resp_ctrl_m_pg(ap
, pcontrol
, target
);
1379 case 0x19: /* if spc==1 then sas phy, control+discover */
1380 if ((subpcode
> 0x2) && (subpcode
< 0xff)) {
1381 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1382 INVALID_FIELD_IN_CDB
, 0);
1383 return check_condition_result
;
1386 if ((0x0 == subpcode
) || (0xff == subpcode
))
1387 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1388 if ((0x1 == subpcode
) || (0xff == subpcode
))
1389 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
, target
,
1391 if ((0x2 == subpcode
) || (0xff == subpcode
))
1392 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1395 case 0x1c: /* Informational Exceptions Mode page, all devices */
1396 len
= resp_iec_m_pg(ap
, pcontrol
, target
);
1399 case 0x3f: /* Read all Mode pages */
1400 if ((0 == subpcode
) || (0xff == subpcode
)) {
1401 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1402 len
+= resp_disconnect_pg(ap
+ len
, pcontrol
, target
);
1403 len
+= resp_format_pg(ap
+ len
, pcontrol
, target
);
1404 len
+= resp_caching_pg(ap
+ len
, pcontrol
, target
);
1405 len
+= resp_ctrl_m_pg(ap
+ len
, pcontrol
, target
);
1406 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1407 if (0xff == subpcode
) {
1408 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
,
1409 target
, target_dev_id
);
1410 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1412 len
+= resp_iec_m_pg(ap
+ len
, pcontrol
, target
);
1414 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1415 INVALID_FIELD_IN_CDB
, 0);
1416 return check_condition_result
;
1421 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1423 return check_condition_result
;
1426 arr
[0] = offset
- 1;
1428 arr
[0] = ((offset
- 2) >> 8) & 0xff;
1429 arr
[1] = (offset
- 2) & 0xff;
1431 return fill_from_dev_buffer(scp
, arr
, min(alloc_len
, offset
));
1434 #define SDEBUG_MAX_MSELECT_SZ 512
1436 static int resp_mode_select(struct scsi_cmnd
* scp
, int mselect6
,
1437 struct sdebug_dev_info
* devip
)
1439 int pf
, sp
, ps
, md_len
, bd_len
, off
, spf
, pg_len
;
1440 int param_len
, res
, errsts
, mpage
;
1441 unsigned char arr
[SDEBUG_MAX_MSELECT_SZ
];
1442 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1444 if ((errsts
= check_readiness(scp
, 1, devip
)))
1446 memset(arr
, 0, sizeof(arr
));
1449 param_len
= mselect6
? cmd
[4] : ((cmd
[7] << 8) + cmd
[8]);
1450 if ((0 == pf
) || sp
|| (param_len
> SDEBUG_MAX_MSELECT_SZ
)) {
1451 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1452 INVALID_FIELD_IN_CDB
, 0);
1453 return check_condition_result
;
1455 res
= fetch_to_dev_buffer(scp
, arr
, param_len
);
1457 return (DID_ERROR
<< 16);
1458 else if ((res
< param_len
) &&
1459 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
1460 printk(KERN_INFO
"scsi_debug: mode_select: cdb indicated=%d, "
1461 " IO sent=%d bytes\n", param_len
, res
);
1462 md_len
= mselect6
? (arr
[0] + 1) : ((arr
[0] << 8) + arr
[1] + 2);
1463 bd_len
= mselect6
? arr
[3] : ((arr
[6] << 8) + arr
[7]);
1465 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1466 INVALID_FIELD_IN_PARAM_LIST
, 0);
1467 return check_condition_result
;
1469 off
= bd_len
+ (mselect6
? 4 : 8);
1470 mpage
= arr
[off
] & 0x3f;
1471 ps
= !!(arr
[off
] & 0x80);
1473 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1474 INVALID_FIELD_IN_PARAM_LIST
, 0);
1475 return check_condition_result
;
1477 spf
= !!(arr
[off
] & 0x40);
1478 pg_len
= spf
? ((arr
[off
+ 2] << 8) + arr
[off
+ 3] + 4) :
1480 if ((pg_len
+ off
) > param_len
) {
1481 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1482 PARAMETER_LIST_LENGTH_ERR
, 0);
1483 return check_condition_result
;
1486 case 0xa: /* Control Mode page */
1487 if (ctrl_m_pg
[1] == arr
[off
+ 1]) {
1488 memcpy(ctrl_m_pg
+ 2, arr
+ off
+ 2,
1489 sizeof(ctrl_m_pg
) - 2);
1490 scsi_debug_dsense
= !!(ctrl_m_pg
[2] & 0x4);
1494 case 0x1c: /* Informational Exceptions Mode page */
1495 if (iec_m_pg
[1] == arr
[off
+ 1]) {
1496 memcpy(iec_m_pg
+ 2, arr
+ off
+ 2,
1497 sizeof(iec_m_pg
) - 2);
1504 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1505 INVALID_FIELD_IN_PARAM_LIST
, 0);
1506 return check_condition_result
;
1509 static int resp_temp_l_pg(unsigned char * arr
)
1511 unsigned char temp_l_pg
[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1512 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1515 memcpy(arr
, temp_l_pg
, sizeof(temp_l_pg
));
1516 return sizeof(temp_l_pg
);
1519 static int resp_ie_l_pg(unsigned char * arr
)
1521 unsigned char ie_l_pg
[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1524 memcpy(arr
, ie_l_pg
, sizeof(ie_l_pg
));
1525 if (iec_m_pg
[2] & 0x4) { /* TEST bit set */
1526 arr
[4] = THRESHOLD_EXCEEDED
;
1529 return sizeof(ie_l_pg
);
1532 #define SDEBUG_MAX_LSENSE_SZ 512
1534 static int resp_log_sense(struct scsi_cmnd
* scp
,
1535 struct sdebug_dev_info
* devip
)
1537 int ppc
, sp
, pcontrol
, pcode
, subpcode
, alloc_len
, errsts
, len
, n
;
1538 unsigned char arr
[SDEBUG_MAX_LSENSE_SZ
];
1539 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1541 if ((errsts
= check_readiness(scp
, 1, devip
)))
1543 memset(arr
, 0, sizeof(arr
));
1547 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1548 INVALID_FIELD_IN_CDB
, 0);
1549 return check_condition_result
;
1551 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1552 pcode
= cmd
[2] & 0x3f;
1553 subpcode
= cmd
[3] & 0xff;
1554 alloc_len
= (cmd
[7] << 8) + cmd
[8];
1556 if (0 == subpcode
) {
1558 case 0x0: /* Supported log pages log page */
1560 arr
[n
++] = 0x0; /* this page */
1561 arr
[n
++] = 0xd; /* Temperature */
1562 arr
[n
++] = 0x2f; /* Informational exceptions */
1565 case 0xd: /* Temperature log page */
1566 arr
[3] = resp_temp_l_pg(arr
+ 4);
1568 case 0x2f: /* Informational exceptions log page */
1569 arr
[3] = resp_ie_l_pg(arr
+ 4);
1572 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1573 INVALID_FIELD_IN_CDB
, 0);
1574 return check_condition_result
;
1576 } else if (0xff == subpcode
) {
1580 case 0x0: /* Supported log pages and subpages log page */
1583 arr
[n
++] = 0x0; /* 0,0 page */
1585 arr
[n
++] = 0xff; /* this page */
1587 arr
[n
++] = 0x0; /* Temperature */
1589 arr
[n
++] = 0x0; /* Informational exceptions */
1592 case 0xd: /* Temperature subpages */
1595 arr
[n
++] = 0x0; /* Temperature */
1598 case 0x2f: /* Informational exceptions subpages */
1601 arr
[n
++] = 0x0; /* Informational exceptions */
1605 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1606 INVALID_FIELD_IN_CDB
, 0);
1607 return check_condition_result
;
1610 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1611 INVALID_FIELD_IN_CDB
, 0);
1612 return check_condition_result
;
1614 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
1615 return fill_from_dev_buffer(scp
, arr
,
1616 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
1619 static int check_device_access_params(struct sdebug_dev_info
*devi
,
1620 unsigned long long lba
, unsigned int num
)
1622 if (lba
+ num
> sdebug_capacity
) {
1623 mk_sense_buffer(devi
, ILLEGAL_REQUEST
, ADDR_OUT_OF_RANGE
, 0);
1624 return check_condition_result
;
1626 /* transfer length excessive (tie in to block limits VPD page) */
1627 if (num
> sdebug_store_sectors
) {
1628 mk_sense_buffer(devi
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
1629 return check_condition_result
;
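
/* do_device_access() moves data between the command's scatter-gather list and
 * the shared ramdisk (fake_storep).  The lba is first reduced modulo
 * sdebug_store_sectors, so an access that runs off the end of the store wraps
 * around to the start; that tail is what the second func() call handles. */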
1634 static int do_device_access(struct scsi_cmnd
*scmd
,
1635 struct sdebug_dev_info
*devi
,
1636 unsigned long long lba
, unsigned int num
, int write
)
1639 unsigned int block
, rest
= 0;
1640 int (*func
)(struct scsi_cmnd
*, unsigned char *, int);
1642 func
= write
? fetch_to_dev_buffer
: fill_from_dev_buffer
;
1644 block
= do_div(lba
, sdebug_store_sectors
);
1645 if (block
+ num
> sdebug_store_sectors
)
1646 rest
= block
+ num
- sdebug_store_sectors
;
1648 ret
= func(scmd
, fake_storep
+ (block
* scsi_debug_sector_size
),
1649 (num
- rest
) * scsi_debug_sector_size
);
1651 ret
= func(scmd
, fake_storep
, rest
* scsi_debug_sector_size
);
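
/* prot_verify_read() checks the T10 DIF tuples kept in dif_storep against the
 * data in fake_storep before the protection information is copied into the
 * command's protection scatter list.  The GUARD tag is recomputed with either
 * an IP checksum or crc_t10dif() depending on scsi_debug_guard; the REF tag
 * is compared with the LBA (type 1 protection) or with the expected initial
 * LBA, ei_lba (type 2). */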
1656 static int prot_verify_read(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
1657 unsigned int sectors
, u32 ei_lba
)
1659 unsigned int i
, resid
;
1660 struct scatterlist
*psgl
;
1661 struct sd_dif_tuple
*sdt
;
1663 sector_t tmp_sec
= start_sec
;
1666 start_sec
= do_div(tmp_sec
, sdebug_store_sectors
);
1668 sdt
= (struct sd_dif_tuple
*)(dif_storep
+ dif_offset(start_sec
));
1670 for (i
= 0 ; i
< sectors
; i
++) {
1673 if (sdt
[i
].app_tag
== 0xffff)
1676 sector
= start_sec
+ i
;
1678 switch (scsi_debug_guard
) {
1680 csum
= ip_compute_csum(fake_storep
+
1681 sector
* scsi_debug_sector_size
,
1682 scsi_debug_sector_size
);
1685 csum
= crc_t10dif(fake_storep
+
1686 sector
* scsi_debug_sector_size
,
1687 scsi_debug_sector_size
);
1688 csum
= cpu_to_be16(csum
);
1694 if (sdt
[i
].guard_tag
!= csum
) {
1695 printk(KERN_ERR
"%s: GUARD check failed on sector %lu" \
1696 " rcvd 0x%04x, data 0x%04x\n", __func__
,
1697 (unsigned long)sector
,
1698 be16_to_cpu(sdt
[i
].guard_tag
),
1704 if (scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
&&
1705 be32_to_cpu(sdt
[i
].ref_tag
) != (sector
& 0xffffffff)) {
1706 printk(KERN_ERR
"%s: REF check failed on sector %lu\n",
1707 __func__
, (unsigned long)sector
);
1712 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
1713 be32_to_cpu(sdt
[i
].ref_tag
) != ei_lba
) {
1714 printk(KERN_ERR
"%s: REF check failed on sector %lu\n",
1715 __func__
, (unsigned long)sector
);
1723 resid
= sectors
* 8; /* Bytes of protection data to copy into sgl */
1726 scsi_for_each_prot_sg(SCpnt
, psgl
, scsi_prot_sg_count(SCpnt
), i
) {
1727 int len
= min(psgl
->length
, resid
);
1729 paddr
= kmap_atomic(sg_page(psgl
), KM_IRQ0
) + psgl
->offset
;
1730 memcpy(paddr
, dif_storep
+ dif_offset(sector
), len
);
1733 if (sector
>= sdebug_store_sectors
) {
1736 sector
= do_div(tmp_sec
, sdebug_store_sectors
);
1739 kunmap_atomic(paddr
, KM_IRQ0
);
1747 static int resp_read(struct scsi_cmnd
*SCpnt
, unsigned long long lba
,
1748 unsigned int num
, struct sdebug_dev_info
*devip
,
1751 unsigned long iflags
;
1754 ret
= check_device_access_params(devip
, lba
, num
);
1758 if ((SCSI_DEBUG_OPT_MEDIUM_ERR
& scsi_debug_opts
) &&
1759 (lba
<= OPT_MEDIUM_ERR_ADDR
) &&
1760 ((lba
+ num
) > OPT_MEDIUM_ERR_ADDR
)) {
1761 /* claim unrecoverable read error */
1762 mk_sense_buffer(devip
, MEDIUM_ERROR
, UNRECOVERED_READ_ERR
,
1764 /* set info field and valid bit for fixed descriptor */
1765 if (0x70 == (devip
->sense_buff
[0] & 0x7f)) {
1766 devip
->sense_buff
[0] |= 0x80; /* Valid bit */
1767 ret
= OPT_MEDIUM_ERR_ADDR
;
1768 devip
->sense_buff
[3] = (ret
>> 24) & 0xff;
1769 devip
->sense_buff
[4] = (ret
>> 16) & 0xff;
1770 devip
->sense_buff
[5] = (ret
>> 8) & 0xff;
1771 devip
->sense_buff
[6] = ret
& 0xff;
1773 return check_condition_result
;
1777 if (scsi_debug_dix
&& scsi_prot_sg_count(SCpnt
)) {
1778 int prot_ret
= prot_verify_read(SCpnt
, lba
, num
, ei_lba
);
1781 mk_sense_buffer(devip
, ABORTED_COMMAND
, 0x10, prot_ret
);
1782 return illegal_condition_result
;
1786 read_lock_irqsave(&atomic_rw
, iflags
);
1787 ret
= do_device_access(SCpnt
, devip
, lba
, num
, 0);
1788 read_unlock_irqrestore(&atomic_rw
, iflags
);
1792 void dump_sector(unsigned char *buf
, int len
)
1796 printk(KERN_ERR
">>> Sector Dump <<<\n");
1798 for (i
= 0 ; i
< len
; i
+= 16) {
1799 printk(KERN_ERR
"%04d: ", i
);
1801 for (j
= 0 ; j
< 16 ; j
++) {
1802 unsigned char c
= buf
[i
+j
];
1803 if (c
>= 0x20 && c
< 0x7e)
1804 printk(" %c ", buf
[i
+j
]);
1806 printk("%02x ", buf
[i
+j
]);
1813 static int prot_verify_write(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
1814 unsigned int sectors
, u32 ei_lba
)
1817 struct sd_dif_tuple
*sdt
;
1818 struct scatterlist
*dsgl
= scsi_sglist(SCpnt
);
1819 struct scatterlist
*psgl
= scsi_prot_sglist(SCpnt
);
1820 void *daddr
, *paddr
;
1821 sector_t tmp_sec
= start_sec
;
1824 unsigned short csum
;
1826 sector
= do_div(tmp_sec
, sdebug_store_sectors
);
1828 BUG_ON(scsi_sg_count(SCpnt
) == 0);
1829 BUG_ON(scsi_prot_sg_count(SCpnt
) == 0);
1831 paddr
= kmap_atomic(sg_page(psgl
), KM_IRQ1
) + psgl
->offset
;
1834 /* For each data page */
1835 scsi_for_each_sg(SCpnt
, dsgl
, scsi_sg_count(SCpnt
), i
) {
1836 daddr
= kmap_atomic(sg_page(dsgl
), KM_IRQ0
) + dsgl
->offset
;
1838 /* For each sector-sized chunk in data page */
1839 for (j
= 0 ; j
< dsgl
->length
; j
+= scsi_debug_sector_size
) {
1841 /* If we're at the end of the current
1842 * protection page advance to the next one
1844 if (ppage_offset
>= psgl
->length
) {
1845 kunmap_atomic(paddr
, KM_IRQ1
);
1846 psgl
= sg_next(psgl
);
1847 BUG_ON(psgl
== NULL
);
1848 paddr
= kmap_atomic(sg_page(psgl
), KM_IRQ1
)
1853 sdt
= paddr
+ ppage_offset
;
1855 switch (scsi_debug_guard
) {
1857 csum
= ip_compute_csum(daddr
,
1858 scsi_debug_sector_size
);
1861 csum
= cpu_to_be16(crc_t10dif(daddr
,
1862 scsi_debug_sector_size
));
1870 if (sdt
->guard_tag
!= csum
) {
1872 "%s: GUARD check failed on sector %lu " \
1873 "rcvd 0x%04x, calculated 0x%04x\n",
1874 __func__
, (unsigned long)sector
,
1875 be16_to_cpu(sdt
->guard_tag
),
1878 dump_sector(daddr
, scsi_debug_sector_size
);
1882 if (scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
&&
1883 be32_to_cpu(sdt
->ref_tag
)
1884 != (start_sec
& 0xffffffff)) {
1886 "%s: REF check failed on sector %lu\n",
1887 __func__
, (unsigned long)sector
);
1889 dump_sector(daddr
, scsi_debug_sector_size
);
1893 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
1894 be32_to_cpu(sdt
->ref_tag
) != ei_lba
) {
1896 "%s: REF check failed on sector %lu\n",
1897 __func__
, (unsigned long)sector
);
1899 dump_sector(daddr
, scsi_debug_sector_size
);
1903 /* Would be great to copy this in bigger
1904 * chunks. However, for the sake of
1905 * correctness we need to verify each sector
1906 * before writing it to "stable" storage
1908 memcpy(dif_storep
+ dif_offset(sector
), sdt
, 8);
1912 if (sector
== sdebug_store_sectors
)
1913 sector
= 0; /* Force wrap */
1917 daddr
+= scsi_debug_sector_size
;
1918 ppage_offset
+= sizeof(struct sd_dif_tuple
);
1921 kunmap_atomic(daddr
, KM_IRQ0
);
1924 kunmap_atomic(paddr
, KM_IRQ1
);
1932 kunmap_atomic(daddr
, KM_IRQ0
);
1933 kunmap_atomic(paddr
, KM_IRQ1
);
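
/* Thin provisioning helpers: map_storep is a bitmap with one bit per unmap
 * granularity unit.  map_state() reports whether the unit holding lba is
 * mapped and, via *num, how many following blocks share that state;
 * map_region() and unmap_region() set and clear the corresponding bits as
 * blocks are written or unmapped. */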
1937 static unsigned int map_state(sector_t lba
, unsigned int *num
)
1939 unsigned int granularity
, alignment
, mapped
;
1940 sector_t block
, next
, end
;
1942 granularity
= scsi_debug_unmap_granularity
;
1943 alignment
= granularity
- scsi_debug_unmap_alignment
;
1944 block
= lba
+ alignment
;
1945 do_div(block
, granularity
);
1947 mapped
= test_bit(block
, map_storep
);
1950 next
= find_next_zero_bit(map_storep
, map_size
, block
);
1952 next
= find_next_bit(map_storep
, map_size
, block
);
1954 end
= next
* granularity
- scsi_debug_unmap_alignment
;
1960 static void map_region(sector_t lba
, unsigned int len
)
1962 unsigned int granularity
, alignment
;
1963 sector_t end
= lba
+ len
;
1965 granularity
= scsi_debug_unmap_granularity
;
1966 alignment
= granularity
- scsi_debug_unmap_alignment
;
1969 sector_t block
, rem
;
1971 block
= lba
+ alignment
;
1972 rem
= do_div(block
, granularity
);
1974 set_bit(block
, map_storep
);
1976 lba
+= granularity
- rem
;
1980 static void unmap_region(sector_t lba
, unsigned int len
)
1982 unsigned int granularity
, alignment
;
1983 sector_t end
= lba
+ len
;
1985 granularity
= scsi_debug_unmap_granularity
;
1986 alignment
= granularity
- scsi_debug_unmap_alignment
;
1989 sector_t block
, rem
;
1991 block
= lba
+ alignment
;
1992 rem
= do_div(block
, granularity
);
1994 if (rem
== 0 && lba
+ granularity
<= end
)
1995 clear_bit(block
, map_storep
);
1997 lba
+= granularity
- rem
;
static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
		      unsigned int num, struct sdebug_dev_info *devip,
		      u32 ei_lba)
{
	unsigned long iflags;
	int ret;

	ret = check_device_access_params(devip, lba, num);
	if (ret)
		return ret;

	/* DIX + T10 DIF */
	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);

		if (prot_ret) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
			return illegal_condition_result;
		}
	}

	write_lock_irqsave(&atomic_rw, iflags);
	ret = do_device_access(SCpnt, devip, lba, num, 1);
	if (scsi_debug_unmap_granularity)
		map_region(lba, num);
	write_unlock_irqrestore(&atomic_rw, iflags);
	if (-1 == ret)
		return (DID_ERROR << 16);
	else if ((ret < (num * scsi_debug_sector_size)) &&
		 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);

	return 0;
}

static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
			   unsigned int num, struct sdebug_dev_info *devip,
			   u32 ei_lba, unsigned int unmap)
{
	unsigned long iflags;
	unsigned long long i;
	int ret;

	ret = check_device_access_params(devip, lba, num);
	if (ret)
		return ret;

	write_lock_irqsave(&atomic_rw, iflags);

	if (unmap && scsi_debug_unmap_granularity) {
		unmap_region(lba, num);
		goto out;
	}

	/* Else fetch one logical block */
	ret = fetch_to_dev_buffer(scmd,
				  fake_storep + (lba * scsi_debug_sector_size),
				  scsi_debug_sector_size);

	if (-1 == ret) {
		write_unlock_irqrestore(&atomic_rw, iflags);
		return (DID_ERROR << 16);
	} else if ((ret < (num * scsi_debug_sector_size)) &&
		   (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
		       " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++)
		memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
		       fake_storep + (lba * scsi_debug_sector_size),
		       scsi_debug_sector_size);

	if (scsi_debug_unmap_granularity)
		map_region(lba, num);
out:
	write_unlock_irqrestore(&atomic_rw, iflags);

	return 0;
}
struct unmap_block_desc {
	__be64	lba;
	__be32	blocks;
	__be32	__reserved;
};

static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
{
	unsigned char *buf;
	struct unmap_block_desc *desc;
	unsigned int i, payload_len, descriptors;
	int ret;

	ret = check_readiness(scmd, 1, devip);
	if (ret)
		return ret;

	payload_len = get_unaligned_be16(&scmd->cmnd[7]);
	BUG_ON(scsi_bufflen(scmd) != payload_len);

	descriptors = (payload_len - 8) / 16;

	buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf)
		return check_condition_result;

	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));

	BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
	BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);

	desc = (void *)&buf[8];

	for (i = 0 ; i < descriptors ; i++) {
		unsigned long long lba = get_unaligned_be64(&desc[i].lba);
		unsigned int num = get_unaligned_be32(&desc[i].blocks);

		ret = check_device_access_params(devip, lba, num);
		if (ret)
			goto out;

		unmap_region(lba, num);
	}

	ret = 0;

out:
	kfree(buf);

	return ret;
}
#define SDEBUG_GET_LBA_STATUS_LEN 32

static int resp_get_lba_status(struct scsi_cmnd * scmd,
			       struct sdebug_dev_info * devip)
{
	unsigned long long lba;
	unsigned int alloc_len, mapped, num;
	unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
	int ret;

	ret = check_readiness(scmd, 1, devip);
	if (ret)
		return ret;

	lba = get_unaligned_be64(&scmd->cmnd[2]);
	alloc_len = get_unaligned_be32(&scmd->cmnd[10]);

	ret = check_device_access_params(devip, lba, 1);
	if (ret)
		return ret;

	mapped = map_state(lba, &num);

	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
	put_unaligned_be32(16, &arr[0]);	/* Parameter Data Length */
	put_unaligned_be64(lba, &arr[8]);	/* LBA */
	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
	arr[20] = !mapped;			/* mapped = 0, unmapped = 1 */

	return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
#define SDEBUG_RLUN_ARR_SZ 256

static int resp_report_luns(struct scsi_cmnd * scp,
			    struct sdebug_dev_info * devip)
{
	unsigned int alloc_len;
	int lun_cnt, i, upper, num, n, wlun, lun;
	unsigned char *cmd = (unsigned char *)scp->cmnd;
	int select_report = (int)cmd[2];
	struct scsi_lun *one_lun;
	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
	unsigned char * max_addr;

	alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
	if ((alloc_len < 4) || (select_report > 2)) {
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
				0);
		return check_condition_result;
	}
	/* can produce response with up to 16k luns (lun 0 to lun 16383) */
	memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
	lun_cnt = scsi_debug_max_luns;
	if (1 == select_report)
		lun_cnt = 0;
	else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;
	wlun = (select_report > 0) ? 1 : 0;
	num = lun_cnt + wlun;
	arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
	arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
	n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
		      sizeof(struct scsi_lun)), num);
	if (n < num) {
		wlun = 0;
		lun_cnt = n;
	}
	one_lun = (struct scsi_lun *) &arr[8];
	max_addr = arr + SDEBUG_RLUN_ARR_SZ;
	for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
	     ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
	     i++, lun++) {
		upper = (lun >> 8) & 0x3f;
		if (upper)
			one_lun[i].scsi_lun[0] =
			    (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
		one_lun[i].scsi_lun[1] = lun & 0xff;
	}
	if (wlun) {
		one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
		one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
		i++;
	}
	alloc_len = (unsigned char *)(one_lun + i) - arr;
	return fill_from_dev_buffer(scp, arr,
				    min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
}
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
			    unsigned int num, struct sdebug_dev_info *devip)
{
	int i, j, ret = -1;
	unsigned char *kaddr, *buf;
	unsigned int offset;
	struct scatterlist *sg;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	/* better not to use temporary buffer. */
	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
	if (!buf)
		return ret;

	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));

	offset = 0;
	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
		kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
		if (!kaddr)
			goto out;

		for (j = 0; j < sg->length; j++)
			*(kaddr + sg->offset + j) ^= *(buf + offset + j);

		offset += sg->length;
		kunmap_atomic(kaddr, KM_USER0);
	}
	ret = 0;
out:
	kfree(buf);

	return ret;
}
/* When timer goes off this function is called. */
static void timer_intr_handler(unsigned long indx)
{
	struct sdebug_queued_cmd * sqcp;
	unsigned long iflags;

	if (indx >= SCSI_DEBUG_CANQUEUE) {
		printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
		       "large\n");
		return;
	}
	spin_lock_irqsave(&queued_arr_lock, iflags);
	sqcp = &queued_arr[(int)indx];
	if (! sqcp->in_use) {
		printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
		       "interrupt\n");
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		return;
	}
	sqcp->in_use = 0;
	if (sqcp->done_funct) {
		sqcp->a_cmnd->result = sqcp->scsi_result;
		sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
	}
	sqcp->done_funct = NULL;
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
static struct sdebug_dev_info *
sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
{
	struct sdebug_dev_info *devip;

	devip = kzalloc(sizeof(*devip), flags);
	if (devip) {
		devip->sdbg_host = sdbg_host;
		list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
	}
	return devip;
}

static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
{
	struct sdebug_host_info * sdbg_host;
	struct sdebug_dev_info * open_devip = NULL;
	struct sdebug_dev_info * devip =
			(struct sdebug_dev_info *)sdev->hostdata;

	if (devip)
		return devip;
	sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
	if (!sdbg_host) {
		printk(KERN_ERR "Host info NULL\n");
		return NULL;
	}
	list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
		if ((devip->used) && (devip->channel == sdev->channel) &&
		    (devip->target == sdev->id) &&
		    (devip->lun == sdev->lun))
			return devip;
		else {
			if ((!devip->used) && (!open_devip))
				open_devip = devip;
		}
	}
	if (!open_devip) { /* try and make a new one */
		open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
		if (!open_devip) {
			printk(KERN_ERR "%s: out of memory at line %d\n",
			       __func__, __LINE__);
			return NULL;
		}
	}

	open_devip->channel = sdev->channel;
	open_devip->target = sdev->id;
	open_devip->lun = sdev->lun;
	open_devip->sdbg_host = sdbg_host;
	open_devip->reset = 1;
	open_devip->used = 1;
	memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
	if (scsi_debug_dsense)
		open_devip->sense_buff[0] = 0x72;
	else {
		open_devip->sense_buff[0] = 0x70;
		open_devip->sense_buff[7] = 0xa;
	}
	if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
		open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;

	return open_devip;
}
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
	return 0;
}

static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
		sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
	devip = devInfoReg(sdp);
	if (NULL == devip)
		return 1;	/* no resources, will be marked offline */
	sdp->hostdata = devip;
	if (sdp->host->cmd_per_lun)
		scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
					sdp->host->cmd_per_lun);
	blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
	return 0;
}

static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
	struct sdebug_dev_info *devip =
		(struct sdebug_dev_info *)sdp->hostdata;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
	if (devip) {
		/* make this slot available for re-use */
		devip->used = 0;
		sdp->hostdata = NULL;
	}
}
/* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
		sqcp = &queued_arr[k];
		if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
			del_timer_sync(&sqcp->cmnd_timer);
			sqcp->in_use = 0;
			sqcp->a_cmnd = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
	return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0;
}

/* Deletes (stops) timers of all queued commands */
static void stop_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
		sqcp = &queued_arr[k];
		if (sqcp->in_use && sqcp->a_cmnd) {
			del_timer_sync(&sqcp->cmnd_timer);
			sqcp->in_use = 0;
			sqcp->a_cmnd = NULL;
		}
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}

static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: abort\n");
	++num_aborts;
	stop_queued_cmnd(SCpnt);
	return SUCCESS;
}
static int scsi_debug_biosparam(struct scsi_device *sdev,
		struct block_device * bdev, sector_t capacity, int *info)
{
	int res;
	unsigned char *buf;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: biosparam\n");
	buf = scsi_bios_ptable(bdev);
	if (buf) {
		res = scsi_partsize(buf, capacity,
				    &info[2], &info[0], &info[1]);
		kfree(buf);
		if (! res)
			return res;
	}
	info[0] = sdebug_heads;
	info[1] = sdebug_sectors_per;
	info[2] = sdebug_cylinders_per;
	return 0;
}

static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_dev_info * devip;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: device_reset\n");
	++num_dev_resets;
	if (SCpnt) {
		devip = devInfoReg(SCpnt->device);
		if (devip)
			devip->reset = 1;
	}
	return SUCCESS;
}

static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info * dev_info;
	struct scsi_device * sdp;
	struct Scsi_Host * hp;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: bus_reset\n");
	++num_bus_resets;
	if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
		sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
		if (sdbg_host) {
			list_for_each_entry(dev_info,
					    &sdbg_host->dev_info_list,
					    dev_list)
				dev_info->reset = 1;
		}
	}
	return SUCCESS;
}

static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
{
	struct sdebug_host_info * sdbg_host;
	struct sdebug_dev_info * dev_info;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: host_reset\n");
	++num_host_resets;
	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
				    dev_list)
			dev_info->reset = 1;
	}
	spin_unlock(&sdebug_host_list_lock);
	stop_all_queued();
	return SUCCESS;
}
/* Initializes timers in queued array */
static void __init init_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd * sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
		sqcp = &queued_arr[k];
		init_timer(&sqcp->cmnd_timer);
		sqcp->in_use = 0;
		sqcp->a_cmnd = NULL;
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}

static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition * pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
		return;
	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
		       "partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / scsi_debug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	for (k = 1; k < scsi_debug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[scsi_debug_num_parts] = num_sectors;
	starts[scsi_debug_num_parts + 1] = 0;

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;

		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		pp->start_sect = start_sec;
		pp->nr_sects = end_sec - start_sec + 1;
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
static int schedule_resp(struct scsi_cmnd * cmnd,
			 struct sdebug_dev_info * devip,
			 done_funct_t done, int scsi_result, int delta_jiff)
{
	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
		if (scsi_result) {
			struct scsi_device * sdp = cmnd->device;

			printk(KERN_INFO "scsi_debug:    <%u %u %u %u> "
			       "non-zero result=0x%x\n", sdp->host->host_no,
			       sdp->channel, sdp->id, sdp->lun, scsi_result);
		}
	}
	if (cmnd && devip) {
		/* simulate autosense by this driver */
		if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
			memcpy(cmnd->sense_buffer, devip->sense_buff,
			       (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
			       SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
	}
	if (delta_jiff <= 0) {
		if (cmnd)
			cmnd->result = scsi_result;
		if (done)
			done(cmnd);
		return 0;
	} else {
		unsigned long iflags;
		int k;
		struct sdebug_queued_cmd * sqcp = NULL;

		spin_lock_irqsave(&queued_arr_lock, iflags);
		for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
			sqcp = &queued_arr[k];
			if (! sqcp->in_use)
				break;
		}
		if (k >= SCSI_DEBUG_CANQUEUE) {
			spin_unlock_irqrestore(&queued_arr_lock, iflags);
			printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
			return 1;	/* report busy to mid level */
		}
		sqcp->in_use = 1;
		sqcp->a_cmnd = cmnd;
		sqcp->scsi_result = scsi_result;
		sqcp->done_funct = done;
		sqcp->cmnd_timer.function = timer_intr_handler;
		sqcp->cmnd_timer.data = k;
		sqcp->cmnd_timer.expires = jiffies + delta_jiff;
		add_timer(&sqcp->cmnd_timer);
		spin_unlock_irqrestore(&queued_arr_lock, iflags);
		return 0;
	}
}
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
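/* Illustrative note (added for clarity; not part of the original driver):
 * a read/write parameter such as "opts" declared below can therefore be
 * inspected or overwritten directly, e.g.
 *     cat /sys/module/scsi_debug/parameters/opts
 *     echo 1 > /sys/module/scsi_debug/parameters/opts
 * but, as explained above, writes through this path update the variable
 * only; no side effect (such as resetting the command counter) runs.
 */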
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SCSI_DEBUG_VERSION);

MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
static char sdebug_info[256];

static const char * scsi_debug_info(struct Scsi_Host * shp)
{
	sprintf(sdebug_info, "scsi_debug, version %s [%s], "
		"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
		scsi_debug_version_date, scsi_debug_dev_size_mb,
		scsi_debug_opts);
	return sdebug_info;
}

/* scsi_debug_proc_info
 * Used if the driver currently has no own support for /proc/scsi
 */
static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
				int length, int inout)
{
	int len, pos, begin;
	int orig_length;

	orig_length = length;

	if (inout) {
		char arr[16];
		int minLen = length > 15 ? 15 : length;

		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		memcpy(arr, buffer, minLen);
		arr[minLen] = '\0';
		if (1 != sscanf(arr, "%d", &pos))
			return -EINVAL;
		scsi_debug_opts = pos;
		if (scsi_debug_every_nth != 0)
			scsi_debug_cmnd_count = 0;
		return length;
	}
	begin = 0;
	pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
	    "%s [%s]\n"
	    "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
	    "every_nth=%d(curr:%d)\n"
	    "delay=%d, max_luns=%d, scsi_level=%d\n"
	    "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
	    "number of aborts=%d, device_reset=%d, bus_resets=%d, "
	    "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
	    SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
	    scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
	    scsi_debug_cmnd_count, scsi_debug_delay,
	    scsi_debug_max_luns, scsi_debug_scsi_level,
	    scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
	    sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
	    num_host_resets, dix_reads, dix_writes, dif_errors);
	if (pos < offset) {
		len = 0;
		begin = pos;
	}
	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if (len > length)
		len = length;
	return len;
}
static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
}

static ssize_t sdebug_delay_store(struct device_driver * ddp,
				  const char * buf, size_t count)
{
	int delay;
	char work[20];

	if (1 == sscanf(buf, "%10s", work)) {
		if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
			scsi_debug_delay = delay;
			return count;
		}
	}
	return -EINVAL;
}
DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
	    sdebug_delay_store);

static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
}

static ssize_t sdebug_opts_store(struct device_driver * ddp,
				 const char * buf, size_t count)
{
	int opts;
	char work[20];

	if (1 == sscanf(buf, "%10s", work)) {
		if (0 == strnicmp(work, "0x", 2)) {
			if (1 == sscanf(&work[2], "%x", &opts))
				goto opts_done;
		} else {
			if (1 == sscanf(work, "%d", &opts))
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	scsi_debug_opts = opts;
	scsi_debug_cmnd_count = 0;
	return count;
}
DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
	    sdebug_opts_store);

static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
}

static ssize_t sdebug_ptype_store(struct device_driver * ddp,
				  const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_ptype = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);

static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
}

static ssize_t sdebug_dsense_store(struct device_driver * ddp,
				   const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_dsense = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
	    sdebug_dsense_store);

static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
}

static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
				    const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
	    sdebug_fake_rw_store);

static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
}

static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_no_lun_0 = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
	    sdebug_no_lun_0_store);
static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
}

static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_num_tgts = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
	    sdebug_num_tgts_store);

static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
}
DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);

static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
}
DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);

static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
}

static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
				      const char * buf, size_t count)
{
	int nth;

	if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
		scsi_debug_every_nth = nth;
		scsi_debug_cmnd_count = 0;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
	    sdebug_every_nth_store);

static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
}

static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_max_luns = n;
		sdebug_max_tgts_luns();
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
	    sdebug_max_luns_store);

static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
}
DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);

static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
}

static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
				       const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_virtual_gb = n;

		sdebug_capacity = get_sdebug_capacity();

		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
	    sdebug_virtual_gb_store);

static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
}

static ssize_t sdebug_add_host_store(struct device_driver * ddp,
				     const char * buf, size_t count)
{
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			sdebug_add_adapter();
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_remove_adapter();
		} while (++delta_hosts);
	}
	return count;
}
DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
	    sdebug_add_host_store);
static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
					  char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
}

static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
					   const char * buf, size_t count)
{
	int n;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		scsi_debug_vpd_use_hostno = n;
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
	    sdebug_vpd_use_hostno_store);

static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
}
DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);

static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
}
DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);

static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
}
DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);

static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
}
DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);

static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
}
DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);

static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count;

	if (scsi_debug_unmap_granularity == 0)
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);

	buf[count++] = '\n';
	buf[count++] = 0;

	return count;
}
DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
/* Note: The following function creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: sdebug_add_host_store() above.
 */
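/* Illustrative note (added for clarity; not part of the original driver):
 * writing to one of these attributes runs the matching _store() routine,
 * so for example
 *     echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 * ends up in sdebug_add_host_store() and adds another simulated adapter,
 * which the parameters directory mentioned above cannot do.
 */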
static int do_create_driverfs_files(void)
{
	int ret;

	ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
	return ret;
}
static void do_remove_driverfs_files(void)
{
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
}
static void pseudo_0_release(struct device *dev)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
}

static struct device pseudo_primary = {
	.init_name	= "pseudo_0",
	.release	= pseudo_0_release,
};
static int __init scsi_debug_init(void)
{
	unsigned long sz;
	int host_to_add;
	int k;
	int ret;

	switch (scsi_debug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
		       scsi_debug_sector_size);
		return -EINVAL;
	}

	switch (scsi_debug_dif) {

	case SD_DIF_TYPE0_PROTECTION:
	case SD_DIF_TYPE1_PROTECTION:
	case SD_DIF_TYPE2_PROTECTION:
	case SD_DIF_TYPE3_PROTECTION:
		break;

	default:
		printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (scsi_debug_guard > 1) {
		printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (scsi_debug_ato > 1) {
		printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (scsi_debug_physblk_exp > 15) {
		printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
		       scsi_debug_physblk_exp);
		return -EINVAL;
	}

	if (scsi_debug_lowest_aligned > 0x3fff) {
		printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
		       scsi_debug_lowest_aligned);
		return -EINVAL;
	}

	if (scsi_debug_dev_size_mb < 1)
		scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / scsi_debug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (scsi_debug_dev_size_mb >= 16)
		sdebug_heads = 32;
	else if (scsi_debug_dev_size_mb >= 256)
		sdebug_heads = 64;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
				       (sdebug_sectors_per * sdebug_heads);
	}

	fake_storep = vmalloc(sz);
	if (NULL == fake_storep) {
		printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
		return -ENOMEM;
	}
	memset(fake_storep, 0, sz);
	if (scsi_debug_num_parts > 0)
		sdebug_build_parts(fake_storep, sz);

	if (scsi_debug_dif) {
		int dif_size;

		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
		dif_storep = vmalloc(dif_size);

		printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
		       dif_size, dif_storep);

		if (dif_storep == NULL) {
			printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(dif_storep, 0xff, dif_size);
	}

	if (scsi_debug_unmap_granularity) {
		unsigned int map_bytes;

		if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
			printk(KERN_ERR
			       "%s: ERR: unmap_granularity < unmap_alignment\n",
			       __func__);
			return -EINVAL;
		}

		map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
		map_bytes = map_size >> 3;
		map_storep = vmalloc(map_bytes);

		printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
		       map_size);

		if (map_storep == NULL) {
			printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
			ret = -ENOMEM;
			goto free_vm;
		}

		memset(map_storep, 0x0, map_bytes);

		/* Map first 1KB for partition table */
		if (scsi_debug_num_parts)
			map_region(0, 2);
	}

	ret = device_register(&pseudo_primary);
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
		       ret);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
		       ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
		       ret);
		goto bus_unreg;
	}
	ret = do_create_driverfs_files();
	if (ret < 0) {
		printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
		       ret);
		goto del_files;
	}

	init_all_queued();

	host_to_add = scsi_debug_add_host;
	scsi_debug_add_host = 0;

	for (k = 0; k < host_to_add; k++) {
		if (sdebug_add_adapter()) {
			printk(KERN_ERR "scsi_debug_init: "
			       "sdebug_add_adapter failed k=%d\n", k);
			break;
		}
	}

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
		printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
		       scsi_debug_add_host);
	}
	return 0;

del_files:
	do_remove_driverfs_files();
	driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	device_unregister(&pseudo_primary);
free_vm:
	vfree(map_storep);
	vfree(dif_storep);
	vfree(fake_storep);

	return ret;
}
static void __exit scsi_debug_exit(void)
{
	int k = scsi_debug_add_host;

	stop_all_queued();
	for (; k; k--)
		sdebug_remove_adapter();
	do_remove_driverfs_files();
	driver_unregister(&sdebug_driverfs_driver);
	bus_unregister(&pseudo_lld_bus);
	device_unregister(&pseudo_primary);

	vfree(dif_storep);
	vfree(fake_storep);
}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

static void sdebug_release_adapter(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
static int sdebug_add_adapter(void)
{
	int k, devs_per_host;
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
	if (NULL == sdbg_host) {
		printk(KERN_ERR "%s: out of memory at line %d\n",
		       __func__, __LINE__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo) {
			printk(KERN_ERR "%s: out of memory at line %d\n",
			       __func__, __LINE__);
			error = -ENOMEM;
			goto clean;
		}
	}

	spin_lock(&sdebug_host_list_lock);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	spin_unlock(&sdebug_host_list_lock);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = &pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);

	error = device_register(&sdbg_host->dev);

	if (error)
		goto clean;

	++scsi_debug_add_host;
	return error;

clean:
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	kfree(sdbg_host);
	return error;
}

static void sdebug_remove_adapter(void)
{
	struct sdebug_host_info * sdbg_host = NULL;

	spin_lock(&sdebug_host_list_lock);
	if (!list_empty(&sdebug_host_list)) {
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		list_del(&sdbg_host->host_list);
	}
	spin_unlock(&sdebug_host_list_lock);

	if (!sdbg_host)
		return;

	device_unregister(&sdbg_host->dev);
	--scsi_debug_add_host;
}
int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
{
	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
	int len, k;
	unsigned int num;
	unsigned long long lba;
	u32 ei_lba;
	int errsts = 0;
	int target = SCpnt->device->id;
	struct sdebug_dev_info *devip = NULL;
	int inj_recovered = 0;
	int inj_transport = 0;
	int inj_dif = 0;
	int inj_dix = 0;
	int delay_override = 0;
	int unmap = 0;

	scsi_set_resid(SCpnt, 0);
	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
		printk(KERN_INFO "scsi_debug: cmd ");
		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
			printk("%02x ", (int)cmd[k]);
		printk("\n");
	}

	if (target == SCpnt->device->host->hostt->this_id) {
		printk(KERN_INFO "scsi_debug: initiator's id used as "
		       "target!\n");
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	}

	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	devip = devInfoReg(SCpnt->device);
	if (NULL == devip)
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);

	if ((scsi_debug_every_nth != 0) &&
	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
		scsi_debug_cmnd_count = 0;
		if (scsi_debug_every_nth < -1)
			scsi_debug_every_nth = -1;
		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
			return 0; /* ignore command causing timeout */
		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
			inj_recovered = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
			inj_transport = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
			inj_dif = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
			inj_dix = 1; /* to reads and writes below */
	}

	if (devip->wlun) {
		switch (*cmd) {
		case INQUIRY:
		case REQUEST_SENSE:
		case TEST_UNIT_READY:
		case REPORT_LUNS:
			break;  /* only allowable wlun commands */
		default:
			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
				       "not supported for wlun\n", *cmd);
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			return schedule_resp(SCpnt, devip, done, errsts,
					     0);
		}
	}

	switch (*cmd) {
	case INQUIRY:     /* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_inquiry(SCpnt, target, devip);
		break;
	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_requests(SCpnt, devip);
		break;
	case REZERO_UNIT:	/* actually this is REWIND for SSC */
	case START_STOP:
		errsts = resp_start_stop(SCpnt, devip);
		break;
	case ALLOW_MEDIUM_REMOVAL:
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
			       cmd[4] ? "inhibited" : "enabled");
		break;
	case SEND_DIAGNOSTIC:     /* mandatory */
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case TEST_UNIT_READY:     /* mandatory */
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case RESERVE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RESERVE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case READ_CAPACITY:
		errsts = resp_readcap(SCpnt, devip);
		break;
	case SERVICE_ACTION_IN:
		if (cmd[1] == SAI_READ_CAPACITY_16)
			errsts = resp_readcap16(SCpnt, devip);
		else if (cmd[1] == SAI_GET_LBA_STATUS) {

			if (scsi_debug_unmap_max_desc == 0) {
				mk_sense_buffer(devip, ILLEGAL_REQUEST,
						INVALID_COMMAND_OPCODE, 0);
				errsts = check_condition_result;
			} else
				errsts = resp_get_lba_status(SCpnt, devip);
		} else {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
		}
		break;
	case MAINTENANCE_IN:
		if (MI_REPORT_TARGET_PGS != cmd[1]) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}
		errsts = resp_report_tgtpgs(SCpnt, devip);
		break;
	case READ_16:
	case READ_12:
	case READ_10:
		/* READ{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case READ_6:
read:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_transport && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case REPORT_LUNS:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_report_luns(SCpnt, devip);
		break;
	case VERIFY:		/* 10 byte SBC-2 command */
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_16:
	case WRITE_12:
	case WRITE_10:
		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case WRITE_6:
write:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case WRITE_SAME_16:
		if (cmd[1] & 0x8)
			unmap = 1;
		/* fall through */
	case WRITE_SAME:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
		break;
	case UNMAP:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;

		if (scsi_debug_unmap_max_desc == 0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
		} else
			errsts = resp_unmap(SCpnt, devip);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		errsts = resp_mode_sense(SCpnt, target, devip);
		break;
	case MODE_SELECT:
		errsts = resp_mode_select(SCpnt, 1, devip);
		break;
	case MODE_SELECT_10:
		errsts = resp_mode_select(SCpnt, 0, devip);
		break;
	case LOG_SENSE:
		errsts = resp_log_sense(SCpnt, devip);
		break;
	case SYNCHRONIZE_CACHE:
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_BUFFER:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case XDWRITEREAD_10:
		if (!scsi_bidi_cmnd(SCpnt)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			errsts = check_condition_result;
			break;
		}

		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
		break;
	case VARIABLE_LENGTH_CMD:
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {

			if ((cmd[10] & 0xe0) == 0)
				printk(KERN_ERR
				       "Unprotected RD/WR to DIF device\n");

			if (cmd[9] == READ_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto read;
			}

			if (cmd[9] == WRITE_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto write;
			}
		}

		mk_sense_buffer(devip, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		errsts = check_condition_result;
		break;
	default:
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
			       "supported\n", *cmd);
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;	/* Unit attention takes precedence */
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
		errsts = check_condition_result;
		break;
	}
	return schedule_resp(SCpnt, devip, done, errsts,
			     (delay_override ? 0 : scsi_debug_delay));
}
static struct scsi_host_template sdebug_driver_template = {
	.proc_info =		scsi_debug_proc_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.bios_param =		scsi_debug_biosparam,
	.can_queue =		SCSI_DEBUG_CANQUEUE,
	.sg_tablesize =		256,
	.max_sectors =		0xffff,
	.use_clustering = 	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
};
static int sdebug_driver_probe(struct device * dev)
{
	int error = 0;
	int host_prot;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;

	sdbg_host = to_sdebug_host(dev);

	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		printk(KERN_ERR "%s: scsi_register failed\n", __func__);
		error = -ENODEV;
		return error;
	}

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
		hpnt->max_id = scsi_debug_num_tgts + 1;
	else
		hpnt->max_id = scsi_debug_num_tgts;
	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */

	host_prot = 0;

	switch (scsi_debug_dif) {

	case SD_DIF_TYPE1_PROTECTION:
		host_prot = SHOST_DIF_TYPE1_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		host_prot = SHOST_DIF_TYPE2_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		host_prot = SHOST_DIF_TYPE3_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, host_prot);

	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (scsi_debug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}

static int sdebug_driver_remove(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		printk(KERN_ERR "%s: Unable to locate host info\n",
		       __func__);
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
};