2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disk
10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
/* Driver version advertised via module info and proc output. */
#define SCSI_DEBUG_VERSION "1.82"
static const char *scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
68 #define UNRECOVERED_READ_ERR 0x11
69 #define PARAMETER_LIST_LENGTH_ERR 0x1a
70 #define INVALID_OPCODE 0x20
71 #define ADDR_OUT_OF_RANGE 0x21
72 #define INVALID_COMMAND_OPCODE 0x20
73 #define INVALID_FIELD_IN_CDB 0x24
74 #define INVALID_FIELD_IN_PARAM_LIST 0x26
75 #define POWERON_RESET 0x29
76 #define SAVING_PARAMS_UNSUP 0x39
77 #define TRANSPORT_PROBLEM 0x4b
78 #define THRESHOLD_EXCEEDED 0x5d
79 #define LOW_POWER_COND_ON 0x5e
81 /* Additional Sense Code Qualifier (ASCQ) */
82 #define ACK_NAK_TO 0x3
84 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
86 /* Default values for driver parameters */
87 #define DEF_NUM_HOST 1
88 #define DEF_NUM_TGTS 1
89 #define DEF_MAX_LUNS 1
90 /* With these defaults, this driver will make 1 host with 1 target
91 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
95 #define DEF_DEV_SIZE_MB 8
99 #define DEF_EVERY_NTH 0
100 #define DEF_FAKE_RW 0
104 #define DEF_LBPWS10 0
106 #define DEF_LOWEST_ALIGNED 0
107 #define DEF_NO_LUN_0 0
108 #define DEF_NUM_PARTS 0
110 #define DEF_OPT_BLKS 64
111 #define DEF_PHYSBLK_EXP 0
113 #define DEF_REMOVABLE false
114 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
115 #define DEF_SECTOR_SIZE 512
116 #define DEF_UNMAP_ALIGNMENT 0
117 #define DEF_UNMAP_GRANULARITY 1
118 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
119 #define DEF_UNMAP_MAX_DESC 256
120 #define DEF_VIRTUAL_GB 0
121 #define DEF_VPD_USE_HOSTNO 1
122 #define DEF_WRITESAME_LENGTH 0xFFFF
124 /* bit mask values for scsi_debug_opts */
125 #define SCSI_DEBUG_OPT_NOISE 1
126 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
127 #define SCSI_DEBUG_OPT_TIMEOUT 4
128 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
129 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
130 #define SCSI_DEBUG_OPT_DIF_ERR 32
131 #define SCSI_DEBUG_OPT_DIX_ERR 64
132 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
133 /* When "every_nth" > 0 then modulo "every_nth" commands:
134 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
135 * - a RECOVERED_ERROR is simulated on successful read and write
136 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
137 * - a TRANSPORT_ERROR is simulated on successful read and write
138 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
140 * When "every_nth" < 0 then after "- every_nth" commands:
141 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
142 * - a RECOVERED_ERROR is simulated on successful read and write
143 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
144 * - a TRANSPORT_ERROR is simulated on successful read and write
145 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
146 * This will continue until some other action occurs (e.g. the user
147 * writing a new value (other than -1 or 1) to every_nth via sysfs).
150 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
151 * sector on read commands: */
152 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
153 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
155 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
156 * or "peripheral device" addressing (value 0) */
157 #define SAM2_LUN_ADDRESS_METHOD 0
158 #define SAM2_WLUN_REPORT_LUNS 0xc101
160 /* Can queue up to this number of commands. Typically commands that
161 * that have a non-zero delay are queued. */
162 #define SCSI_DEBUG_CANQUEUE 255
164 static int scsi_debug_add_host
= DEF_NUM_HOST
;
165 static int scsi_debug_ato
= DEF_ATO
;
166 static int scsi_debug_delay
= DEF_DELAY
;
167 static int scsi_debug_dev_size_mb
= DEF_DEV_SIZE_MB
;
168 static int scsi_debug_dif
= DEF_DIF
;
169 static int scsi_debug_dix
= DEF_DIX
;
170 static int scsi_debug_dsense
= DEF_D_SENSE
;
171 static int scsi_debug_every_nth
= DEF_EVERY_NTH
;
172 static int scsi_debug_fake_rw
= DEF_FAKE_RW
;
173 static unsigned int scsi_debug_guard
= DEF_GUARD
;
174 static int scsi_debug_lowest_aligned
= DEF_LOWEST_ALIGNED
;
175 static int scsi_debug_max_luns
= DEF_MAX_LUNS
;
176 static int scsi_debug_max_queue
= SCSI_DEBUG_CANQUEUE
;
177 static int scsi_debug_no_lun_0
= DEF_NO_LUN_0
;
178 static int scsi_debug_no_uld
= 0;
179 static int scsi_debug_num_parts
= DEF_NUM_PARTS
;
180 static int scsi_debug_num_tgts
= DEF_NUM_TGTS
; /* targets per host */
181 static int scsi_debug_opt_blks
= DEF_OPT_BLKS
;
182 static int scsi_debug_opts
= DEF_OPTS
;
183 static int scsi_debug_physblk_exp
= DEF_PHYSBLK_EXP
;
184 static int scsi_debug_ptype
= DEF_PTYPE
; /* SCSI peripheral type (0==disk) */
185 static int scsi_debug_scsi_level
= DEF_SCSI_LEVEL
;
186 static int scsi_debug_sector_size
= DEF_SECTOR_SIZE
;
187 static int scsi_debug_virtual_gb
= DEF_VIRTUAL_GB
;
188 static int scsi_debug_vpd_use_hostno
= DEF_VPD_USE_HOSTNO
;
189 static unsigned int scsi_debug_lbpu
= DEF_LBPU
;
190 static unsigned int scsi_debug_lbpws
= DEF_LBPWS
;
191 static unsigned int scsi_debug_lbpws10
= DEF_LBPWS10
;
192 static unsigned int scsi_debug_lbprz
= DEF_LBPRZ
;
193 static unsigned int scsi_debug_unmap_alignment
= DEF_UNMAP_ALIGNMENT
;
194 static unsigned int scsi_debug_unmap_granularity
= DEF_UNMAP_GRANULARITY
;
195 static unsigned int scsi_debug_unmap_max_blocks
= DEF_UNMAP_MAX_BLOCKS
;
196 static unsigned int scsi_debug_unmap_max_desc
= DEF_UNMAP_MAX_DESC
;
197 static unsigned int scsi_debug_write_same_length
= DEF_WRITESAME_LENGTH
;
198 static bool scsi_debug_removable
= DEF_REMOVABLE
;
199 static bool scsi_debug_clustering
;
201 static int scsi_debug_cmnd_count
= 0;
203 #define DEV_READONLY(TGT) (0)
205 static unsigned int sdebug_store_sectors
;
206 static sector_t sdebug_capacity
; /* in sectors */
208 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
209 may still need them */
210 static int sdebug_heads
; /* heads per disk */
211 static int sdebug_cylinders_per
; /* cylinders per surface */
212 static int sdebug_sectors_per
; /* sectors per cylinder */
214 #define SDEBUG_MAX_PARTS 4
216 #define SDEBUG_SENSE_LEN 32
218 #define SCSI_DEBUG_MAX_CMD_LEN 32
220 static unsigned int scsi_debug_lbp(void)
222 return scsi_debug_lbpu
| scsi_debug_lbpws
| scsi_debug_lbpws10
;
225 struct sdebug_dev_info
{
226 struct list_head dev_list
;
227 unsigned char sense_buff
[SDEBUG_SENSE_LEN
]; /* weak nexus */
228 unsigned int channel
;
231 struct sdebug_host_info
*sdbg_host
;
238 struct sdebug_host_info
{
239 struct list_head host_list
;
240 struct Scsi_Host
*shost
;
242 struct list_head dev_info_list
;
/* Map an embedded struct device back to its owning sdebug_host_info. */
#define to_sdebug_host(d)	\
	container_of(d, struct sdebug_host_info, dev)

/* All simulated hosts, protected by sdebug_host_list_lock. */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);

/* Mid-layer completion callback type stored with each queued command. */
typedef void (*done_funct_t) (struct scsi_cmnd *);
253 struct sdebug_queued_cmd
{
255 struct timer_list cmnd_timer
;
256 done_funct_t done_funct
;
257 struct scsi_cmnd
* a_cmnd
;
260 static struct sdebug_queued_cmd queued_arr
[SCSI_DEBUG_CANQUEUE
];
262 static unsigned char * fake_storep
; /* ramdisk storage */
263 static struct sd_dif_tuple
*dif_storep
; /* protection info */
264 static void *map_storep
; /* provisioning map */
266 static unsigned long map_size
;
267 static int num_aborts
= 0;
268 static int num_dev_resets
= 0;
269 static int num_bus_resets
= 0;
270 static int num_host_resets
= 0;
271 static int dix_writes
;
272 static int dix_reads
;
273 static int dif_errors
;
275 static DEFINE_SPINLOCK(queued_arr_lock
);
276 static DEFINE_RWLOCK(atomic_rw
);
278 static char sdebug_proc_name
[] = "scsi_debug";
280 static struct bus_type pseudo_lld_bus
;
282 static struct device_driver sdebug_driverfs_driver
= {
283 .name
= sdebug_proc_name
,
284 .bus
= &pseudo_lld_bus
,
287 static const int check_condition_result
=
288 (DRIVER_SENSE
<< 24) | SAM_STAT_CHECK_CONDITION
;
290 static const int illegal_condition_result
=
291 (DRIVER_SENSE
<< 24) | (DID_ABORT
<< 16) | SAM_STAT_CHECK_CONDITION
;
293 static unsigned char ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
295 static unsigned char iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
298 static void *fake_store(unsigned long long lba
)
300 lba
= do_div(lba
, sdebug_store_sectors
);
302 return fake_storep
+ lba
* scsi_debug_sector_size
;
305 static struct sd_dif_tuple
*dif_store(sector_t sector
)
307 sector
= do_div(sector
, sdebug_store_sectors
);
309 return dif_storep
+ sector
;
312 static int sdebug_add_adapter(void);
313 static void sdebug_remove_adapter(void);
315 static void sdebug_max_tgts_luns(void)
317 struct sdebug_host_info
*sdbg_host
;
318 struct Scsi_Host
*hpnt
;
320 spin_lock(&sdebug_host_list_lock
);
321 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
322 hpnt
= sdbg_host
->shost
;
323 if ((hpnt
->this_id
>= 0) &&
324 (scsi_debug_num_tgts
> hpnt
->this_id
))
325 hpnt
->max_id
= scsi_debug_num_tgts
+ 1;
327 hpnt
->max_id
= scsi_debug_num_tgts
;
328 /* scsi_debug_max_luns; */
329 hpnt
->max_lun
= SAM2_WLUN_REPORT_LUNS
;
331 spin_unlock(&sdebug_host_list_lock
);
334 static void mk_sense_buffer(struct sdebug_dev_info
*devip
, int key
,
337 unsigned char *sbuff
;
339 sbuff
= devip
->sense_buff
;
340 memset(sbuff
, 0, SDEBUG_SENSE_LEN
);
342 scsi_build_sense_buffer(scsi_debug_dsense
, sbuff
, key
, asc
, asq
);
344 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
345 printk(KERN_INFO
"scsi_debug: [sense_key,asc,ascq]: "
346 "[0x%x,0x%x,0x%x]\n", key
, asc
, asq
);
349 static void get_data_transfer_info(unsigned char *cmd
,
350 unsigned long long *lba
, unsigned int *num
,
356 case VARIABLE_LENGTH_CMD
:
357 *lba
= (u64
)cmd
[19] | (u64
)cmd
[18] << 8 |
358 (u64
)cmd
[17] << 16 | (u64
)cmd
[16] << 24 |
359 (u64
)cmd
[15] << 32 | (u64
)cmd
[14] << 40 |
360 (u64
)cmd
[13] << 48 | (u64
)cmd
[12] << 56;
362 *ei_lba
= (u32
)cmd
[23] | (u32
)cmd
[22] << 8 |
363 (u32
)cmd
[21] << 16 | (u32
)cmd
[20] << 24;
365 *num
= (u32
)cmd
[31] | (u32
)cmd
[30] << 8 | (u32
)cmd
[29] << 16 |
372 *lba
= (u64
)cmd
[9] | (u64
)cmd
[8] << 8 |
373 (u64
)cmd
[7] << 16 | (u64
)cmd
[6] << 24 |
374 (u64
)cmd
[5] << 32 | (u64
)cmd
[4] << 40 |
375 (u64
)cmd
[3] << 48 | (u64
)cmd
[2] << 56;
377 *num
= (u32
)cmd
[13] | (u32
)cmd
[12] << 8 | (u32
)cmd
[11] << 16 |
382 *lba
= (u32
)cmd
[5] | (u32
)cmd
[4] << 8 | (u32
)cmd
[3] << 16 |
385 *num
= (u32
)cmd
[9] | (u32
)cmd
[8] << 8 | (u32
)cmd
[7] << 16 |
392 *lba
= (u32
)cmd
[5] | (u32
)cmd
[4] << 8 | (u32
)cmd
[3] << 16 |
395 *num
= (u32
)cmd
[8] | (u32
)cmd
[7] << 8;
399 *lba
= (u32
)cmd
[3] | (u32
)cmd
[2] << 8 |
400 (u32
)(cmd
[1] & 0x1f) << 16;
401 *num
= (0 == cmd
[4]) ? 256 : cmd
[4];
408 static int scsi_debug_ioctl(struct scsi_device
*dev
, int cmd
, void __user
*arg
)
410 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) {
411 printk(KERN_INFO
"scsi_debug: ioctl: cmd=0x%x\n", cmd
);
414 /* return -ENOTTY; // correct return but upsets fdisk */
417 static int check_readiness(struct scsi_cmnd
* SCpnt
, int reset_only
,
418 struct sdebug_dev_info
* devip
)
421 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
422 printk(KERN_INFO
"scsi_debug: Reporting Unit "
423 "attention: power on reset\n");
425 mk_sense_buffer(devip
, UNIT_ATTENTION
, POWERON_RESET
, 0);
426 return check_condition_result
;
428 if ((0 == reset_only
) && devip
->stopped
) {
429 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
430 printk(KERN_INFO
"scsi_debug: Reporting Not "
431 "ready: initializing command required\n");
432 mk_sense_buffer(devip
, NOT_READY
, LOGICAL_UNIT_NOT_READY
,
434 return check_condition_result
;
439 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
440 static int fill_from_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
444 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
448 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_FROM_DEVICE
))
449 return (DID_ERROR
<< 16);
451 act_len
= sg_copy_from_buffer(sdb
->table
.sgl
, sdb
->table
.nents
,
453 sdb
->resid
= scsi_bufflen(scp
) - act_len
;
458 /* Returns number of bytes fetched into 'arr' or -1 if error. */
459 static int fetch_to_dev_buffer(struct scsi_cmnd
*scp
, unsigned char *arr
,
462 if (!scsi_bufflen(scp
))
464 if (!(scsi_bidi_cmnd(scp
) || scp
->sc_data_direction
== DMA_TO_DEVICE
))
467 return scsi_sg_copy_to_buffer(scp
, arr
, arr_len
);
/*
 * INQUIRY identity strings. SPC requires fixed-width, space-padded
 * fields: vendor 8 bytes, product 16 bytes, revision 4 bytes.
 * NOTE(review): trailing-space padding reconstructed to those widths —
 * the extraction collapsed whitespace; confirm against upstream.
 */
static const char *inq_vendor_id = "Linux   ";
static const char *inq_product_id = "scsi_debug      ";
static const char *inq_product_rev = "0004";
475 static int inquiry_evpd_83(unsigned char * arr
, int port_group_id
,
476 int target_dev_id
, int dev_id_num
,
477 const char * dev_id_str
,
483 port_a
= target_dev_id
+ 1;
484 /* T10 vendor identifier field format (faked) */
485 arr
[0] = 0x2; /* ASCII */
488 memcpy(&arr
[4], inq_vendor_id
, 8);
489 memcpy(&arr
[12], inq_product_id
, 16);
490 memcpy(&arr
[28], dev_id_str
, dev_id_str_len
);
491 num
= 8 + 16 + dev_id_str_len
;
494 if (dev_id_num
>= 0) {
495 /* NAA-5, Logical unit identifier (binary) */
496 arr
[num
++] = 0x1; /* binary (not necessarily sas) */
497 arr
[num
++] = 0x3; /* PIV=0, lu, naa */
500 arr
[num
++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
504 arr
[num
++] = (dev_id_num
>> 24);
505 arr
[num
++] = (dev_id_num
>> 16) & 0xff;
506 arr
[num
++] = (dev_id_num
>> 8) & 0xff;
507 arr
[num
++] = dev_id_num
& 0xff;
508 /* Target relative port number */
509 arr
[num
++] = 0x61; /* proto=sas, binary */
510 arr
[num
++] = 0x94; /* PIV=1, target port, rel port */
511 arr
[num
++] = 0x0; /* reserved */
512 arr
[num
++] = 0x4; /* length */
513 arr
[num
++] = 0x0; /* reserved */
514 arr
[num
++] = 0x0; /* reserved */
516 arr
[num
++] = 0x1; /* relative port A */
518 /* NAA-5, Target port identifier */
519 arr
[num
++] = 0x61; /* proto=sas, binary */
520 arr
[num
++] = 0x93; /* piv=1, target port, naa */
523 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
527 arr
[num
++] = (port_a
>> 24);
528 arr
[num
++] = (port_a
>> 16) & 0xff;
529 arr
[num
++] = (port_a
>> 8) & 0xff;
530 arr
[num
++] = port_a
& 0xff;
531 /* NAA-5, Target port group identifier */
532 arr
[num
++] = 0x61; /* proto=sas, binary */
533 arr
[num
++] = 0x95; /* piv=1, target port group id */
538 arr
[num
++] = (port_group_id
>> 8) & 0xff;
539 arr
[num
++] = port_group_id
& 0xff;
540 /* NAA-5, Target device identifier */
541 arr
[num
++] = 0x61; /* proto=sas, binary */
542 arr
[num
++] = 0xa3; /* piv=1, target device, naa */
545 arr
[num
++] = 0x52; /* naa-5, company id=0x222222 (fake) */
549 arr
[num
++] = (target_dev_id
>> 24);
550 arr
[num
++] = (target_dev_id
>> 16) & 0xff;
551 arr
[num
++] = (target_dev_id
>> 8) & 0xff;
552 arr
[num
++] = target_dev_id
& 0xff;
553 /* SCSI name string: Target device identifier */
554 arr
[num
++] = 0x63; /* proto=sas, UTF-8 */
555 arr
[num
++] = 0xa8; /* piv=1, target device, SCSI name string */
558 memcpy(arr
+ num
, "naa.52222220", 12);
560 snprintf(b
, sizeof(b
), "%08X", target_dev_id
);
561 memcpy(arr
+ num
, b
, 8);
563 memset(arr
+ num
, 0, 4);
569 static unsigned char vpd84_data
[] = {
570 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
571 0x22,0x22,0x22,0x0,0xbb,0x1,
572 0x22,0x22,0x22,0x0,0xbb,0x2,
575 static int inquiry_evpd_84(unsigned char * arr
)
577 memcpy(arr
, vpd84_data
, sizeof(vpd84_data
));
578 return sizeof(vpd84_data
);
581 static int inquiry_evpd_85(unsigned char * arr
)
584 const char * na1
= "https://www.kernel.org/config";
585 const char * na2
= "http://www.kernel.org/log";
588 arr
[num
++] = 0x1; /* lu, storage config */
589 arr
[num
++] = 0x0; /* reserved */
594 plen
= ((plen
/ 4) + 1) * 4;
595 arr
[num
++] = plen
; /* length, null termianted, padded */
596 memcpy(arr
+ num
, na1
, olen
);
597 memset(arr
+ num
+ olen
, 0, plen
- olen
);
600 arr
[num
++] = 0x4; /* lu, logging */
601 arr
[num
++] = 0x0; /* reserved */
606 plen
= ((plen
/ 4) + 1) * 4;
607 arr
[num
++] = plen
; /* length, null terminated, padded */
608 memcpy(arr
+ num
, na2
, olen
);
609 memset(arr
+ num
+ olen
, 0, plen
- olen
);
615 /* SCSI ports VPD page */
616 static int inquiry_evpd_88(unsigned char * arr
, int target_dev_id
)
621 port_a
= target_dev_id
+ 1;
623 arr
[num
++] = 0x0; /* reserved */
624 arr
[num
++] = 0x0; /* reserved */
626 arr
[num
++] = 0x1; /* relative port 1 (primary) */
627 memset(arr
+ num
, 0, 6);
630 arr
[num
++] = 12; /* length tp descriptor */
631 /* naa-5 target port identifier (A) */
632 arr
[num
++] = 0x61; /* proto=sas, binary */
633 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
634 arr
[num
++] = 0x0; /* reserved */
635 arr
[num
++] = 0x8; /* length */
636 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
640 arr
[num
++] = (port_a
>> 24);
641 arr
[num
++] = (port_a
>> 16) & 0xff;
642 arr
[num
++] = (port_a
>> 8) & 0xff;
643 arr
[num
++] = port_a
& 0xff;
645 arr
[num
++] = 0x0; /* reserved */
646 arr
[num
++] = 0x0; /* reserved */
648 arr
[num
++] = 0x2; /* relative port 2 (secondary) */
649 memset(arr
+ num
, 0, 6);
652 arr
[num
++] = 12; /* length tp descriptor */
653 /* naa-5 target port identifier (B) */
654 arr
[num
++] = 0x61; /* proto=sas, binary */
655 arr
[num
++] = 0x93; /* PIV=1, target port, NAA */
656 arr
[num
++] = 0x0; /* reserved */
657 arr
[num
++] = 0x8; /* length */
658 arr
[num
++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
662 arr
[num
++] = (port_b
>> 24);
663 arr
[num
++] = (port_b
>> 16) & 0xff;
664 arr
[num
++] = (port_b
>> 8) & 0xff;
665 arr
[num
++] = port_b
& 0xff;
671 static unsigned char vpd89_data
[] = {
672 /* from 4th byte */ 0,0,0,0,
673 'l','i','n','u','x',' ',' ',' ',
674 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
676 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
678 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
679 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
680 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
681 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
683 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
685 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
687 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
688 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
689 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
690 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
691 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
692 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
693 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
698 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
699 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
700 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
702 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
706 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
707 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
708 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
711 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
712 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
715 static int inquiry_evpd_89(unsigned char * arr
)
717 memcpy(arr
, vpd89_data
, sizeof(vpd89_data
));
718 return sizeof(vpd89_data
);
/* Block limits VPD page (SBC-3) */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
730 static int inquiry_evpd_b0(unsigned char * arr
)
734 memcpy(arr
, vpdb0_data
, sizeof(vpdb0_data
));
736 /* Optimal transfer length granularity */
737 gran
= 1 << scsi_debug_physblk_exp
;
738 arr
[2] = (gran
>> 8) & 0xff;
739 arr
[3] = gran
& 0xff;
741 /* Maximum Transfer Length */
742 if (sdebug_store_sectors
> 0x400) {
743 arr
[4] = (sdebug_store_sectors
>> 24) & 0xff;
744 arr
[5] = (sdebug_store_sectors
>> 16) & 0xff;
745 arr
[6] = (sdebug_store_sectors
>> 8) & 0xff;
746 arr
[7] = sdebug_store_sectors
& 0xff;
749 /* Optimal Transfer Length */
750 put_unaligned_be32(scsi_debug_opt_blks
, &arr
[8]);
752 if (scsi_debug_lbpu
) {
753 /* Maximum Unmap LBA Count */
754 put_unaligned_be32(scsi_debug_unmap_max_blocks
, &arr
[16]);
756 /* Maximum Unmap Block Descriptor Count */
757 put_unaligned_be32(scsi_debug_unmap_max_desc
, &arr
[20]);
760 /* Unmap Granularity Alignment */
761 if (scsi_debug_unmap_alignment
) {
762 put_unaligned_be32(scsi_debug_unmap_alignment
, &arr
[28]);
763 arr
[28] |= 0x80; /* UGAVALID */
766 /* Optimal Unmap Granularity */
767 put_unaligned_be32(scsi_debug_unmap_granularity
, &arr
[24]);
769 /* Maximum WRITE SAME Length */
770 put_unaligned_be64(scsi_debug_write_same_length
, &arr
[32]);
772 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
774 return sizeof(vpdb0_data
);
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_evpd_b1(unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0;	/* medium rotation rate (msb) */
	arr[1] = 1;	/* non rotating medium (e.g. solid state) */
	arr[2] = 0;	/* reserved */
	arr[3] = 5;	/* less than 1.8" */

	return 0x3c;
}
789 /* Logical block provisioning VPD page (SBC-3) */
790 static int inquiry_evpd_b2(unsigned char *arr
)
793 arr
[0] = 0; /* threshold exponent */
798 if (scsi_debug_lbpws
)
801 if (scsi_debug_lbpws10
)
804 if (scsi_debug_lbprz
)
810 #define SDEBUG_LONG_INQ_SZ 96
811 #define SDEBUG_MAX_INQ_ARR_SZ 584
813 static int resp_inquiry(struct scsi_cmnd
* scp
, int target
,
814 struct sdebug_dev_info
* devip
)
816 unsigned char pq_pdt
;
818 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
819 int alloc_len
, n
, ret
;
821 alloc_len
= (cmd
[3] << 8) + cmd
[4];
822 arr
= kzalloc(SDEBUG_MAX_INQ_ARR_SZ
, GFP_ATOMIC
);
824 return DID_REQUEUE
<< 16;
826 pq_pdt
= 0x1e; /* present, wlun */
827 else if (scsi_debug_no_lun_0
&& (0 == devip
->lun
))
828 pq_pdt
= 0x7f; /* not present, no device type */
830 pq_pdt
= (scsi_debug_ptype
& 0x1f);
832 if (0x2 & cmd
[1]) { /* CMDDT bit set */
833 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
836 return check_condition_result
;
837 } else if (0x1 & cmd
[1]) { /* EVPD bit set */
838 int lu_id_num
, port_group_id
, target_dev_id
, len
;
840 int host_no
= devip
->sdbg_host
->shost
->host_no
;
842 port_group_id
= (((host_no
+ 1) & 0x7f) << 8) +
843 (devip
->channel
& 0x7f);
844 if (0 == scsi_debug_vpd_use_hostno
)
846 lu_id_num
= devip
->wlun
? -1 : (((host_no
+ 1) * 2000) +
847 (devip
->target
* 1000) + devip
->lun
);
848 target_dev_id
= ((host_no
+ 1) * 2000) +
849 (devip
->target
* 1000) - 3;
850 len
= scnprintf(lu_id_str
, 6, "%d", lu_id_num
);
851 if (0 == cmd
[2]) { /* supported vital product data pages */
852 arr
[1] = cmd
[2]; /*sanity */
854 arr
[n
++] = 0x0; /* this page */
855 arr
[n
++] = 0x80; /* unit serial number */
856 arr
[n
++] = 0x83; /* device identification */
857 arr
[n
++] = 0x84; /* software interface ident. */
858 arr
[n
++] = 0x85; /* management network addresses */
859 arr
[n
++] = 0x86; /* extended inquiry */
860 arr
[n
++] = 0x87; /* mode page policy */
861 arr
[n
++] = 0x88; /* SCSI ports */
862 arr
[n
++] = 0x89; /* ATA information */
863 arr
[n
++] = 0xb0; /* Block limits (SBC) */
864 arr
[n
++] = 0xb1; /* Block characteristics (SBC) */
865 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
867 arr
[3] = n
- 4; /* number of supported VPD pages */
868 } else if (0x80 == cmd
[2]) { /* unit serial number */
869 arr
[1] = cmd
[2]; /*sanity */
871 memcpy(&arr
[4], lu_id_str
, len
);
872 } else if (0x83 == cmd
[2]) { /* device identification */
873 arr
[1] = cmd
[2]; /*sanity */
874 arr
[3] = inquiry_evpd_83(&arr
[4], port_group_id
,
875 target_dev_id
, lu_id_num
,
877 } else if (0x84 == cmd
[2]) { /* Software interface ident. */
878 arr
[1] = cmd
[2]; /*sanity */
879 arr
[3] = inquiry_evpd_84(&arr
[4]);
880 } else if (0x85 == cmd
[2]) { /* Management network addresses */
881 arr
[1] = cmd
[2]; /*sanity */
882 arr
[3] = inquiry_evpd_85(&arr
[4]);
883 } else if (0x86 == cmd
[2]) { /* extended inquiry */
884 arr
[1] = cmd
[2]; /*sanity */
885 arr
[3] = 0x3c; /* number of following entries */
886 if (scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
)
887 arr
[4] = 0x4; /* SPT: GRD_CHK:1 */
888 else if (scsi_debug_dif
)
889 arr
[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
891 arr
[4] = 0x0; /* no protection stuff */
892 arr
[5] = 0x7; /* head of q, ordered + simple q's */
893 } else if (0x87 == cmd
[2]) { /* mode page policy */
894 arr
[1] = cmd
[2]; /*sanity */
895 arr
[3] = 0x8; /* number of following entries */
896 arr
[4] = 0x2; /* disconnect-reconnect mp */
897 arr
[6] = 0x80; /* mlus, shared */
898 arr
[8] = 0x18; /* protocol specific lu */
899 arr
[10] = 0x82; /* mlus, per initiator port */
900 } else if (0x88 == cmd
[2]) { /* SCSI Ports */
901 arr
[1] = cmd
[2]; /*sanity */
902 arr
[3] = inquiry_evpd_88(&arr
[4], target_dev_id
);
903 } else if (0x89 == cmd
[2]) { /* ATA information */
904 arr
[1] = cmd
[2]; /*sanity */
905 n
= inquiry_evpd_89(&arr
[4]);
908 } else if (0xb0 == cmd
[2]) { /* Block limits (SBC) */
909 arr
[1] = cmd
[2]; /*sanity */
910 arr
[3] = inquiry_evpd_b0(&arr
[4]);
911 } else if (0xb1 == cmd
[2]) { /* Block characteristics (SBC) */
912 arr
[1] = cmd
[2]; /*sanity */
913 arr
[3] = inquiry_evpd_b1(&arr
[4]);
914 } else if (0xb2 == cmd
[2]) { /* Logical Block Prov. (SBC) */
915 arr
[1] = cmd
[2]; /*sanity */
916 arr
[3] = inquiry_evpd_b2(&arr
[4]);
918 /* Illegal request, invalid field in cdb */
919 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
920 INVALID_FIELD_IN_CDB
, 0);
922 return check_condition_result
;
924 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
925 ret
= fill_from_dev_buffer(scp
, arr
,
926 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
930 /* drops through here for a standard inquiry */
931 arr
[1] = scsi_debug_removable
? 0x80 : 0; /* Removable disk */
932 arr
[2] = scsi_debug_scsi_level
;
933 arr
[3] = 2; /* response_data_format==2 */
934 arr
[4] = SDEBUG_LONG_INQ_SZ
- 5;
935 arr
[5] = scsi_debug_dif
? 1 : 0; /* PROTECT bit */
936 if (0 == scsi_debug_vpd_use_hostno
)
937 arr
[5] = 0x10; /* claim: implicit TGPS */
938 arr
[6] = 0x10; /* claim: MultiP */
939 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
940 arr
[7] = 0xa; /* claim: LINKED + CMDQUE */
941 memcpy(&arr
[8], inq_vendor_id
, 8);
942 memcpy(&arr
[16], inq_product_id
, 16);
943 memcpy(&arr
[32], inq_product_rev
, 4);
944 /* version descriptors (2 bytes each) follow */
945 arr
[58] = 0x0; arr
[59] = 0x77; /* SAM-3 ANSI */
946 arr
[60] = 0x3; arr
[61] = 0x14; /* SPC-3 ANSI */
948 if (scsi_debug_ptype
== 0) {
949 arr
[n
++] = 0x3; arr
[n
++] = 0x3d; /* SBC-2 ANSI */
950 } else if (scsi_debug_ptype
== 1) {
951 arr
[n
++] = 0x3; arr
[n
++] = 0x60; /* SSC-2 no version */
953 arr
[n
++] = 0xc; arr
[n
++] = 0xf; /* SAS-1.1 rev 10 */
954 ret
= fill_from_dev_buffer(scp
, arr
,
955 min(alloc_len
, SDEBUG_LONG_INQ_SZ
));
960 static int resp_requests(struct scsi_cmnd
* scp
,
961 struct sdebug_dev_info
* devip
)
963 unsigned char * sbuff
;
964 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
965 unsigned char arr
[SDEBUG_SENSE_LEN
];
969 memset(arr
, 0, sizeof(arr
));
970 if (devip
->reset
== 1)
971 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
972 want_dsense
= !!(cmd
[1] & 1) || scsi_debug_dsense
;
973 sbuff
= devip
->sense_buff
;
974 if ((iec_m_pg
[2] & 0x4) && (6 == (iec_m_pg
[3] & 0xf))) {
977 arr
[1] = 0x0; /* NO_SENSE in sense_key */
978 arr
[2] = THRESHOLD_EXCEEDED
;
979 arr
[3] = 0xff; /* TEST set and MRIE==6 */
982 arr
[2] = 0x0; /* NO_SENSE in sense_key */
983 arr
[7] = 0xa; /* 18 byte sense buffer */
984 arr
[12] = THRESHOLD_EXCEEDED
;
985 arr
[13] = 0xff; /* TEST set and MRIE==6 */
988 memcpy(arr
, sbuff
, SDEBUG_SENSE_LEN
);
989 if ((cmd
[1] & 1) && (! scsi_debug_dsense
)) {
990 /* DESC bit set and sense_buff in fixed format */
991 memset(arr
, 0, sizeof(arr
));
993 arr
[1] = sbuff
[2]; /* sense key */
994 arr
[2] = sbuff
[12]; /* asc */
995 arr
[3] = sbuff
[13]; /* ascq */
999 mk_sense_buffer(devip
, 0, NO_ADDITIONAL_SENSE
, 0);
1000 return fill_from_dev_buffer(scp
, arr
, len
);
1003 static int resp_start_stop(struct scsi_cmnd
* scp
,
1004 struct sdebug_dev_info
* devip
)
1006 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1007 int power_cond
, errsts
, start
;
1009 if ((errsts
= check_readiness(scp
, 1, devip
)))
1011 power_cond
= (cmd
[4] & 0xf0) >> 4;
1013 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1015 return check_condition_result
;
1018 if (start
== devip
->stopped
)
1019 devip
->stopped
= !start
;
1023 static sector_t
get_sdebug_capacity(void)
1025 if (scsi_debug_virtual_gb
> 0)
1026 return (sector_t
)scsi_debug_virtual_gb
*
1027 (1073741824 / scsi_debug_sector_size
);
1029 return sdebug_store_sectors
;
1032 #define SDEBUG_READCAP_ARR_SZ 8
1033 static int resp_readcap(struct scsi_cmnd
* scp
,
1034 struct sdebug_dev_info
* devip
)
1036 unsigned char arr
[SDEBUG_READCAP_ARR_SZ
];
1040 if ((errsts
= check_readiness(scp
, 1, devip
)))
1042 /* following just in case virtual_gb changed */
1043 sdebug_capacity
= get_sdebug_capacity();
1044 memset(arr
, 0, SDEBUG_READCAP_ARR_SZ
);
1045 if (sdebug_capacity
< 0xffffffff) {
1046 capac
= (unsigned int)sdebug_capacity
- 1;
1047 arr
[0] = (capac
>> 24);
1048 arr
[1] = (capac
>> 16) & 0xff;
1049 arr
[2] = (capac
>> 8) & 0xff;
1050 arr
[3] = capac
& 0xff;
1057 arr
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
1058 arr
[7] = scsi_debug_sector_size
& 0xff;
1059 return fill_from_dev_buffer(scp
, arr
, SDEBUG_READCAP_ARR_SZ
);
1062 #define SDEBUG_READCAP16_ARR_SZ 32
1063 static int resp_readcap16(struct scsi_cmnd
* scp
,
1064 struct sdebug_dev_info
* devip
)
1066 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1067 unsigned char arr
[SDEBUG_READCAP16_ARR_SZ
];
1068 unsigned long long capac
;
1069 int errsts
, k
, alloc_len
;
1071 if ((errsts
= check_readiness(scp
, 1, devip
)))
1073 alloc_len
= ((cmd
[10] << 24) + (cmd
[11] << 16) + (cmd
[12] << 8)
1075 /* following just in case virtual_gb changed */
1076 sdebug_capacity
= get_sdebug_capacity();
1077 memset(arr
, 0, SDEBUG_READCAP16_ARR_SZ
);
1078 capac
= sdebug_capacity
- 1;
1079 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1080 arr
[7 - k
] = capac
& 0xff;
1081 arr
[8] = (scsi_debug_sector_size
>> 24) & 0xff;
1082 arr
[9] = (scsi_debug_sector_size
>> 16) & 0xff;
1083 arr
[10] = (scsi_debug_sector_size
>> 8) & 0xff;
1084 arr
[11] = scsi_debug_sector_size
& 0xff;
1085 arr
[13] = scsi_debug_physblk_exp
& 0xf;
1086 arr
[14] = (scsi_debug_lowest_aligned
>> 8) & 0x3f;
1088 if (scsi_debug_lbp()) {
1089 arr
[14] |= 0x80; /* LBPME */
1090 if (scsi_debug_lbprz
)
1091 arr
[14] |= 0x40; /* LBPRZ */
1094 arr
[15] = scsi_debug_lowest_aligned
& 0xff;
1096 if (scsi_debug_dif
) {
1097 arr
[12] = (scsi_debug_dif
- 1) << 1; /* P_TYPE */
1098 arr
[12] |= 1; /* PROT_EN */
1101 return fill_from_dev_buffer(scp
, arr
,
1102 min(alloc_len
, SDEBUG_READCAP16_ARR_SZ
));
1105 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1107 static int resp_report_tgtpgs(struct scsi_cmnd
* scp
,
1108 struct sdebug_dev_info
* devip
)
1110 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1111 unsigned char * arr
;
1112 int host_no
= devip
->sdbg_host
->shost
->host_no
;
1113 int n
, ret
, alen
, rlen
;
1114 int port_group_a
, port_group_b
, port_a
, port_b
;
1116 alen
= ((cmd
[6] << 24) + (cmd
[7] << 16) + (cmd
[8] << 8)
1119 arr
= kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ
, GFP_ATOMIC
);
1121 return DID_REQUEUE
<< 16;
1123 * EVPD page 0x88 states we have two ports, one
1124 * real and a fake port with no device connected.
1125 * So we create two port groups with one port each
1126 * and set the group with port B to unavailable.
1128 port_a
= 0x1; /* relative port A */
1129 port_b
= 0x2; /* relative port B */
1130 port_group_a
= (((host_no
+ 1) & 0x7f) << 8) +
1131 (devip
->channel
& 0x7f);
1132 port_group_b
= (((host_no
+ 1) & 0x7f) << 8) +
1133 (devip
->channel
& 0x7f) + 0x80;
1136 * The asymmetric access state is cycled according to the host_id.
1139 if (0 == scsi_debug_vpd_use_hostno
) {
1140 arr
[n
++] = host_no
% 3; /* Asymm access state */
1141 arr
[n
++] = 0x0F; /* claim: all states are supported */
1143 arr
[n
++] = 0x0; /* Active/Optimized path */
1144 arr
[n
++] = 0x01; /* claim: only support active/optimized paths */
1146 arr
[n
++] = (port_group_a
>> 8) & 0xff;
1147 arr
[n
++] = port_group_a
& 0xff;
1148 arr
[n
++] = 0; /* Reserved */
1149 arr
[n
++] = 0; /* Status code */
1150 arr
[n
++] = 0; /* Vendor unique */
1151 arr
[n
++] = 0x1; /* One port per group */
1152 arr
[n
++] = 0; /* Reserved */
1153 arr
[n
++] = 0; /* Reserved */
1154 arr
[n
++] = (port_a
>> 8) & 0xff;
1155 arr
[n
++] = port_a
& 0xff;
1156 arr
[n
++] = 3; /* Port unavailable */
1157 arr
[n
++] = 0x08; /* claim: only unavailalbe paths are supported */
1158 arr
[n
++] = (port_group_b
>> 8) & 0xff;
1159 arr
[n
++] = port_group_b
& 0xff;
1160 arr
[n
++] = 0; /* Reserved */
1161 arr
[n
++] = 0; /* Status code */
1162 arr
[n
++] = 0; /* Vendor unique */
1163 arr
[n
++] = 0x1; /* One port per group */
1164 arr
[n
++] = 0; /* Reserved */
1165 arr
[n
++] = 0; /* Reserved */
1166 arr
[n
++] = (port_b
>> 8) & 0xff;
1167 arr
[n
++] = port_b
& 0xff;
1170 arr
[0] = (rlen
>> 24) & 0xff;
1171 arr
[1] = (rlen
>> 16) & 0xff;
1172 arr
[2] = (rlen
>> 8) & 0xff;
1173 arr
[3] = rlen
& 0xff;
1176 * Return the smallest value of either
1177 * - The allocated length
1178 * - The constructed command length
1179 * - The maximum array size
1182 ret
= fill_from_dev_buffer(scp
, arr
,
1183 min(rlen
, SDEBUG_MAX_TGTPGS_ARR_SZ
));
1188 /* <<Following mode page info copied from ST318451LW>> */
1190 static int resp_err_recov_pg(unsigned char * p
, int pcontrol
, int target
)
1191 { /* Read-Write Error Recovery page for mode_sense */
1192 unsigned char err_recov_pg
[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1195 memcpy(p
, err_recov_pg
, sizeof(err_recov_pg
));
1197 memset(p
+ 2, 0, sizeof(err_recov_pg
) - 2);
1198 return sizeof(err_recov_pg
);
1201 static int resp_disconnect_pg(unsigned char * p
, int pcontrol
, int target
)
1202 { /* Disconnect-Reconnect page for mode_sense */
1203 unsigned char disconnect_pg
[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1204 0, 0, 0, 0, 0, 0, 0, 0};
1206 memcpy(p
, disconnect_pg
, sizeof(disconnect_pg
));
1208 memset(p
+ 2, 0, sizeof(disconnect_pg
) - 2);
1209 return sizeof(disconnect_pg
);
1212 static int resp_format_pg(unsigned char * p
, int pcontrol
, int target
)
1213 { /* Format device page for mode_sense */
1214 unsigned char format_pg
[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1215 0, 0, 0, 0, 0, 0, 0, 0,
1216 0, 0, 0, 0, 0x40, 0, 0, 0};
1218 memcpy(p
, format_pg
, sizeof(format_pg
));
1219 p
[10] = (sdebug_sectors_per
>> 8) & 0xff;
1220 p
[11] = sdebug_sectors_per
& 0xff;
1221 p
[12] = (scsi_debug_sector_size
>> 8) & 0xff;
1222 p
[13] = scsi_debug_sector_size
& 0xff;
1223 if (scsi_debug_removable
)
1224 p
[20] |= 0x20; /* should agree with INQUIRY */
1226 memset(p
+ 2, 0, sizeof(format_pg
) - 2);
1227 return sizeof(format_pg
);
1230 static int resp_caching_pg(unsigned char * p
, int pcontrol
, int target
)
1231 { /* Caching page for mode_sense */
1232 unsigned char caching_pg
[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1233 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1235 memcpy(p
, caching_pg
, sizeof(caching_pg
));
1237 memset(p
+ 2, 0, sizeof(caching_pg
) - 2);
1238 return sizeof(caching_pg
);
1241 static int resp_ctrl_m_pg(unsigned char * p
, int pcontrol
, int target
)
1242 { /* Control mode page for mode_sense */
1243 unsigned char ch_ctrl_m_pg
[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1245 unsigned char d_ctrl_m_pg
[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1248 if (scsi_debug_dsense
)
1249 ctrl_m_pg
[2] |= 0x4;
1251 ctrl_m_pg
[2] &= ~0x4;
1254 ctrl_m_pg
[5] |= 0x80; /* ATO=1 */
1256 memcpy(p
, ctrl_m_pg
, sizeof(ctrl_m_pg
));
1258 memcpy(p
+ 2, ch_ctrl_m_pg
, sizeof(ch_ctrl_m_pg
));
1259 else if (2 == pcontrol
)
1260 memcpy(p
, d_ctrl_m_pg
, sizeof(d_ctrl_m_pg
));
1261 return sizeof(ctrl_m_pg
);
1265 static int resp_iec_m_pg(unsigned char * p
, int pcontrol
, int target
)
1266 { /* Informational Exceptions control mode page for mode_sense */
1267 unsigned char ch_iec_m_pg
[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1269 unsigned char d_iec_m_pg
[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1272 memcpy(p
, iec_m_pg
, sizeof(iec_m_pg
));
1274 memcpy(p
+ 2, ch_iec_m_pg
, sizeof(ch_iec_m_pg
));
1275 else if (2 == pcontrol
)
1276 memcpy(p
, d_iec_m_pg
, sizeof(d_iec_m_pg
));
1277 return sizeof(iec_m_pg
);
1280 static int resp_sas_sf_m_pg(unsigned char * p
, int pcontrol
, int target
)
1281 { /* SAS SSP mode page - short format for mode_sense */
1282 unsigned char sas_sf_m_pg
[] = {0x19, 0x6,
1283 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1285 memcpy(p
, sas_sf_m_pg
, sizeof(sas_sf_m_pg
));
1287 memset(p
+ 2, 0, sizeof(sas_sf_m_pg
) - 2);
1288 return sizeof(sas_sf_m_pg
);
1292 static int resp_sas_pcd_m_spg(unsigned char * p
, int pcontrol
, int target
,
1294 { /* SAS phy control and discover mode page for mode_sense */
1295 unsigned char sas_pcd_m_pg
[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1296 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1297 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1298 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1299 0x2, 0, 0, 0, 0, 0, 0, 0,
1300 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1301 0, 0, 0, 0, 0, 0, 0, 0,
1302 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1303 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1304 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1305 0x3, 0, 0, 0, 0, 0, 0, 0,
1306 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1307 0, 0, 0, 0, 0, 0, 0, 0,
1311 port_a
= target_dev_id
+ 1;
1312 port_b
= port_a
+ 1;
1313 memcpy(p
, sas_pcd_m_pg
, sizeof(sas_pcd_m_pg
));
1314 p
[20] = (port_a
>> 24);
1315 p
[21] = (port_a
>> 16) & 0xff;
1316 p
[22] = (port_a
>> 8) & 0xff;
1317 p
[23] = port_a
& 0xff;
1318 p
[48 + 20] = (port_b
>> 24);
1319 p
[48 + 21] = (port_b
>> 16) & 0xff;
1320 p
[48 + 22] = (port_b
>> 8) & 0xff;
1321 p
[48 + 23] = port_b
& 0xff;
1323 memset(p
+ 4, 0, sizeof(sas_pcd_m_pg
) - 4);
1324 return sizeof(sas_pcd_m_pg
);
1327 static int resp_sas_sha_m_spg(unsigned char * p
, int pcontrol
)
1328 { /* SAS SSP shared protocol specific port mode subpage */
1329 unsigned char sas_sha_m_pg
[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1330 0, 0, 0, 0, 0, 0, 0, 0,
1333 memcpy(p
, sas_sha_m_pg
, sizeof(sas_sha_m_pg
));
1335 memset(p
+ 4, 0, sizeof(sas_sha_m_pg
) - 4);
1336 return sizeof(sas_sha_m_pg
);
1339 #define SDEBUG_MAX_MSENSE_SZ 256
1341 static int resp_mode_sense(struct scsi_cmnd
* scp
, int target
,
1342 struct sdebug_dev_info
* devip
)
1344 unsigned char dbd
, llbaa
;
1345 int pcontrol
, pcode
, subpcode
, bd_len
;
1346 unsigned char dev_spec
;
1347 int k
, alloc_len
, msense_6
, offset
, len
, errsts
, target_dev_id
;
1349 unsigned char arr
[SDEBUG_MAX_MSENSE_SZ
];
1350 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1352 if ((errsts
= check_readiness(scp
, 1, devip
)))
1354 dbd
= !!(cmd
[1] & 0x8);
1355 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1356 pcode
= cmd
[2] & 0x3f;
1358 msense_6
= (MODE_SENSE
== cmd
[0]);
1359 llbaa
= msense_6
? 0 : !!(cmd
[1] & 0x10);
1360 if ((0 == scsi_debug_ptype
) && (0 == dbd
))
1361 bd_len
= llbaa
? 16 : 8;
1364 alloc_len
= msense_6
? cmd
[4] : ((cmd
[7] << 8) | cmd
[8]);
1365 memset(arr
, 0, SDEBUG_MAX_MSENSE_SZ
);
1366 if (0x3 == pcontrol
) { /* Saving values not supported */
1367 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, SAVING_PARAMS_UNSUP
,
1369 return check_condition_result
;
1371 target_dev_id
= ((devip
->sdbg_host
->shost
->host_no
+ 1) * 2000) +
1372 (devip
->target
* 1000) - 3;
1373 /* set DPOFUA bit for disks */
1374 if (0 == scsi_debug_ptype
)
1375 dev_spec
= (DEV_READONLY(target
) ? 0x80 : 0x0) | 0x10;
1385 arr
[4] = 0x1; /* set LONGLBA bit */
1386 arr
[7] = bd_len
; /* assume 255 or less */
1390 if ((bd_len
> 0) && (!sdebug_capacity
))
1391 sdebug_capacity
= get_sdebug_capacity();
1394 if (sdebug_capacity
> 0xfffffffe) {
1400 ap
[0] = (sdebug_capacity
>> 24) & 0xff;
1401 ap
[1] = (sdebug_capacity
>> 16) & 0xff;
1402 ap
[2] = (sdebug_capacity
>> 8) & 0xff;
1403 ap
[3] = sdebug_capacity
& 0xff;
1405 ap
[6] = (scsi_debug_sector_size
>> 8) & 0xff;
1406 ap
[7] = scsi_debug_sector_size
& 0xff;
1409 } else if (16 == bd_len
) {
1410 unsigned long long capac
= sdebug_capacity
;
1412 for (k
= 0; k
< 8; ++k
, capac
>>= 8)
1413 ap
[7 - k
] = capac
& 0xff;
1414 ap
[12] = (scsi_debug_sector_size
>> 24) & 0xff;
1415 ap
[13] = (scsi_debug_sector_size
>> 16) & 0xff;
1416 ap
[14] = (scsi_debug_sector_size
>> 8) & 0xff;
1417 ap
[15] = scsi_debug_sector_size
& 0xff;
1422 if ((subpcode
> 0x0) && (subpcode
< 0xff) && (0x19 != pcode
)) {
1423 /* TODO: Control Extension page */
1424 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1426 return check_condition_result
;
1429 case 0x1: /* Read-Write error recovery page, direct access */
1430 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1433 case 0x2: /* Disconnect-Reconnect page, all devices */
1434 len
= resp_disconnect_pg(ap
, pcontrol
, target
);
1437 case 0x3: /* Format device page, direct access */
1438 len
= resp_format_pg(ap
, pcontrol
, target
);
1441 case 0x8: /* Caching page, direct access */
1442 len
= resp_caching_pg(ap
, pcontrol
, target
);
1445 case 0xa: /* Control Mode page, all devices */
1446 len
= resp_ctrl_m_pg(ap
, pcontrol
, target
);
1449 case 0x19: /* if spc==1 then sas phy, control+discover */
1450 if ((subpcode
> 0x2) && (subpcode
< 0xff)) {
1451 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1452 INVALID_FIELD_IN_CDB
, 0);
1453 return check_condition_result
;
1456 if ((0x0 == subpcode
) || (0xff == subpcode
))
1457 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1458 if ((0x1 == subpcode
) || (0xff == subpcode
))
1459 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
, target
,
1461 if ((0x2 == subpcode
) || (0xff == subpcode
))
1462 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1465 case 0x1c: /* Informational Exceptions Mode page, all devices */
1466 len
= resp_iec_m_pg(ap
, pcontrol
, target
);
1469 case 0x3f: /* Read all Mode pages */
1470 if ((0 == subpcode
) || (0xff == subpcode
)) {
1471 len
= resp_err_recov_pg(ap
, pcontrol
, target
);
1472 len
+= resp_disconnect_pg(ap
+ len
, pcontrol
, target
);
1473 len
+= resp_format_pg(ap
+ len
, pcontrol
, target
);
1474 len
+= resp_caching_pg(ap
+ len
, pcontrol
, target
);
1475 len
+= resp_ctrl_m_pg(ap
+ len
, pcontrol
, target
);
1476 len
+= resp_sas_sf_m_pg(ap
+ len
, pcontrol
, target
);
1477 if (0xff == subpcode
) {
1478 len
+= resp_sas_pcd_m_spg(ap
+ len
, pcontrol
,
1479 target
, target_dev_id
);
1480 len
+= resp_sas_sha_m_spg(ap
+ len
, pcontrol
);
1482 len
+= resp_iec_m_pg(ap
+ len
, pcontrol
, target
);
1484 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1485 INVALID_FIELD_IN_CDB
, 0);
1486 return check_condition_result
;
1491 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
1493 return check_condition_result
;
1496 arr
[0] = offset
- 1;
1498 arr
[0] = ((offset
- 2) >> 8) & 0xff;
1499 arr
[1] = (offset
- 2) & 0xff;
1501 return fill_from_dev_buffer(scp
, arr
, min(alloc_len
, offset
));
1504 #define SDEBUG_MAX_MSELECT_SZ 512
1506 static int resp_mode_select(struct scsi_cmnd
* scp
, int mselect6
,
1507 struct sdebug_dev_info
* devip
)
1509 int pf
, sp
, ps
, md_len
, bd_len
, off
, spf
, pg_len
;
1510 int param_len
, res
, errsts
, mpage
;
1511 unsigned char arr
[SDEBUG_MAX_MSELECT_SZ
];
1512 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1514 if ((errsts
= check_readiness(scp
, 1, devip
)))
1516 memset(arr
, 0, sizeof(arr
));
1519 param_len
= mselect6
? cmd
[4] : ((cmd
[7] << 8) + cmd
[8]);
1520 if ((0 == pf
) || sp
|| (param_len
> SDEBUG_MAX_MSELECT_SZ
)) {
1521 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1522 INVALID_FIELD_IN_CDB
, 0);
1523 return check_condition_result
;
1525 res
= fetch_to_dev_buffer(scp
, arr
, param_len
);
1527 return (DID_ERROR
<< 16);
1528 else if ((res
< param_len
) &&
1529 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
1530 printk(KERN_INFO
"scsi_debug: mode_select: cdb indicated=%d, "
1531 " IO sent=%d bytes\n", param_len
, res
);
1532 md_len
= mselect6
? (arr
[0] + 1) : ((arr
[0] << 8) + arr
[1] + 2);
1533 bd_len
= mselect6
? arr
[3] : ((arr
[6] << 8) + arr
[7]);
1535 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1536 INVALID_FIELD_IN_PARAM_LIST
, 0);
1537 return check_condition_result
;
1539 off
= bd_len
+ (mselect6
? 4 : 8);
1540 mpage
= arr
[off
] & 0x3f;
1541 ps
= !!(arr
[off
] & 0x80);
1543 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1544 INVALID_FIELD_IN_PARAM_LIST
, 0);
1545 return check_condition_result
;
1547 spf
= !!(arr
[off
] & 0x40);
1548 pg_len
= spf
? ((arr
[off
+ 2] << 8) + arr
[off
+ 3] + 4) :
1550 if ((pg_len
+ off
) > param_len
) {
1551 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1552 PARAMETER_LIST_LENGTH_ERR
, 0);
1553 return check_condition_result
;
1556 case 0xa: /* Control Mode page */
1557 if (ctrl_m_pg
[1] == arr
[off
+ 1]) {
1558 memcpy(ctrl_m_pg
+ 2, arr
+ off
+ 2,
1559 sizeof(ctrl_m_pg
) - 2);
1560 scsi_debug_dsense
= !!(ctrl_m_pg
[2] & 0x4);
1564 case 0x1c: /* Informational Exceptions Mode page */
1565 if (iec_m_pg
[1] == arr
[off
+ 1]) {
1566 memcpy(iec_m_pg
+ 2, arr
+ off
+ 2,
1567 sizeof(iec_m_pg
) - 2);
1574 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1575 INVALID_FIELD_IN_PARAM_LIST
, 0);
1576 return check_condition_result
;
1579 static int resp_temp_l_pg(unsigned char * arr
)
1581 unsigned char temp_l_pg
[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1582 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1585 memcpy(arr
, temp_l_pg
, sizeof(temp_l_pg
));
1586 return sizeof(temp_l_pg
);
1589 static int resp_ie_l_pg(unsigned char * arr
)
1591 unsigned char ie_l_pg
[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1594 memcpy(arr
, ie_l_pg
, sizeof(ie_l_pg
));
1595 if (iec_m_pg
[2] & 0x4) { /* TEST bit set */
1596 arr
[4] = THRESHOLD_EXCEEDED
;
1599 return sizeof(ie_l_pg
);
1602 #define SDEBUG_MAX_LSENSE_SZ 512
1604 static int resp_log_sense(struct scsi_cmnd
* scp
,
1605 struct sdebug_dev_info
* devip
)
1607 int ppc
, sp
, pcontrol
, pcode
, subpcode
, alloc_len
, errsts
, len
, n
;
1608 unsigned char arr
[SDEBUG_MAX_LSENSE_SZ
];
1609 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
1611 if ((errsts
= check_readiness(scp
, 1, devip
)))
1613 memset(arr
, 0, sizeof(arr
));
1617 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1618 INVALID_FIELD_IN_CDB
, 0);
1619 return check_condition_result
;
1621 pcontrol
= (cmd
[2] & 0xc0) >> 6;
1622 pcode
= cmd
[2] & 0x3f;
1623 subpcode
= cmd
[3] & 0xff;
1624 alloc_len
= (cmd
[7] << 8) + cmd
[8];
1626 if (0 == subpcode
) {
1628 case 0x0: /* Supported log pages log page */
1630 arr
[n
++] = 0x0; /* this page */
1631 arr
[n
++] = 0xd; /* Temperature */
1632 arr
[n
++] = 0x2f; /* Informational exceptions */
1635 case 0xd: /* Temperature log page */
1636 arr
[3] = resp_temp_l_pg(arr
+ 4);
1638 case 0x2f: /* Informational exceptions log page */
1639 arr
[3] = resp_ie_l_pg(arr
+ 4);
1642 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1643 INVALID_FIELD_IN_CDB
, 0);
1644 return check_condition_result
;
1646 } else if (0xff == subpcode
) {
1650 case 0x0: /* Supported log pages and subpages log page */
1653 arr
[n
++] = 0x0; /* 0,0 page */
1655 arr
[n
++] = 0xff; /* this page */
1657 arr
[n
++] = 0x0; /* Temperature */
1659 arr
[n
++] = 0x0; /* Informational exceptions */
1662 case 0xd: /* Temperature subpages */
1665 arr
[n
++] = 0x0; /* Temperature */
1668 case 0x2f: /* Informational exceptions subpages */
1671 arr
[n
++] = 0x0; /* Informational exceptions */
1675 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1676 INVALID_FIELD_IN_CDB
, 0);
1677 return check_condition_result
;
1680 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
1681 INVALID_FIELD_IN_CDB
, 0);
1682 return check_condition_result
;
1684 len
= min(((arr
[2] << 8) + arr
[3]) + 4, alloc_len
);
1685 return fill_from_dev_buffer(scp
, arr
,
1686 min(len
, SDEBUG_MAX_INQ_ARR_SZ
));
1689 static int check_device_access_params(struct sdebug_dev_info
*devi
,
1690 unsigned long long lba
, unsigned int num
)
1692 if (lba
+ num
> sdebug_capacity
) {
1693 mk_sense_buffer(devi
, ILLEGAL_REQUEST
, ADDR_OUT_OF_RANGE
, 0);
1694 return check_condition_result
;
1696 /* transfer length excessive (tie in to block limits VPD page) */
1697 if (num
> sdebug_store_sectors
) {
1698 mk_sense_buffer(devi
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
, 0);
1699 return check_condition_result
;
1704 /* Returns number of bytes copied or -1 if error. */
1705 static int do_device_access(struct scsi_cmnd
*scmd
,
1706 struct sdebug_dev_info
*devi
,
1707 unsigned long long lba
, unsigned int num
, int write
)
1710 unsigned long long block
, rest
= 0;
1711 struct scsi_data_buffer
*sdb
;
1712 enum dma_data_direction dir
;
1713 size_t (*func
)(struct scatterlist
*, unsigned int, void *, size_t,
1717 sdb
= scsi_out(scmd
);
1718 dir
= DMA_TO_DEVICE
;
1719 func
= sg_pcopy_to_buffer
;
1721 sdb
= scsi_in(scmd
);
1722 dir
= DMA_FROM_DEVICE
;
1723 func
= sg_pcopy_from_buffer
;
1728 if (!(scsi_bidi_cmnd(scmd
) || scmd
->sc_data_direction
== dir
))
1731 block
= do_div(lba
, sdebug_store_sectors
);
1732 if (block
+ num
> sdebug_store_sectors
)
1733 rest
= block
+ num
- sdebug_store_sectors
;
1735 ret
= func(sdb
->table
.sgl
, sdb
->table
.nents
,
1736 fake_storep
+ (block
* scsi_debug_sector_size
),
1737 (num
- rest
) * scsi_debug_sector_size
, 0);
1738 if (ret
!= (num
- rest
) * scsi_debug_sector_size
)
1742 ret
+= func(sdb
->table
.sgl
, sdb
->table
.nents
,
1743 fake_storep
, rest
* scsi_debug_sector_size
,
1744 (num
- rest
) * scsi_debug_sector_size
);
1750 static __be16
dif_compute_csum(const void *buf
, int len
)
1754 if (scsi_debug_guard
)
1755 csum
= (__force __be16
)ip_compute_csum(buf
, len
);
1757 csum
= cpu_to_be16(crc_t10dif(buf
, len
));
1762 static int dif_verify(struct sd_dif_tuple
*sdt
, const void *data
,
1763 sector_t sector
, u32 ei_lba
)
1765 __be16 csum
= dif_compute_csum(data
, scsi_debug_sector_size
);
1767 if (sdt
->guard_tag
!= csum
) {
1768 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1770 (unsigned long)sector
,
1771 be16_to_cpu(sdt
->guard_tag
),
1775 if (scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
&&
1776 be32_to_cpu(sdt
->ref_tag
) != (sector
& 0xffffffff)) {
1777 pr_err("%s: REF check failed on sector %lu\n",
1778 __func__
, (unsigned long)sector
);
1781 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
1782 be32_to_cpu(sdt
->ref_tag
) != ei_lba
) {
1783 pr_err("%s: REF check failed on sector %lu\n",
1784 __func__
, (unsigned long)sector
);
1790 static void dif_copy_prot(struct scsi_cmnd
*SCpnt
, sector_t sector
,
1791 unsigned int sectors
, bool read
)
1795 const void *dif_store_end
= dif_storep
+ sdebug_store_sectors
;
1796 struct sg_mapping_iter miter
;
1798 /* Bytes of protection data to copy into sgl */
1799 resid
= sectors
* sizeof(*dif_storep
);
1801 sg_miter_start(&miter
, scsi_prot_sglist(SCpnt
),
1802 scsi_prot_sg_count(SCpnt
), SG_MITER_ATOMIC
|
1803 (read
? SG_MITER_TO_SG
: SG_MITER_FROM_SG
));
1805 while (sg_miter_next(&miter
) && resid
> 0) {
1806 size_t len
= min(miter
.length
, resid
);
1807 void *start
= dif_store(sector
);
1810 if (dif_store_end
< start
+ len
)
1811 rest
= start
+ len
- dif_store_end
;
1816 memcpy(paddr
, start
, len
- rest
);
1818 memcpy(start
, paddr
, len
- rest
);
1822 memcpy(paddr
+ len
- rest
, dif_storep
, rest
);
1824 memcpy(dif_storep
, paddr
+ len
- rest
, rest
);
1827 sector
+= len
/ sizeof(*dif_storep
);
1830 sg_miter_stop(&miter
);
1833 static int prot_verify_read(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
1834 unsigned int sectors
, u32 ei_lba
)
1837 struct sd_dif_tuple
*sdt
;
1840 for (i
= 0; i
< sectors
; i
++, ei_lba
++) {
1843 sector
= start_sec
+ i
;
1844 sdt
= dif_store(sector
);
1846 if (sdt
->app_tag
== cpu_to_be16(0xffff))
1849 ret
= dif_verify(sdt
, fake_store(sector
), sector
, ei_lba
);
1856 dif_copy_prot(SCpnt
, start_sec
, sectors
, true);
1862 static int resp_read(struct scsi_cmnd
*SCpnt
, unsigned long long lba
,
1863 unsigned int num
, struct sdebug_dev_info
*devip
,
1866 unsigned long iflags
;
1869 ret
= check_device_access_params(devip
, lba
, num
);
1873 if ((SCSI_DEBUG_OPT_MEDIUM_ERR
& scsi_debug_opts
) &&
1874 (lba
<= (OPT_MEDIUM_ERR_ADDR
+ OPT_MEDIUM_ERR_NUM
- 1)) &&
1875 ((lba
+ num
) > OPT_MEDIUM_ERR_ADDR
)) {
1876 /* claim unrecoverable read error */
1877 mk_sense_buffer(devip
, MEDIUM_ERROR
, UNRECOVERED_READ_ERR
, 0);
1878 /* set info field and valid bit for fixed descriptor */
1879 if (0x70 == (devip
->sense_buff
[0] & 0x7f)) {
1880 devip
->sense_buff
[0] |= 0x80; /* Valid bit */
1881 ret
= (lba
< OPT_MEDIUM_ERR_ADDR
)
1882 ? OPT_MEDIUM_ERR_ADDR
: (int)lba
;
1883 devip
->sense_buff
[3] = (ret
>> 24) & 0xff;
1884 devip
->sense_buff
[4] = (ret
>> 16) & 0xff;
1885 devip
->sense_buff
[5] = (ret
>> 8) & 0xff;
1886 devip
->sense_buff
[6] = ret
& 0xff;
1888 scsi_set_resid(SCpnt
, scsi_bufflen(SCpnt
));
1889 return check_condition_result
;
1892 read_lock_irqsave(&atomic_rw
, iflags
);
1895 if (scsi_debug_dix
&& scsi_prot_sg_count(SCpnt
)) {
1896 int prot_ret
= prot_verify_read(SCpnt
, lba
, num
, ei_lba
);
1899 read_unlock_irqrestore(&atomic_rw
, iflags
);
1900 mk_sense_buffer(devip
, ABORTED_COMMAND
, 0x10, prot_ret
);
1901 return illegal_condition_result
;
1905 ret
= do_device_access(SCpnt
, devip
, lba
, num
, 0);
1906 read_unlock_irqrestore(&atomic_rw
, iflags
);
1908 return DID_ERROR
<< 16;
1910 scsi_in(SCpnt
)->resid
= scsi_bufflen(SCpnt
) - ret
;
1915 void dump_sector(unsigned char *buf
, int len
)
1919 printk(KERN_ERR
">>> Sector Dump <<<\n");
1921 for (i
= 0 ; i
< len
; i
+= 16) {
1922 printk(KERN_ERR
"%04d: ", i
);
1924 for (j
= 0 ; j
< 16 ; j
++) {
1925 unsigned char c
= buf
[i
+j
];
1926 if (c
>= 0x20 && c
< 0x7e)
1927 printk(" %c ", buf
[i
+j
]);
1929 printk("%02x ", buf
[i
+j
]);
1936 static int prot_verify_write(struct scsi_cmnd
*SCpnt
, sector_t start_sec
,
1937 unsigned int sectors
, u32 ei_lba
)
1940 struct sd_dif_tuple
*sdt
;
1942 sector_t sector
= start_sec
;
1945 struct sg_mapping_iter diter
;
1946 struct sg_mapping_iter piter
;
1948 BUG_ON(scsi_sg_count(SCpnt
) == 0);
1949 BUG_ON(scsi_prot_sg_count(SCpnt
) == 0);
1951 sg_miter_start(&piter
, scsi_prot_sglist(SCpnt
),
1952 scsi_prot_sg_count(SCpnt
),
1953 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
1954 sg_miter_start(&diter
, scsi_sglist(SCpnt
), scsi_sg_count(SCpnt
),
1955 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
1957 /* For each protection page */
1958 while (sg_miter_next(&piter
)) {
1960 if (WARN_ON(!sg_miter_next(&diter
))) {
1965 for (ppage_offset
= 0; ppage_offset
< piter
.length
;
1966 ppage_offset
+= sizeof(struct sd_dif_tuple
)) {
1967 /* If we're at the end of the current
1968 * data page advance to the next one
1970 if (dpage_offset
>= diter
.length
) {
1971 if (WARN_ON(!sg_miter_next(&diter
))) {
1978 sdt
= piter
.addr
+ ppage_offset
;
1979 daddr
= diter
.addr
+ dpage_offset
;
1981 ret
= dif_verify(sdt
, daddr
, sector
, ei_lba
);
1983 dump_sector(daddr
, scsi_debug_sector_size
);
1989 dpage_offset
+= scsi_debug_sector_size
;
1991 diter
.consumed
= dpage_offset
;
1992 sg_miter_stop(&diter
);
1994 sg_miter_stop(&piter
);
1996 dif_copy_prot(SCpnt
, start_sec
, sectors
, false);
2003 sg_miter_stop(&diter
);
2004 sg_miter_stop(&piter
);
2008 static unsigned long lba_to_map_index(sector_t lba
)
2010 if (scsi_debug_unmap_alignment
) {
2011 lba
+= scsi_debug_unmap_granularity
-
2012 scsi_debug_unmap_alignment
;
2014 do_div(lba
, scsi_debug_unmap_granularity
);
2019 static sector_t
map_index_to_lba(unsigned long index
)
2021 sector_t lba
= index
* scsi_debug_unmap_granularity
;
2023 if (scsi_debug_unmap_alignment
) {
2024 lba
-= scsi_debug_unmap_granularity
-
2025 scsi_debug_unmap_alignment
;
2031 static unsigned int map_state(sector_t lba
, unsigned int *num
)
2034 unsigned int mapped
;
2035 unsigned long index
;
2038 index
= lba_to_map_index(lba
);
2039 mapped
= test_bit(index
, map_storep
);
2042 next
= find_next_zero_bit(map_storep
, map_size
, index
);
2044 next
= find_next_bit(map_storep
, map_size
, index
);
2046 end
= min_t(sector_t
, sdebug_store_sectors
, map_index_to_lba(next
));
2052 static void map_region(sector_t lba
, unsigned int len
)
2054 sector_t end
= lba
+ len
;
2057 unsigned long index
= lba_to_map_index(lba
);
2059 if (index
< map_size
)
2060 set_bit(index
, map_storep
);
2062 lba
= map_index_to_lba(index
+ 1);
2066 static void unmap_region(sector_t lba
, unsigned int len
)
2068 sector_t end
= lba
+ len
;
2071 unsigned long index
= lba_to_map_index(lba
);
2073 if (lba
== map_index_to_lba(index
) &&
2074 lba
+ scsi_debug_unmap_granularity
<= end
&&
2076 clear_bit(index
, map_storep
);
2077 if (scsi_debug_lbprz
) {
2078 memset(fake_storep
+
2079 lba
* scsi_debug_sector_size
, 0,
2080 scsi_debug_sector_size
*
2081 scsi_debug_unmap_granularity
);
2084 memset(dif_storep
+ lba
, 0xff,
2085 sizeof(*dif_storep
) *
2086 scsi_debug_unmap_granularity
);
2089 lba
= map_index_to_lba(index
+ 1);
2093 static int resp_write(struct scsi_cmnd
*SCpnt
, unsigned long long lba
,
2094 unsigned int num
, struct sdebug_dev_info
*devip
,
2097 unsigned long iflags
;
2100 ret
= check_device_access_params(devip
, lba
, num
);
2104 write_lock_irqsave(&atomic_rw
, iflags
);
2107 if (scsi_debug_dix
&& scsi_prot_sg_count(SCpnt
)) {
2108 int prot_ret
= prot_verify_write(SCpnt
, lba
, num
, ei_lba
);
2111 write_unlock_irqrestore(&atomic_rw
, iflags
);
2112 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, 0x10, prot_ret
);
2113 return illegal_condition_result
;
2117 ret
= do_device_access(SCpnt
, devip
, lba
, num
, 1);
2118 if (scsi_debug_lbp())
2119 map_region(lba
, num
);
2120 write_unlock_irqrestore(&atomic_rw
, iflags
);
2122 return (DID_ERROR
<< 16);
2123 else if ((ret
< (num
* scsi_debug_sector_size
)) &&
2124 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
2125 printk(KERN_INFO
"scsi_debug: write: cdb indicated=%u, "
2126 " IO sent=%d bytes\n", num
* scsi_debug_sector_size
, ret
);
2131 static int resp_write_same(struct scsi_cmnd
*scmd
, unsigned long long lba
,
2132 unsigned int num
, struct sdebug_dev_info
*devip
,
2133 u32 ei_lba
, unsigned int unmap
)
2135 unsigned long iflags
;
2136 unsigned long long i
;
2139 ret
= check_device_access_params(devip
, lba
, num
);
2143 if (num
> scsi_debug_write_same_length
) {
2144 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
2146 return check_condition_result
;
2149 write_lock_irqsave(&atomic_rw
, iflags
);
2151 if (unmap
&& scsi_debug_lbp()) {
2152 unmap_region(lba
, num
);
2156 /* Else fetch one logical block */
2157 ret
= fetch_to_dev_buffer(scmd
,
2158 fake_storep
+ (lba
* scsi_debug_sector_size
),
2159 scsi_debug_sector_size
);
2162 write_unlock_irqrestore(&atomic_rw
, iflags
);
2163 return (DID_ERROR
<< 16);
2164 } else if ((ret
< (num
* scsi_debug_sector_size
)) &&
2165 (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
))
2166 printk(KERN_INFO
"scsi_debug: write same: cdb indicated=%u, "
2167 " IO sent=%d bytes\n", num
* scsi_debug_sector_size
, ret
);
2169 /* Copy first sector to remaining blocks */
2170 for (i
= 1 ; i
< num
; i
++)
2171 memcpy(fake_storep
+ ((lba
+ i
) * scsi_debug_sector_size
),
2172 fake_storep
+ (lba
* scsi_debug_sector_size
),
2173 scsi_debug_sector_size
);
2175 if (scsi_debug_lbp())
2176 map_region(lba
, num
);
2178 write_unlock_irqrestore(&atomic_rw
, iflags
);
2183 struct unmap_block_desc
{
2189 static int resp_unmap(struct scsi_cmnd
* scmd
, struct sdebug_dev_info
* devip
)
2192 struct unmap_block_desc
*desc
;
2193 unsigned int i
, payload_len
, descriptors
;
2195 unsigned long iflags
;
2197 ret
= check_readiness(scmd
, 1, devip
);
2201 payload_len
= get_unaligned_be16(&scmd
->cmnd
[7]);
2202 BUG_ON(scsi_bufflen(scmd
) != payload_len
);
2204 descriptors
= (payload_len
- 8) / 16;
2206 buf
= kmalloc(scsi_bufflen(scmd
), GFP_ATOMIC
);
2208 return check_condition_result
;
2210 scsi_sg_copy_to_buffer(scmd
, buf
, scsi_bufflen(scmd
));
2212 BUG_ON(get_unaligned_be16(&buf
[0]) != payload_len
- 2);
2213 BUG_ON(get_unaligned_be16(&buf
[2]) != descriptors
* 16);
2215 desc
= (void *)&buf
[8];
2217 write_lock_irqsave(&atomic_rw
, iflags
);
2219 for (i
= 0 ; i
< descriptors
; i
++) {
2220 unsigned long long lba
= get_unaligned_be64(&desc
[i
].lba
);
2221 unsigned int num
= get_unaligned_be32(&desc
[i
].blocks
);
2223 ret
= check_device_access_params(devip
, lba
, num
);
2227 unmap_region(lba
, num
);
2233 write_unlock_irqrestore(&atomic_rw
, iflags
);
2239 #define SDEBUG_GET_LBA_STATUS_LEN 32
2241 static int resp_get_lba_status(struct scsi_cmnd
* scmd
,
2242 struct sdebug_dev_info
* devip
)
2244 unsigned long long lba
;
2245 unsigned int alloc_len
, mapped
, num
;
2246 unsigned char arr
[SDEBUG_GET_LBA_STATUS_LEN
];
2249 ret
= check_readiness(scmd
, 1, devip
);
2253 lba
= get_unaligned_be64(&scmd
->cmnd
[2]);
2254 alloc_len
= get_unaligned_be32(&scmd
->cmnd
[10]);
2259 ret
= check_device_access_params(devip
, lba
, 1);
2263 mapped
= map_state(lba
, &num
);
2265 memset(arr
, 0, SDEBUG_GET_LBA_STATUS_LEN
);
2266 put_unaligned_be32(20, &arr
[0]); /* Parameter Data Length */
2267 put_unaligned_be64(lba
, &arr
[8]); /* LBA */
2268 put_unaligned_be32(num
, &arr
[16]); /* Number of blocks */
2269 arr
[20] = !mapped
; /* mapped = 0, unmapped = 1 */
2271 return fill_from_dev_buffer(scmd
, arr
, SDEBUG_GET_LBA_STATUS_LEN
);
2274 #define SDEBUG_RLUN_ARR_SZ 256
2276 static int resp_report_luns(struct scsi_cmnd
* scp
,
2277 struct sdebug_dev_info
* devip
)
2279 unsigned int alloc_len
;
2280 int lun_cnt
, i
, upper
, num
, n
, wlun
, lun
;
2281 unsigned char *cmd
= (unsigned char *)scp
->cmnd
;
2282 int select_report
= (int)cmd
[2];
2283 struct scsi_lun
*one_lun
;
2284 unsigned char arr
[SDEBUG_RLUN_ARR_SZ
];
2285 unsigned char * max_addr
;
2287 alloc_len
= cmd
[9] + (cmd
[8] << 8) + (cmd
[7] << 16) + (cmd
[6] << 24);
2288 if ((alloc_len
< 4) || (select_report
> 2)) {
2289 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_FIELD_IN_CDB
,
2291 return check_condition_result
;
2293 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2294 memset(arr
, 0, SDEBUG_RLUN_ARR_SZ
);
2295 lun_cnt
= scsi_debug_max_luns
;
2296 if (1 == select_report
)
2298 else if (scsi_debug_no_lun_0
&& (lun_cnt
> 0))
2300 wlun
= (select_report
> 0) ? 1 : 0;
2301 num
= lun_cnt
+ wlun
;
2302 arr
[2] = ((sizeof(struct scsi_lun
) * num
) >> 8) & 0xff;
2303 arr
[3] = (sizeof(struct scsi_lun
) * num
) & 0xff;
2304 n
= min((int)((SDEBUG_RLUN_ARR_SZ
- 8) /
2305 sizeof(struct scsi_lun
)), num
);
2310 one_lun
= (struct scsi_lun
*) &arr
[8];
2311 max_addr
= arr
+ SDEBUG_RLUN_ARR_SZ
;
2312 for (i
= 0, lun
= (scsi_debug_no_lun_0
? 1 : 0);
2313 ((i
< lun_cnt
) && ((unsigned char *)(one_lun
+ i
) < max_addr
));
2315 upper
= (lun
>> 8) & 0x3f;
2317 one_lun
[i
].scsi_lun
[0] =
2318 (upper
| (SAM2_LUN_ADDRESS_METHOD
<< 6));
2319 one_lun
[i
].scsi_lun
[1] = lun
& 0xff;
2322 one_lun
[i
].scsi_lun
[0] = (SAM2_WLUN_REPORT_LUNS
>> 8) & 0xff;
2323 one_lun
[i
].scsi_lun
[1] = SAM2_WLUN_REPORT_LUNS
& 0xff;
2326 alloc_len
= (unsigned char *)(one_lun
+ i
) - arr
;
2327 return fill_from_dev_buffer(scp
, arr
,
2328 min((int)alloc_len
, SDEBUG_RLUN_ARR_SZ
));
2331 static int resp_xdwriteread(struct scsi_cmnd
*scp
, unsigned long long lba
,
2332 unsigned int num
, struct sdebug_dev_info
*devip
)
2335 unsigned char *kaddr
, *buf
;
2336 unsigned int offset
;
2337 struct scsi_data_buffer
*sdb
= scsi_in(scp
);
2338 struct sg_mapping_iter miter
;
2340 /* better not to use temporary buffer. */
2341 buf
= kmalloc(scsi_bufflen(scp
), GFP_ATOMIC
);
2343 mk_sense_buffer(devip
, NOT_READY
,
2344 LOGICAL_UNIT_COMMUNICATION_FAILURE
, 0);
2345 return check_condition_result
;
2348 scsi_sg_copy_to_buffer(scp
, buf
, scsi_bufflen(scp
));
2351 sg_miter_start(&miter
, sdb
->table
.sgl
, sdb
->table
.nents
,
2352 SG_MITER_ATOMIC
| SG_MITER_TO_SG
);
2354 while (sg_miter_next(&miter
)) {
2356 for (j
= 0; j
< miter
.length
; j
++)
2357 *(kaddr
+ j
) ^= *(buf
+ offset
+ j
);
2359 offset
+= miter
.length
;
2361 sg_miter_stop(&miter
);
2367 /* When timer goes off this function is called. */
2368 static void timer_intr_handler(unsigned long indx
)
2370 struct sdebug_queued_cmd
* sqcp
;
2371 unsigned long iflags
;
2373 if (indx
>= scsi_debug_max_queue
) {
2374 printk(KERN_ERR
"scsi_debug:timer_intr_handler: indx too "
2378 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2379 sqcp
= &queued_arr
[(int)indx
];
2380 if (! sqcp
->in_use
) {
2381 printk(KERN_ERR
"scsi_debug:timer_intr_handler: Unexpected "
2383 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2387 if (sqcp
->done_funct
) {
2388 sqcp
->a_cmnd
->result
= sqcp
->scsi_result
;
2389 sqcp
->done_funct(sqcp
->a_cmnd
); /* callback to mid level */
2391 sqcp
->done_funct
= NULL
;
2392 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2396 static struct sdebug_dev_info
*
2397 sdebug_device_create(struct sdebug_host_info
*sdbg_host
, gfp_t flags
)
2399 struct sdebug_dev_info
*devip
;
2401 devip
= kzalloc(sizeof(*devip
), flags
);
2403 devip
->sdbg_host
= sdbg_host
;
2404 list_add_tail(&devip
->dev_list
, &sdbg_host
->dev_info_list
);
2409 static struct sdebug_dev_info
* devInfoReg(struct scsi_device
* sdev
)
2411 struct sdebug_host_info
* sdbg_host
;
2412 struct sdebug_dev_info
* open_devip
= NULL
;
2413 struct sdebug_dev_info
* devip
=
2414 (struct sdebug_dev_info
*)sdev
->hostdata
;
2418 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(sdev
->host
);
2420 printk(KERN_ERR
"Host info NULL\n");
2423 list_for_each_entry(devip
, &sdbg_host
->dev_info_list
, dev_list
) {
2424 if ((devip
->used
) && (devip
->channel
== sdev
->channel
) &&
2425 (devip
->target
== sdev
->id
) &&
2426 (devip
->lun
== sdev
->lun
))
2429 if ((!devip
->used
) && (!open_devip
))
2433 if (!open_devip
) { /* try and make a new one */
2434 open_devip
= sdebug_device_create(sdbg_host
, GFP_ATOMIC
);
2436 printk(KERN_ERR
"%s: out of memory at line %d\n",
2437 __func__
, __LINE__
);
2442 open_devip
->channel
= sdev
->channel
;
2443 open_devip
->target
= sdev
->id
;
2444 open_devip
->lun
= sdev
->lun
;
2445 open_devip
->sdbg_host
= sdbg_host
;
2446 open_devip
->reset
= 1;
2447 open_devip
->used
= 1;
2448 memset(open_devip
->sense_buff
, 0, SDEBUG_SENSE_LEN
);
2449 if (scsi_debug_dsense
)
2450 open_devip
->sense_buff
[0] = 0x72;
2452 open_devip
->sense_buff
[0] = 0x70;
2453 open_devip
->sense_buff
[7] = 0xa;
2455 if (sdev
->lun
== SAM2_WLUN_REPORT_LUNS
)
2456 open_devip
->wlun
= SAM2_WLUN_REPORT_LUNS
& 0xff;
2461 static int scsi_debug_slave_alloc(struct scsi_device
*sdp
)
2463 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2464 printk(KERN_INFO
"scsi_debug: slave_alloc <%u %u %u %u>\n",
2465 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
2466 queue_flag_set_unlocked(QUEUE_FLAG_BIDI
, sdp
->request_queue
);
2470 static int scsi_debug_slave_configure(struct scsi_device
*sdp
)
2472 struct sdebug_dev_info
*devip
;
2474 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2475 printk(KERN_INFO
"scsi_debug: slave_configure <%u %u %u %u>\n",
2476 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
2477 if (sdp
->host
->max_cmd_len
!= SCSI_DEBUG_MAX_CMD_LEN
)
2478 sdp
->host
->max_cmd_len
= SCSI_DEBUG_MAX_CMD_LEN
;
2479 devip
= devInfoReg(sdp
);
2481 return 1; /* no resources, will be marked offline */
2482 sdp
->hostdata
= devip
;
2483 if (sdp
->host
->cmd_per_lun
)
2484 scsi_adjust_queue_depth(sdp
, SDEBUG_TAGGED_QUEUING
,
2485 sdp
->host
->cmd_per_lun
);
2486 blk_queue_max_segment_size(sdp
->request_queue
, 256 * 1024);
2487 if (scsi_debug_no_uld
)
2488 sdp
->no_uld_attach
= 1;
2492 static void scsi_debug_slave_destroy(struct scsi_device
*sdp
)
2494 struct sdebug_dev_info
*devip
=
2495 (struct sdebug_dev_info
*)sdp
->hostdata
;
2497 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2498 printk(KERN_INFO
"scsi_debug: slave_destroy <%u %u %u %u>\n",
2499 sdp
->host
->host_no
, sdp
->channel
, sdp
->id
, sdp
->lun
);
2501 /* make this slot available for re-use */
2503 sdp
->hostdata
= NULL
;
2507 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
2508 static int stop_queued_cmnd(struct scsi_cmnd
*cmnd
)
2510 unsigned long iflags
;
2512 struct sdebug_queued_cmd
*sqcp
;
2514 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2515 for (k
= 0; k
< scsi_debug_max_queue
; ++k
) {
2516 sqcp
= &queued_arr
[k
];
2517 if (sqcp
->in_use
&& (cmnd
== sqcp
->a_cmnd
)) {
2518 del_timer_sync(&sqcp
->cmnd_timer
);
2520 sqcp
->a_cmnd
= NULL
;
2524 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2525 return (k
< scsi_debug_max_queue
) ? 1 : 0;
2528 /* Deletes (stops) timers of all queued commands */
2529 static void stop_all_queued(void)
2531 unsigned long iflags
;
2533 struct sdebug_queued_cmd
*sqcp
;
2535 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2536 for (k
= 0; k
< scsi_debug_max_queue
; ++k
) {
2537 sqcp
= &queued_arr
[k
];
2538 if (sqcp
->in_use
&& sqcp
->a_cmnd
) {
2539 del_timer_sync(&sqcp
->cmnd_timer
);
2541 sqcp
->a_cmnd
= NULL
;
2544 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2547 static int scsi_debug_abort(struct scsi_cmnd
* SCpnt
)
2549 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2550 printk(KERN_INFO
"scsi_debug: abort\n");
2552 stop_queued_cmnd(SCpnt
);
2556 static int scsi_debug_biosparam(struct scsi_device
*sdev
,
2557 struct block_device
* bdev
, sector_t capacity
, int *info
)
2562 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2563 printk(KERN_INFO
"scsi_debug: biosparam\n");
2564 buf
= scsi_bios_ptable(bdev
);
2566 res
= scsi_partsize(buf
, capacity
,
2567 &info
[2], &info
[0], &info
[1]);
2572 info
[0] = sdebug_heads
;
2573 info
[1] = sdebug_sectors_per
;
2574 info
[2] = sdebug_cylinders_per
;
2578 static int scsi_debug_device_reset(struct scsi_cmnd
* SCpnt
)
2580 struct sdebug_dev_info
* devip
;
2582 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2583 printk(KERN_INFO
"scsi_debug: device_reset\n");
2586 devip
= devInfoReg(SCpnt
->device
);
2593 static int scsi_debug_bus_reset(struct scsi_cmnd
* SCpnt
)
2595 struct sdebug_host_info
*sdbg_host
;
2596 struct sdebug_dev_info
* dev_info
;
2597 struct scsi_device
* sdp
;
2598 struct Scsi_Host
* hp
;
2600 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2601 printk(KERN_INFO
"scsi_debug: bus_reset\n");
2603 if (SCpnt
&& ((sdp
= SCpnt
->device
)) && ((hp
= sdp
->host
))) {
2604 sdbg_host
= *(struct sdebug_host_info
**)shost_priv(hp
);
2606 list_for_each_entry(dev_info
,
2607 &sdbg_host
->dev_info_list
,
2609 dev_info
->reset
= 1;
2615 static int scsi_debug_host_reset(struct scsi_cmnd
* SCpnt
)
2617 struct sdebug_host_info
* sdbg_host
;
2618 struct sdebug_dev_info
* dev_info
;
2620 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
2621 printk(KERN_INFO
"scsi_debug: host_reset\n");
2623 spin_lock(&sdebug_host_list_lock
);
2624 list_for_each_entry(sdbg_host
, &sdebug_host_list
, host_list
) {
2625 list_for_each_entry(dev_info
, &sdbg_host
->dev_info_list
,
2627 dev_info
->reset
= 1;
2629 spin_unlock(&sdebug_host_list_lock
);
2634 /* Initializes timers in queued array */
2635 static void __init
init_all_queued(void)
2637 unsigned long iflags
;
2639 struct sdebug_queued_cmd
* sqcp
;
2641 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2642 for (k
= 0; k
< scsi_debug_max_queue
; ++k
) {
2643 sqcp
= &queued_arr
[k
];
2644 init_timer(&sqcp
->cmnd_timer
);
2646 sqcp
->a_cmnd
= NULL
;
2648 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2651 static void __init
sdebug_build_parts(unsigned char *ramp
,
2652 unsigned long store_size
)
2654 struct partition
* pp
;
2655 int starts
[SDEBUG_MAX_PARTS
+ 2];
2656 int sectors_per_part
, num_sectors
, k
;
2657 int heads_by_sects
, start_sec
, end_sec
;
2659 /* assume partition table already zeroed */
2660 if ((scsi_debug_num_parts
< 1) || (store_size
< 1048576))
2662 if (scsi_debug_num_parts
> SDEBUG_MAX_PARTS
) {
2663 scsi_debug_num_parts
= SDEBUG_MAX_PARTS
;
2664 printk(KERN_WARNING
"scsi_debug:build_parts: reducing "
2665 "partitions to %d\n", SDEBUG_MAX_PARTS
);
2667 num_sectors
= (int)sdebug_store_sectors
;
2668 sectors_per_part
= (num_sectors
- sdebug_sectors_per
)
2669 / scsi_debug_num_parts
;
2670 heads_by_sects
= sdebug_heads
* sdebug_sectors_per
;
2671 starts
[0] = sdebug_sectors_per
;
2672 for (k
= 1; k
< scsi_debug_num_parts
; ++k
)
2673 starts
[k
] = ((k
* sectors_per_part
) / heads_by_sects
)
2675 starts
[scsi_debug_num_parts
] = num_sectors
;
2676 starts
[scsi_debug_num_parts
+ 1] = 0;
2678 ramp
[510] = 0x55; /* magic partition markings */
2680 pp
= (struct partition
*)(ramp
+ 0x1be);
2681 for (k
= 0; starts
[k
+ 1]; ++k
, ++pp
) {
2682 start_sec
= starts
[k
];
2683 end_sec
= starts
[k
+ 1] - 1;
2686 pp
->cyl
= start_sec
/ heads_by_sects
;
2687 pp
->head
= (start_sec
- (pp
->cyl
* heads_by_sects
))
2688 / sdebug_sectors_per
;
2689 pp
->sector
= (start_sec
% sdebug_sectors_per
) + 1;
2691 pp
->end_cyl
= end_sec
/ heads_by_sects
;
2692 pp
->end_head
= (end_sec
- (pp
->end_cyl
* heads_by_sects
))
2693 / sdebug_sectors_per
;
2694 pp
->end_sector
= (end_sec
% sdebug_sectors_per
) + 1;
2696 pp
->start_sect
= cpu_to_le32(start_sec
);
2697 pp
->nr_sects
= cpu_to_le32(end_sec
- start_sec
+ 1);
2698 pp
->sys_ind
= 0x83; /* plain Linux partition */
2702 static int schedule_resp(struct scsi_cmnd
* cmnd
,
2703 struct sdebug_dev_info
* devip
,
2704 done_funct_t done
, int scsi_result
, int delta_jiff
)
2706 if ((SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) && cmnd
) {
2708 struct scsi_device
* sdp
= cmnd
->device
;
2710 printk(KERN_INFO
"scsi_debug: <%u %u %u %u> "
2711 "non-zero result=0x%x\n", sdp
->host
->host_no
,
2712 sdp
->channel
, sdp
->id
, sdp
->lun
, scsi_result
);
2715 if (cmnd
&& devip
) {
2716 /* simulate autosense by this driver */
2717 if (SAM_STAT_CHECK_CONDITION
== (scsi_result
& 0xff))
2718 memcpy(cmnd
->sense_buffer
, devip
->sense_buff
,
2719 (SCSI_SENSE_BUFFERSIZE
> SDEBUG_SENSE_LEN
) ?
2720 SDEBUG_SENSE_LEN
: SCSI_SENSE_BUFFERSIZE
);
2722 if (delta_jiff
<= 0) {
2724 cmnd
->result
= scsi_result
;
2729 unsigned long iflags
;
2731 struct sdebug_queued_cmd
* sqcp
= NULL
;
2733 spin_lock_irqsave(&queued_arr_lock
, iflags
);
2734 for (k
= 0; k
< scsi_debug_max_queue
; ++k
) {
2735 sqcp
= &queued_arr
[k
];
2739 if (k
>= scsi_debug_max_queue
) {
2740 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2741 printk(KERN_WARNING
"scsi_debug: can_queue exceeded\n");
2742 return 1; /* report busy to mid level */
2745 sqcp
->a_cmnd
= cmnd
;
2746 sqcp
->scsi_result
= scsi_result
;
2747 sqcp
->done_funct
= done
;
2748 sqcp
->cmnd_timer
.function
= timer_intr_handler
;
2749 sqcp
->cmnd_timer
.data
= k
;
2750 sqcp
->cmnd_timer
.expires
= jiffies
+ delta_jiff
;
2751 add_timer(&sqcp
->cmnd_timer
);
2752 spin_unlock_irqrestore(&queued_arr_lock
, iflags
);
2758 /* Note: The following macros create attribute files in the
2759 /sys/module/scsi_debug/parameters directory. Unfortunately this
2760 driver is unaware of a change and cannot trigger auxiliary actions
2761 as it can when the corresponding attribute in the
2762 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2764 module_param_named(add_host
, scsi_debug_add_host
, int, S_IRUGO
| S_IWUSR
);
2765 module_param_named(ato
, scsi_debug_ato
, int, S_IRUGO
);
2766 module_param_named(clustering
, scsi_debug_clustering
, bool, S_IRUGO
| S_IWUSR
);
2767 module_param_named(delay
, scsi_debug_delay
, int, S_IRUGO
| S_IWUSR
);
2768 module_param_named(dev_size_mb
, scsi_debug_dev_size_mb
, int, S_IRUGO
);
2769 module_param_named(dif
, scsi_debug_dif
, int, S_IRUGO
);
2770 module_param_named(dix
, scsi_debug_dix
, int, S_IRUGO
);
2771 module_param_named(dsense
, scsi_debug_dsense
, int, S_IRUGO
| S_IWUSR
);
2772 module_param_named(every_nth
, scsi_debug_every_nth
, int, S_IRUGO
| S_IWUSR
);
2773 module_param_named(fake_rw
, scsi_debug_fake_rw
, int, S_IRUGO
| S_IWUSR
);
2774 module_param_named(guard
, scsi_debug_guard
, uint
, S_IRUGO
);
2775 module_param_named(lbpu
, scsi_debug_lbpu
, int, S_IRUGO
);
2776 module_param_named(lbpws
, scsi_debug_lbpws
, int, S_IRUGO
);
2777 module_param_named(lbpws10
, scsi_debug_lbpws10
, int, S_IRUGO
);
2778 module_param_named(lbprz
, scsi_debug_lbprz
, int, S_IRUGO
);
2779 module_param_named(lowest_aligned
, scsi_debug_lowest_aligned
, int, S_IRUGO
);
2780 module_param_named(max_luns
, scsi_debug_max_luns
, int, S_IRUGO
| S_IWUSR
);
2781 module_param_named(max_queue
, scsi_debug_max_queue
, int, S_IRUGO
| S_IWUSR
);
2782 module_param_named(no_lun_0
, scsi_debug_no_lun_0
, int, S_IRUGO
| S_IWUSR
);
2783 module_param_named(no_uld
, scsi_debug_no_uld
, int, S_IRUGO
);
2784 module_param_named(num_parts
, scsi_debug_num_parts
, int, S_IRUGO
);
2785 module_param_named(num_tgts
, scsi_debug_num_tgts
, int, S_IRUGO
| S_IWUSR
);
2786 module_param_named(opt_blks
, scsi_debug_opt_blks
, int, S_IRUGO
);
2787 module_param_named(opts
, scsi_debug_opts
, int, S_IRUGO
| S_IWUSR
);
2788 module_param_named(physblk_exp
, scsi_debug_physblk_exp
, int, S_IRUGO
);
2789 module_param_named(ptype
, scsi_debug_ptype
, int, S_IRUGO
| S_IWUSR
);
2790 module_param_named(removable
, scsi_debug_removable
, bool, S_IRUGO
| S_IWUSR
);
2791 module_param_named(scsi_level
, scsi_debug_scsi_level
, int, S_IRUGO
);
2792 module_param_named(sector_size
, scsi_debug_sector_size
, int, S_IRUGO
);
2793 module_param_named(unmap_alignment
, scsi_debug_unmap_alignment
, int, S_IRUGO
);
2794 module_param_named(unmap_granularity
, scsi_debug_unmap_granularity
, int, S_IRUGO
);
2795 module_param_named(unmap_max_blocks
, scsi_debug_unmap_max_blocks
, int, S_IRUGO
);
2796 module_param_named(unmap_max_desc
, scsi_debug_unmap_max_desc
, int, S_IRUGO
);
2797 module_param_named(virtual_gb
, scsi_debug_virtual_gb
, int, S_IRUGO
| S_IWUSR
);
2798 module_param_named(vpd_use_hostno
, scsi_debug_vpd_use_hostno
, int,
2800 module_param_named(write_same_length
, scsi_debug_write_same_length
, int,
2803 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2804 MODULE_DESCRIPTION("SCSI debug adapter driver");
2805 MODULE_LICENSE("GPL");
2806 MODULE_VERSION(SCSI_DEBUG_VERSION
);
2808 MODULE_PARM_DESC(add_host
, "0..127 hosts allowed(def=1)");
2809 MODULE_PARM_DESC(ato
, "application tag ownership: 0=disk 1=host (def=1)");
2810 MODULE_PARM_DESC(clustering
, "when set enables larger transfers (def=0)");
2811 MODULE_PARM_DESC(delay
, "# of jiffies to delay response(def=1)");
2812 MODULE_PARM_DESC(dev_size_mb
, "size in MB of ram shared by devs(def=8)");
2813 MODULE_PARM_DESC(dif
, "data integrity field type: 0-3 (def=0)");
2814 MODULE_PARM_DESC(dix
, "data integrity extensions mask (def=0)");
2815 MODULE_PARM_DESC(dsense
, "use descriptor sense format(def=0 -> fixed)");
2816 MODULE_PARM_DESC(every_nth
, "timeout every nth command(def=0)");
2817 MODULE_PARM_DESC(fake_rw
, "fake reads/writes instead of copying (def=0)");
2818 MODULE_PARM_DESC(guard
, "protection checksum: 0=crc, 1=ip (def=0)");
2819 MODULE_PARM_DESC(lbpu
, "enable LBP, support UNMAP command (def=0)");
2820 MODULE_PARM_DESC(lbpws
, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2821 MODULE_PARM_DESC(lbpws10
, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2822 MODULE_PARM_DESC(lbprz
, "unmapped blocks return 0 on read (def=1)");
2823 MODULE_PARM_DESC(lowest_aligned
, "lowest aligned lba (def=0)");
2824 MODULE_PARM_DESC(max_luns
, "number of LUNs per target to simulate(def=1)");
2825 MODULE_PARM_DESC(max_queue
, "max number of queued commands (1 to 255(def))");
2826 MODULE_PARM_DESC(no_lun_0
, "no LU number 0 (def=0 -> have lun 0)");
2827 MODULE_PARM_DESC(no_uld
, "stop ULD (e.g. sd driver) attaching (def=0))");
2828 MODULE_PARM_DESC(num_parts
, "number of partitions(def=0)");
2829 MODULE_PARM_DESC(num_tgts
, "number of targets per host to simulate(def=1)");
2830 MODULE_PARM_DESC(opt_blks
, "optimal transfer length in block (def=64)");
2831 MODULE_PARM_DESC(opts
, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2832 MODULE_PARM_DESC(physblk_exp
, "physical block exponent (def=0)");
2833 MODULE_PARM_DESC(ptype
, "SCSI peripheral type(def=0[disk])");
2834 MODULE_PARM_DESC(removable
, "claim to have removable media (def=0)");
2835 MODULE_PARM_DESC(scsi_level
, "SCSI level to simulate(def=5[SPC-3])");
2836 MODULE_PARM_DESC(sector_size
, "logical block size in bytes (def=512)");
2837 MODULE_PARM_DESC(unmap_alignment
, "lowest aligned thin provisioning lba (def=0)");
2838 MODULE_PARM_DESC(unmap_granularity
, "thin provisioning granularity in blocks (def=1)");
2839 MODULE_PARM_DESC(unmap_max_blocks
, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2840 MODULE_PARM_DESC(unmap_max_desc
, "max # of ranges that can be unmapped in one cmd (def=256)");
2841 MODULE_PARM_DESC(virtual_gb
, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2842 MODULE_PARM_DESC(vpd_use_hostno
, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2843 MODULE_PARM_DESC(write_same_length
, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2845 static char sdebug_info
[256];
2847 static const char * scsi_debug_info(struct Scsi_Host
* shp
)
2849 sprintf(sdebug_info
, "scsi_debug, version %s [%s], "
2850 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION
,
2851 scsi_debug_version_date
, scsi_debug_dev_size_mb
,
2856 /* scsi_debug_proc_info
2857 * Used if the driver currently has no own support for /proc/scsi
2859 static int scsi_debug_write_info(struct Scsi_Host
*host
, char *buffer
, int length
)
2863 int minLen
= length
> 15 ? 15 : length
;
2865 if (!capable(CAP_SYS_ADMIN
) || !capable(CAP_SYS_RAWIO
))
2867 memcpy(arr
, buffer
, minLen
);
2869 if (1 != sscanf(arr
, "%d", &opts
))
2871 scsi_debug_opts
= opts
;
2872 if (scsi_debug_every_nth
!= 0)
2873 scsi_debug_cmnd_count
= 0;
2877 static int scsi_debug_show_info(struct seq_file
*m
, struct Scsi_Host
*host
)
2879 seq_printf(m
, "scsi_debug adapter driver, version "
2881 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2882 "every_nth=%d(curr:%d)\n"
2883 "delay=%d, max_luns=%d, scsi_level=%d\n"
2884 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2885 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2886 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2887 SCSI_DEBUG_VERSION
, scsi_debug_version_date
, scsi_debug_num_tgts
,
2888 scsi_debug_dev_size_mb
, scsi_debug_opts
, scsi_debug_every_nth
,
2889 scsi_debug_cmnd_count
, scsi_debug_delay
,
2890 scsi_debug_max_luns
, scsi_debug_scsi_level
,
2891 scsi_debug_sector_size
, sdebug_cylinders_per
, sdebug_heads
,
2892 sdebug_sectors_per
, num_aborts
, num_dev_resets
, num_bus_resets
,
2893 num_host_resets
, dix_reads
, dix_writes
, dif_errors
);
2897 static ssize_t
delay_show(struct device_driver
*ddp
, char *buf
)
2899 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_delay
);
2902 static ssize_t
delay_store(struct device_driver
*ddp
, const char *buf
,
2908 if (1 == sscanf(buf
, "%10s", work
)) {
2909 if ((1 == sscanf(work
, "%d", &delay
)) && (delay
>= 0)) {
2910 scsi_debug_delay
= delay
;
2916 static DRIVER_ATTR_RW(delay
);
2918 static ssize_t
opts_show(struct device_driver
*ddp
, char *buf
)
2920 return scnprintf(buf
, PAGE_SIZE
, "0x%x\n", scsi_debug_opts
);
2923 static ssize_t
opts_store(struct device_driver
*ddp
, const char *buf
,
2929 if (1 == sscanf(buf
, "%10s", work
)) {
2930 if (0 == strnicmp(work
,"0x", 2)) {
2931 if (1 == sscanf(&work
[2], "%x", &opts
))
2934 if (1 == sscanf(work
, "%d", &opts
))
2940 scsi_debug_opts
= opts
;
2941 scsi_debug_cmnd_count
= 0;
2944 static DRIVER_ATTR_RW(opts
);
2946 static ssize_t
ptype_show(struct device_driver
*ddp
, char *buf
)
2948 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_ptype
);
2950 static ssize_t
ptype_store(struct device_driver
*ddp
, const char *buf
,
2955 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
2956 scsi_debug_ptype
= n
;
2961 static DRIVER_ATTR_RW(ptype
);
2963 static ssize_t
dsense_show(struct device_driver
*ddp
, char *buf
)
2965 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dsense
);
2967 static ssize_t
dsense_store(struct device_driver
*ddp
, const char *buf
,
2972 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
2973 scsi_debug_dsense
= n
;
2978 static DRIVER_ATTR_RW(dsense
);
2980 static ssize_t
fake_rw_show(struct device_driver
*ddp
, char *buf
)
2982 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_fake_rw
);
2984 static ssize_t
fake_rw_store(struct device_driver
*ddp
, const char *buf
,
2989 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
2990 scsi_debug_fake_rw
= n
;
2995 static DRIVER_ATTR_RW(fake_rw
);
2997 static ssize_t
no_lun_0_show(struct device_driver
*ddp
, char *buf
)
2999 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_no_lun_0
);
3001 static ssize_t
no_lun_0_store(struct device_driver
*ddp
, const char *buf
,
3006 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3007 scsi_debug_no_lun_0
= n
;
3012 static DRIVER_ATTR_RW(no_lun_0
);
3014 static ssize_t
num_tgts_show(struct device_driver
*ddp
, char *buf
)
3016 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_num_tgts
);
3018 static ssize_t
num_tgts_store(struct device_driver
*ddp
, const char *buf
,
3023 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3024 scsi_debug_num_tgts
= n
;
3025 sdebug_max_tgts_luns();
3030 static DRIVER_ATTR_RW(num_tgts
);
3032 static ssize_t
dev_size_mb_show(struct device_driver
*ddp
, char *buf
)
3034 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dev_size_mb
);
3036 static DRIVER_ATTR_RO(dev_size_mb
);
3038 static ssize_t
num_parts_show(struct device_driver
*ddp
, char *buf
)
3040 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_num_parts
);
3042 static DRIVER_ATTR_RO(num_parts
);
3044 static ssize_t
every_nth_show(struct device_driver
*ddp
, char *buf
)
3046 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_every_nth
);
3048 static ssize_t
every_nth_store(struct device_driver
*ddp
, const char *buf
,
3053 if ((count
> 0) && (1 == sscanf(buf
, "%d", &nth
))) {
3054 scsi_debug_every_nth
= nth
;
3055 scsi_debug_cmnd_count
= 0;
3060 static DRIVER_ATTR_RW(every_nth
);
3062 static ssize_t
max_luns_show(struct device_driver
*ddp
, char *buf
)
3064 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_max_luns
);
3066 static ssize_t
max_luns_store(struct device_driver
*ddp
, const char *buf
,
3071 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3072 scsi_debug_max_luns
= n
;
3073 sdebug_max_tgts_luns();
3078 static DRIVER_ATTR_RW(max_luns
);
3080 static ssize_t
max_queue_show(struct device_driver
*ddp
, char *buf
)
3082 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_max_queue
);
3084 static ssize_t
max_queue_store(struct device_driver
*ddp
, const char *buf
,
3089 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
> 0) &&
3090 (n
<= SCSI_DEBUG_CANQUEUE
)) {
3091 scsi_debug_max_queue
= n
;
3096 static DRIVER_ATTR_RW(max_queue
);
3098 static ssize_t
no_uld_show(struct device_driver
*ddp
, char *buf
)
3100 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_no_uld
);
3102 static DRIVER_ATTR_RO(no_uld
);
3104 static ssize_t
scsi_level_show(struct device_driver
*ddp
, char *buf
)
3106 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_scsi_level
);
3108 static DRIVER_ATTR_RO(scsi_level
);
3110 static ssize_t
virtual_gb_show(struct device_driver
*ddp
, char *buf
)
3112 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_virtual_gb
);
3114 static ssize_t
virtual_gb_store(struct device_driver
*ddp
, const char *buf
,
3119 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3120 scsi_debug_virtual_gb
= n
;
3122 sdebug_capacity
= get_sdebug_capacity();
3128 static DRIVER_ATTR_RW(virtual_gb
);
3130 static ssize_t
add_host_show(struct device_driver
*ddp
, char *buf
)
3132 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_add_host
);
3135 static ssize_t
add_host_store(struct device_driver
*ddp
, const char *buf
,
3140 if (sscanf(buf
, "%d", &delta_hosts
) != 1)
3142 if (delta_hosts
> 0) {
3144 sdebug_add_adapter();
3145 } while (--delta_hosts
);
3146 } else if (delta_hosts
< 0) {
3148 sdebug_remove_adapter();
3149 } while (++delta_hosts
);
3153 static DRIVER_ATTR_RW(add_host
);
3155 static ssize_t
vpd_use_hostno_show(struct device_driver
*ddp
, char *buf
)
3157 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_vpd_use_hostno
);
3159 static ssize_t
vpd_use_hostno_store(struct device_driver
*ddp
, const char *buf
,
3164 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3165 scsi_debug_vpd_use_hostno
= n
;
3170 static DRIVER_ATTR_RW(vpd_use_hostno
);
3172 static ssize_t
sector_size_show(struct device_driver
*ddp
, char *buf
)
3174 return scnprintf(buf
, PAGE_SIZE
, "%u\n", scsi_debug_sector_size
);
3176 static DRIVER_ATTR_RO(sector_size
);
3178 static ssize_t
dix_show(struct device_driver
*ddp
, char *buf
)
3180 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dix
);
3182 static DRIVER_ATTR_RO(dix
);
3184 static ssize_t
dif_show(struct device_driver
*ddp
, char *buf
)
3186 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_dif
);
3188 static DRIVER_ATTR_RO(dif
);
3190 static ssize_t
guard_show(struct device_driver
*ddp
, char *buf
)
3192 return scnprintf(buf
, PAGE_SIZE
, "%u\n", scsi_debug_guard
);
3194 static DRIVER_ATTR_RO(guard
);
3196 static ssize_t
ato_show(struct device_driver
*ddp
, char *buf
)
3198 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_ato
);
3200 static DRIVER_ATTR_RO(ato
);
3202 static ssize_t
map_show(struct device_driver
*ddp
, char *buf
)
3206 if (!scsi_debug_lbp())
3207 return scnprintf(buf
, PAGE_SIZE
, "0-%u\n",
3208 sdebug_store_sectors
);
3210 count
= bitmap_scnlistprintf(buf
, PAGE_SIZE
, map_storep
, map_size
);
3212 buf
[count
++] = '\n';
3217 static DRIVER_ATTR_RO(map
);
3219 static ssize_t
removable_show(struct device_driver
*ddp
, char *buf
)
3221 return scnprintf(buf
, PAGE_SIZE
, "%d\n", scsi_debug_removable
? 1 : 0);
3223 static ssize_t
removable_store(struct device_driver
*ddp
, const char *buf
,
3228 if ((count
> 0) && (1 == sscanf(buf
, "%d", &n
)) && (n
>= 0)) {
3229 scsi_debug_removable
= (n
> 0);
3234 static DRIVER_ATTR_RW(removable
);
3236 /* Note: The following array creates attribute files in the
3237 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3238 files (over those found in the /sys/module/scsi_debug/parameters
3239 directory) is that auxiliary actions can be triggered when an attribute
3240 is changed. For example see: sdebug_add_host_store() above.
3243 static struct attribute
*sdebug_drv_attrs
[] = {
3244 &driver_attr_delay
.attr
,
3245 &driver_attr_opts
.attr
,
3246 &driver_attr_ptype
.attr
,
3247 &driver_attr_dsense
.attr
,
3248 &driver_attr_fake_rw
.attr
,
3249 &driver_attr_no_lun_0
.attr
,
3250 &driver_attr_num_tgts
.attr
,
3251 &driver_attr_dev_size_mb
.attr
,
3252 &driver_attr_num_parts
.attr
,
3253 &driver_attr_every_nth
.attr
,
3254 &driver_attr_max_luns
.attr
,
3255 &driver_attr_max_queue
.attr
,
3256 &driver_attr_no_uld
.attr
,
3257 &driver_attr_scsi_level
.attr
,
3258 &driver_attr_virtual_gb
.attr
,
3259 &driver_attr_add_host
.attr
,
3260 &driver_attr_vpd_use_hostno
.attr
,
3261 &driver_attr_sector_size
.attr
,
3262 &driver_attr_dix
.attr
,
3263 &driver_attr_dif
.attr
,
3264 &driver_attr_guard
.attr
,
3265 &driver_attr_ato
.attr
,
3266 &driver_attr_map
.attr
,
3267 &driver_attr_removable
.attr
,
3270 ATTRIBUTE_GROUPS(sdebug_drv
);
/* Root device that all simulated adapters hang off in sysfs. */
static struct device *pseudo_primary;
3274 static int __init
scsi_debug_init(void)
3281 switch (scsi_debug_sector_size
) {
3288 printk(KERN_ERR
"scsi_debug_init: invalid sector_size %d\n",
3289 scsi_debug_sector_size
);
3293 switch (scsi_debug_dif
) {
3295 case SD_DIF_TYPE0_PROTECTION
:
3296 case SD_DIF_TYPE1_PROTECTION
:
3297 case SD_DIF_TYPE2_PROTECTION
:
3298 case SD_DIF_TYPE3_PROTECTION
:
3302 printk(KERN_ERR
"scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3306 if (scsi_debug_guard
> 1) {
3307 printk(KERN_ERR
"scsi_debug_init: guard must be 0 or 1\n");
3311 if (scsi_debug_ato
> 1) {
3312 printk(KERN_ERR
"scsi_debug_init: ato must be 0 or 1\n");
3316 if (scsi_debug_physblk_exp
> 15) {
3317 printk(KERN_ERR
"scsi_debug_init: invalid physblk_exp %u\n",
3318 scsi_debug_physblk_exp
);
3322 if (scsi_debug_lowest_aligned
> 0x3fff) {
3323 printk(KERN_ERR
"scsi_debug_init: lowest_aligned too big: %u\n",
3324 scsi_debug_lowest_aligned
);
3328 if (scsi_debug_dev_size_mb
< 1)
3329 scsi_debug_dev_size_mb
= 1; /* force minimum 1 MB ramdisk */
3330 sz
= (unsigned long)scsi_debug_dev_size_mb
* 1048576;
3331 sdebug_store_sectors
= sz
/ scsi_debug_sector_size
;
3332 sdebug_capacity
= get_sdebug_capacity();
3334 /* play around with geometry, don't waste too much on track 0 */
3336 sdebug_sectors_per
= 32;
3337 if (scsi_debug_dev_size_mb
>= 16)
3339 else if (scsi_debug_dev_size_mb
>= 256)
3341 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
3342 (sdebug_sectors_per
* sdebug_heads
);
3343 if (sdebug_cylinders_per
>= 1024) {
3344 /* other LLDs do this; implies >= 1GB ram disk ... */
3346 sdebug_sectors_per
= 63;
3347 sdebug_cylinders_per
= (unsigned long)sdebug_capacity
/
3348 (sdebug_sectors_per
* sdebug_heads
);
3351 fake_storep
= vmalloc(sz
);
3352 if (NULL
== fake_storep
) {
3353 printk(KERN_ERR
"scsi_debug_init: out of memory, 1\n");
3356 memset(fake_storep
, 0, sz
);
3357 if (scsi_debug_num_parts
> 0)
3358 sdebug_build_parts(fake_storep
, sz
);
3360 if (scsi_debug_dix
) {
3363 dif_size
= sdebug_store_sectors
* sizeof(struct sd_dif_tuple
);
3364 dif_storep
= vmalloc(dif_size
);
3366 printk(KERN_ERR
"scsi_debug_init: dif_storep %u bytes @ %p\n",
3367 dif_size
, dif_storep
);
3369 if (dif_storep
== NULL
) {
3370 printk(KERN_ERR
"scsi_debug_init: out of mem. (DIX)\n");
3375 memset(dif_storep
, 0xff, dif_size
);
3378 /* Logical Block Provisioning */
3379 if (scsi_debug_lbp()) {
3380 scsi_debug_unmap_max_blocks
=
3381 clamp(scsi_debug_unmap_max_blocks
, 0U, 0xffffffffU
);
3383 scsi_debug_unmap_max_desc
=
3384 clamp(scsi_debug_unmap_max_desc
, 0U, 256U);
3386 scsi_debug_unmap_granularity
=
3387 clamp(scsi_debug_unmap_granularity
, 1U, 0xffffffffU
);
3389 if (scsi_debug_unmap_alignment
&&
3390 scsi_debug_unmap_granularity
<=
3391 scsi_debug_unmap_alignment
) {
3393 "%s: ERR: unmap_granularity <= unmap_alignment\n",
3398 map_size
= lba_to_map_index(sdebug_store_sectors
- 1) + 1;
3399 map_storep
= vmalloc(BITS_TO_LONGS(map_size
) * sizeof(long));
3401 printk(KERN_INFO
"scsi_debug_init: %lu provisioning blocks\n",
3404 if (map_storep
== NULL
) {
3405 printk(KERN_ERR
"scsi_debug_init: out of mem. (MAP)\n");
3410 bitmap_zero(map_storep
, map_size
);
3412 /* Map first 1KB for partition table */
3413 if (scsi_debug_num_parts
)
3417 pseudo_primary
= root_device_register("pseudo_0");
3418 if (IS_ERR(pseudo_primary
)) {
3419 printk(KERN_WARNING
"scsi_debug: root_device_register() error\n");
3420 ret
= PTR_ERR(pseudo_primary
);
3423 ret
= bus_register(&pseudo_lld_bus
);
3425 printk(KERN_WARNING
"scsi_debug: bus_register error: %d\n",
3429 ret
= driver_register(&sdebug_driverfs_driver
);
3431 printk(KERN_WARNING
"scsi_debug: driver_register error: %d\n",
3438 host_to_add
= scsi_debug_add_host
;
3439 scsi_debug_add_host
= 0;
3441 for (k
= 0; k
< host_to_add
; k
++) {
3442 if (sdebug_add_adapter()) {
3443 printk(KERN_ERR
"scsi_debug_init: "
3444 "sdebug_add_adapter failed k=%d\n", k
);
3449 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) {
3450 printk(KERN_INFO
"scsi_debug_init: built %d host(s)\n",
3451 scsi_debug_add_host
);
3456 bus_unregister(&pseudo_lld_bus
);
3458 root_device_unregister(pseudo_primary
);
3469 static void __exit
scsi_debug_exit(void)
3471 int k
= scsi_debug_add_host
;
3475 sdebug_remove_adapter();
3476 driver_unregister(&sdebug_driverfs_driver
);
3477 bus_unregister(&pseudo_lld_bus
);
3478 root_device_unregister(pseudo_primary
);
/* Register module entry/exit points with the kernel. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
/*
 * Driver-core release callback for a pseudo adapter: frees the
 * sdebug_host_info that embeds the struct device.  Runs when the
 * device's last reference is dropped.
 */
static void sdebug_release_adapter(struct device * dev)
{
	struct sdebug_host_info *sdbg_host;

	sdbg_host = to_sdebug_host(dev);
	kfree(sdbg_host);
}
3497 static int sdebug_add_adapter(void)
3499 int k
, devs_per_host
;
3501 struct sdebug_host_info
*sdbg_host
;
3502 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
3504 sdbg_host
= kzalloc(sizeof(*sdbg_host
),GFP_KERNEL
);
3505 if (NULL
== sdbg_host
) {
3506 printk(KERN_ERR
"%s: out of memory at line %d\n",
3507 __func__
, __LINE__
);
3511 INIT_LIST_HEAD(&sdbg_host
->dev_info_list
);
3513 devs_per_host
= scsi_debug_num_tgts
* scsi_debug_max_luns
;
3514 for (k
= 0; k
< devs_per_host
; k
++) {
3515 sdbg_devinfo
= sdebug_device_create(sdbg_host
, GFP_KERNEL
);
3516 if (!sdbg_devinfo
) {
3517 printk(KERN_ERR
"%s: out of memory at line %d\n",
3518 __func__
, __LINE__
);
3524 spin_lock(&sdebug_host_list_lock
);
3525 list_add_tail(&sdbg_host
->host_list
, &sdebug_host_list
);
3526 spin_unlock(&sdebug_host_list_lock
);
3528 sdbg_host
->dev
.bus
= &pseudo_lld_bus
;
3529 sdbg_host
->dev
.parent
= pseudo_primary
;
3530 sdbg_host
->dev
.release
= &sdebug_release_adapter
;
3531 dev_set_name(&sdbg_host
->dev
, "adapter%d", scsi_debug_add_host
);
3533 error
= device_register(&sdbg_host
->dev
);
3538 ++scsi_debug_add_host
;
3542 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
3544 list_del(&sdbg_devinfo
->dev_list
);
3545 kfree(sdbg_devinfo
);
3552 static void sdebug_remove_adapter(void)
3554 struct sdebug_host_info
* sdbg_host
= NULL
;
3556 spin_lock(&sdebug_host_list_lock
);
3557 if (!list_empty(&sdebug_host_list
)) {
3558 sdbg_host
= list_entry(sdebug_host_list
.prev
,
3559 struct sdebug_host_info
, host_list
);
3560 list_del(&sdbg_host
->host_list
);
3562 spin_unlock(&sdebug_host_list_lock
);
3567 device_unregister(&sdbg_host
->dev
);
3568 --scsi_debug_add_host
;
3572 int scsi_debug_queuecommand_lck(struct scsi_cmnd
*SCpnt
, done_funct_t done
)
3574 unsigned char *cmd
= (unsigned char *) SCpnt
->cmnd
;
3577 unsigned long long lba
;
3580 int target
= SCpnt
->device
->id
;
3581 struct sdebug_dev_info
*devip
= NULL
;
3582 int inj_recovered
= 0;
3583 int inj_transport
= 0;
3586 int delay_override
= 0;
3589 scsi_set_resid(SCpnt
, 0);
3590 if ((SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
) && cmd
) {
3591 printk(KERN_INFO
"scsi_debug: cmd ");
3592 for (k
= 0, len
= SCpnt
->cmd_len
; k
< len
; ++k
)
3593 printk("%02x ", (int)cmd
[k
]);
3597 if (target
== SCpnt
->device
->host
->hostt
->this_id
) {
3598 printk(KERN_INFO
"scsi_debug: initiator's id used as "
3600 return schedule_resp(SCpnt
, NULL
, done
,
3601 DID_NO_CONNECT
<< 16, 0);
3604 if ((SCpnt
->device
->lun
>= scsi_debug_max_luns
) &&
3605 (SCpnt
->device
->lun
!= SAM2_WLUN_REPORT_LUNS
))
3606 return schedule_resp(SCpnt
, NULL
, done
,
3607 DID_NO_CONNECT
<< 16, 0);
3608 devip
= devInfoReg(SCpnt
->device
);
3610 return schedule_resp(SCpnt
, NULL
, done
,
3611 DID_NO_CONNECT
<< 16, 0);
3613 if ((scsi_debug_every_nth
!= 0) &&
3614 (++scsi_debug_cmnd_count
>= abs(scsi_debug_every_nth
))) {
3615 scsi_debug_cmnd_count
= 0;
3616 if (scsi_debug_every_nth
< -1)
3617 scsi_debug_every_nth
= -1;
3618 if (SCSI_DEBUG_OPT_TIMEOUT
& scsi_debug_opts
)
3619 return 0; /* ignore command causing timeout */
3620 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT
& scsi_debug_opts
&&
3621 scsi_medium_access_command(SCpnt
))
3622 return 0; /* time out reads and writes */
3623 else if (SCSI_DEBUG_OPT_RECOVERED_ERR
& scsi_debug_opts
)
3624 inj_recovered
= 1; /* to reads and writes below */
3625 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR
& scsi_debug_opts
)
3626 inj_transport
= 1; /* to reads and writes below */
3627 else if (SCSI_DEBUG_OPT_DIF_ERR
& scsi_debug_opts
)
3628 inj_dif
= 1; /* to reads and writes below */
3629 else if (SCSI_DEBUG_OPT_DIX_ERR
& scsi_debug_opts
)
3630 inj_dix
= 1; /* to reads and writes below */
3637 case TEST_UNIT_READY
:
3639 break; /* only allowable wlun commands */
3641 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
3642 printk(KERN_INFO
"scsi_debug: Opcode: 0x%x "
3643 "not supported for wlun\n", *cmd
);
3644 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3646 errsts
= check_condition_result
;
3647 return schedule_resp(SCpnt
, devip
, done
, errsts
,
3653 case INQUIRY
: /* mandatory, ignore unit attention */
3655 errsts
= resp_inquiry(SCpnt
, target
, devip
);
3657 case REQUEST_SENSE
: /* mandatory, ignore unit attention */
3659 errsts
= resp_requests(SCpnt
, devip
);
3661 case REZERO_UNIT
: /* actually this is REWIND for SSC */
3663 errsts
= resp_start_stop(SCpnt
, devip
);
3665 case ALLOW_MEDIUM_REMOVAL
:
3666 errsts
= check_readiness(SCpnt
, 1, devip
);
3669 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
3670 printk(KERN_INFO
"scsi_debug: Medium removal %s\n",
3671 cmd
[4] ? "inhibited" : "enabled");
3673 case SEND_DIAGNOSTIC
: /* mandatory */
3674 errsts
= check_readiness(SCpnt
, 1, devip
);
3676 case TEST_UNIT_READY
: /* mandatory */
3678 errsts
= check_readiness(SCpnt
, 0, devip
);
3681 errsts
= check_readiness(SCpnt
, 1, devip
);
3684 errsts
= check_readiness(SCpnt
, 1, devip
);
3687 errsts
= check_readiness(SCpnt
, 1, devip
);
3690 errsts
= check_readiness(SCpnt
, 1, devip
);
3693 errsts
= resp_readcap(SCpnt
, devip
);
3695 case SERVICE_ACTION_IN
:
3696 if (cmd
[1] == SAI_READ_CAPACITY_16
)
3697 errsts
= resp_readcap16(SCpnt
, devip
);
3698 else if (cmd
[1] == SAI_GET_LBA_STATUS
) {
3700 if (scsi_debug_lbp() == 0) {
3701 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3702 INVALID_COMMAND_OPCODE
, 0);
3703 errsts
= check_condition_result
;
3705 errsts
= resp_get_lba_status(SCpnt
, devip
);
3707 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3709 errsts
= check_condition_result
;
3712 case MAINTENANCE_IN
:
3713 if (MI_REPORT_TARGET_PGS
!= cmd
[1]) {
3714 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3716 errsts
= check_condition_result
;
3719 errsts
= resp_report_tgtpgs(SCpnt
, devip
);
3724 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3725 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
3727 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3728 INVALID_COMMAND_OPCODE
, 0);
3729 errsts
= check_condition_result
;
3733 if ((scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
||
3734 scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
3735 (cmd
[1] & 0xe0) == 0)
3736 printk(KERN_ERR
"Unprotected RD/WR to DIF device\n");
3741 errsts
= check_readiness(SCpnt
, 0, devip
);
3744 if (scsi_debug_fake_rw
)
3746 get_data_transfer_info(cmd
, &lba
, &num
, &ei_lba
);
3747 errsts
= resp_read(SCpnt
, lba
, num
, devip
, ei_lba
);
3748 if (inj_recovered
&& (0 == errsts
)) {
3749 mk_sense_buffer(devip
, RECOVERED_ERROR
,
3750 THRESHOLD_EXCEEDED
, 0);
3751 errsts
= check_condition_result
;
3752 } else if (inj_transport
&& (0 == errsts
)) {
3753 mk_sense_buffer(devip
, ABORTED_COMMAND
,
3754 TRANSPORT_PROBLEM
, ACK_NAK_TO
);
3755 errsts
= check_condition_result
;
3756 } else if (inj_dif
&& (0 == errsts
)) {
3757 mk_sense_buffer(devip
, ABORTED_COMMAND
, 0x10, 1);
3758 errsts
= illegal_condition_result
;
3759 } else if (inj_dix
&& (0 == errsts
)) {
3760 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, 0x10, 1);
3761 errsts
= illegal_condition_result
;
3764 case REPORT_LUNS
: /* mandatory, ignore unit attention */
3766 errsts
= resp_report_luns(SCpnt
, devip
);
3768 case VERIFY
: /* 10 byte SBC-2 command */
3769 errsts
= check_readiness(SCpnt
, 0, devip
);
3774 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3775 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
&&
3777 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3778 INVALID_COMMAND_OPCODE
, 0);
3779 errsts
= check_condition_result
;
3783 if ((scsi_debug_dif
== SD_DIF_TYPE1_PROTECTION
||
3784 scsi_debug_dif
== SD_DIF_TYPE3_PROTECTION
) &&
3785 (cmd
[1] & 0xe0) == 0)
3786 printk(KERN_ERR
"Unprotected RD/WR to DIF device\n");
3791 errsts
= check_readiness(SCpnt
, 0, devip
);
3794 if (scsi_debug_fake_rw
)
3796 get_data_transfer_info(cmd
, &lba
, &num
, &ei_lba
);
3797 errsts
= resp_write(SCpnt
, lba
, num
, devip
, ei_lba
);
3798 if (inj_recovered
&& (0 == errsts
)) {
3799 mk_sense_buffer(devip
, RECOVERED_ERROR
,
3800 THRESHOLD_EXCEEDED
, 0);
3801 errsts
= check_condition_result
;
3802 } else if (inj_dif
&& (0 == errsts
)) {
3803 mk_sense_buffer(devip
, ABORTED_COMMAND
, 0x10, 1);
3804 errsts
= illegal_condition_result
;
3805 } else if (inj_dix
&& (0 == errsts
)) {
3806 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, 0x10, 1);
3807 errsts
= illegal_condition_result
;
3813 if ((*cmd
== WRITE_SAME_16
&& scsi_debug_lbpws
== 0) ||
3814 (*cmd
== WRITE_SAME
&& scsi_debug_lbpws10
== 0)) {
3815 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3816 INVALID_FIELD_IN_CDB
, 0);
3817 errsts
= check_condition_result
;
3823 errsts
= check_readiness(SCpnt
, 0, devip
);
3826 get_data_transfer_info(cmd
, &lba
, &num
, &ei_lba
);
3827 errsts
= resp_write_same(SCpnt
, lba
, num
, devip
, ei_lba
, unmap
);
3830 errsts
= check_readiness(SCpnt
, 0, devip
);
3834 if (scsi_debug_unmap_max_desc
== 0 || scsi_debug_lbpu
== 0) {
3835 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3836 INVALID_COMMAND_OPCODE
, 0);
3837 errsts
= check_condition_result
;
3839 errsts
= resp_unmap(SCpnt
, devip
);
3843 errsts
= resp_mode_sense(SCpnt
, target
, devip
);
3846 errsts
= resp_mode_select(SCpnt
, 1, devip
);
3848 case MODE_SELECT_10
:
3849 errsts
= resp_mode_select(SCpnt
, 0, devip
);
3852 errsts
= resp_log_sense(SCpnt
, devip
);
3854 case SYNCHRONIZE_CACHE
:
3856 errsts
= check_readiness(SCpnt
, 0, devip
);
3859 errsts
= check_readiness(SCpnt
, 1, devip
);
3861 case XDWRITEREAD_10
:
3862 if (!scsi_bidi_cmnd(SCpnt
)) {
3863 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3864 INVALID_FIELD_IN_CDB
, 0);
3865 errsts
= check_condition_result
;
3869 errsts
= check_readiness(SCpnt
, 0, devip
);
3872 if (scsi_debug_fake_rw
)
3874 get_data_transfer_info(cmd
, &lba
, &num
, &ei_lba
);
3875 errsts
= resp_read(SCpnt
, lba
, num
, devip
, ei_lba
);
3878 errsts
= resp_write(SCpnt
, lba
, num
, devip
, ei_lba
);
3881 errsts
= resp_xdwriteread(SCpnt
, lba
, num
, devip
);
3883 case VARIABLE_LENGTH_CMD
:
3884 if (scsi_debug_dif
== SD_DIF_TYPE2_PROTECTION
) {
3886 if ((cmd
[10] & 0xe0) == 0)
3888 "Unprotected RD/WR to DIF device\n");
3890 if (cmd
[9] == READ_32
) {
3891 BUG_ON(SCpnt
->cmd_len
< 32);
3895 if (cmd
[9] == WRITE_32
) {
3896 BUG_ON(SCpnt
->cmd_len
< 32);
3901 mk_sense_buffer(devip
, ILLEGAL_REQUEST
,
3902 INVALID_FIELD_IN_CDB
, 0);
3903 errsts
= check_condition_result
;
3907 if (SCSI_DEBUG_OPT_NOISE
& scsi_debug_opts
)
3908 printk(KERN_INFO
"scsi_debug: Opcode: 0x%x not "
3909 "supported\n", *cmd
);
3910 errsts
= check_readiness(SCpnt
, 1, devip
);
3912 break; /* Unit attention takes precedence */
3913 mk_sense_buffer(devip
, ILLEGAL_REQUEST
, INVALID_OPCODE
, 0);
3914 errsts
= check_condition_result
;
3917 return schedule_resp(SCpnt
, devip
, done
, errsts
,
3918 (delay_override
? 0 : scsi_debug_delay
));
3921 static DEF_SCSI_QCMD(scsi_debug_queuecommand
)
3923 static struct scsi_host_template sdebug_driver_template
= {
3924 .show_info
= scsi_debug_show_info
,
3925 .write_info
= scsi_debug_write_info
,
3926 .proc_name
= sdebug_proc_name
,
3927 .name
= "SCSI DEBUG",
3928 .info
= scsi_debug_info
,
3929 .slave_alloc
= scsi_debug_slave_alloc
,
3930 .slave_configure
= scsi_debug_slave_configure
,
3931 .slave_destroy
= scsi_debug_slave_destroy
,
3932 .ioctl
= scsi_debug_ioctl
,
3933 .queuecommand
= scsi_debug_queuecommand
,
3934 .eh_abort_handler
= scsi_debug_abort
,
3935 .eh_bus_reset_handler
= scsi_debug_bus_reset
,
3936 .eh_device_reset_handler
= scsi_debug_device_reset
,
3937 .eh_host_reset_handler
= scsi_debug_host_reset
,
3938 .bios_param
= scsi_debug_biosparam
,
3939 .can_queue
= SCSI_DEBUG_CANQUEUE
,
3941 .sg_tablesize
= 256,
3943 .max_sectors
= 0xffff,
3944 .use_clustering
= DISABLE_CLUSTERING
,
3945 .module
= THIS_MODULE
,
3948 static int sdebug_driver_probe(struct device
* dev
)
3951 struct sdebug_host_info
*sdbg_host
;
3952 struct Scsi_Host
*hpnt
;
3955 sdbg_host
= to_sdebug_host(dev
);
3957 sdebug_driver_template
.can_queue
= scsi_debug_max_queue
;
3958 if (scsi_debug_clustering
)
3959 sdebug_driver_template
.use_clustering
= ENABLE_CLUSTERING
;
3960 hpnt
= scsi_host_alloc(&sdebug_driver_template
, sizeof(sdbg_host
));
3962 printk(KERN_ERR
"%s: scsi_register failed\n", __func__
);
3967 sdbg_host
->shost
= hpnt
;
3968 *((struct sdebug_host_info
**)hpnt
->hostdata
) = sdbg_host
;
3969 if ((hpnt
->this_id
>= 0) && (scsi_debug_num_tgts
> hpnt
->this_id
))
3970 hpnt
->max_id
= scsi_debug_num_tgts
+ 1;
3972 hpnt
->max_id
= scsi_debug_num_tgts
;
3973 hpnt
->max_lun
= SAM2_WLUN_REPORT_LUNS
; /* = scsi_debug_max_luns; */
3977 switch (scsi_debug_dif
) {
3979 case SD_DIF_TYPE1_PROTECTION
:
3980 host_prot
= SHOST_DIF_TYPE1_PROTECTION
;
3982 host_prot
|= SHOST_DIX_TYPE1_PROTECTION
;
3985 case SD_DIF_TYPE2_PROTECTION
:
3986 host_prot
= SHOST_DIF_TYPE2_PROTECTION
;
3988 host_prot
|= SHOST_DIX_TYPE2_PROTECTION
;
3991 case SD_DIF_TYPE3_PROTECTION
:
3992 host_prot
= SHOST_DIF_TYPE3_PROTECTION
;
3994 host_prot
|= SHOST_DIX_TYPE3_PROTECTION
;
3999 host_prot
|= SHOST_DIX_TYPE0_PROTECTION
;
4003 scsi_host_set_prot(hpnt
, host_prot
);
4005 printk(KERN_INFO
"scsi_debug: host protection%s%s%s%s%s%s%s\n",
4006 (host_prot
& SHOST_DIF_TYPE1_PROTECTION
) ? " DIF1" : "",
4007 (host_prot
& SHOST_DIF_TYPE2_PROTECTION
) ? " DIF2" : "",
4008 (host_prot
& SHOST_DIF_TYPE3_PROTECTION
) ? " DIF3" : "",
4009 (host_prot
& SHOST_DIX_TYPE0_PROTECTION
) ? " DIX0" : "",
4010 (host_prot
& SHOST_DIX_TYPE1_PROTECTION
) ? " DIX1" : "",
4011 (host_prot
& SHOST_DIX_TYPE2_PROTECTION
) ? " DIX2" : "",
4012 (host_prot
& SHOST_DIX_TYPE3_PROTECTION
) ? " DIX3" : "");
4014 if (scsi_debug_guard
== 1)
4015 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_IP
);
4017 scsi_host_set_guard(hpnt
, SHOST_DIX_GUARD_CRC
);
4019 error
= scsi_add_host(hpnt
, &sdbg_host
->dev
);
4021 printk(KERN_ERR
"%s: scsi_add_host failed\n", __func__
);
4023 scsi_host_put(hpnt
);
4025 scsi_scan_host(hpnt
);
4031 static int sdebug_driver_remove(struct device
* dev
)
4033 struct sdebug_host_info
*sdbg_host
;
4034 struct sdebug_dev_info
*sdbg_devinfo
, *tmp
;
4036 sdbg_host
= to_sdebug_host(dev
);
4039 printk(KERN_ERR
"%s: Unable to locate host info\n",
4044 scsi_remove_host(sdbg_host
->shost
);
4046 list_for_each_entry_safe(sdbg_devinfo
, tmp
, &sdbg_host
->dev_info_list
,
4048 list_del(&sdbg_devinfo
->dev_list
);
4049 kfree(sdbg_devinfo
);
4052 scsi_host_put(sdbg_host
->shost
);
/*
 * Bus match callback: every device on the pseudo bus matches its one
 * driver, so always report a match.
 */
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
4062 static struct bus_type pseudo_lld_bus
= {
4064 .match
= pseudo_lld_bus_match
,
4065 .probe
= sdebug_driver_probe
,
4066 .remove
= sdebug_driver_remove
,
4067 .drv_groups
= sdebug_drv_groups
,