1 /*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
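 *
 * Example invocation (illustrative values; the parameter names correspond to
 * the scsi_debug_* variables declared below and are exposed via
 * module_param_named()):
 *   modprobe scsi_debug add_host=2 num_tgts=2 max_luns=4 dev_size_mb=64
 * which is expected to create 2 pseudo hosts, each with 2 targets of 4 LUNs,
 * all sharing a 64 MB ramdisk.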
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
58 #include "sd.h"
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
92 #define DEF_ATO 1
93 #define DEF_DELAY 1
94 #define DEF_DEV_SIZE_MB 8
95 #define DEF_DIF 0
96 #define DEF_DIX 0
97 #define DEF_D_SENSE 0
98 #define DEF_EVERY_NTH 0
99 #define DEF_FAKE_RW 0
100 #define DEF_GUARD 0
101 #define DEF_LBPU 0
102 #define DEF_LBPWS 0
103 #define DEF_LBPWS10 0
104 #define DEF_LOWEST_ALIGNED 0
105 #define DEF_NO_LUN_0 0
106 #define DEF_NUM_PARTS 0
107 #define DEF_OPTS 0
108 #define DEF_OPT_BLKS 64
109 #define DEF_PHYSBLK_EXP 0
110 #define DEF_PTYPE 0
111 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
112 #define DEF_SECTOR_SIZE 512
113 #define DEF_UNMAP_ALIGNMENT 0
114 #define DEF_UNMAP_GRANULARITY 1
115 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
116 #define DEF_UNMAP_MAX_DESC 256
117 #define DEF_VIRTUAL_GB 0
118 #define DEF_VPD_USE_HOSTNO 1
119 #define DEF_WRITESAME_LENGTH 0xFFFF
121 /* bit mask values for scsi_debug_opts */
122 #define SCSI_DEBUG_OPT_NOISE 1
123 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
124 #define SCSI_DEBUG_OPT_TIMEOUT 4
125 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
126 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
127 #define SCSI_DEBUG_OPT_DIF_ERR 32
128 #define SCSI_DEBUG_OPT_DIX_ERR 64
129 /* When "every_nth" > 0 then modulo "every_nth" commands:
130 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
131 * - a RECOVERED_ERROR is simulated on successful read and write
132 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
133 * - a TRANSPORT_ERROR is simulated on successful read and write
134 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
136 * When "every_nth" < 0 then after "- every_nth" commands:
137 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
138 * - a RECOVERED_ERROR is simulated on successful read and write
139 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
140 * - a TRANSPORT_ERROR is simulated on successful read and write
141 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
142 * This will continue until some other action occurs (e.g. the user
143 * writing a new value (other than -1 or 1) to every_nth via sysfs).
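 *
 * Illustrative usage (parameter names as exposed by this module; the sysfs
 * path is the one typically created under the pseudo bus registered below):
 *   modprobe scsi_debug every_nth=100 opts=4
 * makes every 100th command give no response (opts=4 sets
 * SCSI_DEBUG_OPT_TIMEOUT), and
 *   echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 * switches the behaviour off again.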
146 /* when the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a medium
147 * error is simulated at this sector on read commands: */
148 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
149 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
151 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
152 * or "peripheral device" addressing (value 0) */
153 #define SAM2_LUN_ADDRESS_METHOD 0
154 #define SAM2_WLUN_REPORT_LUNS 0xc101
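/* 0xc101 is the SAM-2 encoding of the REPORT LUNS well known logical unit:
 * address method 11b (well known LUN) in the top bits of the first level
 * LUN field combined with W-LUN value 0x01. */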
156 /* Can queue up to this number of commands. Typically commands that
157 * have a non-zero delay are queued. */
158 #define SCSI_DEBUG_CANQUEUE 255
160 static int scsi_debug_add_host = DEF_NUM_HOST;
161 static int scsi_debug_ato = DEF_ATO;
162 static int scsi_debug_delay = DEF_DELAY;
163 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
164 static int scsi_debug_dif = DEF_DIF;
165 static int scsi_debug_dix = DEF_DIX;
166 static int scsi_debug_dsense = DEF_D_SENSE;
167 static int scsi_debug_every_nth = DEF_EVERY_NTH;
168 static int scsi_debug_fake_rw = DEF_FAKE_RW;
169 static int scsi_debug_guard = DEF_GUARD;
170 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
171 static int scsi_debug_max_luns = DEF_MAX_LUNS;
172 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
173 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
174 static int scsi_debug_no_uld = 0;
175 static int scsi_debug_num_parts = DEF_NUM_PARTS;
176 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
177 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
178 static int scsi_debug_opts = DEF_OPTS;
179 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
180 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
181 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
182 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
183 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
184 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
185 static unsigned int scsi_debug_lbpu = DEF_LBPU;
186 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
187 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
188 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
189 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
190 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
191 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
192 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
194 static int scsi_debug_cmnd_count = 0;
196 #define DEV_READONLY(TGT) (0)
197 #define DEV_REMOVEABLE(TGT) (0)
199 static unsigned int sdebug_store_sectors;
200 static sector_t sdebug_capacity; /* in sectors */
202 /* old BIOS stuff; the kernel may get rid of these, but some mode sense
203 pages may still need them */
204 static int sdebug_heads; /* heads per disk */
205 static int sdebug_cylinders_per; /* cylinders per surface */
206 static int sdebug_sectors_per; /* sectors per cylinder */
208 #define SDEBUG_MAX_PARTS 4
210 #define SDEBUG_SENSE_LEN 32
212 #define SCSI_DEBUG_MAX_CMD_LEN 32
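/* Non-zero when any logical block provisioning mechanism is enabled:
 * UNMAP (lbpu), WRITE SAME(16) with UNMAP (lbpws) or WRITE SAME(10) with
 * UNMAP (lbpws10). */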
214 static unsigned int scsi_debug_lbp(void)
216 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
219 struct sdebug_dev_info {
220 struct list_head dev_list;
221 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
222 unsigned int channel;
223 unsigned int target;
224 unsigned int lun;
225 struct sdebug_host_info *sdbg_host;
226 unsigned int wlun;
227 char reset;
228 char stopped;
229 char used;
232 struct sdebug_host_info {
233 struct list_head host_list;
234 struct Scsi_Host *shost;
235 struct device dev;
236 struct list_head dev_info_list;
239 #define to_sdebug_host(d) \
240 container_of(d, struct sdebug_host_info, dev)
242 static LIST_HEAD(sdebug_host_list);
243 static DEFINE_SPINLOCK(sdebug_host_list_lock);
245 typedef void (* done_funct_t) (struct scsi_cmnd *);
247 struct sdebug_queued_cmd {
248 int in_use;
249 struct timer_list cmnd_timer;
250 done_funct_t done_funct;
251 struct scsi_cmnd * a_cmnd;
252 int scsi_result;
254 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
256 static unsigned char * fake_storep; /* ramdisk storage */
257 static unsigned char *dif_storep; /* protection info */
258 static void *map_storep; /* provisioning map */
260 static unsigned long map_size;
261 static int num_aborts = 0;
262 static int num_dev_resets = 0;
263 static int num_bus_resets = 0;
264 static int num_host_resets = 0;
265 static int dix_writes;
266 static int dix_reads;
267 static int dif_errors;
269 static DEFINE_SPINLOCK(queued_arr_lock);
270 static DEFINE_RWLOCK(atomic_rw);
272 static char sdebug_proc_name[] = "scsi_debug";
274 static struct bus_type pseudo_lld_bus;
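/* Each logical block has an 8 byte DIF tuple (2 byte guard tag, 2 byte
 * application tag, 4 byte reference tag) in dif_storep, so the byte offset
 * of a sector's protection information is sector * 8. */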
276 static inline sector_t dif_offset(sector_t sector)
278 return sector << 3;
281 static struct device_driver sdebug_driverfs_driver = {
282 .name = sdebug_proc_name,
283 .bus = &pseudo_lld_bus,
286 static const int check_condition_result =
287 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
289 static const int illegal_condition_result =
290 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
292 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
293 0, 0, 0x2, 0x4b};
294 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
295 0, 0, 0x0, 0x0};
297 static int sdebug_add_adapter(void);
298 static void sdebug_remove_adapter(void);
300 static void sdebug_max_tgts_luns(void)
302 struct sdebug_host_info *sdbg_host;
303 struct Scsi_Host *hpnt;
305 spin_lock(&sdebug_host_list_lock);
306 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
307 hpnt = sdbg_host->shost;
308 if ((hpnt->this_id >= 0) &&
309 (scsi_debug_num_tgts > hpnt->this_id))
310 hpnt->max_id = scsi_debug_num_tgts + 1;
311 else
312 hpnt->max_id = scsi_debug_num_tgts;
313 /* scsi_debug_max_luns; */
314 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
316 spin_unlock(&sdebug_host_list_lock);
319 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
320 int asc, int asq)
322 unsigned char *sbuff;
324 sbuff = devip->sense_buff;
325 memset(sbuff, 0, SDEBUG_SENSE_LEN);
327 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
329 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
330 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
331 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
334 static void get_data_transfer_info(unsigned char *cmd,
335 unsigned long long *lba, unsigned int *num,
336 u32 *ei_lba)
338 *ei_lba = 0;
340 switch (*cmd) {
341 case VARIABLE_LENGTH_CMD:
342 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
343 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
344 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
345 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
347 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
348 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
350 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
351 (u32)cmd[28] << 24;
352 break;
354 case WRITE_SAME_16:
355 case WRITE_16:
356 case READ_16:
357 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
358 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
359 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
360 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
362 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
363 (u32)cmd[10] << 24;
364 break;
365 case WRITE_12:
366 case READ_12:
367 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
368 (u32)cmd[2] << 24;
370 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
371 (u32)cmd[6] << 24;
372 break;
373 case WRITE_SAME:
374 case WRITE_10:
375 case READ_10:
376 case XDWRITEREAD_10:
377 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
378 (u32)cmd[2] << 24;
380 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
381 break;
382 case WRITE_6:
383 case READ_6:
384 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
385 (u32)(cmd[1] & 0x1f) << 16;
386 *num = (0 == cmd[4]) ? 256 : cmd[4];
387 break;
388 default:
389 break;
393 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
395 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
396 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
398 return -EINVAL;
399 /* return -ENOTTY; // correct return but upsets fdisk */
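/* Returns 0 when the device is ready, otherwise builds sense data and
 * returns check_condition_result: a pending reset is reported as a
 * UNIT ATTENTION (power on reset), and a stopped device as NOT READY,
 * "initializing command required" (ASC 0x04, ASCQ 0x02). */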
402 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
403 struct sdebug_dev_info * devip)
405 if (devip->reset) {
406 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
407 printk(KERN_INFO "scsi_debug: Reporting Unit "
408 "attention: power on reset\n");
409 devip->reset = 0;
410 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
411 return check_condition_result;
413 if ((0 == reset_only) && devip->stopped) {
414 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
415 printk(KERN_INFO "scsi_debug: Reporting Not "
416 "ready: initializing command required\n");
417 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
418 0x2);
419 return check_condition_result;
421 return 0;
424 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
425 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
426 int arr_len)
428 int act_len;
429 struct scsi_data_buffer *sdb = scsi_in(scp);
431 if (!sdb->length)
432 return 0;
433 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
434 return (DID_ERROR << 16);
436 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
437 arr, arr_len);
438 if (sdb->resid)
439 sdb->resid -= act_len;
440 else
441 sdb->resid = scsi_bufflen(scp) - act_len;
443 return 0;
446 /* Returns number of bytes fetched into 'arr' or -1 if error. */
447 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
448 int arr_len)
450 if (!scsi_bufflen(scp))
451 return 0;
452 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
453 return -1;
455 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
459 static const char * inq_vendor_id = "Linux ";
460 static const char * inq_product_id = "scsi_debug ";
461 static const char * inq_product_rev = "0004";
463 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
464 int target_dev_id, int dev_id_num,
465 const char * dev_id_str,
466 int dev_id_str_len)
468 int num, port_a;
469 char b[32];
471 port_a = target_dev_id + 1;
472 /* T10 vendor identifier field format (faked) */
473 arr[0] = 0x2; /* ASCII */
474 arr[1] = 0x1;
475 arr[2] = 0x0;
476 memcpy(&arr[4], inq_vendor_id, 8);
477 memcpy(&arr[12], inq_product_id, 16);
478 memcpy(&arr[28], dev_id_str, dev_id_str_len);
479 num = 8 + 16 + dev_id_str_len;
480 arr[3] = num;
481 num += 4;
482 if (dev_id_num >= 0) {
483 /* NAA-5, Logical unit identifier (binary) */
484 arr[num++] = 0x1; /* binary (not necessarily sas) */
485 arr[num++] = 0x3; /* PIV=0, lu, naa */
486 arr[num++] = 0x0;
487 arr[num++] = 0x8;
488 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
489 arr[num++] = 0x33;
490 arr[num++] = 0x33;
491 arr[num++] = 0x30;
492 arr[num++] = (dev_id_num >> 24);
493 arr[num++] = (dev_id_num >> 16) & 0xff;
494 arr[num++] = (dev_id_num >> 8) & 0xff;
495 arr[num++] = dev_id_num & 0xff;
496 /* Target relative port number */
497 arr[num++] = 0x61; /* proto=sas, binary */
498 arr[num++] = 0x94; /* PIV=1, target port, rel port */
499 arr[num++] = 0x0; /* reserved */
500 arr[num++] = 0x4; /* length */
501 arr[num++] = 0x0; /* reserved */
502 arr[num++] = 0x0; /* reserved */
503 arr[num++] = 0x0;
504 arr[num++] = 0x1; /* relative port A */
506 /* NAA-5, Target port identifier */
507 arr[num++] = 0x61; /* proto=sas, binary */
508 arr[num++] = 0x93; /* piv=1, target port, naa */
509 arr[num++] = 0x0;
510 arr[num++] = 0x8;
511 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
512 arr[num++] = 0x22;
513 arr[num++] = 0x22;
514 arr[num++] = 0x20;
515 arr[num++] = (port_a >> 24);
516 arr[num++] = (port_a >> 16) & 0xff;
517 arr[num++] = (port_a >> 8) & 0xff;
518 arr[num++] = port_a & 0xff;
519 /* NAA-5, Target port group identifier */
520 arr[num++] = 0x61; /* proto=sas, binary */
521 arr[num++] = 0x95; /* piv=1, target port group id */
522 arr[num++] = 0x0;
523 arr[num++] = 0x4;
524 arr[num++] = 0;
525 arr[num++] = 0;
526 arr[num++] = (port_group_id >> 8) & 0xff;
527 arr[num++] = port_group_id & 0xff;
528 /* NAA-5, Target device identifier */
529 arr[num++] = 0x61; /* proto=sas, binary */
530 arr[num++] = 0xa3; /* piv=1, target device, naa */
531 arr[num++] = 0x0;
532 arr[num++] = 0x8;
533 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
534 arr[num++] = 0x22;
535 arr[num++] = 0x22;
536 arr[num++] = 0x20;
537 arr[num++] = (target_dev_id >> 24);
538 arr[num++] = (target_dev_id >> 16) & 0xff;
539 arr[num++] = (target_dev_id >> 8) & 0xff;
540 arr[num++] = target_dev_id & 0xff;
541 /* SCSI name string: Target device identifier */
542 arr[num++] = 0x63; /* proto=sas, UTF-8 */
543 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
544 arr[num++] = 0x0;
545 arr[num++] = 24;
546 memcpy(arr + num, "naa.52222220", 12);
547 num += 12;
548 snprintf(b, sizeof(b), "%08X", target_dev_id);
549 memcpy(arr + num, b, 8);
550 num += 8;
551 memset(arr + num, 0, 4);
552 num += 4;
553 return num;
557 static unsigned char vpd84_data[] = {
558 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
559 0x22,0x22,0x22,0x0,0xbb,0x1,
560 0x22,0x22,0x22,0x0,0xbb,0x2,
563 static int inquiry_evpd_84(unsigned char * arr)
565 memcpy(arr, vpd84_data, sizeof(vpd84_data));
566 return sizeof(vpd84_data);
569 static int inquiry_evpd_85(unsigned char * arr)
571 int num = 0;
572 const char * na1 = "https://www.kernel.org/config";
573 const char * na2 = "http://www.kernel.org/log";
574 int plen, olen;
576 arr[num++] = 0x1; /* lu, storage config */
577 arr[num++] = 0x0; /* reserved */
578 arr[num++] = 0x0;
579 olen = strlen(na1);
580 plen = olen + 1;
581 if (plen % 4)
582 plen = ((plen / 4) + 1) * 4;
583 arr[num++] = plen; /* length, null terminated, padded */
584 memcpy(arr + num, na1, olen);
585 memset(arr + num + olen, 0, plen - olen);
586 num += plen;
588 arr[num++] = 0x4; /* lu, logging */
589 arr[num++] = 0x0; /* reserved */
590 arr[num++] = 0x0;
591 olen = strlen(na2);
592 plen = olen + 1;
593 if (plen % 4)
594 plen = ((plen / 4) + 1) * 4;
595 arr[num++] = plen; /* length, null terminated, padded */
596 memcpy(arr + num, na2, olen);
597 memset(arr + num + olen, 0, plen - olen);
598 num += plen;
600 return num;
603 /* SCSI ports VPD page */
604 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
606 int num = 0;
607 int port_a, port_b;
609 port_a = target_dev_id + 1;
610 port_b = port_a + 1;
611 arr[num++] = 0x0; /* reserved */
612 arr[num++] = 0x0; /* reserved */
613 arr[num++] = 0x0;
614 arr[num++] = 0x1; /* relative port 1 (primary) */
615 memset(arr + num, 0, 6);
616 num += 6;
617 arr[num++] = 0x0;
618 arr[num++] = 12; /* length tp descriptor */
619 /* naa-5 target port identifier (A) */
620 arr[num++] = 0x61; /* proto=sas, binary */
621 arr[num++] = 0x93; /* PIV=1, target port, NAA */
622 arr[num++] = 0x0; /* reserved */
623 arr[num++] = 0x8; /* length */
624 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
625 arr[num++] = 0x22;
626 arr[num++] = 0x22;
627 arr[num++] = 0x20;
628 arr[num++] = (port_a >> 24);
629 arr[num++] = (port_a >> 16) & 0xff;
630 arr[num++] = (port_a >> 8) & 0xff;
631 arr[num++] = port_a & 0xff;
633 arr[num++] = 0x0; /* reserved */
634 arr[num++] = 0x0; /* reserved */
635 arr[num++] = 0x0;
636 arr[num++] = 0x2; /* relative port 2 (secondary) */
637 memset(arr + num, 0, 6);
638 num += 6;
639 arr[num++] = 0x0;
640 arr[num++] = 12; /* length tp descriptor */
641 /* naa-5 target port identifier (B) */
642 arr[num++] = 0x61; /* proto=sas, binary */
643 arr[num++] = 0x93; /* PIV=1, target port, NAA */
644 arr[num++] = 0x0; /* reserved */
645 arr[num++] = 0x8; /* length */
646 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
647 arr[num++] = 0x22;
648 arr[num++] = 0x22;
649 arr[num++] = 0x20;
650 arr[num++] = (port_b >> 24);
651 arr[num++] = (port_b >> 16) & 0xff;
652 arr[num++] = (port_b >> 8) & 0xff;
653 arr[num++] = port_b & 0xff;
655 return num;
659 static unsigned char vpd89_data[] = {
660 /* from 4th byte */ 0,0,0,0,
661 'l','i','n','u','x',' ',' ',' ',
662 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
663 '1','2','3','4',
664 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
665 0xec,0,0,0,
666 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
667 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
668 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
669 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
670 0x53,0x41,
671 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
672 0x20,0x20,
673 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
674 0x10,0x80,
675 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
676 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
677 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
678 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
679 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
680 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
681 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
682 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
683 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
684 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
685 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
686 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
687 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
688 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
689 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
690 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
691 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
698 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
703 static int inquiry_evpd_89(unsigned char * arr)
705 memcpy(arr, vpd89_data, sizeof(vpd89_data));
706 return sizeof(vpd89_data);
710 /* Block limits VPD page (SBC-3) */
711 static unsigned char vpdb0_data[] = {
712 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
713 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
714 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
715 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
718 static int inquiry_evpd_b0(unsigned char * arr)
720 unsigned int gran;
722 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
724 /* Optimal transfer length granularity */
725 gran = 1 << scsi_debug_physblk_exp;
726 arr[2] = (gran >> 8) & 0xff;
727 arr[3] = gran & 0xff;
729 /* Maximum Transfer Length */
730 if (sdebug_store_sectors > 0x400) {
731 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
732 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
733 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
734 arr[7] = sdebug_store_sectors & 0xff;
737 /* Optimal Transfer Length */
738 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
740 if (scsi_debug_lbpu) {
741 /* Maximum Unmap LBA Count */
742 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
744 /* Maximum Unmap Block Descriptor Count */
745 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
748 /* Unmap Granularity Alignment */
749 if (scsi_debug_unmap_alignment) {
750 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
751 arr[28] |= 0x80; /* UGAVALID */
754 /* Optimal Unmap Granularity */
755 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
757 /* Maximum WRITE SAME Length */
758 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
760 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
762 return sizeof(vpdb0_data);
765 /* Block device characteristics VPD page (SBC-3) */
766 static int inquiry_evpd_b1(unsigned char *arr)
768 memset(arr, 0, 0x3c);
769 arr[0] = 0;
770 arr[1] = 1; /* non rotating medium (e.g. solid state) */
771 arr[2] = 0;
772 arr[3] = 5; /* less than 1.8" */
774 return 0x3c;
777 /* Thin provisioning VPD page (SBC-3) */
778 static int inquiry_evpd_b2(unsigned char *arr)
780 memset(arr, 0, 0x8);
781 arr[0] = 0; /* threshold exponent */
783 if (scsi_debug_lbpu)
784 arr[1] = 1 << 7;
786 if (scsi_debug_lbpws)
787 arr[1] |= 1 << 6;
789 if (scsi_debug_lbpws10)
790 arr[1] |= 1 << 5;
792 return 0x8;
795 #define SDEBUG_LONG_INQ_SZ 96
796 #define SDEBUG_MAX_INQ_ARR_SZ 584
798 static int resp_inquiry(struct scsi_cmnd * scp, int target,
799 struct sdebug_dev_info * devip)
801 unsigned char pq_pdt;
802 unsigned char * arr;
803 unsigned char *cmd = (unsigned char *)scp->cmnd;
804 int alloc_len, n, ret;
806 alloc_len = (cmd[3] << 8) + cmd[4];
807 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
808 if (! arr)
809 return DID_REQUEUE << 16;
810 if (devip->wlun)
811 pq_pdt = 0x1e; /* present, wlun */
812 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
813 pq_pdt = 0x7f; /* not present, no device type */
814 else
815 pq_pdt = (scsi_debug_ptype & 0x1f);
816 arr[0] = pq_pdt;
817 if (0x2 & cmd[1]) { /* CMDDT bit set */
818 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
820 kfree(arr);
821 return check_condition_result;
822 } else if (0x1 & cmd[1]) { /* EVPD bit set */
823 int lu_id_num, port_group_id, target_dev_id, len;
824 char lu_id_str[6];
825 int host_no = devip->sdbg_host->shost->host_no;
827 port_group_id = (((host_no + 1) & 0x7f) << 8) +
828 (devip->channel & 0x7f);
829 if (0 == scsi_debug_vpd_use_hostno)
830 host_no = 0;
831 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
832 (devip->target * 1000) + devip->lun);
833 target_dev_id = ((host_no + 1) * 2000) +
834 (devip->target * 1000) - 3;
835 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
836 if (0 == cmd[2]) { /* supported vital product data pages */
837 arr[1] = cmd[2]; /*sanity */
838 n = 4;
839 arr[n++] = 0x0; /* this page */
840 arr[n++] = 0x80; /* unit serial number */
841 arr[n++] = 0x83; /* device identification */
842 arr[n++] = 0x84; /* software interface ident. */
843 arr[n++] = 0x85; /* management network addresses */
844 arr[n++] = 0x86; /* extended inquiry */
845 arr[n++] = 0x87; /* mode page policy */
846 arr[n++] = 0x88; /* SCSI ports */
847 arr[n++] = 0x89; /* ATA information */
848 arr[n++] = 0xb0; /* Block limits (SBC) */
849 arr[n++] = 0xb1; /* Block characteristics (SBC) */
850 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
851 arr[n++] = 0xb2;
852 arr[3] = n - 4; /* number of supported VPD pages */
853 } else if (0x80 == cmd[2]) { /* unit serial number */
854 arr[1] = cmd[2]; /*sanity */
855 arr[3] = len;
856 memcpy(&arr[4], lu_id_str, len);
857 } else if (0x83 == cmd[2]) { /* device identification */
858 arr[1] = cmd[2]; /*sanity */
859 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
860 target_dev_id, lu_id_num,
861 lu_id_str, len);
862 } else if (0x84 == cmd[2]) { /* Software interface ident. */
863 arr[1] = cmd[2]; /*sanity */
864 arr[3] = inquiry_evpd_84(&arr[4]);
865 } else if (0x85 == cmd[2]) { /* Management network addresses */
866 arr[1] = cmd[2]; /*sanity */
867 arr[3] = inquiry_evpd_85(&arr[4]);
868 } else if (0x86 == cmd[2]) { /* extended inquiry */
869 arr[1] = cmd[2]; /*sanity */
870 arr[3] = 0x3c; /* number of following entries */
871 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
872 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
873 else if (scsi_debug_dif)
874 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
875 else
876 arr[4] = 0x0; /* no protection stuff */
877 arr[5] = 0x7; /* head of q, ordered + simple q's */
878 } else if (0x87 == cmd[2]) { /* mode page policy */
879 arr[1] = cmd[2]; /*sanity */
880 arr[3] = 0x8; /* number of following entries */
881 arr[4] = 0x2; /* disconnect-reconnect mp */
882 arr[6] = 0x80; /* mlus, shared */
883 arr[8] = 0x18; /* protocol specific lu */
884 arr[10] = 0x82; /* mlus, per initiator port */
885 } else if (0x88 == cmd[2]) { /* SCSI Ports */
886 arr[1] = cmd[2]; /*sanity */
887 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
888 } else if (0x89 == cmd[2]) { /* ATA information */
889 arr[1] = cmd[2]; /*sanity */
890 n = inquiry_evpd_89(&arr[4]);
891 arr[2] = (n >> 8);
892 arr[3] = (n & 0xff);
893 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
894 arr[1] = cmd[2]; /*sanity */
895 arr[3] = inquiry_evpd_b0(&arr[4]);
896 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
897 arr[1] = cmd[2]; /*sanity */
898 arr[3] = inquiry_evpd_b1(&arr[4]);
899 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
900 arr[1] = cmd[2]; /*sanity */
901 arr[3] = inquiry_evpd_b2(&arr[4]);
902 } else {
903 /* Illegal request, invalid field in cdb */
904 mk_sense_buffer(devip, ILLEGAL_REQUEST,
905 INVALID_FIELD_IN_CDB, 0);
906 kfree(arr);
907 return check_condition_result;
909 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
910 ret = fill_from_dev_buffer(scp, arr,
911 min(len, SDEBUG_MAX_INQ_ARR_SZ));
912 kfree(arr);
913 return ret;
915 /* drops through here for a standard inquiry */
916 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
917 arr[2] = scsi_debug_scsi_level;
918 arr[3] = 2; /* response_data_format==2 */
919 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
920 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
921 if (0 == scsi_debug_vpd_use_hostno)
922 arr[5] = 0x10; /* claim: implicit TGPS */
923 arr[6] = 0x10; /* claim: MultiP */
924 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
925 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
926 memcpy(&arr[8], inq_vendor_id, 8);
927 memcpy(&arr[16], inq_product_id, 16);
928 memcpy(&arr[32], inq_product_rev, 4);
929 /* version descriptors (2 bytes each) follow */
930 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
931 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
932 n = 62;
933 if (scsi_debug_ptype == 0) {
934 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
935 } else if (scsi_debug_ptype == 1) {
936 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
938 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
939 ret = fill_from_dev_buffer(scp, arr,
940 min(alloc_len, SDEBUG_LONG_INQ_SZ));
941 kfree(arr);
942 return ret;
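/* REQUEST SENSE: returns descriptor (0x72) or fixed (0x70) format sense
 * data depending on the DESC bit in the CDB and the dsense setting, and
 * fakes a "threshold exceeded" report when the informational exceptions
 * mode page has TEST set and MRIE == 6. */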
945 static int resp_requests(struct scsi_cmnd * scp,
946 struct sdebug_dev_info * devip)
948 unsigned char * sbuff;
949 unsigned char *cmd = (unsigned char *)scp->cmnd;
950 unsigned char arr[SDEBUG_SENSE_LEN];
951 int want_dsense;
952 int len = 18;
954 memset(arr, 0, sizeof(arr));
955 if (devip->reset == 1)
956 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
957 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
958 sbuff = devip->sense_buff;
959 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
960 if (want_dsense) {
961 arr[0] = 0x72;
962 arr[1] = 0x0; /* NO_SENSE in sense_key */
963 arr[2] = THRESHOLD_EXCEEDED;
964 arr[3] = 0xff; /* TEST set and MRIE==6 */
965 } else {
966 arr[0] = 0x70;
967 arr[2] = 0x0; /* NO_SENSE in sense_key */
968 arr[7] = 0xa; /* 18 byte sense buffer */
969 arr[12] = THRESHOLD_EXCEEDED;
970 arr[13] = 0xff; /* TEST set and MRIE==6 */
972 } else {
973 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
974 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
975 /* DESC bit set and sense_buff in fixed format */
976 memset(arr, 0, sizeof(arr));
977 arr[0] = 0x72;
978 arr[1] = sbuff[2]; /* sense key */
979 arr[2] = sbuff[12]; /* asc */
980 arr[3] = sbuff[13]; /* ascq */
981 len = 8;
984 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
985 return fill_from_dev_buffer(scp, arr, len);
988 static int resp_start_stop(struct scsi_cmnd * scp,
989 struct sdebug_dev_info * devip)
991 unsigned char *cmd = (unsigned char *)scp->cmnd;
992 int power_cond, errsts, start;
994 if ((errsts = check_readiness(scp, 1, devip)))
995 return errsts;
996 power_cond = (cmd[4] & 0xf0) >> 4;
997 if (power_cond) {
998 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1000 return check_condition_result;
1002 start = cmd[4] & 1;
1003 if (start == devip->stopped)
1004 devip->stopped = !start;
1005 return 0;
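/* Capacity reported to the host, in sectors.  With virtual_gb set the
 * advertised size is decoupled from the backing ramdisk; e.g. (illustrative
 * values) virtual_gb=4 with 512 byte sectors reports
 * 4 * (1073741824 / 512) = 8388608 sectors, while accesses still wrap
 * within sdebug_store_sectors. */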
1008 static sector_t get_sdebug_capacity(void)
1010 if (scsi_debug_virtual_gb > 0)
1011 return (sector_t)scsi_debug_virtual_gb *
1012 (1073741824 / scsi_debug_sector_size);
1013 else
1014 return sdebug_store_sectors;
1017 #define SDEBUG_READCAP_ARR_SZ 8
1018 static int resp_readcap(struct scsi_cmnd * scp,
1019 struct sdebug_dev_info * devip)
1021 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1022 unsigned int capac;
1023 int errsts;
1025 if ((errsts = check_readiness(scp, 1, devip)))
1026 return errsts;
1027 /* following just in case virtual_gb changed */
1028 sdebug_capacity = get_sdebug_capacity();
1029 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1030 if (sdebug_capacity < 0xffffffff) {
1031 capac = (unsigned int)sdebug_capacity - 1;
1032 arr[0] = (capac >> 24);
1033 arr[1] = (capac >> 16) & 0xff;
1034 arr[2] = (capac >> 8) & 0xff;
1035 arr[3] = capac & 0xff;
1036 } else {
1037 arr[0] = 0xff;
1038 arr[1] = 0xff;
1039 arr[2] = 0xff;
1040 arr[3] = 0xff;
1042 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1043 arr[7] = scsi_debug_sector_size & 0xff;
1044 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1047 #define SDEBUG_READCAP16_ARR_SZ 32
1048 static int resp_readcap16(struct scsi_cmnd * scp,
1049 struct sdebug_dev_info * devip)
1051 unsigned char *cmd = (unsigned char *)scp->cmnd;
1052 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1053 unsigned long long capac;
1054 int errsts, k, alloc_len;
1056 if ((errsts = check_readiness(scp, 1, devip)))
1057 return errsts;
1058 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1059 + cmd[13]);
1060 /* following just in case virtual_gb changed */
1061 sdebug_capacity = get_sdebug_capacity();
1062 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1063 capac = sdebug_capacity - 1;
1064 for (k = 0; k < 8; ++k, capac >>= 8)
1065 arr[7 - k] = capac & 0xff;
1066 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1067 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1068 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1069 arr[11] = scsi_debug_sector_size & 0xff;
1070 arr[13] = scsi_debug_physblk_exp & 0xf;
1071 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1073 if (scsi_debug_lbp())
1074 arr[14] |= 0x80; /* LBPME */
1076 arr[15] = scsi_debug_lowest_aligned & 0xff;
1078 if (scsi_debug_dif) {
1079 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1080 arr[12] |= 1; /* PROT_EN */
1083 return fill_from_dev_buffer(scp, arr,
1084 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1087 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1089 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1090 struct sdebug_dev_info * devip)
1092 unsigned char *cmd = (unsigned char *)scp->cmnd;
1093 unsigned char * arr;
1094 int host_no = devip->sdbg_host->shost->host_no;
1095 int n, ret, alen, rlen;
1096 int port_group_a, port_group_b, port_a, port_b;
1098 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1099 + cmd[9]);
1101 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1102 if (! arr)
1103 return DID_REQUEUE << 16;
1105 * EVPD page 0x88 states we have two ports, one
1106 * real and a fake port with no device connected.
1107 * So we create two port groups with one port each
1108 * and set the group with port B to unavailable.
1110 port_a = 0x1; /* relative port A */
1111 port_b = 0x2; /* relative port B */
1112 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1113 (devip->channel & 0x7f);
1114 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1115 (devip->channel & 0x7f) + 0x80;
1118 * The asymmetric access state is cycled according to the host_id.
1120 n = 4;
1121 if (0 == scsi_debug_vpd_use_hostno) {
1122 arr[n++] = host_no % 3; /* Asymm access state */
1123 arr[n++] = 0x0F; /* claim: all states are supported */
1124 } else {
1125 arr[n++] = 0x0; /* Active/Optimized path */
1126 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1128 arr[n++] = (port_group_a >> 8) & 0xff;
1129 arr[n++] = port_group_a & 0xff;
1130 arr[n++] = 0; /* Reserved */
1131 arr[n++] = 0; /* Status code */
1132 arr[n++] = 0; /* Vendor unique */
1133 arr[n++] = 0x1; /* One port per group */
1134 arr[n++] = 0; /* Reserved */
1135 arr[n++] = 0; /* Reserved */
1136 arr[n++] = (port_a >> 8) & 0xff;
1137 arr[n++] = port_a & 0xff;
1138 arr[n++] = 3; /* Port unavailable */
1139 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1140 arr[n++] = (port_group_b >> 8) & 0xff;
1141 arr[n++] = port_group_b & 0xff;
1142 arr[n++] = 0; /* Reserved */
1143 arr[n++] = 0; /* Status code */
1144 arr[n++] = 0; /* Vendor unique */
1145 arr[n++] = 0x1; /* One port per group */
1146 arr[n++] = 0; /* Reserved */
1147 arr[n++] = 0; /* Reserved */
1148 arr[n++] = (port_b >> 8) & 0xff;
1149 arr[n++] = port_b & 0xff;
1151 rlen = n - 4;
1152 arr[0] = (rlen >> 24) & 0xff;
1153 arr[1] = (rlen >> 16) & 0xff;
1154 arr[2] = (rlen >> 8) & 0xff;
1155 arr[3] = rlen & 0xff;
1158 * Return the smallest value of either
1159 * - The allocated length
1160 * - The constructed command length
1161 * - The maximum array size
1163 rlen = min(alen,n);
1164 ret = fill_from_dev_buffer(scp, arr,
1165 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1166 kfree(arr);
1167 return ret;
1170 /* <<Following mode page info copied from ST318451LW>> */
1172 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1173 { /* Read-Write Error Recovery page for mode_sense */
1174 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1175 5, 0, 0xff, 0xff};
1177 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1178 if (1 == pcontrol)
1179 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1180 return sizeof(err_recov_pg);
1183 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1184 { /* Disconnect-Reconnect page for mode_sense */
1185 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1186 0, 0, 0, 0, 0, 0, 0, 0};
1188 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1189 if (1 == pcontrol)
1190 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1191 return sizeof(disconnect_pg);
1194 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1195 { /* Format device page for mode_sense */
1196 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1197 0, 0, 0, 0, 0, 0, 0, 0,
1198 0, 0, 0, 0, 0x40, 0, 0, 0};
1200 memcpy(p, format_pg, sizeof(format_pg));
1201 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1202 p[11] = sdebug_sectors_per & 0xff;
1203 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1204 p[13] = scsi_debug_sector_size & 0xff;
1205 if (DEV_REMOVEABLE(target))
1206 p[20] |= 0x20; /* should agree with INQUIRY */
1207 if (1 == pcontrol)
1208 memset(p + 2, 0, sizeof(format_pg) - 2);
1209 return sizeof(format_pg);
1212 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1213 { /* Caching page for mode_sense */
1214 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1215 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1217 memcpy(p, caching_pg, sizeof(caching_pg));
1218 if (1 == pcontrol)
1219 memset(p + 2, 0, sizeof(caching_pg) - 2);
1220 return sizeof(caching_pg);
1223 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1224 { /* Control mode page for mode_sense */
1225 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1226 0, 0, 0, 0};
1227 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1228 0, 0, 0x2, 0x4b};
1230 if (scsi_debug_dsense)
1231 ctrl_m_pg[2] |= 0x4;
1232 else
1233 ctrl_m_pg[2] &= ~0x4;
1235 if (scsi_debug_ato)
1236 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1238 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1239 if (1 == pcontrol)
1240 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1241 else if (2 == pcontrol)
1242 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1243 return sizeof(ctrl_m_pg);
1247 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1248 { /* Informational Exceptions control mode page for mode_sense */
1249 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1250 0, 0, 0x0, 0x0};
1251 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1252 0, 0, 0x0, 0x0};
1254 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1255 if (1 == pcontrol)
1256 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1257 else if (2 == pcontrol)
1258 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1259 return sizeof(iec_m_pg);
1262 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1263 { /* SAS SSP mode page - short format for mode_sense */
1264 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1265 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1267 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1268 if (1 == pcontrol)
1269 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1270 return sizeof(sas_sf_m_pg);
1274 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1275 int target_dev_id)
1276 { /* SAS phy control and discover mode page for mode_sense */
1277 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1278 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1279 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1280 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1281 0x2, 0, 0, 0, 0, 0, 0, 0,
1282 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1283 0, 0, 0, 0, 0, 0, 0, 0,
1284 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1285 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1286 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1287 0x3, 0, 0, 0, 0, 0, 0, 0,
1288 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1289 0, 0, 0, 0, 0, 0, 0, 0,
1291 int port_a, port_b;
1293 port_a = target_dev_id + 1;
1294 port_b = port_a + 1;
1295 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1296 p[20] = (port_a >> 24);
1297 p[21] = (port_a >> 16) & 0xff;
1298 p[22] = (port_a >> 8) & 0xff;
1299 p[23] = port_a & 0xff;
1300 p[48 + 20] = (port_b >> 24);
1301 p[48 + 21] = (port_b >> 16) & 0xff;
1302 p[48 + 22] = (port_b >> 8) & 0xff;
1303 p[48 + 23] = port_b & 0xff;
1304 if (1 == pcontrol)
1305 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1306 return sizeof(sas_pcd_m_pg);
1309 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1310 { /* SAS SSP shared protocol specific port mode subpage */
1311 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1312 0, 0, 0, 0, 0, 0, 0, 0,
1315 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1316 if (1 == pcontrol)
1317 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1318 return sizeof(sas_sha_m_pg);
1321 #define SDEBUG_MAX_MSENSE_SZ 256
1323 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1324 struct sdebug_dev_info * devip)
1326 unsigned char dbd, llbaa;
1327 int pcontrol, pcode, subpcode, bd_len;
1328 unsigned char dev_spec;
1329 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1330 unsigned char * ap;
1331 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1332 unsigned char *cmd = (unsigned char *)scp->cmnd;
1334 if ((errsts = check_readiness(scp, 1, devip)))
1335 return errsts;
1336 dbd = !!(cmd[1] & 0x8);
1337 pcontrol = (cmd[2] & 0xc0) >> 6;
1338 pcode = cmd[2] & 0x3f;
1339 subpcode = cmd[3];
1340 msense_6 = (MODE_SENSE == cmd[0]);
1341 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1342 if ((0 == scsi_debug_ptype) && (0 == dbd))
1343 bd_len = llbaa ? 16 : 8;
1344 else
1345 bd_len = 0;
1346 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1347 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1348 if (0x3 == pcontrol) { /* Saving values not supported */
1349 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1351 return check_condition_result;
1353 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1354 (devip->target * 1000) - 3;
1355 /* set DPOFUA bit for disks */
1356 if (0 == scsi_debug_ptype)
1357 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1358 else
1359 dev_spec = 0x0;
1360 if (msense_6) {
1361 arr[2] = dev_spec;
1362 arr[3] = bd_len;
1363 offset = 4;
1364 } else {
1365 arr[3] = dev_spec;
1366 if (16 == bd_len)
1367 arr[4] = 0x1; /* set LONGLBA bit */
1368 arr[7] = bd_len; /* assume 255 or less */
1369 offset = 8;
1371 ap = arr + offset;
1372 if ((bd_len > 0) && (!sdebug_capacity))
1373 sdebug_capacity = get_sdebug_capacity();
1375 if (8 == bd_len) {
1376 if (sdebug_capacity > 0xfffffffe) {
1377 ap[0] = 0xff;
1378 ap[1] = 0xff;
1379 ap[2] = 0xff;
1380 ap[3] = 0xff;
1381 } else {
1382 ap[0] = (sdebug_capacity >> 24) & 0xff;
1383 ap[1] = (sdebug_capacity >> 16) & 0xff;
1384 ap[2] = (sdebug_capacity >> 8) & 0xff;
1385 ap[3] = sdebug_capacity & 0xff;
1387 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1388 ap[7] = scsi_debug_sector_size & 0xff;
1389 offset += bd_len;
1390 ap = arr + offset;
1391 } else if (16 == bd_len) {
1392 unsigned long long capac = sdebug_capacity;
1394 for (k = 0; k < 8; ++k, capac >>= 8)
1395 ap[7 - k] = capac & 0xff;
1396 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1397 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1398 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1399 ap[15] = scsi_debug_sector_size & 0xff;
1400 offset += bd_len;
1401 ap = arr + offset;
1404 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1405 /* TODO: Control Extension page */
1406 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1408 return check_condition_result;
1410 switch (pcode) {
1411 case 0x1: /* Read-Write error recovery page, direct access */
1412 len = resp_err_recov_pg(ap, pcontrol, target);
1413 offset += len;
1414 break;
1415 case 0x2: /* Disconnect-Reconnect page, all devices */
1416 len = resp_disconnect_pg(ap, pcontrol, target);
1417 offset += len;
1418 break;
1419 case 0x3: /* Format device page, direct access */
1420 len = resp_format_pg(ap, pcontrol, target);
1421 offset += len;
1422 break;
1423 case 0x8: /* Caching page, direct access */
1424 len = resp_caching_pg(ap, pcontrol, target);
1425 offset += len;
1426 break;
1427 case 0xa: /* Control Mode page, all devices */
1428 len = resp_ctrl_m_pg(ap, pcontrol, target);
1429 offset += len;
1430 break;
1431 case 0x19: /* if spc==1 then sas phy, control+discover */
1432 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1433 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1434 INVALID_FIELD_IN_CDB, 0);
1435 return check_condition_result;
1437 len = 0;
1438 if ((0x0 == subpcode) || (0xff == subpcode))
1439 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1440 if ((0x1 == subpcode) || (0xff == subpcode))
1441 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1442 target_dev_id);
1443 if ((0x2 == subpcode) || (0xff == subpcode))
1444 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1445 offset += len;
1446 break;
1447 case 0x1c: /* Informational Exceptions Mode page, all devices */
1448 len = resp_iec_m_pg(ap, pcontrol, target);
1449 offset += len;
1450 break;
1451 case 0x3f: /* Read all Mode pages */
1452 if ((0 == subpcode) || (0xff == subpcode)) {
1453 len = resp_err_recov_pg(ap, pcontrol, target);
1454 len += resp_disconnect_pg(ap + len, pcontrol, target);
1455 len += resp_format_pg(ap + len, pcontrol, target);
1456 len += resp_caching_pg(ap + len, pcontrol, target);
1457 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1458 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1459 if (0xff == subpcode) {
1460 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1461 target, target_dev_id);
1462 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1464 len += resp_iec_m_pg(ap + len, pcontrol, target);
1465 } else {
1466 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1467 INVALID_FIELD_IN_CDB, 0);
1468 return check_condition_result;
1470 offset += len;
1471 break;
1472 default:
1473 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1475 return check_condition_result;
1477 if (msense_6)
1478 arr[0] = offset - 1;
1479 else {
1480 arr[0] = ((offset - 2) >> 8) & 0xff;
1481 arr[1] = (offset - 2) & 0xff;
1483 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1486 #define SDEBUG_MAX_MSELECT_SZ 512
1488 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1489 struct sdebug_dev_info * devip)
1491 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1492 int param_len, res, errsts, mpage;
1493 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1494 unsigned char *cmd = (unsigned char *)scp->cmnd;
1496 if ((errsts = check_readiness(scp, 1, devip)))
1497 return errsts;
1498 memset(arr, 0, sizeof(arr));
1499 pf = cmd[1] & 0x10;
1500 sp = cmd[1] & 0x1;
1501 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1502 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1503 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1504 INVALID_FIELD_IN_CDB, 0);
1505 return check_condition_result;
1507 res = fetch_to_dev_buffer(scp, arr, param_len);
1508 if (-1 == res)
1509 return (DID_ERROR << 16);
1510 else if ((res < param_len) &&
1511 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1512 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1513 " IO sent=%d bytes\n", param_len, res);
1514 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1515 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1516 if (md_len > 2) {
1517 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1518 INVALID_FIELD_IN_PARAM_LIST, 0);
1519 return check_condition_result;
1521 off = bd_len + (mselect6 ? 4 : 8);
1522 mpage = arr[off] & 0x3f;
1523 ps = !!(arr[off] & 0x80);
1524 if (ps) {
1525 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1526 INVALID_FIELD_IN_PARAM_LIST, 0);
1527 return check_condition_result;
1529 spf = !!(arr[off] & 0x40);
1530 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1531 (arr[off + 1] + 2);
1532 if ((pg_len + off) > param_len) {
1533 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1534 PARAMETER_LIST_LENGTH_ERR, 0);
1535 return check_condition_result;
1537 switch (mpage) {
1538 case 0xa: /* Control Mode page */
1539 if (ctrl_m_pg[1] == arr[off + 1]) {
1540 memcpy(ctrl_m_pg + 2, arr + off + 2,
1541 sizeof(ctrl_m_pg) - 2);
1542 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1543 return 0;
1545 break;
1546 case 0x1c: /* Informational Exceptions Mode page */
1547 if (iec_m_pg[1] == arr[off + 1]) {
1548 memcpy(iec_m_pg + 2, arr + off + 2,
1549 sizeof(iec_m_pg) - 2);
1550 return 0;
1552 break;
1553 default:
1554 break;
1556 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1557 INVALID_FIELD_IN_PARAM_LIST, 0);
1558 return check_condition_result;
1561 static int resp_temp_l_pg(unsigned char * arr)
1563 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1564 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1567 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1568 return sizeof(temp_l_pg);
1571 static int resp_ie_l_pg(unsigned char * arr)
1573 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1576 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1577 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1578 arr[4] = THRESHOLD_EXCEEDED;
1579 arr[5] = 0xff;
1581 return sizeof(ie_l_pg);
1584 #define SDEBUG_MAX_LSENSE_SZ 512
1586 static int resp_log_sense(struct scsi_cmnd * scp,
1587 struct sdebug_dev_info * devip)
1589 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1590 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1591 unsigned char *cmd = (unsigned char *)scp->cmnd;
1593 if ((errsts = check_readiness(scp, 1, devip)))
1594 return errsts;
1595 memset(arr, 0, sizeof(arr));
1596 ppc = cmd[1] & 0x2;
1597 sp = cmd[1] & 0x1;
1598 if (ppc || sp) {
1599 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1600 INVALID_FIELD_IN_CDB, 0);
1601 return check_condition_result;
1603 pcontrol = (cmd[2] & 0xc0) >> 6;
1604 pcode = cmd[2] & 0x3f;
1605 subpcode = cmd[3] & 0xff;
1606 alloc_len = (cmd[7] << 8) + cmd[8];
1607 arr[0] = pcode;
1608 if (0 == subpcode) {
1609 switch (pcode) {
1610 case 0x0: /* Supported log pages log page */
1611 n = 4;
1612 arr[n++] = 0x0; /* this page */
1613 arr[n++] = 0xd; /* Temperature */
1614 arr[n++] = 0x2f; /* Informational exceptions */
1615 arr[3] = n - 4;
1616 break;
1617 case 0xd: /* Temperature log page */
1618 arr[3] = resp_temp_l_pg(arr + 4);
1619 break;
1620 case 0x2f: /* Informational exceptions log page */
1621 arr[3] = resp_ie_l_pg(arr + 4);
1622 break;
1623 default:
1624 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1625 INVALID_FIELD_IN_CDB, 0);
1626 return check_condition_result;
1628 } else if (0xff == subpcode) {
1629 arr[0] |= 0x40;
1630 arr[1] = subpcode;
1631 switch (pcode) {
1632 case 0x0: /* Supported log pages and subpages log page */
1633 n = 4;
1634 arr[n++] = 0x0;
1635 arr[n++] = 0x0; /* 0,0 page */
1636 arr[n++] = 0x0;
1637 arr[n++] = 0xff; /* this page */
1638 arr[n++] = 0xd;
1639 arr[n++] = 0x0; /* Temperature */
1640 arr[n++] = 0x2f;
1641 arr[n++] = 0x0; /* Informational exceptions */
1642 arr[3] = n - 4;
1643 break;
1644 case 0xd: /* Temperature subpages */
1645 n = 4;
1646 arr[n++] = 0xd;
1647 arr[n++] = 0x0; /* Temperature */
1648 arr[3] = n - 4;
1649 break;
1650 case 0x2f: /* Informational exceptions subpages */
1651 n = 4;
1652 arr[n++] = 0x2f;
1653 arr[n++] = 0x0; /* Informational exceptions */
1654 arr[3] = n - 4;
1655 break;
1656 default:
1657 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1658 INVALID_FIELD_IN_CDB, 0);
1659 return check_condition_result;
1661 } else {
1662 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1663 INVALID_FIELD_IN_CDB, 0);
1664 return check_condition_result;
1666 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1667 return fill_from_dev_buffer(scp, arr,
1668 min(len, SDEBUG_MAX_LSENSE_SZ));
1671 static int check_device_access_params(struct sdebug_dev_info *devi,
1672 unsigned long long lba, unsigned int num)
1674 if (lba + num > sdebug_capacity) {
1675 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1676 return check_condition_result;
1678 /* transfer length excessive (tie in to block limits VPD page) */
1679 if (num > sdebug_store_sectors) {
1680 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1681 return check_condition_result;
1683 return 0;
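/* Copies data between the command's scatter-gather list and the shared
 * ramdisk, wrapping modulo sdebug_store_sectors.  For example (illustrative
 * numbers): with a 2048 sector store, lba=2040 and num=16 copy 8 sectors
 * starting at offset 2040 and the remaining 8 from offset 0. */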
1686 static int do_device_access(struct scsi_cmnd *scmd,
1687 struct sdebug_dev_info *devi,
1688 unsigned long long lba, unsigned int num, int write)
1690 int ret;
1691 unsigned long long block, rest = 0;
1692 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1694 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1696 block = do_div(lba, sdebug_store_sectors);
1697 if (block + num > sdebug_store_sectors)
1698 rest = block + num - sdebug_store_sectors;
1700 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1701 (num - rest) * scsi_debug_sector_size);
1702 if (!ret && rest)
1703 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1705 return ret;
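/* Verifies the stored T10 DIF tuples against the ramdisk data before a
 * protected READ.  The non-zero return values become the ASCQ for ASC 0x10
 * in the caller: 0x01 for a guard tag mismatch and 0x03 for a reference
 * tag mismatch. */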
1708 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1709 unsigned int sectors, u32 ei_lba)
1711 unsigned int i, resid;
1712 struct scatterlist *psgl;
1713 struct sd_dif_tuple *sdt;
1714 sector_t sector;
1715 sector_t tmp_sec = start_sec;
1716 void *paddr;
1718 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1720 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1722 for (i = 0 ; i < sectors ; i++) {
1723 u16 csum;
1725 if (sdt[i].app_tag == 0xffff)
1726 continue;
1728 sector = start_sec + i;
1730 switch (scsi_debug_guard) {
1731 case 1:
1732 csum = ip_compute_csum(fake_storep +
1733 sector * scsi_debug_sector_size,
1734 scsi_debug_sector_size);
1735 break;
1736 case 0:
1737 csum = crc_t10dif(fake_storep +
1738 sector * scsi_debug_sector_size,
1739 scsi_debug_sector_size);
1740 csum = cpu_to_be16(csum);
1741 break;
1742 default:
1743 BUG();
1746 if (sdt[i].guard_tag != csum) {
1747 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1748 " rcvd 0x%04x, data 0x%04x\n", __func__,
1749 (unsigned long)sector,
1750 be16_to_cpu(sdt[i].guard_tag),
1751 be16_to_cpu(csum));
1752 dif_errors++;
1753 return 0x01;
1756 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1757 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1758 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1759 __func__, (unsigned long)sector);
1760 dif_errors++;
1761 return 0x03;
1764 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1765 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1766 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1767 __func__, (unsigned long)sector);
1768 dif_errors++;
1769 return 0x03;
1772 ei_lba++;
1775 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1776 sector = start_sec;
1778 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1779 int len = min(psgl->length, resid);
1781 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
1782 memcpy(paddr, dif_storep + dif_offset(sector), len);
1784 sector += len >> 3;
1785 if (sector >= sdebug_store_sectors) {
1786 /* Force wrap */
1787 tmp_sec = sector;
1788 sector = do_div(tmp_sec, sdebug_store_sectors);
1790 resid -= len;
1791 kunmap_atomic(paddr, KM_IRQ0);
1794 dix_reads++;
1796 return 0;
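/*
 * resp_read(): validate the LBA range, optionally fake an unrecoverable
 * medium error inside a fixed LBA window when OPT_MEDIUM_ERR is set,
 * run the DIF/DIX verification above when protection data accompanies
 * the command, then copy data out of the ramdisk under the atomic_rw
 * read lock.
 */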
1799 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1800 unsigned int num, struct sdebug_dev_info *devip,
1801 u32 ei_lba)
1803 unsigned long iflags;
1804 int ret;
1806 ret = check_device_access_params(devip, lba, num);
1807 if (ret)
1808 return ret;
1810 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1811 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1812 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1813 /* claim unrecoverable read error */
1814 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1815 /* set info field and valid bit for fixed descriptor */
1816 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1817 devip->sense_buff[0] |= 0x80; /* Valid bit */
1818 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1819 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1820 devip->sense_buff[3] = (ret >> 24) & 0xff;
1821 devip->sense_buff[4] = (ret >> 16) & 0xff;
1822 devip->sense_buff[5] = (ret >> 8) & 0xff;
1823 devip->sense_buff[6] = ret & 0xff;
1825 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1826 return check_condition_result;
1829 /* DIX + T10 DIF */
1830 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1831 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1833 if (prot_ret) {
1834 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1835 return illegal_condition_result;
1839 read_lock_irqsave(&atomic_rw, iflags);
1840 ret = do_device_access(SCpnt, devip, lba, num, 0);
1841 read_unlock_irqrestore(&atomic_rw, iflags);
1842 return ret;
1845 void dump_sector(unsigned char *buf, int len)
1847 int i, j;
1849 printk(KERN_ERR ">>> Sector Dump <<<\n");
1851 for (i = 0 ; i < len ; i += 16) {
1852 printk(KERN_ERR "%04d: ", i);
1854 for (j = 0 ; j < 16 ; j++) {
1855 unsigned char c = buf[i+j];
1856 if (c >= 0x20 && c < 0x7e)
1857 printk(" %c ", buf[i+j]);
1858 else
1859 printk("%02x ", buf[i+j]);
1862 printk("\n");
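/*
 * prot_verify_write() walks the data and protection scatterlists in
 * lockstep, recomputing the guard tag for each sector-sized chunk and
 * comparing it (and the reference tag for Type 1/2 protection) with the
 * tuple supplied by the initiator before committing that tuple to
 * dif_storep.
 */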
1866 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1867 unsigned int sectors, u32 ei_lba)
1869 int i, j, ret;
1870 struct sd_dif_tuple *sdt;
1871 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1872 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1873 void *daddr, *paddr;
1874 sector_t tmp_sec = start_sec;
1875 sector_t sector;
1876 int ppage_offset;
1877 unsigned short csum;
1879 sector = do_div(tmp_sec, sdebug_store_sectors);
1881 BUG_ON(scsi_sg_count(SCpnt) == 0);
1882 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1884 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
1885 ppage_offset = 0;
1887 /* For each data page */
1888 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1889 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
1891 /* For each sector-sized chunk in data page */
1892 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1894 /* If we're at the end of the current
1895 * protection page advance to the next one
1897 if (ppage_offset >= psgl->length) {
1898 kunmap_atomic(paddr, KM_IRQ1);
1899 psgl = sg_next(psgl);
1900 BUG_ON(psgl == NULL);
1901 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
1902 + psgl->offset;
1903 ppage_offset = 0;
1906 sdt = paddr + ppage_offset;
1908 switch (scsi_debug_guard) {
1909 case 1:
1910 csum = ip_compute_csum(daddr,
1911 scsi_debug_sector_size);
1912 break;
1913 case 0:
1914 csum = cpu_to_be16(crc_t10dif(daddr,
1915 scsi_debug_sector_size));
1916 break;
1917 default:
1918 BUG();
1919 ret = 0;
1920 goto out;
1923 if (sdt->guard_tag != csum) {
1924 printk(KERN_ERR
1925 "%s: GUARD check failed on sector %lu " \
1926 "rcvd 0x%04x, calculated 0x%04x\n",
1927 __func__, (unsigned long)sector,
1928 be16_to_cpu(sdt->guard_tag),
1929 be16_to_cpu(csum));
1930 ret = 0x01;
1931 dump_sector(daddr, scsi_debug_sector_size);
1932 goto out;
1935 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1936 be32_to_cpu(sdt->ref_tag)
1937 != (start_sec & 0xffffffff)) {
1938 printk(KERN_ERR
1939 "%s: REF check failed on sector %lu\n",
1940 __func__, (unsigned long)sector);
1941 ret = 0x03;
1942 dump_sector(daddr, scsi_debug_sector_size);
1943 goto out;
1946 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1947 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1948 printk(KERN_ERR
1949 "%s: REF check failed on sector %lu\n",
1950 __func__, (unsigned long)sector);
1951 ret = 0x03;
1952 dump_sector(daddr, scsi_debug_sector_size);
1953 goto out;
1956 /* Would be great to copy this in bigger
1957 * chunks. However, for the sake of
1958 * correctness we need to verify each sector
1959 * before writing it to "stable" storage
1960 */
1961 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1963 sector++;
1965 if (sector == sdebug_store_sectors)
1966 sector = 0; /* Force wrap */
1968 start_sec++;
1969 ei_lba++;
1970 daddr += scsi_debug_sector_size;
1971 ppage_offset += sizeof(struct sd_dif_tuple);
1974 kunmap_atomic(daddr, KM_IRQ0);
1977 kunmap_atomic(paddr, KM_IRQ1);
1979 dix_writes++;
1981 return 0;
1983 out:
1984 dif_errors++;
1985 kunmap_atomic(daddr, KM_IRQ0);
1986 kunmap_atomic(paddr, KM_IRQ1);
1987 return ret;
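/*
 * Logical block provisioning state is tracked in map_storep, one bit per
 * unmap_granularity-sized chunk (with chunk boundaries offset by
 * unmap_alignment). map_state() reports whether the chunk holding lba is
 * mapped and, via *num, how many following blocks share that state.
 */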
1990 static unsigned int map_state(sector_t lba, unsigned int *num)
1992 unsigned int granularity, alignment, mapped;
1993 sector_t block, next, end;
1995 granularity = scsi_debug_unmap_granularity;
1996 alignment = granularity - scsi_debug_unmap_alignment;
1997 block = lba + alignment;
1998 do_div(block, granularity);
2000 mapped = test_bit(block, map_storep);
2002 if (mapped)
2003 next = find_next_zero_bit(map_storep, map_size, block);
2004 else
2005 next = find_next_bit(map_storep, map_size, block);
2007 end = next * granularity - scsi_debug_unmap_alignment;
2008 *num = end - lba;
2010 return mapped;
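/*
 * map_region() marks every provisioning chunk touched by a write as
 * mapped; unmap_region() clears only chunks that the range covers
 * completely, so partially unmapped chunks remain mapped.
 */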
2013 static void map_region(sector_t lba, unsigned int len)
2015 unsigned int granularity, alignment;
2016 sector_t end = lba + len;
2018 granularity = scsi_debug_unmap_granularity;
2019 alignment = granularity - scsi_debug_unmap_alignment;
2021 while (lba < end) {
2022 sector_t block, rem;
2024 block = lba + alignment;
2025 rem = do_div(block, granularity);
2027 if (block < map_size)
2028 set_bit(block, map_storep);
2030 lba += granularity - rem;
2034 static void unmap_region(sector_t lba, unsigned int len)
2036 unsigned int granularity, alignment;
2037 sector_t end = lba + len;
2039 granularity = scsi_debug_unmap_granularity;
2040 alignment = granularity - scsi_debug_unmap_alignment;
2042 while (lba < end) {
2043 sector_t block, rem;
2045 block = lba + alignment;
2046 rem = do_div(block, granularity);
2048 if (rem == 0 && lba + granularity <= end &&
2049 block < map_size)
2050 clear_bit(block, map_storep);
2052 lba += granularity - rem;
2056 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2057 unsigned int num, struct sdebug_dev_info *devip,
2058 u32 ei_lba)
2060 unsigned long iflags;
2061 int ret;
2063 ret = check_device_access_params(devip, lba, num);
2064 if (ret)
2065 return ret;
2067 /* DIX + T10 DIF */
2068 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2069 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2071 if (prot_ret) {
2072 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2073 return illegal_condition_result;
2077 write_lock_irqsave(&atomic_rw, iflags);
2078 ret = do_device_access(SCpnt, devip, lba, num, 1);
2079 if (scsi_debug_unmap_granularity)
2080 map_region(lba, num);
2081 write_unlock_irqrestore(&atomic_rw, iflags);
2082 if (-1 == ret)
2083 return (DID_ERROR << 16);
2084 else if ((ret < (num * scsi_debug_sector_size)) &&
2085 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2086 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2087 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2089 return 0;
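/*
 * resp_write_same(): with the UNMAP bit set and provisioning enabled the
 * range is simply deallocated; otherwise the single block supplied by the
 * initiator is fetched once and replicated across the remaining blocks of
 * the range.
 */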
2092 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2093 unsigned int num, struct sdebug_dev_info *devip,
2094 u32 ei_lba, unsigned int unmap)
2096 unsigned long iflags;
2097 unsigned long long i;
2098 int ret;
2100 ret = check_device_access_params(devip, lba, num);
2101 if (ret)
2102 return ret;
2104 if (num > scsi_debug_write_same_length) {
2105 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2106 0);
2107 return check_condition_result;
2110 write_lock_irqsave(&atomic_rw, iflags);
2112 if (unmap && scsi_debug_unmap_granularity) {
2113 unmap_region(lba, num);
2114 goto out;
2117 /* Else fetch one logical block */
2118 ret = fetch_to_dev_buffer(scmd,
2119 fake_storep + (lba * scsi_debug_sector_size),
2120 scsi_debug_sector_size);
2122 if (-1 == ret) {
2123 write_unlock_irqrestore(&atomic_rw, iflags);
2124 return (DID_ERROR << 16);
2125 } else if ((ret < (num * scsi_debug_sector_size)) &&
2126 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2127 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2128 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2130 /* Copy first sector to remaining blocks */
2131 for (i = 1 ; i < num ; i++)
2132 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2133 fake_storep + (lba * scsi_debug_sector_size),
2134 scsi_debug_sector_size);
2136 if (scsi_debug_unmap_granularity)
2137 map_region(lba, num);
2138 out:
2139 write_unlock_irqrestore(&atomic_rw, iflags);
2141 return 0;
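/*
 * UNMAP parameter data: an 8 byte header followed by 16 byte block
 * descriptors, each giving a starting LBA and a number of blocks.
 * resp_unmap() validates each descriptor and deallocates its range.
 */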
2144 struct unmap_block_desc {
2145 __be64 lba;
2146 __be32 blocks;
2147 __be32 __reserved;
2150 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2152 unsigned char *buf;
2153 struct unmap_block_desc *desc;
2154 unsigned int i, payload_len, descriptors;
2155 int ret;
2157 ret = check_readiness(scmd, 1, devip);
2158 if (ret)
2159 return ret;
2161 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2162 BUG_ON(scsi_bufflen(scmd) != payload_len);
2164 descriptors = (payload_len - 8) / 16;
2166 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2167 if (!buf)
2168 return check_condition_result;
2170 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2172 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2173 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2175 desc = (void *)&buf[8];
2177 for (i = 0 ; i < descriptors ; i++) {
2178 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2179 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2181 ret = check_device_access_params(devip, lba, num);
2182 if (ret)
2183 goto out;
2185 unmap_region(lba, num);
2188 ret = 0;
2190 out:
2191 kfree(buf);
2193 return ret;
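/*
 * GET LBA STATUS: returns a single descriptor reporting whether the
 * requested LBA is mapped and how many consecutive blocks share that
 * provisioning state (taken from map_state() above).
 */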
2196 #define SDEBUG_GET_LBA_STATUS_LEN 32
2198 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2199 struct sdebug_dev_info * devip)
2201 unsigned long long lba;
2202 unsigned int alloc_len, mapped, num;
2203 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2204 int ret;
2206 ret = check_readiness(scmd, 1, devip);
2207 if (ret)
2208 return ret;
2210 lba = get_unaligned_be64(&scmd->cmnd[2]);
2211 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2213 if (alloc_len < 24)
2214 return 0;
2216 ret = check_device_access_params(devip, lba, 1);
2217 if (ret)
2218 return ret;
2220 mapped = map_state(lba, &num);
2222 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2223 put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */
2224 put_unaligned_be64(lba, &arr[8]); /* LBA */
2225 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2226 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2228 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2231 #define SDEBUG_RLUN_ARR_SZ 256
2233 static int resp_report_luns(struct scsi_cmnd * scp,
2234 struct sdebug_dev_info * devip)
2236 unsigned int alloc_len;
2237 int lun_cnt, i, upper, num, n, wlun, lun;
2238 unsigned char *cmd = (unsigned char *)scp->cmnd;
2239 int select_report = (int)cmd[2];
2240 struct scsi_lun *one_lun;
2241 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2242 unsigned char * max_addr;
2244 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2245 if ((alloc_len < 4) || (select_report > 2)) {
2246 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2247 0);
2248 return check_condition_result;
2250 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2251 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2252 lun_cnt = scsi_debug_max_luns;
2253 if (1 == select_report)
2254 lun_cnt = 0;
2255 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2256 --lun_cnt;
2257 wlun = (select_report > 0) ? 1 : 0;
2258 num = lun_cnt + wlun;
2259 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2260 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2261 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2262 sizeof(struct scsi_lun)), num);
2263 if (n < num) {
2264 wlun = 0;
2265 lun_cnt = n;
2267 one_lun = (struct scsi_lun *) &arr[8];
2268 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2269 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2270 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2271 i++, lun++) {
2272 upper = (lun >> 8) & 0x3f;
2273 if (upper)
2274 one_lun[i].scsi_lun[0] =
2275 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2276 one_lun[i].scsi_lun[1] = lun & 0xff;
2278 if (wlun) {
2279 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2280 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2281 i++;
2283 alloc_len = (unsigned char *)(one_lun + i) - arr;
2284 return fill_from_dev_buffer(scp, arr,
2285 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2288 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2289 unsigned int num, struct sdebug_dev_info *devip)
2291 int i, j, ret = -1;
2292 unsigned char *kaddr, *buf;
2293 unsigned int offset;
2294 struct scatterlist *sg;
2295 struct scsi_data_buffer *sdb = scsi_in(scp);
2297 /* better not to use temporary buffer. */
2298 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2299 if (!buf)
2300 return ret;
2302 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2304 offset = 0;
2305 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2306 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
2307 if (!kaddr)
2308 goto out;
2310 for (j = 0; j < sg->length; j++)
2311 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2313 offset += sg->length;
2314 kunmap_atomic(kaddr, KM_USER0);
2316 ret = 0;
2317 out:
2318 kfree(buf);
2320 return ret;
2323 /* When timer goes off this function is called. */
2324 static void timer_intr_handler(unsigned long indx)
2326 struct sdebug_queued_cmd * sqcp;
2327 unsigned long iflags;
2329 if (indx >= scsi_debug_max_queue) {
2330 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2331 "large\n");
2332 return;
2334 spin_lock_irqsave(&queued_arr_lock, iflags);
2335 sqcp = &queued_arr[(int)indx];
2336 if (! sqcp->in_use) {
2337 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2338 "interrupt\n");
2339 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2340 return;
2342 sqcp->in_use = 0;
2343 if (sqcp->done_funct) {
2344 sqcp->a_cmnd->result = sqcp->scsi_result;
2345 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2347 sqcp->done_funct = NULL;
2348 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2352 static struct sdebug_dev_info *
2353 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2355 struct sdebug_dev_info *devip;
2357 devip = kzalloc(sizeof(*devip), flags);
2358 if (devip) {
2359 devip->sdbg_host = sdbg_host;
2360 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2362 return devip;
2365 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2367 struct sdebug_host_info * sdbg_host;
2368 struct sdebug_dev_info * open_devip = NULL;
2369 struct sdebug_dev_info * devip =
2370 (struct sdebug_dev_info *)sdev->hostdata;
2372 if (devip)
2373 return devip;
2374 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2375 if (!sdbg_host) {
2376 printk(KERN_ERR "Host info NULL\n");
2377 return NULL;
2379 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2380 if ((devip->used) && (devip->channel == sdev->channel) &&
2381 (devip->target == sdev->id) &&
2382 (devip->lun == sdev->lun))
2383 return devip;
2384 else {
2385 if ((!devip->used) && (!open_devip))
2386 open_devip = devip;
2389 if (!open_devip) { /* try and make a new one */
2390 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2391 if (!open_devip) {
2392 printk(KERN_ERR "%s: out of memory at line %d\n",
2393 __func__, __LINE__);
2394 return NULL;
2398 open_devip->channel = sdev->channel;
2399 open_devip->target = sdev->id;
2400 open_devip->lun = sdev->lun;
2401 open_devip->sdbg_host = sdbg_host;
2402 open_devip->reset = 1;
2403 open_devip->used = 1;
2404 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2405 if (scsi_debug_dsense)
2406 open_devip->sense_buff[0] = 0x72;
2407 else {
2408 open_devip->sense_buff[0] = 0x70;
2409 open_devip->sense_buff[7] = 0xa;
2411 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2412 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2414 return open_devip;
2417 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2419 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2420 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2421 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2422 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2423 return 0;
2426 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2428 struct sdebug_dev_info *devip;
2430 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2431 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2432 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2433 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2434 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2435 devip = devInfoReg(sdp);
2436 if (NULL == devip)
2437 return 1; /* no resources, will be marked offline */
2438 sdp->hostdata = devip;
2439 if (sdp->host->cmd_per_lun)
2440 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2441 sdp->host->cmd_per_lun);
2442 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2443 if (scsi_debug_no_uld)
2444 sdp->no_uld_attach = 1;
2445 return 0;
2448 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2450 struct sdebug_dev_info *devip =
2451 (struct sdebug_dev_info *)sdp->hostdata;
2453 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2454 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2455 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2456 if (devip) {
2457 /* make this slot available for re-use */
2458 devip->used = 0;
2459 sdp->hostdata = NULL;
2463 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
2464 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2466 unsigned long iflags;
2467 int k;
2468 struct sdebug_queued_cmd *sqcp;
2470 spin_lock_irqsave(&queued_arr_lock, iflags);
2471 for (k = 0; k < scsi_debug_max_queue; ++k) {
2472 sqcp = &queued_arr[k];
2473 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2474 del_timer_sync(&sqcp->cmnd_timer);
2475 sqcp->in_use = 0;
2476 sqcp->a_cmnd = NULL;
2477 break;
2480 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2481 return (k < scsi_debug_max_queue) ? 1 : 0;
2484 /* Deletes (stops) timers of all queued commands */
2485 static void stop_all_queued(void)
2487 unsigned long iflags;
2488 int k;
2489 struct sdebug_queued_cmd *sqcp;
2491 spin_lock_irqsave(&queued_arr_lock, iflags);
2492 for (k = 0; k < scsi_debug_max_queue; ++k) {
2493 sqcp = &queued_arr[k];
2494 if (sqcp->in_use && sqcp->a_cmnd) {
2495 del_timer_sync(&sqcp->cmnd_timer);
2496 sqcp->in_use = 0;
2497 sqcp->a_cmnd = NULL;
2500 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2503 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2505 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2506 printk(KERN_INFO "scsi_debug: abort\n");
2507 ++num_aborts;
2508 stop_queued_cmnd(SCpnt);
2509 return SUCCESS;
2512 static int scsi_debug_biosparam(struct scsi_device *sdev,
2513 struct block_device * bdev, sector_t capacity, int *info)
2515 int res;
2516 unsigned char *buf;
2518 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2519 printk(KERN_INFO "scsi_debug: biosparam\n");
2520 buf = scsi_bios_ptable(bdev);
2521 if (buf) {
2522 res = scsi_partsize(buf, capacity,
2523 &info[2], &info[0], &info[1]);
2524 kfree(buf);
2525 if (! res)
2526 return res;
2528 info[0] = sdebug_heads;
2529 info[1] = sdebug_sectors_per;
2530 info[2] = sdebug_cylinders_per;
2531 return 0;
2534 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2536 struct sdebug_dev_info * devip;
2538 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2539 printk(KERN_INFO "scsi_debug: device_reset\n");
2540 ++num_dev_resets;
2541 if (SCpnt) {
2542 devip = devInfoReg(SCpnt->device);
2543 if (devip)
2544 devip->reset = 1;
2546 return SUCCESS;
2549 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2551 struct sdebug_host_info *sdbg_host;
2552 struct sdebug_dev_info * dev_info;
2553 struct scsi_device * sdp;
2554 struct Scsi_Host * hp;
2556 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2557 printk(KERN_INFO "scsi_debug: bus_reset\n");
2558 ++num_bus_resets;
2559 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2560 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2561 if (sdbg_host) {
2562 list_for_each_entry(dev_info,
2563 &sdbg_host->dev_info_list,
2564 dev_list)
2565 dev_info->reset = 1;
2568 return SUCCESS;
2571 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2573 struct sdebug_host_info * sdbg_host;
2574 struct sdebug_dev_info * dev_info;
2576 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2577 printk(KERN_INFO "scsi_debug: host_reset\n");
2578 ++num_host_resets;
2579 spin_lock(&sdebug_host_list_lock);
2580 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2581 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2582 dev_list)
2583 dev_info->reset = 1;
2585 spin_unlock(&sdebug_host_list_lock);
2586 stop_all_queued();
2587 return SUCCESS;
2590 /* Initializes timers in queued array */
2591 static void __init init_all_queued(void)
2593 unsigned long iflags;
2594 int k;
2595 struct sdebug_queued_cmd * sqcp;
2597 spin_lock_irqsave(&queued_arr_lock, iflags);
2598 for (k = 0; k < scsi_debug_max_queue; ++k) {
2599 sqcp = &queued_arr[k];
2600 init_timer(&sqcp->cmnd_timer);
2601 sqcp->in_use = 0;
2602 sqcp->a_cmnd = NULL;
2604 spin_unlock_irqrestore(&queued_arr_lock, iflags);
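/*
 * sdebug_build_parts() writes a simple MBR style partition table into the
 * ramdisk: the 0x55 0xAA signature at offsets 510/511 and up to
 * SDEBUG_MAX_PARTS entries at offset 0x1be, each sized from the simulated
 * geometry and typed as a plain Linux partition (0x83).
 */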
2607 static void __init sdebug_build_parts(unsigned char *ramp,
2608 unsigned long store_size)
2610 struct partition * pp;
2611 int starts[SDEBUG_MAX_PARTS + 2];
2612 int sectors_per_part, num_sectors, k;
2613 int heads_by_sects, start_sec, end_sec;
2615 /* assume partition table already zeroed */
2616 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2617 return;
2618 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2619 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2620 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2621 "partitions to %d\n", SDEBUG_MAX_PARTS);
2623 num_sectors = (int)sdebug_store_sectors;
2624 sectors_per_part = (num_sectors - sdebug_sectors_per)
2625 / scsi_debug_num_parts;
2626 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2627 starts[0] = sdebug_sectors_per;
2628 for (k = 1; k < scsi_debug_num_parts; ++k)
2629 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2630 * heads_by_sects;
2631 starts[scsi_debug_num_parts] = num_sectors;
2632 starts[scsi_debug_num_parts + 1] = 0;
2634 ramp[510] = 0x55; /* magic partition markings */
2635 ramp[511] = 0xAA;
2636 pp = (struct partition *)(ramp + 0x1be);
2637 for (k = 0; starts[k + 1]; ++k, ++pp) {
2638 start_sec = starts[k];
2639 end_sec = starts[k + 1] - 1;
2640 pp->boot_ind = 0;
2642 pp->cyl = start_sec / heads_by_sects;
2643 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2644 / sdebug_sectors_per;
2645 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2647 pp->end_cyl = end_sec / heads_by_sects;
2648 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2649 / sdebug_sectors_per;
2650 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2652 pp->start_sect = start_sec;
2653 pp->nr_sects = end_sec - start_sec + 1;
2654 pp->sys_ind = 0x83; /* plain Linux partition */
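/*
 * schedule_resp() completes a command either immediately (delta_jiff <= 0)
 * or by parking it in queued_arr[] with a timer that fires after the
 * requested number of jiffies; timer_intr_handler() then invokes the
 * mid-level done callback. A full queue is reported back as busy.
 */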
2658 static int schedule_resp(struct scsi_cmnd * cmnd,
2659 struct sdebug_dev_info * devip,
2660 done_funct_t done, int scsi_result, int delta_jiff)
2662 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2663 if (scsi_result) {
2664 struct scsi_device * sdp = cmnd->device;
2666 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2667 "non-zero result=0x%x\n", sdp->host->host_no,
2668 sdp->channel, sdp->id, sdp->lun, scsi_result);
2671 if (cmnd && devip) {
2672 /* simulate autosense by this driver */
2673 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2674 memcpy(cmnd->sense_buffer, devip->sense_buff,
2675 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2676 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2678 if (delta_jiff <= 0) {
2679 if (cmnd)
2680 cmnd->result = scsi_result;
2681 if (done)
2682 done(cmnd);
2683 return 0;
2684 } else {
2685 unsigned long iflags;
2686 int k;
2687 struct sdebug_queued_cmd * sqcp = NULL;
2689 spin_lock_irqsave(&queued_arr_lock, iflags);
2690 for (k = 0; k < scsi_debug_max_queue; ++k) {
2691 sqcp = &queued_arr[k];
2692 if (! sqcp->in_use)
2693 break;
2695 if (k >= scsi_debug_max_queue) {
2696 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2697 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2698 return 1; /* report busy to mid level */
2700 sqcp->in_use = 1;
2701 sqcp->a_cmnd = cmnd;
2702 sqcp->scsi_result = scsi_result;
2703 sqcp->done_funct = done;
2704 sqcp->cmnd_timer.function = timer_intr_handler;
2705 sqcp->cmnd_timer.data = k;
2706 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2707 add_timer(&sqcp->cmnd_timer);
2708 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2709 if (cmnd)
2710 cmnd->result = 0;
2711 return 0;
2714 /* Note: The following macros create attribute files in the
2715 /sys/module/scsi_debug/parameters directory. Unfortunately this
2716 driver is unaware of a change and cannot trigger auxiliary actions
2717 as it can when the corresponding attribute in the
2718 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2719 */
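/*
 * For example, a parameter declared writable below (such as "opts")
 * appears as /sys/module/scsi_debug/parameters/opts, while the same value
 * is also exposed through the driver attribute created later in
 * do_create_driverfs_files().
 */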
2720 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2721 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2722 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2723 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2724 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2725 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2726 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2727 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2728 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2729 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2730 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2731 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2732 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2733 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2734 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2735 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2736 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2737 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2738 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2739 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2740 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2741 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2742 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2743 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2744 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2745 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2746 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2747 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2748 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2749 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2750 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2751 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2752 S_IRUGO | S_IWUSR);
2753 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2754 S_IRUGO | S_IWUSR);
2756 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2757 MODULE_DESCRIPTION("SCSI debug adapter driver");
2758 MODULE_LICENSE("GPL");
2759 MODULE_VERSION(SCSI_DEBUG_VERSION);
2761 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2762 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2763 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2764 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2765 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2766 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2767 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2768 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2769 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2770 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2771 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2772 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2773 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2774 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2775 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2776 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2777 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2778 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2779 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2780 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2781 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2782 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2783 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2784 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2785 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2786 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2787 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2788 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2789 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2790 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2791 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2792 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2793 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2795 static char sdebug_info[256];
2797 static const char * scsi_debug_info(struct Scsi_Host * shp)
2799 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2800 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2801 scsi_debug_version_date, scsi_debug_dev_size_mb,
2802 scsi_debug_opts);
2803 return sdebug_info;
2806 /* scsi_debug_proc_info
2807 * Used if the driver currently has no support of its own for /proc/scsi
2808 */
2809 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2810 int length, int inout)
2812 int len, pos, begin;
2813 int orig_length;
2815 orig_length = length;
2817 if (inout == 1) {
2818 char arr[16];
2819 int minLen = length > 15 ? 15 : length;
2821 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2822 return -EACCES;
2823 memcpy(arr, buffer, minLen);
2824 arr[minLen] = '\0';
2825 if (1 != sscanf(arr, "%d", &pos))
2826 return -EINVAL;
2827 scsi_debug_opts = pos;
2828 if (scsi_debug_every_nth != 0)
2829 scsi_debug_cmnd_count = 0;
2830 return length;
2832 begin = 0;
2833 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2834 "%s [%s]\n"
2835 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2836 "every_nth=%d(curr:%d)\n"
2837 "delay=%d, max_luns=%d, scsi_level=%d\n"
2838 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2839 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2840 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2841 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2842 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2843 scsi_debug_cmnd_count, scsi_debug_delay,
2844 scsi_debug_max_luns, scsi_debug_scsi_level,
2845 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2846 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2847 num_host_resets, dix_reads, dix_writes, dif_errors);
2848 if (pos < offset) {
2849 len = 0;
2850 begin = pos;
2852 *start = buffer + (offset - begin); /* Start of wanted data */
2853 len -= (offset - begin);
2854 if (len > length)
2855 len = length;
2856 return len;
2859 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2861 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2864 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2865 const char * buf, size_t count)
2867 int delay;
2868 char work[20];
2870 if (1 == sscanf(buf, "%10s", work)) {
2871 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2872 scsi_debug_delay = delay;
2873 return count;
2876 return -EINVAL;
2878 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2879 sdebug_delay_store);
2881 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2883 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2886 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2887 const char * buf, size_t count)
2889 int opts;
2890 char work[20];
2892 if (1 == sscanf(buf, "%10s", work)) {
2893 if (0 == strnicmp(work,"0x", 2)) {
2894 if (1 == sscanf(&work[2], "%x", &opts))
2895 goto opts_done;
2896 } else {
2897 if (1 == sscanf(work, "%d", &opts))
2898 goto opts_done;
2901 return -EINVAL;
2902 opts_done:
2903 scsi_debug_opts = opts;
2904 scsi_debug_cmnd_count = 0;
2905 return count;
2907 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2908 sdebug_opts_store);
2910 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2912 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2914 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2915 const char * buf, size_t count)
2917 int n;
2919 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2920 scsi_debug_ptype = n;
2921 return count;
2923 return -EINVAL;
2925 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2927 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2929 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2931 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2932 const char * buf, size_t count)
2934 int n;
2936 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2937 scsi_debug_dsense = n;
2938 return count;
2940 return -EINVAL;
2942 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2943 sdebug_dsense_store);
2945 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2947 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2949 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2950 const char * buf, size_t count)
2952 int n;
2954 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2955 scsi_debug_fake_rw = n;
2956 return count;
2958 return -EINVAL;
2960 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2961 sdebug_fake_rw_store);
2963 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2965 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2967 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2968 const char * buf, size_t count)
2970 int n;
2972 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2973 scsi_debug_no_lun_0 = n;
2974 return count;
2976 return -EINVAL;
2978 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2979 sdebug_no_lun_0_store);
2981 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2983 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2985 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2986 const char * buf, size_t count)
2988 int n;
2990 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2991 scsi_debug_num_tgts = n;
2992 sdebug_max_tgts_luns();
2993 return count;
2995 return -EINVAL;
2997 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
2998 sdebug_num_tgts_store);
3000 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
3002 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3004 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
3006 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
3008 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3010 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
3012 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
3014 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3016 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
3017 const char * buf, size_t count)
3019 int nth;
3021 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3022 scsi_debug_every_nth = nth;
3023 scsi_debug_cmnd_count = 0;
3024 return count;
3026 return -EINVAL;
3028 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3029 sdebug_every_nth_store);
3031 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3033 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3035 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3036 const char * buf, size_t count)
3038 int n;
3040 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3041 scsi_debug_max_luns = n;
3042 sdebug_max_tgts_luns();
3043 return count;
3045 return -EINVAL;
3047 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3048 sdebug_max_luns_store);
3050 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3052 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3054 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3055 const char * buf, size_t count)
3057 int n;
3059 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3060 (n <= SCSI_DEBUG_CANQUEUE)) {
3061 scsi_debug_max_queue = n;
3062 return count;
3064 return -EINVAL;
3066 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3067 sdebug_max_queue_store);
3069 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3071 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3073 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3075 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3077 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3079 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3081 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3083 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3085 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3086 const char * buf, size_t count)
3088 int n;
3090 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3091 scsi_debug_virtual_gb = n;
3093 sdebug_capacity = get_sdebug_capacity();
3095 return count;
3097 return -EINVAL;
3099 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3100 sdebug_virtual_gb_store);
3102 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3104 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3107 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3108 const char * buf, size_t count)
3110 int delta_hosts;
3112 if (sscanf(buf, "%d", &delta_hosts) != 1)
3113 return -EINVAL;
3114 if (delta_hosts > 0) {
3115 do {
3116 sdebug_add_adapter();
3117 } while (--delta_hosts);
3118 } else if (delta_hosts < 0) {
3119 do {
3120 sdebug_remove_adapter();
3121 } while (++delta_hosts);
3123 return count;
3125 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3126 sdebug_add_host_store);
3128 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3129 char * buf)
3131 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3133 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3134 const char * buf, size_t count)
3136 int n;
3138 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3139 scsi_debug_vpd_use_hostno = n;
3140 return count;
3142 return -EINVAL;
3144 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3145 sdebug_vpd_use_hostno_store);
3147 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3149 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3151 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3153 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3155 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3157 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3159 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3161 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3163 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3165 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3167 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3169 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3171 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3173 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3175 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3177 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3179 ssize_t count;
3181 if (!scsi_debug_lbp())
3182 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3183 sdebug_store_sectors);
3185 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3187 buf[count++] = '\n';
3188 buf[count++] = 0;
3190 return count;
3192 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3195 /* Note: The following function creates attribute files in the
3196 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3197 files (over those found in the /sys/module/scsi_debug/parameters
3198 directory) is that auxiliary actions can be triggered when an attribute
3199 is changed. For example see: sdebug_add_host_store() above.
3200 */
3201 static int do_create_driverfs_files(void)
3203 int ret;
3205 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3206 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3207 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3208 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3209 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3210 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3211 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3212 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3213 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3214 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3215 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3216 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3217 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3218 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3219 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3220 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3221 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3222 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3223 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3224 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3225 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3226 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3227 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3228 return ret;
3231 static void do_remove_driverfs_files(void)
3233 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3234 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3235 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3236 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3237 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3238 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3239 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3240 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3241 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3242 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3243 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3244 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3245 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3246 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3247 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3248 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3249 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3250 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3251 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3252 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3253 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3254 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3255 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3258 struct device *pseudo_primary;
3260 static int __init scsi_debug_init(void)
3262 unsigned long sz;
3263 int host_to_add;
3264 int k;
3265 int ret;
3267 switch (scsi_debug_sector_size) {
3268 case 512:
3269 case 1024:
3270 case 2048:
3271 case 4096:
3272 break;
3273 default:
3274 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3275 scsi_debug_sector_size);
3276 return -EINVAL;
3279 switch (scsi_debug_dif) {
3281 case SD_DIF_TYPE0_PROTECTION:
3282 case SD_DIF_TYPE1_PROTECTION:
3283 case SD_DIF_TYPE2_PROTECTION:
3284 case SD_DIF_TYPE3_PROTECTION:
3285 break;
3287 default:
3288 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3289 return -EINVAL;
3292 if (scsi_debug_guard > 1) {
3293 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3294 return -EINVAL;
3297 if (scsi_debug_ato > 1) {
3298 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3299 return -EINVAL;
3302 if (scsi_debug_physblk_exp > 15) {
3303 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3304 scsi_debug_physblk_exp);
3305 return -EINVAL;
3308 if (scsi_debug_lowest_aligned > 0x3fff) {
3309 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3310 scsi_debug_lowest_aligned);
3311 return -EINVAL;
3314 if (scsi_debug_dev_size_mb < 1)
3315 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3316 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3317 sdebug_store_sectors = sz / scsi_debug_sector_size;
3318 sdebug_capacity = get_sdebug_capacity();
3320 /* play around with geometry, don't waste too much on track 0 */
3321 sdebug_heads = 8;
3322 sdebug_sectors_per = 32;
3323 if (scsi_debug_dev_size_mb >= 256) /* check larger sizes first so 64 heads is reachable */
3324 sdebug_heads = 64;
3325 else if (scsi_debug_dev_size_mb >= 16)
3326 sdebug_heads = 32;
3327 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3328 (sdebug_sectors_per * sdebug_heads);
3329 if (sdebug_cylinders_per >= 1024) {
3330 /* other LLDs do this; implies >= 1GB ram disk ... */
3331 sdebug_heads = 255;
3332 sdebug_sectors_per = 63;
3333 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3334 (sdebug_sectors_per * sdebug_heads);
3337 fake_storep = vmalloc(sz);
3338 if (NULL == fake_storep) {
3339 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3340 return -ENOMEM;
3342 memset(fake_storep, 0, sz);
3343 if (scsi_debug_num_parts > 0)
3344 sdebug_build_parts(fake_storep, sz);
3346 if (scsi_debug_dif) {
3347 int dif_size;
3349 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3350 dif_storep = vmalloc(dif_size);
3352 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3353 dif_size, dif_storep);
3355 if (dif_storep == NULL) {
3356 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3357 ret = -ENOMEM;
3358 goto free_vm;
3361 memset(dif_storep, 0xff, dif_size);
3364 /* Logical Block Provisioning */
3365 if (scsi_debug_lbp()) {
3366 unsigned int map_bytes;
3368 scsi_debug_unmap_max_blocks =
3369 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3371 scsi_debug_unmap_max_desc =
3372 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3374 scsi_debug_unmap_granularity =
3375 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3377 if (scsi_debug_unmap_alignment &&
3378 scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3379 printk(KERN_ERR
3380 "%s: ERR: unmap_granularity < unmap_alignment\n",
3381 __func__);
3382 return -EINVAL;
3385 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3386 map_bytes = map_size >> 3;
3387 map_storep = vmalloc(map_bytes);
3389 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3390 map_size);
3392 if (map_storep == NULL) {
3393 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3394 ret = -ENOMEM;
3395 goto free_vm;
3398 memset(map_storep, 0x0, map_bytes);
3400 /* Map first 1KB for partition table */
3401 if (scsi_debug_num_parts)
3402 map_region(0, 2);
3405 pseudo_primary = root_device_register("pseudo_0");
3406 if (IS_ERR(pseudo_primary)) {
3407 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3408 ret = PTR_ERR(pseudo_primary);
3409 goto free_vm;
3411 ret = bus_register(&pseudo_lld_bus);
3412 if (ret < 0) {
3413 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3414 ret);
3415 goto dev_unreg;
3417 ret = driver_register(&sdebug_driverfs_driver);
3418 if (ret < 0) {
3419 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3420 ret);
3421 goto bus_unreg;
3423 ret = do_create_driverfs_files();
3424 if (ret < 0) {
3425 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3426 ret);
3427 goto del_files;
3430 init_all_queued();
3432 host_to_add = scsi_debug_add_host;
3433 scsi_debug_add_host = 0;
3435 for (k = 0; k < host_to_add; k++) {
3436 if (sdebug_add_adapter()) {
3437 printk(KERN_ERR "scsi_debug_init: "
3438 "sdebug_add_adapter failed k=%d\n", k);
3439 break;
3443 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3444 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3445 scsi_debug_add_host);
3447 return 0;
3449 del_files:
3450 do_remove_driverfs_files();
3451 driver_unregister(&sdebug_driverfs_driver);
3452 bus_unreg:
3453 bus_unregister(&pseudo_lld_bus);
3454 dev_unreg:
3455 root_device_unregister(pseudo_primary);
3456 free_vm:
3457 if (map_storep)
3458 vfree(map_storep);
3459 if (dif_storep)
3460 vfree(dif_storep);
3461 vfree(fake_storep);
3463 return ret;
3466 static void __exit scsi_debug_exit(void)
3468 int k = scsi_debug_add_host;
3470 stop_all_queued();
3471 for (; k; k--)
3472 sdebug_remove_adapter();
3473 do_remove_driverfs_files();
3474 driver_unregister(&sdebug_driverfs_driver);
3475 bus_unregister(&pseudo_lld_bus);
3476 root_device_unregister(pseudo_primary);
3478 if (dif_storep)
3479 vfree(dif_storep);
3481 vfree(fake_storep);
3484 device_initcall(scsi_debug_init);
3485 module_exit(scsi_debug_exit);
3487 static void sdebug_release_adapter(struct device * dev)
3489 struct sdebug_host_info *sdbg_host;
3491 sdbg_host = to_sdebug_host(dev);
3492 kfree(sdbg_host);
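/*
 * sdebug_add_adapter() builds one pseudo host: it pre-allocates
 * num_tgts * max_luns device slots, links the host into sdebug_host_list
 * and registers a device named "adapter<N>" on the pseudo bus.
 */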
3495 static int sdebug_add_adapter(void)
3497 int k, devs_per_host;
3498 int error = 0;
3499 struct sdebug_host_info *sdbg_host;
3500 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3502 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3503 if (NULL == sdbg_host) {
3504 printk(KERN_ERR "%s: out of memory at line %d\n",
3505 __func__, __LINE__);
3506 return -ENOMEM;
3509 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3511 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3512 for (k = 0; k < devs_per_host; k++) {
3513 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3514 if (!sdbg_devinfo) {
3515 printk(KERN_ERR "%s: out of memory at line %d\n",
3516 __func__, __LINE__);
3517 error = -ENOMEM;
3518 goto clean;
3522 spin_lock(&sdebug_host_list_lock);
3523 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3524 spin_unlock(&sdebug_host_list_lock);
3526 sdbg_host->dev.bus = &pseudo_lld_bus;
3527 sdbg_host->dev.parent = pseudo_primary;
3528 sdbg_host->dev.release = &sdebug_release_adapter;
3529 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3531 error = device_register(&sdbg_host->dev);
3533 if (error)
3534 goto clean;
3536 ++scsi_debug_add_host;
3537 return error;
3539 clean:
3540 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3541 dev_list) {
3542 list_del(&sdbg_devinfo->dev_list);
3543 kfree(sdbg_devinfo);
3546 kfree(sdbg_host);
3547 return error;
3550 static void sdebug_remove_adapter(void)
3552 struct sdebug_host_info * sdbg_host = NULL;
3554 spin_lock(&sdebug_host_list_lock);
3555 if (!list_empty(&sdebug_host_list)) {
3556 sdbg_host = list_entry(sdebug_host_list.prev,
3557 struct sdebug_host_info, host_list);
3558 list_del(&sdbg_host->host_list);
3560 spin_unlock(&sdebug_host_list_lock);
3562 if (!sdbg_host)
3563 return;
3565 device_unregister(&sdbg_host->dev);
3566 --scsi_debug_add_host;
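/*
 * scsi_debug_queuecommand_lck() is the command entry point: it validates
 * the target and LUN, applies every_nth error injection, restricts the
 * REPORT LUNS well-known LUN to its few allowed opcodes, then dispatches
 * on the CDB opcode and completes (or defers completion of) the command
 * through schedule_resp().
 */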
3569 static
3570 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3572 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3573 int len, k;
3574 unsigned int num;
3575 unsigned long long lba;
3576 u32 ei_lba;
3577 int errsts = 0;
3578 int target = SCpnt->device->id;
3579 struct sdebug_dev_info *devip = NULL;
3580 int inj_recovered = 0;
3581 int inj_transport = 0;
3582 int inj_dif = 0;
3583 int inj_dix = 0;
3584 int delay_override = 0;
3585 int unmap = 0;
3587 scsi_set_resid(SCpnt, 0);
3588 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3589 printk(KERN_INFO "scsi_debug: cmd ");
3590 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3591 printk("%02x ", (int)cmd[k]);
3592 printk("\n");
3595 if (target == SCpnt->device->host->hostt->this_id) {
3596 printk(KERN_INFO "scsi_debug: initiator's id used as "
3597 "target!\n");
3598 return schedule_resp(SCpnt, NULL, done,
3599 DID_NO_CONNECT << 16, 0);
3602 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3603 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3604 return schedule_resp(SCpnt, NULL, done,
3605 DID_NO_CONNECT << 16, 0);
3606 devip = devInfoReg(SCpnt->device);
3607 if (NULL == devip)
3608 return schedule_resp(SCpnt, NULL, done,
3609 DID_NO_CONNECT << 16, 0);
3611 if ((scsi_debug_every_nth != 0) &&
3612 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3613 scsi_debug_cmnd_count = 0;
3614 if (scsi_debug_every_nth < -1)
3615 scsi_debug_every_nth = -1;
3616 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3617 return 0; /* ignore command causing timeout */
3618 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3619 inj_recovered = 1; /* to reads and writes below */
3620 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3621 inj_transport = 1; /* to reads and writes below */
3622 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3623 inj_dif = 1; /* to reads and writes below */
3624 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3625 inj_dix = 1; /* to reads and writes below */
3626 }
3628 if (devip->wlun) {
3629 switch (*cmd) {
3630 case INQUIRY:
3631 case REQUEST_SENSE:
3632 case TEST_UNIT_READY:
3633 case REPORT_LUNS:
3634 break; /* only allowable wlun commands */
3635 default:
3636 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3637 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3638 "not supported for wlun\n", *cmd);
3639 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3640 INVALID_OPCODE, 0);
3641 errsts = check_condition_result;
3642 return schedule_resp(SCpnt, devip, done, errsts,
3643 0);
3644 }
3645 }
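/* Main CDB emulation: each opcode is either handled inline or routed to its
 * resp_*() helper; on failure the helper (or mk_sense_buffer() here) sets up
 * sense data and errsts becomes a CHECK CONDITION result that
 * schedule_resp() reports back to the midlayer.
 */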
3647 switch (*cmd) {
3648 case INQUIRY: /* mandatory, ignore unit attention */
3649 delay_override = 1;
3650 errsts = resp_inquiry(SCpnt, target, devip);
3651 break;
3652 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3653 delay_override = 1;
3654 errsts = resp_requests(SCpnt, devip);
3655 break;
3656 case REZERO_UNIT: /* actually this is REWIND for SSC */
3657 case START_STOP:
3658 errsts = resp_start_stop(SCpnt, devip);
3659 break;
3660 case ALLOW_MEDIUM_REMOVAL:
3661 errsts = check_readiness(SCpnt, 1, devip);
3662 if (errsts)
3663 break;
3664 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3665 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3666 cmd[4] ? "inhibited" : "enabled");
3667 break;
3668 case SEND_DIAGNOSTIC: /* mandatory */
3669 errsts = check_readiness(SCpnt, 1, devip);
3670 break;
3671 case TEST_UNIT_READY: /* mandatory */
3672 delay_override = 1;
3673 errsts = check_readiness(SCpnt, 0, devip);
3674 break;
3675 case RESERVE:
3676 errsts = check_readiness(SCpnt, 1, devip);
3677 break;
3678 case RESERVE_10:
3679 errsts = check_readiness(SCpnt, 1, devip);
3680 break;
3681 case RELEASE:
3682 errsts = check_readiness(SCpnt, 1, devip);
3683 break;
3684 case RELEASE_10:
3685 errsts = check_readiness(SCpnt, 1, devip);
3686 break;
3687 case READ_CAPACITY:
3688 errsts = resp_readcap(SCpnt, devip);
3689 break;
3690 case SERVICE_ACTION_IN:
3691 if (cmd[1] == SAI_READ_CAPACITY_16)
3692 errsts = resp_readcap16(SCpnt, devip);
3693 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3695 if (scsi_debug_lbp() == 0) {
3696 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3697 INVALID_COMMAND_OPCODE, 0);
3698 errsts = check_condition_result;
3699 } else
3700 errsts = resp_get_lba_status(SCpnt, devip);
3701 } else {
3702 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3703 INVALID_OPCODE, 0);
3704 errsts = check_condition_result;
3705 }
3706 break;
3707 case MAINTENANCE_IN:
3708 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3709 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3710 INVALID_OPCODE, 0);
3711 errsts = check_condition_result;
3712 break;
3713 }
3714 errsts = resp_report_tgtpgs(SCpnt, devip);
3715 break;
3716 case READ_16:
3717 case READ_12:
3718 case READ_10:
3719 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3720 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3721 cmd[1] & 0xe0) {
3722 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3723 INVALID_COMMAND_OPCODE, 0);
3724 errsts = check_condition_result;
3725 break;
3726 }
3728 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3729 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3730 (cmd[1] & 0xe0) == 0)
3731 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3733 /* fall through */
3734 case READ_6:
3735 read:
3736 errsts = check_readiness(SCpnt, 0, devip);
3737 if (errsts)
3738 break;
3739 if (scsi_debug_fake_rw)
3740 break;
3741 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3742 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3743 if (inj_recovered && (0 == errsts)) {
3744 mk_sense_buffer(devip, RECOVERED_ERROR,
3745 THRESHOLD_EXCEEDED, 0);
3746 errsts = check_condition_result;
3747 } else if (inj_transport && (0 == errsts)) {
3748 mk_sense_buffer(devip, ABORTED_COMMAND,
3749 TRANSPORT_PROBLEM, ACK_NAK_TO);
3750 errsts = check_condition_result;
3751 } else if (inj_dif && (0 == errsts)) {
3752 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3753 errsts = illegal_condition_result;
3754 } else if (inj_dix && (0 == errsts)) {
3755 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3756 errsts = illegal_condition_result;
3757 }
3758 break;
3759 case REPORT_LUNS: /* mandatory, ignore unit attention */
3760 delay_override = 1;
3761 errsts = resp_report_luns(SCpnt, devip);
3762 break;
3763 case VERIFY: /* 10 byte SBC-2 command */
3764 errsts = check_readiness(SCpnt, 0, devip);
3765 break;
3766 case WRITE_16:
3767 case WRITE_12:
3768 case WRITE_10:
3769 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3770 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3771 cmd[1] & 0xe0) {
3772 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3773 INVALID_COMMAND_OPCODE, 0);
3774 errsts = check_condition_result;
3775 break;
3776 }
3778 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3779 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3780 (cmd[1] & 0xe0) == 0)
3781 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3783 /* fall through */
3784 case WRITE_6:
3785 write:
3786 errsts = check_readiness(SCpnt, 0, devip);
3787 if (errsts)
3788 break;
3789 if (scsi_debug_fake_rw)
3790 break;
3791 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3792 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3793 if (inj_recovered && (0 == errsts)) {
3794 mk_sense_buffer(devip, RECOVERED_ERROR,
3795 THRESHOLD_EXCEEDED, 0);
3796 errsts = check_condition_result;
3797 } else if (inj_dif && (0 == errsts)) {
3798 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3799 errsts = illegal_condition_result;
3800 } else if (inj_dix && (0 == errsts)) {
3801 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3802 errsts = illegal_condition_result;
3803 }
3804 break;
3805 case WRITE_SAME_16:
3806 case WRITE_SAME:
3807 if (cmd[1] & 0x8) {
3808 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3809 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3810 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3811 INVALID_FIELD_IN_CDB, 0);
3812 errsts = check_condition_result;
3813 } else
3814 unmap = 1;
3815 }
3816 if (errsts)
3817 break;
3818 errsts = check_readiness(SCpnt, 0, devip);
3819 if (errsts)
3820 break;
3821 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3822 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3823 break;
3824 case UNMAP:
3825 errsts = check_readiness(SCpnt, 0, devip);
3826 if (errsts)
3827 break;
3829 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3830 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3831 INVALID_COMMAND_OPCODE, 0);
3832 errsts = check_condition_result;
3833 } else
3834 errsts = resp_unmap(SCpnt, devip);
3835 break;
3836 case MODE_SENSE:
3837 case MODE_SENSE_10:
3838 errsts = resp_mode_sense(SCpnt, target, devip);
3839 break;
3840 case MODE_SELECT:
3841 errsts = resp_mode_select(SCpnt, 1, devip);
3842 break;
3843 case MODE_SELECT_10:
3844 errsts = resp_mode_select(SCpnt, 0, devip);
3845 break;
3846 case LOG_SENSE:
3847 errsts = resp_log_sense(SCpnt, devip);
3848 break;
3849 case SYNCHRONIZE_CACHE:
3850 delay_override = 1;
3851 errsts = check_readiness(SCpnt, 0, devip);
3852 break;
3853 case WRITE_BUFFER:
3854 errsts = check_readiness(SCpnt, 1, devip);
3855 break;
3856 case XDWRITEREAD_10:
3857 if (!scsi_bidi_cmnd(SCpnt)) {
3858 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3859 INVALID_FIELD_IN_CDB, 0);
3860 errsts = check_condition_result;
3861 break;
3862 }
3864 errsts = check_readiness(SCpnt, 0, devip);
3865 if (errsts)
3866 break;
3867 if (scsi_debug_fake_rw)
3868 break;
3869 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3870 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3871 if (errsts)
3872 break;
3873 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3874 if (errsts)
3875 break;
3876 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3877 break;
3878 case VARIABLE_LENGTH_CMD:
3879 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3881 if ((cmd[10] & 0xe0) == 0)
3882 printk(KERN_ERR
3883 "Unprotected RD/WR to DIF device\n");
3885 if (cmd[9] == READ_32) {
3886 BUG_ON(SCpnt->cmd_len < 32);
3887 goto read;
3888 }
3890 if (cmd[9] == WRITE_32) {
3891 BUG_ON(SCpnt->cmd_len < 32);
3892 goto write;
3893 }
3894 }
3896 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3897 INVALID_FIELD_IN_CDB, 0);
3898 errsts = check_condition_result;
3899 break;
3901 default:
3902 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3903 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3904 "supported\n", *cmd);
3905 errsts = check_readiness(SCpnt, 1, devip);
3906 if (errsts)
3907 break; /* Unit attention takes precedence */
3908 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3909 errsts = check_condition_result;
3910 break;
3911 }
3912 return schedule_resp(SCpnt, devip, done, errsts,
3913 (delay_override ? 0 : scsi_debug_delay));
3914 }
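/* DEF_SCSI_QCMD() (from <scsi/scsi_host.h>) generates the locked wrapper
 * scsi_debug_queuecommand() that the host template below points at; it
 * simply invokes scsi_debug_queuecommand_lck() with the host lock held.
 */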
3916 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
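/* Host template for the simulated adapter. Note that this_id is 7 (the
 * value queuecommand checks in order to reject commands addressed to the
 * initiator itself) and that can_queue is overwritten from the
 * scsi_debug_max_queue parameter in sdebug_driver_probe() below.
 */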
3918 static struct scsi_host_template sdebug_driver_template = {
3919 .proc_info = scsi_debug_proc_info,
3920 .proc_name = sdebug_proc_name,
3921 .name = "SCSI DEBUG",
3922 .info = scsi_debug_info,
3923 .slave_alloc = scsi_debug_slave_alloc,
3924 .slave_configure = scsi_debug_slave_configure,
3925 .slave_destroy = scsi_debug_slave_destroy,
3926 .ioctl = scsi_debug_ioctl,
3927 .queuecommand = scsi_debug_queuecommand,
3928 .eh_abort_handler = scsi_debug_abort,
3929 .eh_bus_reset_handler = scsi_debug_bus_reset,
3930 .eh_device_reset_handler = scsi_debug_device_reset,
3931 .eh_host_reset_handler = scsi_debug_host_reset,
3932 .bios_param = scsi_debug_biosparam,
3933 .can_queue = SCSI_DEBUG_CANQUEUE,
3934 .this_id = 7,
3935 .sg_tablesize = 256,
3936 .cmd_per_lun = 16,
3937 .max_sectors = 0xffff,
3938 .use_clustering = DISABLE_CLUSTERING,
3939 .module = THIS_MODULE,
3940 };
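/*
 * sdebug_driver_probe() runs when an adapter device registered by
 * sdebug_add_adapter() binds to the pseudo bus: it allocates a Scsi_Host
 * from the template, advertises the DIF/DIX protection and guard type
 * derived from the dif/dix/guard parameters, then adds and scans the host
 * so the simulated targets and LUNs appear.
 */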
3942 static int sdebug_driver_probe(struct device * dev)
3943 {
3944 int error = 0;
3945 struct sdebug_host_info *sdbg_host;
3946 struct Scsi_Host *hpnt;
3947 int host_prot;
3949 sdbg_host = to_sdebug_host(dev);
3951 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3952 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3953 if (NULL == hpnt) {
3954 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
3955 error = -ENODEV;
3956 return error;
3957 }
3959 sdbg_host->shost = hpnt;
3960 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3961 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3962 hpnt->max_id = scsi_debug_num_tgts + 1;
3963 else
3964 hpnt->max_id = scsi_debug_num_tgts;
3965 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
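/* Map the dif/dix module parameters onto the host protection mask.
 * Illustrative (not the only valid combination): loading with
 * "dif=1 dix=1 guard=0" would advertise DIF type 1 + DIX type 1 with a
 * T10 CRC guard, since guard != 1 selects SHOST_DIX_GUARD_CRC below.
 */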
3967 host_prot = 0;
3969 switch (scsi_debug_dif) {
3971 case SD_DIF_TYPE1_PROTECTION:
3972 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3973 if (scsi_debug_dix)
3974 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3975 break;
3977 case SD_DIF_TYPE2_PROTECTION:
3978 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3979 if (scsi_debug_dix)
3980 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3981 break;
3983 case SD_DIF_TYPE3_PROTECTION:
3984 host_prot = SHOST_DIF_TYPE3_PROTECTION;
3985 if (scsi_debug_dix)
3986 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3987 break;
3989 default:
3990 if (scsi_debug_dix)
3991 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3992 break;
3993 }
3995 scsi_host_set_prot(hpnt, host_prot);
3997 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3998 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3999 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
4000 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
4001 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
4002 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
4003 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
4004 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
4006 if (scsi_debug_guard == 1)
4007 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
4008 else
4009 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
4011 error = scsi_add_host(hpnt, &sdbg_host->dev);
4012 if (error) {
4013 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
4014 error = -ENODEV;
4015 scsi_host_put(hpnt);
4016 } else
4017 scsi_scan_host(hpnt);
4020 return error;
4021 }
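/*
 * sdebug_driver_remove() undoes the probe: remove the Scsi_Host from the
 * midlayer, free the per-device info entries still on the adapter's list,
 * then drop the final host reference with scsi_host_put().
 */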
4023 static int sdebug_driver_remove(struct device * dev)
4024 {
4025 struct sdebug_host_info *sdbg_host;
4026 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4028 sdbg_host = to_sdebug_host(dev);
4030 if (!sdbg_host) {
4031 printk(KERN_ERR "%s: Unable to locate host info\n",
4032 __func__);
4033 return -ENODEV;
4034 }
4036 scsi_remove_host(sdbg_host->shost);
4038 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4039 dev_list) {
4040 list_del(&sdbg_devinfo->dev_list);
4041 kfree(sdbg_devinfo);
4042 }
4044 scsi_host_put(sdbg_host->shost);
4045 return 0;
4046 }
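/* The pseudo bus accepts every device unconditionally, so each adapter
 * created by sdebug_add_adapter() binds to this driver and flows through
 * sdebug_driver_probe()/sdebug_driver_remove() above.
 */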
4048 static int pseudo_lld_bus_match(struct device *dev,
4049 struct device_driver *dev_driver)
4050 {
4051 return 1;
4052 }
4054 static struct bus_type pseudo_lld_bus = {
4055 .name = "pseudo",
4056 .match = pseudo_lld_bus_match,
4057 .probe = sdebug_driver_probe,
4058 .remove = sdebug_driver_remove,