drivers/scsi/scsi_debug.c
1 /*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
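 *
 * A purely illustrative example of that option style (the parameter names
 * are real module parameters, the values arbitrary): booting with
 * "scsi_debug.num_tgts=2 scsi_debug.max_luns=4", or loading the module with
 * "modprobe scsi_debug num_tgts=2 max_luns=4 dev_size_mb=16", should yield
 * 2 targets of 4 LUNs each, all sharing one 16 MB ram store.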
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
58 #include "sd.h"
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
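 * As a hypothetical illustration of how those counts multiply: add_host=2,
 * num_tgts=3 and max_luns=4 would present 2 pseudo hosts, each with 3
 * targets of 4 LUNs, i.e. 24 simulated devices in total.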
92 #define DEF_ATO 1
93 #define DEF_DELAY 1
94 #define DEF_DEV_SIZE_MB 8
95 #define DEF_DIF 0
96 #define DEF_DIX 0
97 #define DEF_D_SENSE 0
98 #define DEF_EVERY_NTH 0
99 #define DEF_FAKE_RW 0
100 #define DEF_GUARD 0
101 #define DEF_LBPU 0
102 #define DEF_LBPWS 0
103 #define DEF_LBPWS10 0
104 #define DEF_LBPRZ 1
105 #define DEF_LOWEST_ALIGNED 0
106 #define DEF_NO_LUN_0 0
107 #define DEF_NUM_PARTS 0
108 #define DEF_OPTS 0
109 #define DEF_OPT_BLKS 64
110 #define DEF_PHYSBLK_EXP 0
111 #define DEF_PTYPE 0
112 #define DEF_REMOVABLE false
113 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
114 #define DEF_SECTOR_SIZE 512
115 #define DEF_UNMAP_ALIGNMENT 0
116 #define DEF_UNMAP_GRANULARITY 1
117 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
118 #define DEF_UNMAP_MAX_DESC 256
119 #define DEF_VIRTUAL_GB 0
120 #define DEF_VPD_USE_HOSTNO 1
121 #define DEF_WRITESAME_LENGTH 0xFFFF
123 /* bit mask values for scsi_debug_opts */
124 #define SCSI_DEBUG_OPT_NOISE 1
125 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
126 #define SCSI_DEBUG_OPT_TIMEOUT 4
127 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
128 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
129 #define SCSI_DEBUG_OPT_DIF_ERR 32
130 #define SCSI_DEBUG_OPT_DIX_ERR 64
131 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
132 /* When "every_nth" > 0 then modulo "every_nth" commands:
133 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
134 * - a RECOVERED_ERROR is simulated on successful read and write
135 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
136 * - a TRANSPORT_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
139 * When "every_nth" < 0 then after "- every_nth" commands:
140 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
141 * - a RECOVERED_ERROR is simulated on successful read and write
142 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
143 * - a TRANSPORT_ERROR is simulated on successful read and write
144 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
145 * This will continue until some other action occurs (e.g. the user
146 * writing a new value (other than -1 or 1) to every_nth via sysfs).
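 * A worked example with arbitrary values: "opts=4 every_nth=100" should make
 * every 100th command get no response (a simulated timeout), while
 * "opts=16 every_nth=-1" should turn subsequent, otherwise successful, reads
 * and writes into simulated transport errors until every_nth is rewritten.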
149 /* when the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a medium
150 * error is simulated at this sector on read commands: */
151 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
152 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
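/* Worked example of the two macros above: with the SCSI_DEBUG_OPT_MEDIUM_ERR
 * bit set in opts, any read overlapping LBAs 0x1234..0x123d (4660..4669,
 * i.e. OPT_MEDIUM_ERR_NUM sectors starting at OPT_MEDIUM_ERR_ADDR) is
 * expected to fail with MEDIUM ERROR / UNRECOVERED READ ERROR sense data. */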
154 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
155 * or "peripheral device" addressing (value 0) */
156 #define SAM2_LUN_ADDRESS_METHOD 0
157 #define SAM2_WLUN_REPORT_LUNS 0xc101
159 /* Can queue up to this number of commands. Typically commands that
160 * have a non-zero delay are queued. */
161 #define SCSI_DEBUG_CANQUEUE 255
163 static int scsi_debug_add_host = DEF_NUM_HOST;
164 static int scsi_debug_ato = DEF_ATO;
165 static int scsi_debug_delay = DEF_DELAY;
166 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
167 static int scsi_debug_dif = DEF_DIF;
168 static int scsi_debug_dix = DEF_DIX;
169 static int scsi_debug_dsense = DEF_D_SENSE;
170 static int scsi_debug_every_nth = DEF_EVERY_NTH;
171 static int scsi_debug_fake_rw = DEF_FAKE_RW;
172 static int scsi_debug_guard = DEF_GUARD;
173 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
174 static int scsi_debug_max_luns = DEF_MAX_LUNS;
175 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
176 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
177 static int scsi_debug_no_uld = 0;
178 static int scsi_debug_num_parts = DEF_NUM_PARTS;
179 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
180 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
181 static int scsi_debug_opts = DEF_OPTS;
182 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
183 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
184 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
185 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
186 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
187 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
188 static unsigned int scsi_debug_lbpu = DEF_LBPU;
189 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
190 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
191 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
192 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
193 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
194 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
195 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
196 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
197 static bool scsi_debug_removable = DEF_REMOVABLE;
199 static int scsi_debug_cmnd_count = 0;
201 #define DEV_READONLY(TGT) (0)
203 static unsigned int sdebug_store_sectors;
204 static sector_t sdebug_capacity; /* in sectors */
206 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
207 may still need them */
208 static int sdebug_heads; /* heads per disk */
209 static int sdebug_cylinders_per; /* cylinders per surface */
210 static int sdebug_sectors_per; /* sectors per cylinder */
212 #define SDEBUG_MAX_PARTS 4
214 #define SDEBUG_SENSE_LEN 32
216 #define SCSI_DEBUG_MAX_CMD_LEN 32
218 static unsigned int scsi_debug_lbp(void)
220 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
223 struct sdebug_dev_info {
224 struct list_head dev_list;
225 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
226 unsigned int channel;
227 unsigned int target;
228 unsigned int lun;
229 struct sdebug_host_info *sdbg_host;
230 unsigned int wlun;
231 char reset;
232 char stopped;
233 char used;
236 struct sdebug_host_info {
237 struct list_head host_list;
238 struct Scsi_Host *shost;
239 struct device dev;
240 struct list_head dev_info_list;
243 #define to_sdebug_host(d) \
244 container_of(d, struct sdebug_host_info, dev)
246 static LIST_HEAD(sdebug_host_list);
247 static DEFINE_SPINLOCK(sdebug_host_list_lock);
249 typedef void (* done_funct_t) (struct scsi_cmnd *);
251 struct sdebug_queued_cmd {
252 int in_use;
253 struct timer_list cmnd_timer;
254 done_funct_t done_funct;
255 struct scsi_cmnd * a_cmnd;
256 int scsi_result;
258 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
260 static unsigned char * fake_storep; /* ramdisk storage */
261 static struct sd_dif_tuple *dif_storep; /* protection info */
262 static void *map_storep; /* provisioning map */
264 static unsigned long map_size;
265 static int num_aborts = 0;
266 static int num_dev_resets = 0;
267 static int num_bus_resets = 0;
268 static int num_host_resets = 0;
269 static int dix_writes;
270 static int dix_reads;
271 static int dif_errors;
273 static DEFINE_SPINLOCK(queued_arr_lock);
274 static DEFINE_RWLOCK(atomic_rw);
276 static char sdebug_proc_name[] = "scsi_debug";
278 static struct bus_type pseudo_lld_bus;
280 static struct device_driver sdebug_driverfs_driver = {
281 .name = sdebug_proc_name,
282 .bus = &pseudo_lld_bus,
285 static const int check_condition_result =
286 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
288 static const int illegal_condition_result =
289 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
291 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
292 0, 0, 0x2, 0x4b};
293 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
294 0, 0, 0x0, 0x0};
296 static int sdebug_add_adapter(void);
297 static void sdebug_remove_adapter(void);
299 static void sdebug_max_tgts_luns(void)
301 struct sdebug_host_info *sdbg_host;
302 struct Scsi_Host *hpnt;
304 spin_lock(&sdebug_host_list_lock);
305 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
306 hpnt = sdbg_host->shost;
307 if ((hpnt->this_id >= 0) &&
308 (scsi_debug_num_tgts > hpnt->this_id))
309 hpnt->max_id = scsi_debug_num_tgts + 1;
310 else
311 hpnt->max_id = scsi_debug_num_tgts;
312 /* scsi_debug_max_luns; */
313 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
315 spin_unlock(&sdebug_host_list_lock);
318 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
319 int asc, int asq)
321 unsigned char *sbuff;
323 sbuff = devip->sense_buff;
324 memset(sbuff, 0, SDEBUG_SENSE_LEN);
326 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
328 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
329 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
330 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
333 static void get_data_transfer_info(unsigned char *cmd,
334 unsigned long long *lba, unsigned int *num,
335 u32 *ei_lba)
337 *ei_lba = 0;
339 switch (*cmd) {
340 case VARIABLE_LENGTH_CMD:
341 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
342 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
343 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
344 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
346 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
347 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
349 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
350 (u32)cmd[28] << 24;
351 break;
353 case WRITE_SAME_16:
354 case WRITE_16:
355 case READ_16:
356 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
357 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
358 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
359 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
361 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
362 (u32)cmd[10] << 24;
363 break;
364 case WRITE_12:
365 case READ_12:
366 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
367 (u32)cmd[2] << 24;
369 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
370 (u32)cmd[6] << 24;
371 break;
372 case WRITE_SAME:
373 case WRITE_10:
374 case READ_10:
375 case XDWRITEREAD_10:
376 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
377 (u32)cmd[2] << 24;
379 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
380 break;
381 case WRITE_6:
382 case READ_6:
383 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
384 (u32)(cmd[1] & 0x1f) << 16;
385 *num = (0 == cmd[4]) ? 256 : cmd[4];
386 break;
387 default:
388 break;
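/* Worked example for the READ(10)/WRITE(10) case above (CDB bytes invented
 * for illustration): a READ(10) of 28 00 00 00 12 34 00 00 08 00 gives
 * lba = (cmd[2] << 24) | (cmd[3] << 16) | (cmd[4] << 8) | cmd[5] = 0x1234
 * and num = (cmd[7] << 8) | cmd[8] = 8 blocks. */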
392 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
394 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
395 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
397 return -EINVAL;
398 /* return -ENOTTY; // correct return but upsets fdisk */
401 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
402 struct sdebug_dev_info * devip)
404 if (devip->reset) {
405 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
406 printk(KERN_INFO "scsi_debug: Reporting Unit "
407 "attention: power on reset\n");
408 devip->reset = 0;
409 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
410 return check_condition_result;
412 if ((0 == reset_only) && devip->stopped) {
413 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
414 printk(KERN_INFO "scsi_debug: Reporting Not "
415 "ready: initializing command required\n");
416 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
417 0x2);
418 return check_condition_result;
420 return 0;
423 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
424 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
425 int arr_len)
427 int act_len;
428 struct scsi_data_buffer *sdb = scsi_in(scp);
430 if (!sdb->length)
431 return 0;
432 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
433 return (DID_ERROR << 16);
435 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
436 arr, arr_len);
437 sdb->resid = scsi_bufflen(scp) - act_len;
439 return 0;
442 /* Returns number of bytes fetched into 'arr' or -1 if error. */
443 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
444 int arr_len)
446 if (!scsi_bufflen(scp))
447 return 0;
448 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
449 return -1;
451 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
455 static const char * inq_vendor_id = "Linux ";
456 static const char * inq_product_id = "scsi_debug ";
457 static const char * inq_product_rev = "0004";
459 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
460 int target_dev_id, int dev_id_num,
461 const char * dev_id_str,
462 int dev_id_str_len)
464 int num, port_a;
465 char b[32];
467 port_a = target_dev_id + 1;
468 /* T10 vendor identifier field format (faked) */
469 arr[0] = 0x2; /* ASCII */
470 arr[1] = 0x1;
471 arr[2] = 0x0;
472 memcpy(&arr[4], inq_vendor_id, 8);
473 memcpy(&arr[12], inq_product_id, 16);
474 memcpy(&arr[28], dev_id_str, dev_id_str_len);
475 num = 8 + 16 + dev_id_str_len;
476 arr[3] = num;
477 num += 4;
478 if (dev_id_num >= 0) {
479 /* NAA-5, Logical unit identifier (binary) */
480 arr[num++] = 0x1; /* binary (not necessarily sas) */
481 arr[num++] = 0x3; /* PIV=0, lu, naa */
482 arr[num++] = 0x0;
483 arr[num++] = 0x8;
484 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
485 arr[num++] = 0x33;
486 arr[num++] = 0x33;
487 arr[num++] = 0x30;
488 arr[num++] = (dev_id_num >> 24);
489 arr[num++] = (dev_id_num >> 16) & 0xff;
490 arr[num++] = (dev_id_num >> 8) & 0xff;
491 arr[num++] = dev_id_num & 0xff;
492 /* Target relative port number */
493 arr[num++] = 0x61; /* proto=sas, binary */
494 arr[num++] = 0x94; /* PIV=1, target port, rel port */
495 arr[num++] = 0x0; /* reserved */
496 arr[num++] = 0x4; /* length */
497 arr[num++] = 0x0; /* reserved */
498 arr[num++] = 0x0; /* reserved */
499 arr[num++] = 0x0;
500 arr[num++] = 0x1; /* relative port A */
502 /* NAA-5, Target port identifier */
503 arr[num++] = 0x61; /* proto=sas, binary */
504 arr[num++] = 0x93; /* piv=1, target port, naa */
505 arr[num++] = 0x0;
506 arr[num++] = 0x8;
507 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
508 arr[num++] = 0x22;
509 arr[num++] = 0x22;
510 arr[num++] = 0x20;
511 arr[num++] = (port_a >> 24);
512 arr[num++] = (port_a >> 16) & 0xff;
513 arr[num++] = (port_a >> 8) & 0xff;
514 arr[num++] = port_a & 0xff;
515 /* NAA-5, Target port group identifier */
516 arr[num++] = 0x61; /* proto=sas, binary */
517 arr[num++] = 0x95; /* piv=1, target port group id */
518 arr[num++] = 0x0;
519 arr[num++] = 0x4;
520 arr[num++] = 0;
521 arr[num++] = 0;
522 arr[num++] = (port_group_id >> 8) & 0xff;
523 arr[num++] = port_group_id & 0xff;
524 /* NAA-5, Target device identifier */
525 arr[num++] = 0x61; /* proto=sas, binary */
526 arr[num++] = 0xa3; /* piv=1, target device, naa */
527 arr[num++] = 0x0;
528 arr[num++] = 0x8;
529 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
530 arr[num++] = 0x22;
531 arr[num++] = 0x22;
532 arr[num++] = 0x20;
533 arr[num++] = (target_dev_id >> 24);
534 arr[num++] = (target_dev_id >> 16) & 0xff;
535 arr[num++] = (target_dev_id >> 8) & 0xff;
536 arr[num++] = target_dev_id & 0xff;
537 /* SCSI name string: Target device identifier */
538 arr[num++] = 0x63; /* proto=sas, UTF-8 */
539 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
540 arr[num++] = 0x0;
541 arr[num++] = 24;
542 memcpy(arr + num, "naa.52222220", 12);
543 num += 12;
544 snprintf(b, sizeof(b), "%08X", target_dev_id);
545 memcpy(arr + num, b, 8);
546 num += 8;
547 memset(arr + num, 0, 4);
548 num += 4;
549 return num;
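/* Illustrative decode of the designation descriptors built above, for the
 * default single device (host_no 0, target 0, lun 0, vpd_use_hostno=1):
 * the caller passes dev_id_num = (0 + 1) * 2000 + 0 + 0 = 2000 (0x7d0), so
 * the logical unit NAA-5 designator reads naa.53333330000007d0, using the
 * faked IEEE company id 0x333333. */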
553 static unsigned char vpd84_data[] = {
554 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
555 0x22,0x22,0x22,0x0,0xbb,0x1,
556 0x22,0x22,0x22,0x0,0xbb,0x2,
559 static int inquiry_evpd_84(unsigned char * arr)
561 memcpy(arr, vpd84_data, sizeof(vpd84_data));
562 return sizeof(vpd84_data);
565 static int inquiry_evpd_85(unsigned char * arr)
567 int num = 0;
568 const char * na1 = "https://www.kernel.org/config";
569 const char * na2 = "http://www.kernel.org/log";
570 int plen, olen;
572 arr[num++] = 0x1; /* lu, storage config */
573 arr[num++] = 0x0; /* reserved */
574 arr[num++] = 0x0;
575 olen = strlen(na1);
576 plen = olen + 1;
577 if (plen % 4)
578 plen = ((plen / 4) + 1) * 4;
579 arr[num++] = plen; /* length, null terminated, padded */
580 memcpy(arr + num, na1, olen);
581 memset(arr + num + olen, 0, plen - olen);
582 num += plen;
584 arr[num++] = 0x4; /* lu, logging */
585 arr[num++] = 0x0; /* reserved */
586 arr[num++] = 0x0;
587 olen = strlen(na2);
588 plen = olen + 1;
589 if (plen % 4)
590 plen = ((plen / 4) + 1) * 4;
591 arr[num++] = plen; /* length, null terminated, padded */
592 memcpy(arr + num, na2, olen);
593 memset(arr + num + olen, 0, plen - olen);
594 num += plen;
596 return num;
599 /* SCSI ports VPD page */
600 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
602 int num = 0;
603 int port_a, port_b;
605 port_a = target_dev_id + 1;
606 port_b = port_a + 1;
607 arr[num++] = 0x0; /* reserved */
608 arr[num++] = 0x0; /* reserved */
609 arr[num++] = 0x0;
610 arr[num++] = 0x1; /* relative port 1 (primary) */
611 memset(arr + num, 0, 6);
612 num += 6;
613 arr[num++] = 0x0;
614 arr[num++] = 12; /* length tp descriptor */
615 /* naa-5 target port identifier (A) */
616 arr[num++] = 0x61; /* proto=sas, binary */
617 arr[num++] = 0x93; /* PIV=1, target port, NAA */
618 arr[num++] = 0x0; /* reserved */
619 arr[num++] = 0x8; /* length */
620 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
621 arr[num++] = 0x22;
622 arr[num++] = 0x22;
623 arr[num++] = 0x20;
624 arr[num++] = (port_a >> 24);
625 arr[num++] = (port_a >> 16) & 0xff;
626 arr[num++] = (port_a >> 8) & 0xff;
627 arr[num++] = port_a & 0xff;
629 arr[num++] = 0x0; /* reserved */
630 arr[num++] = 0x0; /* reserved */
631 arr[num++] = 0x0;
632 arr[num++] = 0x2; /* relative port 2 (secondary) */
633 memset(arr + num, 0, 6);
634 num += 6;
635 arr[num++] = 0x0;
636 arr[num++] = 12; /* length tp descriptor */
637 /* naa-5 target port identifier (B) */
638 arr[num++] = 0x61; /* proto=sas, binary */
639 arr[num++] = 0x93; /* PIV=1, target port, NAA */
640 arr[num++] = 0x0; /* reserved */
641 arr[num++] = 0x8; /* length */
642 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
643 arr[num++] = 0x22;
644 arr[num++] = 0x22;
645 arr[num++] = 0x20;
646 arr[num++] = (port_b >> 24);
647 arr[num++] = (port_b >> 16) & 0xff;
648 arr[num++] = (port_b >> 8) & 0xff;
649 arr[num++] = port_b & 0xff;
651 return num;
655 static unsigned char vpd89_data[] = {
656 /* from 4th byte */ 0,0,0,0,
657 'l','i','n','u','x',' ',' ',' ',
658 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
659 '1','2','3','4',
660 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
661 0xec,0,0,0,
662 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
663 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
664 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
665 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
666 0x53,0x41,
667 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
668 0x20,0x20,
669 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
670 0x10,0x80,
671 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
672 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
673 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
674 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
675 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
676 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
677 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
678 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
679 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
680 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
681 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
682 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
683 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
684 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
685 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
686 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
687 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
689 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
690 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
691 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
699 static int inquiry_evpd_89(unsigned char * arr)
701 memcpy(arr, vpd89_data, sizeof(vpd89_data));
702 return sizeof(vpd89_data);
706 /* Block limits VPD page (SBC-3) */
707 static unsigned char vpdb0_data[] = {
708 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
711 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
714 static int inquiry_evpd_b0(unsigned char * arr)
716 unsigned int gran;
718 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
720 /* Optimal transfer length granularity */
721 gran = 1 << scsi_debug_physblk_exp;
722 arr[2] = (gran >> 8) & 0xff;
723 arr[3] = gran & 0xff;
725 /* Maximum Transfer Length */
726 if (sdebug_store_sectors > 0x400) {
727 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
728 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
729 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
730 arr[7] = sdebug_store_sectors & 0xff;
733 /* Optimal Transfer Length */
734 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
736 if (scsi_debug_lbpu) {
737 /* Maximum Unmap LBA Count */
738 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
740 /* Maximum Unmap Block Descriptor Count */
741 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
744 /* Unmap Granularity Alignment */
745 if (scsi_debug_unmap_alignment) {
746 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
747 arr[28] |= 0x80; /* UGAVALID */
750 /* Optimal Unmap Granularity */
751 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
753 /* Maximum WRITE SAME Length */
754 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
756 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
758 return sizeof(vpdb0_data);
761 /* Block device characteristics VPD page (SBC-3) */
762 static int inquiry_evpd_b1(unsigned char *arr)
764 memset(arr, 0, 0x3c);
765 arr[0] = 0;
766 arr[1] = 1; /* non rotating medium (e.g. solid state) */
767 arr[2] = 0;
768 arr[3] = 5; /* less than 1.8" */
770 return 0x3c;
773 /* Logical block provisioning VPD page (SBC-3) */
774 static int inquiry_evpd_b2(unsigned char *arr)
776 memset(arr, 0, 0x4);
777 arr[0] = 0; /* threshold exponent */
779 if (scsi_debug_lbpu)
780 arr[1] = 1 << 7;
782 if (scsi_debug_lbpws)
783 arr[1] |= 1 << 6;
785 if (scsi_debug_lbpws10)
786 arr[1] |= 1 << 5;
788 if (scsi_debug_lbprz)
789 arr[1] |= 1 << 2;
791 return 0x4;
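/* Example of the provisioning byte built above: with the defaults
 * (lbpu=0, lbpws=0, lbpws10=0, lbprz=1) arr[1] ends up as 0x04; enabling
 * UNMAP support with lbpu=1 would raise it to 0x84 (LBPU plus LBPRZ). */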
794 #define SDEBUG_LONG_INQ_SZ 96
795 #define SDEBUG_MAX_INQ_ARR_SZ 584
797 static int resp_inquiry(struct scsi_cmnd * scp, int target,
798 struct sdebug_dev_info * devip)
800 unsigned char pq_pdt;
801 unsigned char * arr;
802 unsigned char *cmd = (unsigned char *)scp->cmnd;
803 int alloc_len, n, ret;
805 alloc_len = (cmd[3] << 8) + cmd[4];
806 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
807 if (! arr)
808 return DID_REQUEUE << 16;
809 if (devip->wlun)
810 pq_pdt = 0x1e; /* present, wlun */
811 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
812 pq_pdt = 0x7f; /* not present, no device type */
813 else
814 pq_pdt = (scsi_debug_ptype & 0x1f);
815 arr[0] = pq_pdt;
816 if (0x2 & cmd[1]) { /* CMDDT bit set */
817 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
819 kfree(arr);
820 return check_condition_result;
821 } else if (0x1 & cmd[1]) { /* EVPD bit set */
822 int lu_id_num, port_group_id, target_dev_id, len;
823 char lu_id_str[6];
824 int host_no = devip->sdbg_host->shost->host_no;
826 port_group_id = (((host_no + 1) & 0x7f) << 8) +
827 (devip->channel & 0x7f);
828 if (0 == scsi_debug_vpd_use_hostno)
829 host_no = 0;
830 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
831 (devip->target * 1000) + devip->lun);
832 target_dev_id = ((host_no + 1) * 2000) +
833 (devip->target * 1000) - 3;
834 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
835 if (0 == cmd[2]) { /* supported vital product data pages */
836 arr[1] = cmd[2]; /*sanity */
837 n = 4;
838 arr[n++] = 0x0; /* this page */
839 arr[n++] = 0x80; /* unit serial number */
840 arr[n++] = 0x83; /* device identification */
841 arr[n++] = 0x84; /* software interface ident. */
842 arr[n++] = 0x85; /* management network addresses */
843 arr[n++] = 0x86; /* extended inquiry */
844 arr[n++] = 0x87; /* mode page policy */
845 arr[n++] = 0x88; /* SCSI ports */
846 arr[n++] = 0x89; /* ATA information */
847 arr[n++] = 0xb0; /* Block limits (SBC) */
848 arr[n++] = 0xb1; /* Block characteristics (SBC) */
849 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
850 arr[n++] = 0xb2;
851 arr[3] = n - 4; /* number of supported VPD pages */
852 } else if (0x80 == cmd[2]) { /* unit serial number */
853 arr[1] = cmd[2]; /*sanity */
854 arr[3] = len;
855 memcpy(&arr[4], lu_id_str, len);
856 } else if (0x83 == cmd[2]) { /* device identification */
857 arr[1] = cmd[2]; /*sanity */
858 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
859 target_dev_id, lu_id_num,
860 lu_id_str, len);
861 } else if (0x84 == cmd[2]) { /* Software interface ident. */
862 arr[1] = cmd[2]; /*sanity */
863 arr[3] = inquiry_evpd_84(&arr[4]);
864 } else if (0x85 == cmd[2]) { /* Management network addresses */
865 arr[1] = cmd[2]; /*sanity */
866 arr[3] = inquiry_evpd_85(&arr[4]);
867 } else if (0x86 == cmd[2]) { /* extended inquiry */
868 arr[1] = cmd[2]; /*sanity */
869 arr[3] = 0x3c; /* number of following entries */
870 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
871 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
872 else if (scsi_debug_dif)
873 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
874 else
875 arr[4] = 0x0; /* no protection stuff */
876 arr[5] = 0x7; /* head of q, ordered + simple q's */
877 } else if (0x87 == cmd[2]) { /* mode page policy */
878 arr[1] = cmd[2]; /*sanity */
879 arr[3] = 0x8; /* number of following entries */
880 arr[4] = 0x2; /* disconnect-reconnect mp */
881 arr[6] = 0x80; /* mlus, shared */
882 arr[8] = 0x18; /* protocol specific lu */
883 arr[10] = 0x82; /* mlus, per initiator port */
884 } else if (0x88 == cmd[2]) { /* SCSI Ports */
885 arr[1] = cmd[2]; /*sanity */
886 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
887 } else if (0x89 == cmd[2]) { /* ATA information */
888 arr[1] = cmd[2]; /*sanity */
889 n = inquiry_evpd_89(&arr[4]);
890 arr[2] = (n >> 8);
891 arr[3] = (n & 0xff);
892 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
893 arr[1] = cmd[2]; /*sanity */
894 arr[3] = inquiry_evpd_b0(&arr[4]);
895 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
896 arr[1] = cmd[2]; /*sanity */
897 arr[3] = inquiry_evpd_b1(&arr[4]);
898 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
899 arr[1] = cmd[2]; /*sanity */
900 arr[3] = inquiry_evpd_b2(&arr[4]);
901 } else {
902 /* Illegal request, invalid field in cdb */
903 mk_sense_buffer(devip, ILLEGAL_REQUEST,
904 INVALID_FIELD_IN_CDB, 0);
905 kfree(arr);
906 return check_condition_result;
908 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
909 ret = fill_from_dev_buffer(scp, arr,
910 min(len, SDEBUG_MAX_INQ_ARR_SZ));
911 kfree(arr);
912 return ret;
914 /* drops through here for a standard inquiry */
915 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
916 arr[2] = scsi_debug_scsi_level;
917 arr[3] = 2; /* response_data_format==2 */
918 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
919 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
920 if (0 == scsi_debug_vpd_use_hostno)
921 arr[5] = 0x10; /* claim: implicit TGPS */
922 arr[6] = 0x10; /* claim: MultiP */
923 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
924 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
925 memcpy(&arr[8], inq_vendor_id, 8);
926 memcpy(&arr[16], inq_product_id, 16);
927 memcpy(&arr[32], inq_product_rev, 4);
928 /* version descriptors (2 bytes each) follow */
929 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
930 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
931 n = 62;
932 if (scsi_debug_ptype == 0) {
933 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
934 } else if (scsi_debug_ptype == 1) {
935 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
937 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
938 ret = fill_from_dev_buffer(scp, arr,
939 min(alloc_len, SDEBUG_LONG_INQ_SZ));
940 kfree(arr);
941 return ret;
944 static int resp_requests(struct scsi_cmnd * scp,
945 struct sdebug_dev_info * devip)
947 unsigned char * sbuff;
948 unsigned char *cmd = (unsigned char *)scp->cmnd;
949 unsigned char arr[SDEBUG_SENSE_LEN];
950 int want_dsense;
951 int len = 18;
953 memset(arr, 0, sizeof(arr));
954 if (devip->reset == 1)
955 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
956 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
957 sbuff = devip->sense_buff;
958 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
959 if (want_dsense) {
960 arr[0] = 0x72;
961 arr[1] = 0x0; /* NO_SENSE in sense_key */
962 arr[2] = THRESHOLD_EXCEEDED;
963 arr[3] = 0xff; /* TEST set and MRIE==6 */
964 } else {
965 arr[0] = 0x70;
966 arr[2] = 0x0; /* NO_SENSE in sense_key */
967 arr[7] = 0xa; /* 18 byte sense buffer */
968 arr[12] = THRESHOLD_EXCEEDED;
969 arr[13] = 0xff; /* TEST set and MRIE==6 */
971 } else {
972 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
973 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
974 /* DESC bit set and sense_buff in fixed format */
975 memset(arr, 0, sizeof(arr));
976 arr[0] = 0x72;
977 arr[1] = sbuff[2]; /* sense key */
978 arr[2] = sbuff[12]; /* asc */
979 arr[3] = sbuff[13]; /* ascq */
980 len = 8;
983 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
984 return fill_from_dev_buffer(scp, arr, len);
987 static int resp_start_stop(struct scsi_cmnd * scp,
988 struct sdebug_dev_info * devip)
990 unsigned char *cmd = (unsigned char *)scp->cmnd;
991 int power_cond, errsts, start;
993 if ((errsts = check_readiness(scp, 1, devip)))
994 return errsts;
995 power_cond = (cmd[4] & 0xf0) >> 4;
996 if (power_cond) {
997 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
999 return check_condition_result;
1001 start = cmd[4] & 1;
1002 if (start == devip->stopped)
1003 devip->stopped = !start;
1004 return 0;
1007 static sector_t get_sdebug_capacity(void)
1009 if (scsi_debug_virtual_gb > 0)
1010 return (sector_t)scsi_debug_virtual_gb *
1011 (1073741824 / scsi_debug_sector_size);
1012 else
1013 return sdebug_store_sectors;
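/* Capacity arithmetic example for get_sdebug_capacity() above: with
 * virtual_gb=4 and the default 512 byte sector size the reported capacity
 * is 4 * (1073741824 / 512) = 8388608 sectors (4 GiB), even though the ram
 * store stays at dev_size_mb; do_device_access() wraps accesses around it. */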
1016 #define SDEBUG_READCAP_ARR_SZ 8
1017 static int resp_readcap(struct scsi_cmnd * scp,
1018 struct sdebug_dev_info * devip)
1020 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1021 unsigned int capac;
1022 int errsts;
1024 if ((errsts = check_readiness(scp, 1, devip)))
1025 return errsts;
1026 /* following just in case virtual_gb changed */
1027 sdebug_capacity = get_sdebug_capacity();
1028 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1029 if (sdebug_capacity < 0xffffffff) {
1030 capac = (unsigned int)sdebug_capacity - 1;
1031 arr[0] = (capac >> 24);
1032 arr[1] = (capac >> 16) & 0xff;
1033 arr[2] = (capac >> 8) & 0xff;
1034 arr[3] = capac & 0xff;
1035 } else {
1036 arr[0] = 0xff;
1037 arr[1] = 0xff;
1038 arr[2] = 0xff;
1039 arr[3] = 0xff;
1041 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1042 arr[7] = scsi_debug_sector_size & 0xff;
1043 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1046 #define SDEBUG_READCAP16_ARR_SZ 32
1047 static int resp_readcap16(struct scsi_cmnd * scp,
1048 struct sdebug_dev_info * devip)
1050 unsigned char *cmd = (unsigned char *)scp->cmnd;
1051 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1052 unsigned long long capac;
1053 int errsts, k, alloc_len;
1055 if ((errsts = check_readiness(scp, 1, devip)))
1056 return errsts;
1057 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1058 + cmd[13]);
1059 /* following just in case virtual_gb changed */
1060 sdebug_capacity = get_sdebug_capacity();
1061 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1062 capac = sdebug_capacity - 1;
1063 for (k = 0; k < 8; ++k, capac >>= 8)
1064 arr[7 - k] = capac & 0xff;
1065 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1066 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1067 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1068 arr[11] = scsi_debug_sector_size & 0xff;
1069 arr[13] = scsi_debug_physblk_exp & 0xf;
1070 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1072 if (scsi_debug_lbp()) {
1073 arr[14] |= 0x80; /* LBPME */
1074 if (scsi_debug_lbprz)
1075 arr[14] |= 0x40; /* LBPRZ */
1078 arr[15] = scsi_debug_lowest_aligned & 0xff;
1080 if (scsi_debug_dif) {
1081 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1082 arr[12] |= 1; /* PROT_EN */
1085 return fill_from_dev_buffer(scp, arr,
1086 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1089 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1091 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1092 struct sdebug_dev_info * devip)
1094 unsigned char *cmd = (unsigned char *)scp->cmnd;
1095 unsigned char * arr;
1096 int host_no = devip->sdbg_host->shost->host_no;
1097 int n, ret, alen, rlen;
1098 int port_group_a, port_group_b, port_a, port_b;
1100 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1101 + cmd[9]);
1103 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1104 if (! arr)
1105 return DID_REQUEUE << 16;
1107 * EVPD page 0x88 states we have two ports, one
1108 * real and a fake port with no device connected.
1109 * So we create two port groups with one port each
1110 * and set the group with port B to unavailable.
1112 port_a = 0x1; /* relative port A */
1113 port_b = 0x2; /* relative port B */
1114 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1115 (devip->channel & 0x7f);
1116 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1117 (devip->channel & 0x7f) + 0x80;
1120 * The asymmetric access state is cycled according to the host_id.
1122 n = 4;
1123 if (0 == scsi_debug_vpd_use_hostno) {
1124 arr[n++] = host_no % 3; /* Asymm access state */
1125 arr[n++] = 0x0F; /* claim: all states are supported */
1126 } else {
1127 arr[n++] = 0x0; /* Active/Optimized path */
1128 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1130 arr[n++] = (port_group_a >> 8) & 0xff;
1131 arr[n++] = port_group_a & 0xff;
1132 arr[n++] = 0; /* Reserved */
1133 arr[n++] = 0; /* Status code */
1134 arr[n++] = 0; /* Vendor unique */
1135 arr[n++] = 0x1; /* One port per group */
1136 arr[n++] = 0; /* Reserved */
1137 arr[n++] = 0; /* Reserved */
1138 arr[n++] = (port_a >> 8) & 0xff;
1139 arr[n++] = port_a & 0xff;
1140 arr[n++] = 3; /* Port unavailable */
1141 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1142 arr[n++] = (port_group_b >> 8) & 0xff;
1143 arr[n++] = port_group_b & 0xff;
1144 arr[n++] = 0; /* Reserved */
1145 arr[n++] = 0; /* Status code */
1146 arr[n++] = 0; /* Vendor unique */
1147 arr[n++] = 0x1; /* One port per group */
1148 arr[n++] = 0; /* Reserved */
1149 arr[n++] = 0; /* Reserved */
1150 arr[n++] = (port_b >> 8) & 0xff;
1151 arr[n++] = port_b & 0xff;
1153 rlen = n - 4;
1154 arr[0] = (rlen >> 24) & 0xff;
1155 arr[1] = (rlen >> 16) & 0xff;
1156 arr[2] = (rlen >> 8) & 0xff;
1157 arr[3] = rlen & 0xff;
1160 * Return the smallest value of either
1161 * - The allocated length
1162 * - The constructed command length
1163 * - The maximum array size
1165 rlen = min(alen,n);
1166 ret = fill_from_dev_buffer(scp, arr,
1167 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1168 kfree(arr);
1169 return ret;
1172 /* <<Following mode page info copied from ST318451LW>> */
1174 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1175 { /* Read-Write Error Recovery page for mode_sense */
1176 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1177 5, 0, 0xff, 0xff};
1179 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1180 if (1 == pcontrol)
1181 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1182 return sizeof(err_recov_pg);
1185 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1186 { /* Disconnect-Reconnect page for mode_sense */
1187 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1188 0, 0, 0, 0, 0, 0, 0, 0};
1190 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1191 if (1 == pcontrol)
1192 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1193 return sizeof(disconnect_pg);
1196 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1197 { /* Format device page for mode_sense */
1198 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1199 0, 0, 0, 0, 0, 0, 0, 0,
1200 0, 0, 0, 0, 0x40, 0, 0, 0};
1202 memcpy(p, format_pg, sizeof(format_pg));
1203 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1204 p[11] = sdebug_sectors_per & 0xff;
1205 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1206 p[13] = scsi_debug_sector_size & 0xff;
1207 if (scsi_debug_removable)
1208 p[20] |= 0x20; /* should agree with INQUIRY */
1209 if (1 == pcontrol)
1210 memset(p + 2, 0, sizeof(format_pg) - 2);
1211 return sizeof(format_pg);
1214 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1215 { /* Caching page for mode_sense */
1216 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1217 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1219 memcpy(p, caching_pg, sizeof(caching_pg));
1220 if (1 == pcontrol)
1221 memset(p + 2, 0, sizeof(caching_pg) - 2);
1222 return sizeof(caching_pg);
1225 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1226 { /* Control mode page for mode_sense */
1227 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1228 0, 0, 0, 0};
1229 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1230 0, 0, 0x2, 0x4b};
1232 if (scsi_debug_dsense)
1233 ctrl_m_pg[2] |= 0x4;
1234 else
1235 ctrl_m_pg[2] &= ~0x4;
1237 if (scsi_debug_ato)
1238 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1240 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1241 if (1 == pcontrol)
1242 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1243 else if (2 == pcontrol)
1244 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1245 return sizeof(ctrl_m_pg);
1249 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1250 { /* Informational Exceptions control mode page for mode_sense */
1251 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1252 0, 0, 0x0, 0x0};
1253 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1254 0, 0, 0x0, 0x0};
1256 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1257 if (1 == pcontrol)
1258 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1259 else if (2 == pcontrol)
1260 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1261 return sizeof(iec_m_pg);
1264 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1265 { /* SAS SSP mode page - short format for mode_sense */
1266 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1267 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1269 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1270 if (1 == pcontrol)
1271 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1272 return sizeof(sas_sf_m_pg);
1276 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1277 int target_dev_id)
1278 { /* SAS phy control and discover mode page for mode_sense */
1279 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1280 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1281 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1282 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1283 0x2, 0, 0, 0, 0, 0, 0, 0,
1284 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1285 0, 0, 0, 0, 0, 0, 0, 0,
1286 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1287 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1288 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1289 0x3, 0, 0, 0, 0, 0, 0, 0,
1290 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1291 0, 0, 0, 0, 0, 0, 0, 0,
1293 int port_a, port_b;
1295 port_a = target_dev_id + 1;
1296 port_b = port_a + 1;
1297 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1298 p[20] = (port_a >> 24);
1299 p[21] = (port_a >> 16) & 0xff;
1300 p[22] = (port_a >> 8) & 0xff;
1301 p[23] = port_a & 0xff;
1302 p[48 + 20] = (port_b >> 24);
1303 p[48 + 21] = (port_b >> 16) & 0xff;
1304 p[48 + 22] = (port_b >> 8) & 0xff;
1305 p[48 + 23] = port_b & 0xff;
1306 if (1 == pcontrol)
1307 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1308 return sizeof(sas_pcd_m_pg);
1311 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1312 { /* SAS SSP shared protocol specific port mode subpage */
1313 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1314 0, 0, 0, 0, 0, 0, 0, 0,
1317 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1318 if (1 == pcontrol)
1319 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1320 return sizeof(sas_sha_m_pg);
1323 #define SDEBUG_MAX_MSENSE_SZ 256
1325 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1326 struct sdebug_dev_info * devip)
1328 unsigned char dbd, llbaa;
1329 int pcontrol, pcode, subpcode, bd_len;
1330 unsigned char dev_spec;
1331 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1332 unsigned char * ap;
1333 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1334 unsigned char *cmd = (unsigned char *)scp->cmnd;
1336 if ((errsts = check_readiness(scp, 1, devip)))
1337 return errsts;
1338 dbd = !!(cmd[1] & 0x8);
1339 pcontrol = (cmd[2] & 0xc0) >> 6;
1340 pcode = cmd[2] & 0x3f;
1341 subpcode = cmd[3];
1342 msense_6 = (MODE_SENSE == cmd[0]);
1343 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1344 if ((0 == scsi_debug_ptype) && (0 == dbd))
1345 bd_len = llbaa ? 16 : 8;
1346 else
1347 bd_len = 0;
1348 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1349 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1350 if (0x3 == pcontrol) { /* Saving values not supported */
1351 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1353 return check_condition_result;
1355 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1356 (devip->target * 1000) - 3;
1357 /* set DPOFUA bit for disks */
1358 if (0 == scsi_debug_ptype)
1359 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1360 else
1361 dev_spec = 0x0;
1362 if (msense_6) {
1363 arr[2] = dev_spec;
1364 arr[3] = bd_len;
1365 offset = 4;
1366 } else {
1367 arr[3] = dev_spec;
1368 if (16 == bd_len)
1369 arr[4] = 0x1; /* set LONGLBA bit */
1370 arr[7] = bd_len; /* assume 255 or less */
1371 offset = 8;
1373 ap = arr + offset;
1374 if ((bd_len > 0) && (!sdebug_capacity))
1375 sdebug_capacity = get_sdebug_capacity();
1377 if (8 == bd_len) {
1378 if (sdebug_capacity > 0xfffffffe) {
1379 ap[0] = 0xff;
1380 ap[1] = 0xff;
1381 ap[2] = 0xff;
1382 ap[3] = 0xff;
1383 } else {
1384 ap[0] = (sdebug_capacity >> 24) & 0xff;
1385 ap[1] = (sdebug_capacity >> 16) & 0xff;
1386 ap[2] = (sdebug_capacity >> 8) & 0xff;
1387 ap[3] = sdebug_capacity & 0xff;
1389 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1390 ap[7] = scsi_debug_sector_size & 0xff;
1391 offset += bd_len;
1392 ap = arr + offset;
1393 } else if (16 == bd_len) {
1394 unsigned long long capac = sdebug_capacity;
1396 for (k = 0; k < 8; ++k, capac >>= 8)
1397 ap[7 - k] = capac & 0xff;
1398 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1399 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1400 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1401 ap[15] = scsi_debug_sector_size & 0xff;
1402 offset += bd_len;
1403 ap = arr + offset;
1406 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1407 /* TODO: Control Extension page */
1408 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1410 return check_condition_result;
1412 switch (pcode) {
1413 case 0x1: /* Read-Write error recovery page, direct access */
1414 len = resp_err_recov_pg(ap, pcontrol, target);
1415 offset += len;
1416 break;
1417 case 0x2: /* Disconnect-Reconnect page, all devices */
1418 len = resp_disconnect_pg(ap, pcontrol, target);
1419 offset += len;
1420 break;
1421 case 0x3: /* Format device page, direct access */
1422 len = resp_format_pg(ap, pcontrol, target);
1423 offset += len;
1424 break;
1425 case 0x8: /* Caching page, direct access */
1426 len = resp_caching_pg(ap, pcontrol, target);
1427 offset += len;
1428 break;
1429 case 0xa: /* Control Mode page, all devices */
1430 len = resp_ctrl_m_pg(ap, pcontrol, target);
1431 offset += len;
1432 break;
1433 case 0x19: /* if spc==1 then sas phy, control+discover */
1434 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1435 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1436 INVALID_FIELD_IN_CDB, 0);
1437 return check_condition_result;
1439 len = 0;
1440 if ((0x0 == subpcode) || (0xff == subpcode))
1441 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1442 if ((0x1 == subpcode) || (0xff == subpcode))
1443 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1444 target_dev_id);
1445 if ((0x2 == subpcode) || (0xff == subpcode))
1446 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1447 offset += len;
1448 break;
1449 case 0x1c: /* Informational Exceptions Mode page, all devices */
1450 len = resp_iec_m_pg(ap, pcontrol, target);
1451 offset += len;
1452 break;
1453 case 0x3f: /* Read all Mode pages */
1454 if ((0 == subpcode) || (0xff == subpcode)) {
1455 len = resp_err_recov_pg(ap, pcontrol, target);
1456 len += resp_disconnect_pg(ap + len, pcontrol, target);
1457 len += resp_format_pg(ap + len, pcontrol, target);
1458 len += resp_caching_pg(ap + len, pcontrol, target);
1459 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1460 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1461 if (0xff == subpcode) {
1462 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1463 target, target_dev_id);
1464 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1466 len += resp_iec_m_pg(ap + len, pcontrol, target);
1467 } else {
1468 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1469 INVALID_FIELD_IN_CDB, 0);
1470 return check_condition_result;
1472 offset += len;
1473 break;
1474 default:
1475 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1477 return check_condition_result;
1479 if (msense_6)
1480 arr[0] = offset - 1;
1481 else {
1482 arr[0] = ((offset - 2) >> 8) & 0xff;
1483 arr[1] = (offset - 2) & 0xff;
1485 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1488 #define SDEBUG_MAX_MSELECT_SZ 512
1490 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1491 struct sdebug_dev_info * devip)
1493 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1494 int param_len, res, errsts, mpage;
1495 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1496 unsigned char *cmd = (unsigned char *)scp->cmnd;
1498 if ((errsts = check_readiness(scp, 1, devip)))
1499 return errsts;
1500 memset(arr, 0, sizeof(arr));
1501 pf = cmd[1] & 0x10;
1502 sp = cmd[1] & 0x1;
1503 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1504 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1505 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1506 INVALID_FIELD_IN_CDB, 0);
1507 return check_condition_result;
1509 res = fetch_to_dev_buffer(scp, arr, param_len);
1510 if (-1 == res)
1511 return (DID_ERROR << 16);
1512 else if ((res < param_len) &&
1513 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1514 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1515 " IO sent=%d bytes\n", param_len, res);
1516 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1517 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1518 if (md_len > 2) {
1519 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1520 INVALID_FIELD_IN_PARAM_LIST, 0);
1521 return check_condition_result;
1523 off = bd_len + (mselect6 ? 4 : 8);
1524 mpage = arr[off] & 0x3f;
1525 ps = !!(arr[off] & 0x80);
1526 if (ps) {
1527 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1528 INVALID_FIELD_IN_PARAM_LIST, 0);
1529 return check_condition_result;
1531 spf = !!(arr[off] & 0x40);
1532 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1533 (arr[off + 1] + 2);
1534 if ((pg_len + off) > param_len) {
1535 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1536 PARAMETER_LIST_LENGTH_ERR, 0);
1537 return check_condition_result;
1539 switch (mpage) {
1540 case 0xa: /* Control Mode page */
1541 if (ctrl_m_pg[1] == arr[off + 1]) {
1542 memcpy(ctrl_m_pg + 2, arr + off + 2,
1543 sizeof(ctrl_m_pg) - 2);
1544 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1545 return 0;
1547 break;
1548 case 0x1c: /* Informational Exceptions Mode page */
1549 if (iec_m_pg[1] == arr[off + 1]) {
1550 memcpy(iec_m_pg + 2, arr + off + 2,
1551 sizeof(iec_m_pg) - 2);
1552 return 0;
1554 break;
1555 default:
1556 break;
1558 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1559 INVALID_FIELD_IN_PARAM_LIST, 0);
1560 return check_condition_result;
1563 static int resp_temp_l_pg(unsigned char * arr)
1565 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1566 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1569 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1570 return sizeof(temp_l_pg);
1573 static int resp_ie_l_pg(unsigned char * arr)
1575 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1578 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1579 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1580 arr[4] = THRESHOLD_EXCEEDED;
1581 arr[5] = 0xff;
1583 return sizeof(ie_l_pg);
1586 #define SDEBUG_MAX_LSENSE_SZ 512
1588 static int resp_log_sense(struct scsi_cmnd * scp,
1589 struct sdebug_dev_info * devip)
1591 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1592 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1593 unsigned char *cmd = (unsigned char *)scp->cmnd;
1595 if ((errsts = check_readiness(scp, 1, devip)))
1596 return errsts;
1597 memset(arr, 0, sizeof(arr));
1598 ppc = cmd[1] & 0x2;
1599 sp = cmd[1] & 0x1;
1600 if (ppc || sp) {
1601 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1602 INVALID_FIELD_IN_CDB, 0);
1603 return check_condition_result;
1605 pcontrol = (cmd[2] & 0xc0) >> 6;
1606 pcode = cmd[2] & 0x3f;
1607 subpcode = cmd[3] & 0xff;
1608 alloc_len = (cmd[7] << 8) + cmd[8];
1609 arr[0] = pcode;
1610 if (0 == subpcode) {
1611 switch (pcode) {
1612 case 0x0: /* Supported log pages log page */
1613 n = 4;
1614 arr[n++] = 0x0; /* this page */
1615 arr[n++] = 0xd; /* Temperature */
1616 arr[n++] = 0x2f; /* Informational exceptions */
1617 arr[3] = n - 4;
1618 break;
1619 case 0xd: /* Temperature log page */
1620 arr[3] = resp_temp_l_pg(arr + 4);
1621 break;
1622 case 0x2f: /* Informational exceptions log page */
1623 arr[3] = resp_ie_l_pg(arr + 4);
1624 break;
1625 default:
1626 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1627 INVALID_FIELD_IN_CDB, 0);
1628 return check_condition_result;
1630 } else if (0xff == subpcode) {
1631 arr[0] |= 0x40;
1632 arr[1] = subpcode;
1633 switch (pcode) {
1634 case 0x0: /* Supported log pages and subpages log page */
1635 n = 4;
1636 arr[n++] = 0x0;
1637 arr[n++] = 0x0; /* 0,0 page */
1638 arr[n++] = 0x0;
1639 arr[n++] = 0xff; /* this page */
1640 arr[n++] = 0xd;
1641 arr[n++] = 0x0; /* Temperature */
1642 arr[n++] = 0x2f;
1643 arr[n++] = 0x0; /* Informational exceptions */
1644 arr[3] = n - 4;
1645 break;
1646 case 0xd: /* Temperature subpages */
1647 n = 4;
1648 arr[n++] = 0xd;
1649 arr[n++] = 0x0; /* Temperature */
1650 arr[3] = n - 4;
1651 break;
1652 case 0x2f: /* Informational exceptions subpages */
1653 n = 4;
1654 arr[n++] = 0x2f;
1655 arr[n++] = 0x0; /* Informational exceptions */
1656 arr[3] = n - 4;
1657 break;
1658 default:
1659 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1660 INVALID_FIELD_IN_CDB, 0);
1661 return check_condition_result;
1663 } else {
1664 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1665 INVALID_FIELD_IN_CDB, 0);
1666 return check_condition_result;
1668 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1669 return fill_from_dev_buffer(scp, arr,
1670 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1673 static int check_device_access_params(struct sdebug_dev_info *devi,
1674 unsigned long long lba, unsigned int num)
1676 if (lba + num > sdebug_capacity) {
1677 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1678 return check_condition_result;
1680 /* transfer length excessive (tie in to block limits VPD page) */
1681 if (num > sdebug_store_sectors) {
1682 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1683 return check_condition_result;
1685 return 0;
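/* Bounds example for the checks above: with the default dev_size_mb=8 and
 * 512 byte sectors the store holds 8 * 1024 * 1024 / 512 = 16384 sectors,
 * so (with virtual_gb left at 0) a READ where lba + num exceeds 16384 is
 * expected to fail with LOGICAL BLOCK ADDRESS OUT OF RANGE sense data. */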
1688 /* Returns number of bytes copied or -1 if error. */
1689 static int do_device_access(struct scsi_cmnd *scmd,
1690 struct sdebug_dev_info *devi,
1691 unsigned long long lba, unsigned int num, int write)
1693 int ret;
1694 unsigned long long block, rest = 0;
1695 struct scsi_data_buffer *sdb;
1696 enum dma_data_direction dir;
1697 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1698 off_t);
1700 if (write) {
1701 sdb = scsi_out(scmd);
1702 dir = DMA_TO_DEVICE;
1703 func = sg_pcopy_to_buffer;
1704 } else {
1705 sdb = scsi_in(scmd);
1706 dir = DMA_FROM_DEVICE;
1707 func = sg_pcopy_from_buffer;
1710 if (!sdb->length)
1711 return 0;
1712 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1713 return -1;
1715 block = do_div(lba, sdebug_store_sectors);
1716 if (block + num > sdebug_store_sectors)
1717 rest = block + num - sdebug_store_sectors;
1719 ret = func(sdb->table.sgl, sdb->table.nents,
1720 fake_storep + (block * scsi_debug_sector_size),
1721 (num - rest) * scsi_debug_sector_size, 0);
1722 if (ret != (num - rest) * scsi_debug_sector_size)
1723 return ret;
1725 if (rest) {
1726 ret += func(sdb->table.sgl, sdb->table.nents,
1727 fake_storep, rest * scsi_debug_sector_size,
1728 (num - rest) * scsi_debug_sector_size);
1731 return ret;
1734 static u16 dif_compute_csum(const void *buf, int len)
1736 u16 csum;
1738 switch (scsi_debug_guard) {
1739 case 1:
1740 csum = ip_compute_csum(buf, len);
1741 break;
1742 case 0:
1743 csum = cpu_to_be16(crc_t10dif(buf, len));
1744 break;
1746 return csum;
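/* Hypothetical invocation exercising the guard selection above: loading with
 * "modprobe scsi_debug dix=1 dif=1 guard=0" should simulate Type 1
 * protection with the T10 CRC guard (crc_t10dif), while guard=1 switches
 * the guard tag computation to the IP checksum. */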
1749 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1750 sector_t sector, u32 ei_lba)
1752 u16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1754 if (sdt->guard_tag != csum) {
1755 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1756 __func__,
1757 (unsigned long)sector,
1758 be16_to_cpu(sdt->guard_tag),
1759 be16_to_cpu(csum));
1760 return 0x01;
1762 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1763 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1764 pr_err("%s: REF check failed on sector %lu\n",
1765 __func__, (unsigned long)sector);
1766 return 0x03;
1768 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1769 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1770 pr_err("%s: REF check failed on sector %lu\n",
1771 __func__, (unsigned long)sector);
1772 dif_errors++;
1773 return 0x03;
1775 return 0;
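/*
 * Verify protection information for a READ: check each stored tuple
 * (tuples with an app_tag of 0xffff are skipped), then copy the tuples
 * into the command's protection scatter-gather list.
 */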
1778 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1779 unsigned int sectors, u32 ei_lba)
1781 unsigned int i, resid;
1782 struct scatterlist *psgl;
1783 struct sd_dif_tuple *sdt;
1784 sector_t sector;
1785 sector_t tmp_sec = start_sec;
1786 void *paddr;
1788 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1790 sdt = dif_storep + start_sec;
1792 for (i = 0 ; i < sectors ; i++) {
1793 int ret;
1795 if (sdt[i].app_tag == 0xffff)
1796 continue;
1798 sector = start_sec + i;
1800 ret = dif_verify(&sdt[i],
1801 fake_storep + sector * scsi_debug_sector_size,
1802 sector, ei_lba);
1803 if (ret) {
1804 dif_errors++;
1805 return ret;
1808 ei_lba++;
1811 /* Bytes of protection data to copy into sgl */
1812 resid = sectors * sizeof(*dif_storep);
1813 sector = start_sec;
1815 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1816 int len = min(psgl->length, resid);
1818 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1819 memcpy(paddr, dif_storep + sector, len);
1821 sector += len / sizeof(*dif_storep);
1822 if (sector >= sdebug_store_sectors) {
1823 /* Force wrap */
1824 tmp_sec = sector;
1825 sector = do_div(tmp_sec, sdebug_store_sectors);
1827 resid -= len;
1828 kunmap_atomic(paddr);
1831 dix_reads++;
1833 return 0;
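/*
 * Service a READ: range-check the request, optionally inject an
 * unrecovered read error (SCSI_DEBUG_OPT_MEDIUM_ERR), verify protection
 * information when DIX is active, then copy data from the fake store
 * under the atomic_rw read lock.
 */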
1836 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1837 unsigned int num, struct sdebug_dev_info *devip,
1838 u32 ei_lba)
1840 unsigned long iflags;
1841 int ret;
1843 ret = check_device_access_params(devip, lba, num);
1844 if (ret)
1845 return ret;
1847 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1848 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1849 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1850 /* claim unrecoverable read error */
1851 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1852 			/* set info field and valid bit for fixed format sense data */
1853 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1854 devip->sense_buff[0] |= 0x80; /* Valid bit */
1855 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1856 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1857 devip->sense_buff[3] = (ret >> 24) & 0xff;
1858 devip->sense_buff[4] = (ret >> 16) & 0xff;
1859 devip->sense_buff[5] = (ret >> 8) & 0xff;
1860 devip->sense_buff[6] = ret & 0xff;
1862 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1863 return check_condition_result;
1866 /* DIX + T10 DIF */
1867 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1868 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1870 if (prot_ret) {
1871 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1872 return illegal_condition_result;
1876 read_lock_irqsave(&atomic_rw, iflags);
1877 ret = do_device_access(SCpnt, devip, lba, num, 0);
1878 read_unlock_irqrestore(&atomic_rw, iflags);
1879 if (ret == -1)
1880 return DID_ERROR << 16;
1882 scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
1884 return 0;
1887 void dump_sector(unsigned char *buf, int len)
1889 int i, j;
1891 printk(KERN_ERR ">>> Sector Dump <<<\n");
1893 for (i = 0 ; i < len ; i += 16) {
1894 printk(KERN_ERR "%04d: ", i);
1896 for (j = 0 ; j < 16 ; j++) {
1897 unsigned char c = buf[i+j];
1898 if (c >= 0x20 && c < 0x7e)
1899 printk(" %c ", buf[i+j]);
1900 else
1901 printk("%02x ", buf[i+j]);
1904 printk("\n");
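/*
 * Verify protection information for a WRITE: walk the data and protection
 * scatter-gather lists in step, verify each sector with dif_verify() and,
 * on success, copy the tuples into dif_storep.
 */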
1908 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1909 unsigned int sectors, u32 ei_lba)
1911 int i, j, ret;
1912 struct sd_dif_tuple *sdt;
1913 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1914 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1915 void *daddr, *paddr;
1916 sector_t tmp_sec = start_sec;
1917 sector_t sector;
1918 int ppage_offset;
1920 sector = do_div(tmp_sec, sdebug_store_sectors);
1922 BUG_ON(scsi_sg_count(SCpnt) == 0);
1923 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1925 ppage_offset = 0;
1927 /* For each data page */
1928 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1929 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1930 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1932 /* For each sector-sized chunk in data page */
1933 for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) {
1935 /* If we're at the end of the current
1936 * protection page advance to the next one
1938 if (ppage_offset >= psgl->length) {
1939 kunmap_atomic(paddr);
1940 psgl = sg_next(psgl);
1941 BUG_ON(psgl == NULL);
1942 paddr = kmap_atomic(sg_page(psgl))
1943 + psgl->offset;
1944 ppage_offset = 0;
1947 sdt = paddr + ppage_offset;
1949 ret = dif_verify(sdt, daddr + j, start_sec, ei_lba);
1950 if (ret) {
1951 dump_sector(daddr + j, scsi_debug_sector_size);
1952 goto out;
1955 /* Would be great to copy this in bigger
1956 * chunks. However, for the sake of
1957 * correctness we need to verify each sector
1958 * before writing it to "stable" storage
1960 memcpy(dif_storep + sector, sdt, sizeof(*sdt));
1962 sector++;
1964 if (sector == sdebug_store_sectors)
1965 sector = 0; /* Force wrap */
1967 start_sec++;
1968 ei_lba++;
1969 ppage_offset += sizeof(struct sd_dif_tuple);
1972 kunmap_atomic(paddr);
1973 kunmap_atomic(daddr);
1976 dix_writes++;
1978 return 0;
1980 out:
1981 dif_errors++;
1982 kunmap_atomic(paddr);
1983 kunmap_atomic(daddr);
1984 return ret;
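/*
 * Map an LBA to an index in the provisioning bitmap and back. Each bit
 * covers unmap_granularity blocks; unmap_alignment shifts the grid. For
 * example, with granularity 4 and alignment 0, LBAs 0-3 share index 0
 * and LBAs 4-7 share index 1.
 */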
1987 static unsigned long lba_to_map_index(sector_t lba)
1989 if (scsi_debug_unmap_alignment) {
1990 lba += scsi_debug_unmap_granularity -
1991 scsi_debug_unmap_alignment;
1993 do_div(lba, scsi_debug_unmap_granularity);
1995 return lba;
1998 static sector_t map_index_to_lba(unsigned long index)
2000 sector_t lba = index * scsi_debug_unmap_granularity;
2002 if (scsi_debug_unmap_alignment) {
2003 lba -= scsi_debug_unmap_granularity -
2004 scsi_debug_unmap_alignment;
2007 return lba;
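/*
 * Report whether 'lba' is mapped and, via *num, how many following blocks
 * share that state (bounded by the end of the store).
 */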
2010 static unsigned int map_state(sector_t lba, unsigned int *num)
2012 sector_t end;
2013 unsigned int mapped;
2014 unsigned long index;
2015 unsigned long next;
2017 index = lba_to_map_index(lba);
2018 mapped = test_bit(index, map_storep);
2020 if (mapped)
2021 next = find_next_zero_bit(map_storep, map_size, index);
2022 else
2023 next = find_next_bit(map_storep, map_size, index);
2025 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2026 *num = end - lba;
2028 return mapped;
2031 static void map_region(sector_t lba, unsigned int len)
2033 sector_t end = lba + len;
2035 while (lba < end) {
2036 unsigned long index = lba_to_map_index(lba);
2038 if (index < map_size)
2039 set_bit(index, map_storep);
2041 lba = map_index_to_lba(index + 1);
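/*
 * Clear provisioning bits for whole granularity units within [lba, lba+len);
 * if lbprz is set the backing data is zeroed, and any protection tuples are
 * reset to 0xff.
 */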
2045 static void unmap_region(sector_t lba, unsigned int len)
2047 sector_t end = lba + len;
2049 while (lba < end) {
2050 unsigned long index = lba_to_map_index(lba);
2052 if (lba == map_index_to_lba(index) &&
2053 lba + scsi_debug_unmap_granularity <= end &&
2054 index < map_size) {
2055 clear_bit(index, map_storep);
2056 if (scsi_debug_lbprz) {
2057 memset(fake_storep +
2058 lba * scsi_debug_sector_size, 0,
2059 scsi_debug_sector_size *
2060 scsi_debug_unmap_granularity);
2062 if (dif_storep) {
2063 memset(dif_storep + lba, 0xff,
2064 sizeof(*dif_storep) *
2065 scsi_debug_unmap_granularity);
2068 lba = map_index_to_lba(index + 1);
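/*
 * Service a WRITE: range-check, verify protection information when DIX is
 * active, then copy data into the fake store under the atomic_rw write lock
 * and mark the region mapped.
 */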
2072 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2073 unsigned int num, struct sdebug_dev_info *devip,
2074 u32 ei_lba)
2076 unsigned long iflags;
2077 int ret;
2079 ret = check_device_access_params(devip, lba, num);
2080 if (ret)
2081 return ret;
2083 /* DIX + T10 DIF */
2084 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2085 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2087 if (prot_ret) {
2088 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2089 return illegal_condition_result;
2093 write_lock_irqsave(&atomic_rw, iflags);
2094 ret = do_device_access(SCpnt, devip, lba, num, 1);
2095 if (scsi_debug_lbp())
2096 map_region(lba, num);
2097 write_unlock_irqrestore(&atomic_rw, iflags);
2098 if (-1 == ret)
2099 return (DID_ERROR << 16);
2100 else if ((ret < (num * scsi_debug_sector_size)) &&
2101 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2102 		printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2103 		       "IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2105 return 0;
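/*
 * Service WRITE SAME: either unmap the range (UNMAP bit set and logical
 * block provisioning enabled) or fetch one logical block from the initiator
 * and replicate it across the range.
 */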
2108 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2109 unsigned int num, struct sdebug_dev_info *devip,
2110 u32 ei_lba, unsigned int unmap)
2112 unsigned long iflags;
2113 unsigned long long i;
2114 int ret;
2116 ret = check_device_access_params(devip, lba, num);
2117 if (ret)
2118 return ret;
2120 if (num > scsi_debug_write_same_length) {
2121 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2123 return check_condition_result;
2126 write_lock_irqsave(&atomic_rw, iflags);
2128 if (unmap && scsi_debug_lbp()) {
2129 unmap_region(lba, num);
2130 goto out;
2133 /* Else fetch one logical block */
2134 ret = fetch_to_dev_buffer(scmd,
2135 fake_storep + (lba * scsi_debug_sector_size),
2136 scsi_debug_sector_size);
2138 if (-1 == ret) {
2139 write_unlock_irqrestore(&atomic_rw, iflags);
2140 return (DID_ERROR << 16);
2141 } else if ((ret < (num * scsi_debug_sector_size)) &&
2142 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2143 		printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2144 		       "IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2146 /* Copy first sector to remaining blocks */
2147 for (i = 1 ; i < num ; i++)
2148 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2149 fake_storep + (lba * scsi_debug_sector_size),
2150 scsi_debug_sector_size);
2152 if (scsi_debug_lbp())
2153 map_region(lba, num);
2154 out:
2155 write_unlock_irqrestore(&atomic_rw, iflags);
2157 return 0;
2160 struct unmap_block_desc {
2161 __be64 lba;
2162 __be32 blocks;
2163 __be32 __reserved;
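/*
 * Service UNMAP: copy in the parameter list, then range-check and unmap
 * each block descriptor in turn.
 */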
2166 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2168 unsigned char *buf;
2169 struct unmap_block_desc *desc;
2170 unsigned int i, payload_len, descriptors;
2171 int ret;
2173 ret = check_readiness(scmd, 1, devip);
2174 if (ret)
2175 return ret;
2177 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2178 BUG_ON(scsi_bufflen(scmd) != payload_len);
2180 descriptors = (payload_len - 8) / 16;
2182 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2183 if (!buf)
2184 return check_condition_result;
2186 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2188 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2189 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2191 desc = (void *)&buf[8];
2193 for (i = 0 ; i < descriptors ; i++) {
2194 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2195 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2197 ret = check_device_access_params(devip, lba, num);
2198 if (ret)
2199 goto out;
2201 unmap_region(lba, num);
2204 ret = 0;
2206 out:
2207 kfree(buf);
2209 return ret;
2212 #define SDEBUG_GET_LBA_STATUS_LEN 32
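/*
 * Service GET LBA STATUS: report whether the starting LBA is mapped and the
 * number of following blocks that share that state.
 */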
2214 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2215 struct sdebug_dev_info * devip)
2217 unsigned long long lba;
2218 unsigned int alloc_len, mapped, num;
2219 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2220 int ret;
2222 ret = check_readiness(scmd, 1, devip);
2223 if (ret)
2224 return ret;
2226 lba = get_unaligned_be64(&scmd->cmnd[2]);
2227 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2229 if (alloc_len < 24)
2230 return 0;
2232 ret = check_device_access_params(devip, lba, 1);
2233 if (ret)
2234 return ret;
2236 mapped = map_state(lba, &num);
2238 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2239 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
2240 put_unaligned_be64(lba, &arr[8]); /* LBA */
2241 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2242 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2244 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2247 #define SDEBUG_RLUN_ARR_SZ 256
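/*
 * Service REPORT LUNS: list up to max_luns LUNs per target (optionally
 * omitting LUN 0) plus the REPORT LUNS well known LUN when requested.
 */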
2249 static int resp_report_luns(struct scsi_cmnd * scp,
2250 struct sdebug_dev_info * devip)
2252 unsigned int alloc_len;
2253 int lun_cnt, i, upper, num, n, wlun, lun;
2254 unsigned char *cmd = (unsigned char *)scp->cmnd;
2255 int select_report = (int)cmd[2];
2256 struct scsi_lun *one_lun;
2257 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2258 unsigned char * max_addr;
2260 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2261 if ((alloc_len < 4) || (select_report > 2)) {
2262 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2264 return check_condition_result;
2266 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2267 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2268 lun_cnt = scsi_debug_max_luns;
2269 if (1 == select_report)
2270 lun_cnt = 0;
2271 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2272 --lun_cnt;
2273 wlun = (select_report > 0) ? 1 : 0;
2274 num = lun_cnt + wlun;
2275 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2276 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2277 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2278 sizeof(struct scsi_lun)), num);
2279 if (n < num) {
2280 wlun = 0;
2281 lun_cnt = n;
2283 one_lun = (struct scsi_lun *) &arr[8];
2284 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2285 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2286 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2287 i++, lun++) {
2288 upper = (lun >> 8) & 0x3f;
2289 if (upper)
2290 one_lun[i].scsi_lun[0] =
2291 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2292 one_lun[i].scsi_lun[1] = lun & 0xff;
2294 if (wlun) {
2295 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2296 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2297 i++;
2299 alloc_len = (unsigned char *)(one_lun + i) - arr;
2300 return fill_from_dev_buffer(scp, arr,
2301 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
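/*
 * Service XDWRITEREAD(10): XOR the data sent by the initiator into the
 * command's data-in buffer, one scatter-gather segment at a time.
 */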
2304 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2305 unsigned int num, struct sdebug_dev_info *devip)
2307 int i, j, ret = -1;
2308 unsigned char *kaddr, *buf;
2309 unsigned int offset;
2310 struct scatterlist *sg;
2311 struct scsi_data_buffer *sdb = scsi_in(scp);
2313 	/* it would be better to avoid this temporary buffer */
2314 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2315 if (!buf)
2316 return ret;
2318 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2320 offset = 0;
2321 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2322 kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2323 if (!kaddr)
2324 goto out;
2326 for (j = 0; j < sg->length; j++)
2327 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2329 offset += sg->length;
2330 kunmap_atomic(kaddr);
2332 ret = 0;
2333 out:
2334 kfree(buf);
2336 return ret;
2339 /* Called when a queued command's delay timer expires; completes the command back to the mid level. */
2340 static void timer_intr_handler(unsigned long indx)
2342 struct sdebug_queued_cmd * sqcp;
2343 unsigned long iflags;
2345 if (indx >= scsi_debug_max_queue) {
2346 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2347 "large\n");
2348 return;
2350 spin_lock_irqsave(&queued_arr_lock, iflags);
2351 sqcp = &queued_arr[(int)indx];
2352 if (! sqcp->in_use) {
2353 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2354 "interrupt\n");
2355 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2356 return;
2358 sqcp->in_use = 0;
2359 if (sqcp->done_funct) {
2360 sqcp->a_cmnd->result = sqcp->scsi_result;
2361 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2363 sqcp->done_funct = NULL;
2364 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2368 static struct sdebug_dev_info *
2369 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2371 struct sdebug_dev_info *devip;
2373 devip = kzalloc(sizeof(*devip), flags);
2374 if (devip) {
2375 devip->sdbg_host = sdbg_host;
2376 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2378 return devip;
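/*
 * Find or create the sdebug_dev_info for a scsi_device: reuse the entry
 * matching <channel, target, lun>, recycle an unused slot, or allocate a
 * new one.
 */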
2381 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2383 struct sdebug_host_info * sdbg_host;
2384 struct sdebug_dev_info * open_devip = NULL;
2385 struct sdebug_dev_info * devip =
2386 (struct sdebug_dev_info *)sdev->hostdata;
2388 if (devip)
2389 return devip;
2390 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2391 if (!sdbg_host) {
2392 printk(KERN_ERR "Host info NULL\n");
2393 return NULL;
2395 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2396 if ((devip->used) && (devip->channel == sdev->channel) &&
2397 (devip->target == sdev->id) &&
2398 (devip->lun == sdev->lun))
2399 return devip;
2400 else {
2401 if ((!devip->used) && (!open_devip))
2402 open_devip = devip;
2405 if (!open_devip) { /* try and make a new one */
2406 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2407 if (!open_devip) {
2408 printk(KERN_ERR "%s: out of memory at line %d\n",
2409 __func__, __LINE__);
2410 return NULL;
2414 open_devip->channel = sdev->channel;
2415 open_devip->target = sdev->id;
2416 open_devip->lun = sdev->lun;
2417 open_devip->sdbg_host = sdbg_host;
2418 open_devip->reset = 1;
2419 open_devip->used = 1;
2420 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2421 if (scsi_debug_dsense)
2422 open_devip->sense_buff[0] = 0x72;
2423 else {
2424 open_devip->sense_buff[0] = 0x70;
2425 open_devip->sense_buff[7] = 0xa;
2427 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2428 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2430 return open_devip;
2433 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2435 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2436 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2437 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2438 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2439 return 0;
2442 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2444 struct sdebug_dev_info *devip;
2446 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2447 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2448 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2449 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2450 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2451 devip = devInfoReg(sdp);
2452 if (NULL == devip)
2453 return 1; /* no resources, will be marked offline */
2454 sdp->hostdata = devip;
2455 if (sdp->host->cmd_per_lun)
2456 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2457 sdp->host->cmd_per_lun);
2458 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2459 if (scsi_debug_no_uld)
2460 sdp->no_uld_attach = 1;
2461 return 0;
2464 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2466 struct sdebug_dev_info *devip =
2467 (struct sdebug_dev_info *)sdp->hostdata;
2469 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2470 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2471 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2472 if (devip) {
2473 /* make this slot available for re-use */
2474 devip->used = 0;
2475 sdp->hostdata = NULL;
2479 /* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
2480 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2482 unsigned long iflags;
2483 int k;
2484 struct sdebug_queued_cmd *sqcp;
2486 spin_lock_irqsave(&queued_arr_lock, iflags);
2487 for (k = 0; k < scsi_debug_max_queue; ++k) {
2488 sqcp = &queued_arr[k];
2489 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2490 del_timer_sync(&sqcp->cmnd_timer);
2491 sqcp->in_use = 0;
2492 sqcp->a_cmnd = NULL;
2493 break;
2496 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2497 return (k < scsi_debug_max_queue) ? 1 : 0;
2500 /* Deletes (stops) timers of all queued commands */
2501 static void stop_all_queued(void)
2503 unsigned long iflags;
2504 int k;
2505 struct sdebug_queued_cmd *sqcp;
2507 spin_lock_irqsave(&queued_arr_lock, iflags);
2508 for (k = 0; k < scsi_debug_max_queue; ++k) {
2509 sqcp = &queued_arr[k];
2510 if (sqcp->in_use && sqcp->a_cmnd) {
2511 del_timer_sync(&sqcp->cmnd_timer);
2512 sqcp->in_use = 0;
2513 sqcp->a_cmnd = NULL;
2516 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2519 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2521 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2522 printk(KERN_INFO "scsi_debug: abort\n");
2523 ++num_aborts;
2524 stop_queued_cmnd(SCpnt);
2525 return SUCCESS;
2528 static int scsi_debug_biosparam(struct scsi_device *sdev,
2529 struct block_device * bdev, sector_t capacity, int *info)
2531 int res;
2532 unsigned char *buf;
2534 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2535 printk(KERN_INFO "scsi_debug: biosparam\n");
2536 buf = scsi_bios_ptable(bdev);
2537 if (buf) {
2538 res = scsi_partsize(buf, capacity,
2539 &info[2], &info[0], &info[1]);
2540 kfree(buf);
2541 if (! res)
2542 return res;
2544 info[0] = sdebug_heads;
2545 info[1] = sdebug_sectors_per;
2546 info[2] = sdebug_cylinders_per;
2547 return 0;
2550 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2552 struct sdebug_dev_info * devip;
2554 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2555 printk(KERN_INFO "scsi_debug: device_reset\n");
2556 ++num_dev_resets;
2557 if (SCpnt) {
2558 devip = devInfoReg(SCpnt->device);
2559 if (devip)
2560 devip->reset = 1;
2562 return SUCCESS;
2565 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2567 struct sdebug_host_info *sdbg_host;
2568 struct sdebug_dev_info * dev_info;
2569 struct scsi_device * sdp;
2570 struct Scsi_Host * hp;
2572 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2573 printk(KERN_INFO "scsi_debug: bus_reset\n");
2574 ++num_bus_resets;
2575 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2576 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2577 if (sdbg_host) {
2578 list_for_each_entry(dev_info,
2579 &sdbg_host->dev_info_list,
2580 dev_list)
2581 dev_info->reset = 1;
2584 return SUCCESS;
2587 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2589 struct sdebug_host_info * sdbg_host;
2590 struct sdebug_dev_info * dev_info;
2592 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2593 printk(KERN_INFO "scsi_debug: host_reset\n");
2594 ++num_host_resets;
2595 spin_lock(&sdebug_host_list_lock);
2596 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2597 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2598 dev_list)
2599 dev_info->reset = 1;
2601 spin_unlock(&sdebug_host_list_lock);
2602 stop_all_queued();
2603 return SUCCESS;
2606 /* Initializes timers in queued array */
2607 static void __init init_all_queued(void)
2609 unsigned long iflags;
2610 int k;
2611 struct sdebug_queued_cmd * sqcp;
2613 spin_lock_irqsave(&queued_arr_lock, iflags);
2614 for (k = 0; k < scsi_debug_max_queue; ++k) {
2615 sqcp = &queued_arr[k];
2616 init_timer(&sqcp->cmnd_timer);
2617 sqcp->in_use = 0;
2618 sqcp->a_cmnd = NULL;
2620 spin_unlock_irqrestore(&queued_arr_lock, iflags);
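/*
 * Write an MBR-style partition table into the ram disk: scsi_debug_num_parts
 * roughly equal primary partitions, each starting on a cylinder boundary.
 */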
2623 static void __init sdebug_build_parts(unsigned char *ramp,
2624 unsigned long store_size)
2626 struct partition * pp;
2627 int starts[SDEBUG_MAX_PARTS + 2];
2628 int sectors_per_part, num_sectors, k;
2629 int heads_by_sects, start_sec, end_sec;
2631 /* assume partition table already zeroed */
2632 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2633 return;
2634 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2635 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2636 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2637 "partitions to %d\n", SDEBUG_MAX_PARTS);
2639 num_sectors = (int)sdebug_store_sectors;
2640 sectors_per_part = (num_sectors - sdebug_sectors_per)
2641 / scsi_debug_num_parts;
2642 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2643 starts[0] = sdebug_sectors_per;
2644 for (k = 1; k < scsi_debug_num_parts; ++k)
2645 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2646 * heads_by_sects;
2647 starts[scsi_debug_num_parts] = num_sectors;
2648 starts[scsi_debug_num_parts + 1] = 0;
2650 ramp[510] = 0x55; /* magic partition markings */
2651 ramp[511] = 0xAA;
2652 pp = (struct partition *)(ramp + 0x1be);
2653 for (k = 0; starts[k + 1]; ++k, ++pp) {
2654 start_sec = starts[k];
2655 end_sec = starts[k + 1] - 1;
2656 pp->boot_ind = 0;
2658 pp->cyl = start_sec / heads_by_sects;
2659 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2660 / sdebug_sectors_per;
2661 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2663 pp->end_cyl = end_sec / heads_by_sects;
2664 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2665 / sdebug_sectors_per;
2666 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2668 pp->start_sect = cpu_to_le32(start_sec);
2669 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
2670 pp->sys_ind = 0x83; /* plain Linux partition */
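/*
 * Complete a command either immediately (delta_jiff <= 0) or via a timer in
 * the queued command array. Simulates autosense by copying the device's
 * sense buffer on CHECK CONDITION. Returns non-zero when the queue is full
 * so the mid level sees a busy host.
 */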
2674 static int schedule_resp(struct scsi_cmnd * cmnd,
2675 struct sdebug_dev_info * devip,
2676 done_funct_t done, int scsi_result, int delta_jiff)
2678 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2679 if (scsi_result) {
2680 struct scsi_device * sdp = cmnd->device;
2682 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2683 "non-zero result=0x%x\n", sdp->host->host_no,
2684 sdp->channel, sdp->id, sdp->lun, scsi_result);
2687 if (cmnd && devip) {
2688 /* simulate autosense by this driver */
2689 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2690 memcpy(cmnd->sense_buffer, devip->sense_buff,
2691 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2692 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2694 if (delta_jiff <= 0) {
2695 if (cmnd)
2696 cmnd->result = scsi_result;
2697 if (done)
2698 done(cmnd);
2699 return 0;
2700 } else {
2701 unsigned long iflags;
2702 int k;
2703 struct sdebug_queued_cmd * sqcp = NULL;
2705 spin_lock_irqsave(&queued_arr_lock, iflags);
2706 for (k = 0; k < scsi_debug_max_queue; ++k) {
2707 sqcp = &queued_arr[k];
2708 if (! sqcp->in_use)
2709 break;
2711 if (k >= scsi_debug_max_queue) {
2712 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2713 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2714 return 1; /* report busy to mid level */
2716 sqcp->in_use = 1;
2717 sqcp->a_cmnd = cmnd;
2718 sqcp->scsi_result = scsi_result;
2719 sqcp->done_funct = done;
2720 sqcp->cmnd_timer.function = timer_intr_handler;
2721 sqcp->cmnd_timer.data = k;
2722 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2723 add_timer(&sqcp->cmnd_timer);
2724 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2725 if (cmnd)
2726 cmnd->result = 0;
2727 return 0;
2730 /* Note: The following macros create attribute files in the
2731 /sys/module/scsi_debug/parameters directory. Unfortunately this
2732    driver is not notified of changes made via those files and so cannot
2733    trigger auxiliary actions as it can when the corresponding attribute in the
2734 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2736 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2737 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2738 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2739 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2740 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2741 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2742 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2743 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2744 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2745 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2746 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2747 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2748 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2749 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2750 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2751 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2752 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2753 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2754 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2755 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2756 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2757 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2758 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2759 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2760 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2761 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2762 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2763 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2764 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2765 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2766 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2767 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2768 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2769 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2770 S_IRUGO | S_IWUSR);
2771 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2772 S_IRUGO | S_IWUSR);
2774 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2775 MODULE_DESCRIPTION("SCSI debug adapter driver");
2776 MODULE_LICENSE("GPL");
2777 MODULE_VERSION(SCSI_DEBUG_VERSION);
2779 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2780 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2781 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2782 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2783 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2784 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2785 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2786 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2787 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2788 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2789 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2790 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2791 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2792 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2793 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2794 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2795 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2796 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2797 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2798 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2799 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2800 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2801 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2802 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2803 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2804 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2805 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2806 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2807 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2808 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2809 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
2810 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2811 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2812 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2813 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2815 static char sdebug_info[256];
2817 static const char * scsi_debug_info(struct Scsi_Host * shp)
2819 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2820 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2821 scsi_debug_version_date, scsi_debug_dev_size_mb,
2822 scsi_debug_opts);
2823 return sdebug_info;
2826 /* scsi_debug_write_info
2827  * Used when the driver does not have its own support for /proc/scsi
2829 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
2831 char arr[16];
2832 int opts;
2833 int minLen = length > 15 ? 15 : length;
2835 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2836 return -EACCES;
2837 memcpy(arr, buffer, minLen);
2838 arr[minLen] = '\0';
2839 if (1 != sscanf(arr, "%d", &opts))
2840 return -EINVAL;
2841 scsi_debug_opts = opts;
2842 if (scsi_debug_every_nth != 0)
2843 scsi_debug_cmnd_count = 0;
2844 return length;
2847 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
2849 seq_printf(m, "scsi_debug adapter driver, version "
2850 "%s [%s]\n"
2851 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2852 "every_nth=%d(curr:%d)\n"
2853 "delay=%d, max_luns=%d, scsi_level=%d\n"
2854 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2855 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2856 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2857 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2858 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2859 scsi_debug_cmnd_count, scsi_debug_delay,
2860 scsi_debug_max_luns, scsi_debug_scsi_level,
2861 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2862 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2863 num_host_resets, dix_reads, dix_writes, dif_errors);
2864 return 0;
2867 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2869 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2872 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2873 const char * buf, size_t count)
2875 int delay;
2876 char work[20];
2878 if (1 == sscanf(buf, "%10s", work)) {
2879 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2880 scsi_debug_delay = delay;
2881 return count;
2884 return -EINVAL;
2886 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2887 sdebug_delay_store);
2889 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2891 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2894 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2895 const char * buf, size_t count)
2897 int opts;
2898 char work[20];
2900 if (1 == sscanf(buf, "%10s", work)) {
2901 		if (0 == strnicmp(work, "0x", 2)) {
2902 if (1 == sscanf(&work[2], "%x", &opts))
2903 goto opts_done;
2904 } else {
2905 if (1 == sscanf(work, "%d", &opts))
2906 goto opts_done;
2909 return -EINVAL;
2910 opts_done:
2911 scsi_debug_opts = opts;
2912 scsi_debug_cmnd_count = 0;
2913 return count;
2915 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2916 sdebug_opts_store);
2918 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2920 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2922 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2923 const char * buf, size_t count)
2925 int n;
2927 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2928 scsi_debug_ptype = n;
2929 return count;
2931 return -EINVAL;
2933 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2935 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2937 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2939 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2940 const char * buf, size_t count)
2942 int n;
2944 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2945 scsi_debug_dsense = n;
2946 return count;
2948 return -EINVAL;
2950 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2951 sdebug_dsense_store);
2953 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2955 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2957 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2958 const char * buf, size_t count)
2960 int n;
2962 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2963 scsi_debug_fake_rw = n;
2964 return count;
2966 return -EINVAL;
2968 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2969 sdebug_fake_rw_store);
2971 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2973 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2975 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2976 const char * buf, size_t count)
2978 int n;
2980 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2981 scsi_debug_no_lun_0 = n;
2982 return count;
2984 return -EINVAL;
2986 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2987 sdebug_no_lun_0_store);
2989 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2991 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2993 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2994 const char * buf, size_t count)
2996 int n;
2998 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2999 scsi_debug_num_tgts = n;
3000 sdebug_max_tgts_luns();
3001 return count;
3003 return -EINVAL;
3005 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
3006 sdebug_num_tgts_store);
3008 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
3010 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3012 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
3014 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
3016 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3018 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
3020 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
3022 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3024 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
3025 const char * buf, size_t count)
3027 int nth;
3029 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3030 scsi_debug_every_nth = nth;
3031 scsi_debug_cmnd_count = 0;
3032 return count;
3034 return -EINVAL;
3036 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3037 sdebug_every_nth_store);
3039 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3041 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3043 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3044 const char * buf, size_t count)
3046 int n;
3048 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3049 scsi_debug_max_luns = n;
3050 sdebug_max_tgts_luns();
3051 return count;
3053 return -EINVAL;
3055 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3056 sdebug_max_luns_store);
3058 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3060 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3062 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3063 const char * buf, size_t count)
3065 int n;
3067 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3068 (n <= SCSI_DEBUG_CANQUEUE)) {
3069 scsi_debug_max_queue = n;
3070 return count;
3072 return -EINVAL;
3074 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3075 sdebug_max_queue_store);
3077 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3079 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3081 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3083 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3085 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3087 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3089 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3091 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3093 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3094 const char * buf, size_t count)
3096 int n;
3098 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3099 scsi_debug_virtual_gb = n;
3101 sdebug_capacity = get_sdebug_capacity();
3103 return count;
3105 return -EINVAL;
3107 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3108 sdebug_virtual_gb_store);
3110 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3112 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3115 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3116 const char * buf, size_t count)
3118 int delta_hosts;
3120 if (sscanf(buf, "%d", &delta_hosts) != 1)
3121 return -EINVAL;
3122 if (delta_hosts > 0) {
3123 do {
3124 sdebug_add_adapter();
3125 } while (--delta_hosts);
3126 } else if (delta_hosts < 0) {
3127 do {
3128 sdebug_remove_adapter();
3129 } while (++delta_hosts);
3131 return count;
3133 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3134 sdebug_add_host_store);
3136 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3137 char * buf)
3139 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3141 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3142 const char * buf, size_t count)
3144 int n;
3146 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3147 scsi_debug_vpd_use_hostno = n;
3148 return count;
3150 return -EINVAL;
3152 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3153 sdebug_vpd_use_hostno_store);
3155 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3157 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3159 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3161 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3163 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3165 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3167 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3169 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3171 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3173 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3175 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3177 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3179 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3181 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3183 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3185 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3187 ssize_t count;
3189 if (!scsi_debug_lbp())
3190 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3191 sdebug_store_sectors);
3193 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3195 buf[count++] = '\n';
3196 buf[count++] = 0;
3198 return count;
3200 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3202 static ssize_t sdebug_removable_show(struct device_driver *ddp,
3203 char *buf)
3205 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3207 static ssize_t sdebug_removable_store(struct device_driver *ddp,
3208 const char *buf, size_t count)
3210 int n;
3212 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3213 scsi_debug_removable = (n > 0);
3214 return count;
3216 return -EINVAL;
3218 DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show,
3219 sdebug_removable_store);
3222 /* Note: The following function creates attribute files in the
3223 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3224 files (over those found in the /sys/module/scsi_debug/parameters
3225 directory) is that auxiliary actions can be triggered when an attribute
3226 is changed. For example see: sdebug_add_host_store() above.
3228 static int do_create_driverfs_files(void)
3230 int ret;
3232 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3233 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3234 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3235 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3236 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3237 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3238 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3239 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3240 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3241 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3242 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3243 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3244 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3245 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3246 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable);
3247 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3248 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3249 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3250 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3251 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3252 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3253 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3254 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3255 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3256 return ret;
3259 static void do_remove_driverfs_files(void)
3261 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3262 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3263 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3264 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3265 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3266 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3267 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3268 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3269 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3270 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3271 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3272 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable);
3273 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3274 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3275 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3276 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3277 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3278 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3279 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3280 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3281 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3282 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3283 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3284 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3287 struct device *pseudo_primary;
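/*
 * Module init: validate parameters, allocate the fake store (plus DIF and
 * provisioning-map storage when enabled), register the pseudo bus and
 * driver, then create the initial adapters.
 */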
3289 static int __init scsi_debug_init(void)
3291 unsigned long sz;
3292 int host_to_add;
3293 int k;
3294 int ret;
3296 switch (scsi_debug_sector_size) {
3297 case 512:
3298 case 1024:
3299 case 2048:
3300 case 4096:
3301 break;
3302 default:
3303 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3304 scsi_debug_sector_size);
3305 return -EINVAL;
3308 switch (scsi_debug_dif) {
3310 case SD_DIF_TYPE0_PROTECTION:
3311 case SD_DIF_TYPE1_PROTECTION:
3312 case SD_DIF_TYPE2_PROTECTION:
3313 case SD_DIF_TYPE3_PROTECTION:
3314 break;
3316 default:
3317 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3318 return -EINVAL;
3321 if (scsi_debug_guard > 1) {
3322 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3323 return -EINVAL;
3326 if (scsi_debug_ato > 1) {
3327 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3328 return -EINVAL;
3331 if (scsi_debug_physblk_exp > 15) {
3332 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3333 scsi_debug_physblk_exp);
3334 return -EINVAL;
3337 if (scsi_debug_lowest_aligned > 0x3fff) {
3338 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3339 scsi_debug_lowest_aligned);
3340 return -EINVAL;
3343 if (scsi_debug_dev_size_mb < 1)
3344 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3345 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3346 sdebug_store_sectors = sz / scsi_debug_sector_size;
3347 sdebug_capacity = get_sdebug_capacity();
3349 /* play around with geometry, don't waste too much on track 0 */
3350 sdebug_heads = 8;
3351 sdebug_sectors_per = 32;
3352 	if (scsi_debug_dev_size_mb >= 256)
3353 		sdebug_heads = 64;
3354 	else if (scsi_debug_dev_size_mb >= 16)
3355 		sdebug_heads = 32;
3356 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3357 (sdebug_sectors_per * sdebug_heads);
3358 if (sdebug_cylinders_per >= 1024) {
3359 /* other LLDs do this; implies >= 1GB ram disk ... */
3360 sdebug_heads = 255;
3361 sdebug_sectors_per = 63;
3362 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3363 (sdebug_sectors_per * sdebug_heads);
3366 fake_storep = vmalloc(sz);
3367 if (NULL == fake_storep) {
3368 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3369 return -ENOMEM;
3371 memset(fake_storep, 0, sz);
3372 if (scsi_debug_num_parts > 0)
3373 sdebug_build_parts(fake_storep, sz);
3375 if (scsi_debug_dix) {
3376 int dif_size;
3378 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3379 dif_storep = vmalloc(dif_size);
3381 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3382 dif_size, dif_storep);
3384 if (dif_storep == NULL) {
3385 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3386 ret = -ENOMEM;
3387 goto free_vm;
3390 memset(dif_storep, 0xff, dif_size);
3393 /* Logical Block Provisioning */
3394 if (scsi_debug_lbp()) {
3395 scsi_debug_unmap_max_blocks =
3396 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3398 scsi_debug_unmap_max_desc =
3399 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3401 scsi_debug_unmap_granularity =
3402 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3404 if (scsi_debug_unmap_alignment &&
3405 scsi_debug_unmap_granularity <=
3406 scsi_debug_unmap_alignment) {
3407 printk(KERN_ERR
3408 "%s: ERR: unmap_granularity <= unmap_alignment\n",
3409 __func__);
3410 return -EINVAL;
3413 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3414 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3416 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3417 map_size);
3419 if (map_storep == NULL) {
3420 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3421 ret = -ENOMEM;
3422 goto free_vm;
3425 bitmap_zero(map_storep, map_size);
3427 /* Map first 1KB for partition table */
3428 if (scsi_debug_num_parts)
3429 map_region(0, 2);
3432 pseudo_primary = root_device_register("pseudo_0");
3433 if (IS_ERR(pseudo_primary)) {
3434 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3435 ret = PTR_ERR(pseudo_primary);
3436 goto free_vm;
3438 ret = bus_register(&pseudo_lld_bus);
3439 if (ret < 0) {
3440 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3441 ret);
3442 goto dev_unreg;
3444 ret = driver_register(&sdebug_driverfs_driver);
3445 if (ret < 0) {
3446 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3447 ret);
3448 goto bus_unreg;
3450 ret = do_create_driverfs_files();
3451 if (ret < 0) {
3452 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3453 ret);
3454 goto del_files;
3457 init_all_queued();
3459 host_to_add = scsi_debug_add_host;
3460 scsi_debug_add_host = 0;
3462 for (k = 0; k < host_to_add; k++) {
3463 if (sdebug_add_adapter()) {
3464 printk(KERN_ERR "scsi_debug_init: "
3465 "sdebug_add_adapter failed k=%d\n", k);
3466 break;
3470 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3471 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3472 scsi_debug_add_host);
3474 return 0;
3476 del_files:
3477 do_remove_driverfs_files();
3478 driver_unregister(&sdebug_driverfs_driver);
3479 bus_unreg:
3480 bus_unregister(&pseudo_lld_bus);
3481 dev_unreg:
3482 root_device_unregister(pseudo_primary);
3483 free_vm:
3484 if (map_storep)
3485 vfree(map_storep);
3486 if (dif_storep)
3487 vfree(dif_storep);
3488 vfree(fake_storep);
3490 return ret;
3493 static void __exit scsi_debug_exit(void)
3495 int k = scsi_debug_add_host;
3497 stop_all_queued();
3498 for (; k; k--)
3499 sdebug_remove_adapter();
3500 do_remove_driverfs_files();
3501 driver_unregister(&sdebug_driverfs_driver);
3502 bus_unregister(&pseudo_lld_bus);
3503 root_device_unregister(pseudo_primary);
3505 if (dif_storep)
3506 vfree(dif_storep);
3508 vfree(fake_storep);
3511 device_initcall(scsi_debug_init);
3512 module_exit(scsi_debug_exit);
3514 static void sdebug_release_adapter(struct device * dev)
3516 struct sdebug_host_info *sdbg_host;
3518 sdbg_host = to_sdebug_host(dev);
3519 kfree(sdbg_host);
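/*
 * Create one simulated adapter: allocate its host info, pre-create
 * num_tgts * max_luns device entries and register the device on the
 * pseudo bus.
 */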
3522 static int sdebug_add_adapter(void)
3524 int k, devs_per_host;
3525 int error = 0;
3526 struct sdebug_host_info *sdbg_host;
3527 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3529 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3530 if (NULL == sdbg_host) {
3531 printk(KERN_ERR "%s: out of memory at line %d\n",
3532 __func__, __LINE__);
3533 return -ENOMEM;
3536 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3538 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3539 for (k = 0; k < devs_per_host; k++) {
3540 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3541 if (!sdbg_devinfo) {
3542 printk(KERN_ERR "%s: out of memory at line %d\n",
3543 __func__, __LINE__);
3544 error = -ENOMEM;
3545 goto clean;
3549 spin_lock(&sdebug_host_list_lock);
3550 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3551 spin_unlock(&sdebug_host_list_lock);
3553 sdbg_host->dev.bus = &pseudo_lld_bus;
3554 sdbg_host->dev.parent = pseudo_primary;
3555 sdbg_host->dev.release = &sdebug_release_adapter;
3556 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3558 error = device_register(&sdbg_host->dev);
3560 if (error)
3561 goto clean;
3563 ++scsi_debug_add_host;
3564 return error;
3566 clean:
3567 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3568 dev_list) {
3569 list_del(&sdbg_devinfo->dev_list);
3570 kfree(sdbg_devinfo);
3573 kfree(sdbg_host);
3574 return error;
3577 static void sdebug_remove_adapter(void)
3579 struct sdebug_host_info * sdbg_host = NULL;
3581 spin_lock(&sdebug_host_list_lock);
3582 if (!list_empty(&sdebug_host_list)) {
3583 sdbg_host = list_entry(sdebug_host_list.prev,
3584 struct sdebug_host_info, host_list);
3585 list_del(&sdbg_host->host_list);
3587 spin_unlock(&sdebug_host_list_lock);
3589 if (!sdbg_host)
3590 return;
3592 device_unregister(&sdbg_host->dev);
3593 --scsi_debug_add_host;
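/*
 * Main command entry point (host lock held): decode the CDB, apply
 * every_nth error injection, dispatch to the resp_* handlers and complete
 * the command through schedule_resp().
 */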
3596 static
3597 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3599 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3600 int len, k;
3601 unsigned int num;
3602 unsigned long long lba;
3603 u32 ei_lba;
3604 int errsts = 0;
3605 int target = SCpnt->device->id;
3606 struct sdebug_dev_info *devip = NULL;
3607 int inj_recovered = 0;
3608 int inj_transport = 0;
3609 int inj_dif = 0;
3610 int inj_dix = 0;
3611 int delay_override = 0;
3612 int unmap = 0;
3614 scsi_set_resid(SCpnt, 0);
3615 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3616 printk(KERN_INFO "scsi_debug: cmd ");
3617 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3618 printk("%02x ", (int)cmd[k]);
3619 printk("\n");
3622 if (target == SCpnt->device->host->hostt->this_id) {
3623 printk(KERN_INFO "scsi_debug: initiator's id used as "
3624 "target!\n");
3625 return schedule_resp(SCpnt, NULL, done,
3626 DID_NO_CONNECT << 16, 0);
3629 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3630 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3631 return schedule_resp(SCpnt, NULL, done,
3632 DID_NO_CONNECT << 16, 0);
3633 devip = devInfoReg(SCpnt->device);
3634 if (NULL == devip)
3635 return schedule_resp(SCpnt, NULL, done,
3636 DID_NO_CONNECT << 16, 0);
3638 if ((scsi_debug_every_nth != 0) &&
3639 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3640 scsi_debug_cmnd_count = 0;
3641 if (scsi_debug_every_nth < -1)
3642 scsi_debug_every_nth = -1;
3643 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3644 return 0; /* ignore command causing timeout */
3645 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
3646 scsi_medium_access_command(SCpnt))
3647 return 0; /* time out reads and writes */
3648 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3649 inj_recovered = 1; /* to reads and writes below */
3650 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3651 inj_transport = 1; /* to reads and writes below */
3652 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3653 inj_dif = 1; /* to reads and writes below */
3654 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3655 inj_dix = 1; /* to reads and writes below */
3658 if (devip->wlun) {
3659 switch (*cmd) {
3660 case INQUIRY:
3661 case REQUEST_SENSE:
3662 case TEST_UNIT_READY:
3663 case REPORT_LUNS:
3664 break; /* only allowable wlun commands */
3665 default:
3666 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3667 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3668 "not supported for wlun\n", *cmd);
3669 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3670 INVALID_OPCODE, 0);
3671 errsts = check_condition_result;
                        return schedule_resp(SCpnt, devip, done, errsts,
                                             0);
                }
        }
3677 switch (*cmd) {
3678 case INQUIRY: /* mandatory, ignore unit attention */
3679 delay_override = 1;
3680 errsts = resp_inquiry(SCpnt, target, devip);
3681 break;
3682 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3683 delay_override = 1;
3684 errsts = resp_requests(SCpnt, devip);
3685 break;
3686 case REZERO_UNIT: /* actually this is REWIND for SSC */
3687 case START_STOP:
3688 errsts = resp_start_stop(SCpnt, devip);
3689 break;
3690 case ALLOW_MEDIUM_REMOVAL:
3691 errsts = check_readiness(SCpnt, 1, devip);
3692 if (errsts)
3693 break;
3694 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3695 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3696 cmd[4] ? "inhibited" : "enabled");
3697 break;
3698 case SEND_DIAGNOSTIC: /* mandatory */
3699 errsts = check_readiness(SCpnt, 1, devip);
3700 break;
3701 case TEST_UNIT_READY: /* mandatory */
3702 delay_override = 1;
3703 errsts = check_readiness(SCpnt, 0, devip);
3704 break;
3705 case RESERVE:
3706 errsts = check_readiness(SCpnt, 1, devip);
3707 break;
3708 case RESERVE_10:
3709 errsts = check_readiness(SCpnt, 1, devip);
3710 break;
3711 case RELEASE:
3712 errsts = check_readiness(SCpnt, 1, devip);
3713 break;
3714 case RELEASE_10:
3715 errsts = check_readiness(SCpnt, 1, devip);
3716 break;
3717 case READ_CAPACITY:
3718 errsts = resp_readcap(SCpnt, devip);
3719 break;
3720 case SERVICE_ACTION_IN:
3721 if (cmd[1] == SAI_READ_CAPACITY_16)
3722 errsts = resp_readcap16(SCpnt, devip);
3723 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3725 if (scsi_debug_lbp() == 0) {
3726 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3727 INVALID_COMMAND_OPCODE, 0);
3728 errsts = check_condition_result;
3729 } else
3730 errsts = resp_get_lba_status(SCpnt, devip);
3731 } else {
3732 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3733 INVALID_OPCODE, 0);
3734 errsts = check_condition_result;
3736 break;
3737 case MAINTENANCE_IN:
3738 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3739 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3740 INVALID_OPCODE, 0);
3741 errsts = check_condition_result;
3742 break;
3744 errsts = resp_report_tgtpgs(SCpnt, devip);
3745 break;
3746 case READ_16:
3747 case READ_12:
3748 case READ_10:
3749 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3750 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3751 cmd[1] & 0xe0) {
3752 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3753 INVALID_COMMAND_OPCODE, 0);
3754 errsts = check_condition_result;
3755 break;
3758 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3759 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3760 (cmd[1] & 0xe0) == 0)
3761 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3763 /* fall through */
3764 case READ_6:
3765 read:
3766 errsts = check_readiness(SCpnt, 0, devip);
3767 if (errsts)
3768 break;
3769 if (scsi_debug_fake_rw)
3770 break;
3771 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3772 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3773 if (inj_recovered && (0 == errsts)) {
3774 mk_sense_buffer(devip, RECOVERED_ERROR,
3775 THRESHOLD_EXCEEDED, 0);
3776 errsts = check_condition_result;
3777 } else if (inj_transport && (0 == errsts)) {
3778 mk_sense_buffer(devip, ABORTED_COMMAND,
3779 TRANSPORT_PROBLEM, ACK_NAK_TO);
3780 errsts = check_condition_result;
3781 } else if (inj_dif && (0 == errsts)) {
3782 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3783 errsts = illegal_condition_result;
3784 } else if (inj_dix && (0 == errsts)) {
3785 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3786 errsts = illegal_condition_result;
3788 break;
3789 case REPORT_LUNS: /* mandatory, ignore unit attention */
3790 delay_override = 1;
3791 errsts = resp_report_luns(SCpnt, devip);
3792 break;
3793 case VERIFY: /* 10 byte SBC-2 command */
3794 errsts = check_readiness(SCpnt, 0, devip);
3795 break;
3796 case WRITE_16:
3797 case WRITE_12:
3798 case WRITE_10:
3799 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3800 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3801 cmd[1] & 0xe0) {
3802 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3803 INVALID_COMMAND_OPCODE, 0);
3804 errsts = check_condition_result;
3805 break;
3808 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3809 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3810 (cmd[1] & 0xe0) == 0)
3811 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3813 /* fall through */
3814 case WRITE_6:
3815 write:
3816 errsts = check_readiness(SCpnt, 0, devip);
3817 if (errsts)
3818 break;
3819 if (scsi_debug_fake_rw)
3820 break;
3821 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3822 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3823 if (inj_recovered && (0 == errsts)) {
3824 mk_sense_buffer(devip, RECOVERED_ERROR,
3825 THRESHOLD_EXCEEDED, 0);
3826 errsts = check_condition_result;
3827 } else if (inj_dif && (0 == errsts)) {
3828 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3829 errsts = illegal_condition_result;
3830 } else if (inj_dix && (0 == errsts)) {
3831 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3832 errsts = illegal_condition_result;
3834 break;
3835 case WRITE_SAME_16:
3836 case WRITE_SAME:
3837 if (cmd[1] & 0x8) {
3838 if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
3839 (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
3840 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3841 INVALID_FIELD_IN_CDB, 0);
3842 errsts = check_condition_result;
3843 } else
3844 unmap = 1;
3846 if (errsts)
3847 break;
3848 errsts = check_readiness(SCpnt, 0, devip);
3849 if (errsts)
3850 break;
3851 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3852 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3853 break;
3854 case UNMAP:
3855 errsts = check_readiness(SCpnt, 0, devip);
3856 if (errsts)
3857 break;
3859 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
3860 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3861 INVALID_COMMAND_OPCODE, 0);
3862 errsts = check_condition_result;
3863 } else
3864 errsts = resp_unmap(SCpnt, devip);
3865 break;
3866 case MODE_SENSE:
3867 case MODE_SENSE_10:
3868 errsts = resp_mode_sense(SCpnt, target, devip);
3869 break;
3870 case MODE_SELECT:
3871 errsts = resp_mode_select(SCpnt, 1, devip);
3872 break;
3873 case MODE_SELECT_10:
3874 errsts = resp_mode_select(SCpnt, 0, devip);
3875 break;
3876 case LOG_SENSE:
3877 errsts = resp_log_sense(SCpnt, devip);
3878 break;
3879 case SYNCHRONIZE_CACHE:
3880 delay_override = 1;
3881 errsts = check_readiness(SCpnt, 0, devip);
3882 break;
3883 case WRITE_BUFFER:
3884 errsts = check_readiness(SCpnt, 1, devip);
3885 break;
3886 case XDWRITEREAD_10:
3887 if (!scsi_bidi_cmnd(SCpnt)) {
3888 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3889 INVALID_FIELD_IN_CDB, 0);
3890 errsts = check_condition_result;
3891 break;
3894 errsts = check_readiness(SCpnt, 0, devip);
3895 if (errsts)
3896 break;
3897 if (scsi_debug_fake_rw)
3898 break;
3899 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3900 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3901 if (errsts)
3902 break;
3903 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3904 if (errsts)
3905 break;
3906 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3907 break;
3908 case VARIABLE_LENGTH_CMD:
3909 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3911 if ((cmd[10] & 0xe0) == 0)
3912 printk(KERN_ERR
3913 "Unprotected RD/WR to DIF device\n");
3915 if (cmd[9] == READ_32) {
3916 BUG_ON(SCpnt->cmd_len < 32);
3917 goto read;
3920 if (cmd[9] == WRITE_32) {
3921 BUG_ON(SCpnt->cmd_len < 32);
3922 goto write;
3926 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3927 INVALID_FIELD_IN_CDB, 0);
3928 errsts = check_condition_result;
3929 break;
3931 default:
3932 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3933 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3934 "supported\n", *cmd);
3935 errsts = check_readiness(SCpnt, 1, devip);
3936 if (errsts)
3937 break; /* Unit attention takes precedence */
3938 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3939 errsts = check_condition_result;
3940 break;
        }
        return schedule_resp(SCpnt, devip, done, errsts,
                             (delay_override ? 0 : scsi_debug_delay));
}
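/*
 * DEF_SCSI_QCMD() generates scsi_debug_queuecommand(), which takes the
 * host lock around scsi_debug_queuecommand_lck() so the template below can
 * use the conventional queuecommand prototype.
 */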
static DEF_SCSI_QCMD(scsi_debug_queuecommand)

static struct scsi_host_template sdebug_driver_template = {
        .show_info = scsi_debug_show_info,
        .write_info = scsi_debug_write_info,
        .proc_name = sdebug_proc_name,
        .name = "SCSI DEBUG",
        .info = scsi_debug_info,
        .slave_alloc = scsi_debug_slave_alloc,
        .slave_configure = scsi_debug_slave_configure,
        .slave_destroy = scsi_debug_slave_destroy,
        .ioctl = scsi_debug_ioctl,
        .queuecommand = scsi_debug_queuecommand,
        .eh_abort_handler = scsi_debug_abort,
        .eh_bus_reset_handler = scsi_debug_bus_reset,
        .eh_device_reset_handler = scsi_debug_device_reset,
        .eh_host_reset_handler = scsi_debug_host_reset,
        .bios_param = scsi_debug_biosparam,
        .can_queue = SCSI_DEBUG_CANQUEUE,
        .this_id = 7,
        .sg_tablesize = 256,
        .cmd_per_lun = 16,
        .max_sectors = 0xffff,
        .use_clustering = DISABLE_CLUSTERING,
        .module = THIS_MODULE,
};
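/*
 * Bus probe callback: allocate a Scsi_Host for the emulated adapter, size
 * its target/LUN space from the module parameters, advertise the selected
 * DIF/DIX protection capabilities and guard type, then add and scan the
 * host.
 */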
static int sdebug_driver_probe(struct device *dev)
{
        int error = 0;
        struct sdebug_host_info *sdbg_host;
        struct Scsi_Host *hpnt;
        int host_prot;

        sdbg_host = to_sdebug_host(dev);

        sdebug_driver_template.can_queue = scsi_debug_max_queue;
        hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
        if (NULL == hpnt) {
                printk(KERN_ERR "%s: scsi_register failed\n", __func__);
                error = -ENODEV;
                return error;
        }

        sdbg_host->shost = hpnt;
        *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
        if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
                hpnt->max_id = scsi_debug_num_tgts + 1;
        else
                hpnt->max_id = scsi_debug_num_tgts;
        hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;  /* = scsi_debug_max_luns; */

        host_prot = 0;

        switch (scsi_debug_dif) {

        case SD_DIF_TYPE1_PROTECTION:
                host_prot = SHOST_DIF_TYPE1_PROTECTION;
                if (scsi_debug_dix)
                        host_prot |= SHOST_DIX_TYPE1_PROTECTION;
                break;

        case SD_DIF_TYPE2_PROTECTION:
                host_prot = SHOST_DIF_TYPE2_PROTECTION;
                if (scsi_debug_dix)
                        host_prot |= SHOST_DIX_TYPE2_PROTECTION;
                break;

        case SD_DIF_TYPE3_PROTECTION:
                host_prot = SHOST_DIF_TYPE3_PROTECTION;
                if (scsi_debug_dix)
                        host_prot |= SHOST_DIX_TYPE3_PROTECTION;
                break;

        default:
                if (scsi_debug_dix)
                        host_prot |= SHOST_DIX_TYPE0_PROTECTION;
                break;
        }

        scsi_host_set_prot(hpnt, host_prot);

        printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
               (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
               (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
               (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
               (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
               (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
               (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
               (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

        if (scsi_debug_guard == 1)
                scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
        else
                scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

        error = scsi_add_host(hpnt, &sdbg_host->dev);
        if (error) {
                printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
                error = -ENODEV;
                scsi_host_put(hpnt);
        } else
                scsi_scan_host(hpnt);

        return error;
}
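/*
 * Bus remove callback: detach the Scsi_Host from the mid-layer, free the
 * per-device entries created by sdebug_add_adapter(), and drop the host
 * reference.
 */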
static int sdebug_driver_remove(struct device *dev)
{
        struct sdebug_host_info *sdbg_host;
        struct sdebug_dev_info *sdbg_devinfo, *tmp;

        sdbg_host = to_sdebug_host(dev);

        if (!sdbg_host) {
                printk(KERN_ERR "%s: Unable to locate host info\n",
                       __func__);
                return -ENODEV;
        }

        scsi_remove_host(sdbg_host->shost);

        list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
                                 dev_list) {
                list_del(&sdbg_devinfo->dev_list);
                kfree(sdbg_devinfo);
        }

        scsi_host_put(sdbg_host->shost);
        return 0;
}
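/*
 * The pseudo bus has exactly one driver, so every adapter device placed on
 * it matches unconditionally.
 */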
static int pseudo_lld_bus_match(struct device *dev,
                                struct device_driver *dev_driver)
{
        return 1;
}

static struct bus_type pseudo_lld_bus = {
        .name = "pseudo",
        .match = pseudo_lld_bus_match,
        .probe = sdebug_driver_probe,
        .remove = sdebug_driver_remove,
};