/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
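
/*
 * Emulate READ CAPACITY (10): report the last LBA (capped at 0xffffffff for
 * larger devices) and the logical block size in the 8-byte parameter data.
 */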
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}
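
/*
 * Emulate READ CAPACITY (16): report the full 64-bit last LBA, the logical
 * block size, and the thin provisioning enable (TPE) bit in a 32-byte
 * parameter data buffer.
 */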
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] = 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}
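
/*
 * Return the number of blocks covered by a WRITE SAME (10/16/32) CDB.  A
 * transfer length of zero means the range extends to the last block of the
 * device, per SBC-3.
 */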
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non-zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);
static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}
static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long long end_lba;
	u32 sectors;

	sectors = cmd->data_length / dev->dev_attrib.block_size;
	end_lba = dev->transport->get_blocks(dev) + 1;

	if (cmd->t_task_lba + sectors > end_lba) {
		pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
			cmd->t_task_lba, sectors, end_lba);
		return -EINVAL;
	}

	return 0;
}
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}
static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}
static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
	       (cdb[12] << 8) + cdb[13];
}
/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
	       (cdb[30] << 8) + cdb[31];
}
static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}
static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
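
/*
 * Validate a WRITE SAME request (PBDATA/LBDATA/ANCHOR not supported, range
 * and max_write_same_len checks) and select the backend handler, using the
 * UNMAP bit to choose between the write-same and block-discard paths.
 */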
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}
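
/*
 * Completion callback for XDWRITEREAD (10/32): XOR the data-out payload
 * with the blocks just read so the result can be returned through the
 * data-in (BIDI) buffer.
 */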
static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}
static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}
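
/*
 * Completion callback for the WRITE phase of COMPARE AND WRITE; releases
 * ->caw_sem taken in sbc_compare_and_write().
 */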
static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}
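
/*
 * Completion callback for the READ phase of COMPARE AND WRITE: compare the
 * blocks just read against the verify payload and, on a match, rewire the
 * command to submit the write payload via sbc_execute_rw().
 */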
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
		return TCM_NO_SENSE;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    0);
		}
		len -= block_size;
		i++;
	}
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = MSG_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}
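
/*
 * Entry point for COMPARE AND WRITE: take ->caw_sem to serialize the
 * emulation and submit the READ phase using the BIDI SGL.
 */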
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if ((rc != 0) || signal_pending(current)) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}
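
/*
 * Main SBC CDB parser: decode the LBA and transfer length, set up
 * ->execute_cmd and any completion callback for the opcode, and fall back
 * to spc_parse_cdb() for opcodes that are not block specific.
 */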
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		if (cdb[1] & 0x8)
			cmd->se_cmd_flags |= SCF_FUA;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			if (cdb[1] & 0x8)
				cmd->se_cmd_flags |= SCF_FUA;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = 8;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (!ops->execute_sync_cache) {
			size = 0;
			cmd->execute_cmd = sbc_emulate_noop;
			break;
		}

		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}

		size = sbc_get_size(cmd, sectors);

		/*
		 * Check to ensure that LBA + Range does not exceed past end of
		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
		 */
		if (cmd->t_task_lba || sectors) {
			if (sbc_check_valid_sectors(cmd) < 0)
				return TCM_ADDRESS_OUT_OF_RANGE;
		}
		cmd->execute_cmd = ops->execute_sync_cache;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}
	/* reject any command that we don't have a handler for */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;

		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
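
/* SBC backends always report a Direct Access Block Device (TYPE_DISK). */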
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);
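
/*
 * Walk the UNMAP parameter list and invoke the backend-supplied do_unmap_fn
 * for each block descriptor after validating its LBA range.
 */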
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;
	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);