/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;

	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		/*
		 * Only override a device's pi_prot_type if no T10-PI is
		 * available, and sess_prot_type has been explicitly enabled.
		 */
		if (!pi_prot_type)
			pi_prot_type = sess->sess_prot_type;

		if (pi_prot_type)
			buf[12] = (pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
		buf[14] |= 0x80;

		/*
		 * LBPRZ signifies that zeroes will be read back from an LBA after
		 * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
		 */
		if (dev->dev_attrib.unmap_zeroes_data)
			buf[14] |= 0x40;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}

static sense_reason_t
sbc_emulate_startstop(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * See sbc3r36 section 5.25
	 * Immediate bit should be set since there is nothing to complete
	 * POWER CONDITION MODIFIER 0h
	 */
	if (!(cdb[1] & 1) || cdb[2] || cdb[3])
		return TCM_INVALID_CDB_FIELD;

	/*
	 * See sbc3r36 section 5.25
	 * POWER CONDITION 0h START_VALID - process START and LOEJ
	 */
	if (cdb[4] >> 4 & 0xf)
		return TCM_INVALID_CDB_FIELD;

	/*
	 * See sbc3r36 section 5.25
	 * LOEJ 0h - nothing to load or unload
	 * START 1h - we are ready
	 */
	if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
		return TCM_INVALID_CDB_FIELD;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if (nolb) {
		ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
		if (ret)
			return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
	       (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
	       (cdb[30] << 8) + cdb[31];
}
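
/*
 * The helpers below extract the starting LBA from the 6-byte (21-bit),
 * 10/12-byte (32-bit), 16-byte (64-bit) and 32-byte variable-length
 * CDB formats.
 */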
static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpws) {
			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
			       " has emulate_tpws disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		cmd->execute_cmd = sbc_execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
	if (ret)
		return ret;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
					   int *post_ret)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;

	return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
					     int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret = TCM_NO_SENSE;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_SENT) {
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
		*post_ret = 1;

		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return ret;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
						 int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
		return TCM_NO_SENSE;
	/*
	 * Handle special case for zero-length COMPARE_AND_WRITE
	 */
	if (!cmd->data_length)
		goto out;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset + block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd, false);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if (rc != 0) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

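/*
 * Map the CDB RDPROTECT/WRPROTECT field and fabric T10-PI support onto
 * cmd->prot_op and cmd->prot_checks for the backend to act upon.
 */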
static int
sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
			       protect ? TARGET_PROT_DOUT_PASS :
			       TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
			       protect ? TARGET_PROT_DIN_PASS :
			       TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}

static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;
	int sp_ops = cmd->se_sess->sup_prot_ops;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;
	bool fabric_prot = false;

	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (unlikely(protect &&
		    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
			pr_err("CDB contains protect bit, but device + fabric does"
			       " not advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * See if the fabric supports T10-PI, and the session has been
		 * configured to allow exporting the PROTECT=1 feature bit with
		 * backend devices that don't support T10-PI.
		 */
		fabric_prot = is_write ?
			      !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
			      !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));

		if (fabric_prot && cmd->se_sess->sess_prot_type) {
			pi_prot_type = cmd->se_sess->sess_prot_type;
			break;
		}
		if (!protect)
			return TCM_NO_SENSE;
		/* Fallthrough */
	default:
		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
		       "PROTECT: 0x%02x\n", cdb[0], protect);
		return TCM_INVALID_CDB_FIELD;
	}

	if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}

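/*
 * Reject the DPO and FUA bits in byte 1 of the CDB when the device does
 * not advertise support for them; set SCF_FUA when FUA is requested.
 */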
static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
	if (cdb[1] & 0x10) {
		/* see explanation in spc_emulate_modesense */
		if (!target_check_fua(dev)) {
			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
			       " does not advertise support for DPO\n", cdb[0]);
			return -EINVAL;
		}
	}
	if (cdb[1] & 0x8) {
		if (!target_check_fua(dev)) {
			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
			       " does not advertise support for FUA write\n",
			       cdb[0]);
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_FUA;
	}
	return 0;
}

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	cmd->protocol_data = ops;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			if (sbc_check_dpofua(dev, cmd, cdb))
				return TCM_INVALID_CDB_FIELD;
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpu) {
			pr_err("Got UNMAP, but backend device has"
			       " emulate_tpu disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = sbc_execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case START_STOP:
		size = 0;
		cmd->execute_cmd = sbc_emulate_startstop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

static sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = ops->execute_unmap(cmd, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

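/*
 * Compute T10-PI tuples (guard/app/ref tags) over the data in
 * cmd->t_data_sg and store them into cmd->t_prot_sg for DIF INSERT.
 */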
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg, *psg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;
	unsigned int block_size = dev->dev_attrib.block_size;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		for (j = 0; j < psg->length;
				j += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			if (offset >= dsg->length) {
				offset -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + j;
			avail = min(block_size, dsg->length - offset);
			crc = crc_t10dif(daddr + offset, avail);
			if (avail < block_size) {
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				offset = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, offset);
			} else {
				offset += block_size;
			}

			sdt->guard_tag = cpu_to_be16(crc);
			if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (cmd->data_direction == DMA_TO_DEVICE) ?
				 "WRITE" : "READ", (unsigned long long)sector,
				 sdt->guard_tag, sdt->app_tag,
				 be32_to_cpu(sdt->ref_tag));

			sector++;
		}

		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}
}

static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
		  __u16 crc, sector_t sector, unsigned int ei_lba)
{
	__be16 csum;

	if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
		goto check_ref;

	csum = cpu_to_be16(crc);

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
		       " csum 0x%04x\n", (unsigned long long)sector,
		       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

check_ref:
	if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
		return 0;

	if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

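/*
 * Copy protection information between cmd->t_prot_sg and the passed
 * scatterlist; the direction of the copy depends on the read flag.
 */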
void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		       struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	if (!sg)
		return;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			kunmap_atomic(addr - sg->offset - offset);

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
		}
		kunmap_atomic(paddr - psg->offset);
	}
}
EXPORT_SYMBOL(sbc_dif_copy_prot);

sense_reason_t
sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
	       unsigned int ei_lba, struct scatterlist *psg, int psg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i;
	sense_reason_t rc;
	int dsg_off = 0;
	unsigned int block_size = dev->dev_attrib.block_size;

	for (; psg && sector < start + sectors; psg = sg_next(psg)) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		for (i = psg_off; i < psg->length &&
				sector < start + sectors;
				i += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			if (dsg_off >= dsg->length) {
				dsg_off -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + i;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				dsg_off += block_size;
				goto next;
			}

			avail = min(block_size, dsg->length - dsg_off);
			crc = crc_t10dif(daddr + dsg_off, avail);
			if (avail < block_size) {
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				dsg_off = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, dsg_off);
			} else {
				dsg_off += block_size;
			}

			rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
			if (rc) {
				kunmap_atomic(daddr - dsg->offset);
				kunmap_atomic(paddr - psg->offset);
				cmd->bad_sector = sector;
				return rc;
			}
next:
			sector++;
			ei_lba++;
		}

		psg_off = 0;
		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify);