// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);

static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	put_unaligned_be32(blocks, &buf[0]);
	put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}

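/*
 * For reference, the 8-byte READ CAPACITY (10) parameter data built above
 * is laid out per SBC:
 *
 *   bytes 0-3: RETURNED LOGICAL BLOCK ADDRESS (LBA of the last block, or
 *              0xffffffff when the last LBA does not fit in 32 bits, which
 *              tells the initiator to issue READ CAPACITY (16) instead)
 *   bytes 4-7: LOGICAL BLOCK LENGTH IN BYTES
 */
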
static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;

	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	put_unaligned_be64(blocks, &buf[0]);
	put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		/*
		 * Only override a device's pi_prot_type if no T10-PI is
		 * available, and sess_prot_type has been explicitly enabled.
		 */
		if (!pi_prot_type)
			pi_prot_type = sess->sess_prot_type;

		if (pi_prot_type)
			buf[12] = (pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);

		put_unaligned_be16(lalba, &buf[14]);
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
		buf[14] |= 0x80;

		/*
		 * LBPRZ signifies that zeroes will be read back from an LBA after
		 * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
		 */
		if (dev->dev_attrib.unmap_zeroes_data)
			buf[14] |= 0x40;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}

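/*
 * Worked example of the P_TYPE/PROT_EN encoding above: byte 12 of the
 * READ CAPACITY (16) parameter data carries P_TYPE in bits 3:1 and
 * PROT_EN in bit 0, so (pi_prot_type - 1) << 1 | 0x1 yields:
 *
 *   TARGET_DIF_TYPE1_PROT -> 0x01 (P_TYPE=0, PROT_EN=1)
 *   TARGET_DIF_TYPE2_PROT -> 0x03 (P_TYPE=1, PROT_EN=1)
 *   TARGET_DIF_TYPE3_PROT -> 0x05 (P_TYPE=2, PROT_EN=1)
 */
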
static sense_reason_t
sbc_emulate_startstop(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * See sbc3r36 section 5.25
	 * Immediate bit should be set since there is nothing to complete
	 * POWER CONDITION MODIFIER 0h
	 */
	if (!(cdb[1] & 1) || cdb[2] || cdb[3])
		return TCM_INVALID_CDB_FIELD;

	/*
	 * See sbc3r36 section 5.25
	 * POWER CONDITION 0h START_VALID - process START and LOEJ
	 */
	if (cdb[4] >> 4 & 0xf)
		return TCM_INVALID_CDB_FIELD;

	/*
	 * See sbc3r36 section 5.25
	 * LOEJ 0h - nothing to load or unload
	 * START 1h - we are ready
	 */
	if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
		return TCM_INVALID_CDB_FIELD;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

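/*
 * Example of the zero-length convention above: for a backend whose
 * ->get_blocks() returns 999 (last LBA), a WRITE_SAME at LBA 100 with a
 * NUMBER OF LOGICAL BLOCKS field of zero covers 999 - 100 + 1 = 900
 * sectors, i.e. everything from the starting LBA through the end of the
 * device.
 */
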
static sense_reason_t
sbc_execute_write_same_unmap(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if (nolb) {
		ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
		if (ret)
			return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value. SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written. Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return get_unaligned_be16(&cdb[7]);
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[6]);
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[10]);
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[28]);
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return get_unaligned_be24(&cdb[1]) & 0x1fffff;
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return get_unaligned_be32(&cdb[2]);
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[2]);
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	return get_unaligned_be64(&cdb[12]);
}

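/*
 * The helpers above simply pull the fixed-position LBA and TRANSFER LENGTH
 * fields out of the CDB. For example, READ/WRITE (10) carries a 32-bit
 * big-endian LBA at bytes 2-5 and a 16-bit transfer length at bytes 7-8,
 * while READ/WRITE (16) carries a 64-bit LBA at bytes 2-9 and a 32-bit
 * transfer length at bytes 10-13.
 */
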
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpws) {
			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
			       " has emulate_tpws disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		cmd->execute_cmd = sbc_execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
	if (ret)
		return ret;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}

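/*
 * For reference, the flags byte handed to sbc_setup_write_same() above
 * (byte 1 of WRITE_SAME(10)/(16), byte 10 of WRITE_SAME(32)) decodes as:
 *
 *   0x10 ANCHOR - always rejected, since we report ANC_SUP == 0
 *   0x08 UNMAP  - routed to sbc_execute_write_same_unmap()
 *   0x04 PBDATA - unsupported
 *   0x02 LBDATA - unsupported
 */
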
static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
					   int *post_ret)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;

	if (!success)
		return 0;

	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;

	return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
					     int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret = TCM_NO_SENSE;

	spin_lock_irq(&cmd->t_state_lock);
	if (success) {
		*post_ret = 1;

		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return ret;
}

static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
						 int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	struct sg_table write_tbl = { };
	struct scatterlist *write_sg, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
		return TCM_NO_SENSE;
	/*
	 * Handle special case for zero-length COMPARE_AND_WRITE
	 */
	if (!cmd->data_length)
		goto out;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_debug("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		*post_ret = 1;
		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	write_sg = write_tbl.sgl;
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset + block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd, false);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	sg_free_table(&write_tbl);
	kfree(buf);
	return ret;
}

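/*
 * Note on the data-out layout handled above: a COMPARE AND WRITE data-out
 * buffer carries 2 * NoLB blocks - the verify (compare) data first,
 * followed by the write data. With the current NoLB=1 restriction both
 * halves live in a single page, which is why the write SGL entry points
 * at m.piter.sg->offset + block_size when block_size < PAGE_SIZE.
 */
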
static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if (rc != 0) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}

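/*
 * dev->caw_sem is what gives COMPARE AND WRITE its read-compare-write
 * atomicity: only one CAW per device is in flight between the READ
 * submitted above and the semaphore release in
 * compare_and_write_callback() or compare_and_write_post().
 */
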
static int
sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
			       protect ? TARGET_PROT_DOUT_PASS :
			       TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
			       protect ? TARGET_PROT_DIN_PASS :
			       TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}

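/*
 * The two switches above follow the SBC-3 WRPROTECT/RDPROTECT checking
 * tables. PROTECT 0x1 and 0x5 check the guard tag (plus the reference
 * tag for Type 1), 0x2 checks only the reference tag, 0x4 only the guard
 * tag, and 0x3 disables checking. PROTECT 0x0 differs by direction: on a
 * write no PI crosses the wire so the target INSERTs fresh PI unchecked,
 * while on a read the backend PI is verified and then STRIPped before
 * the data is returned.
 */
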
static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;
	int sp_ops = cmd->se_sess->sup_prot_ops;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;
	bool fabric_prot = false;

	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (unlikely(protect &&
		    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
			pr_err("CDB contains protect bit, but device + fabric does"
			       " not advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * See if the fabric supports T10-PI, and the session has been
		 * configured to allow exporting the PROTECT=1 feature bit with
		 * backend devices that don't support T10-PI.
		 */
		fabric_prot = is_write ?
			      !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
			      !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));

		if (fabric_prot && cmd->se_sess->sess_prot_type) {
			pi_prot_type = cmd->se_sess->sess_prot_type;
			break;
		}
		if (!protect)
			return TCM_NO_SENSE;
		/* Fallthrough */
	default:
		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
		       "PROTECT: 0x%02x\n", cdb[0], protect);
		return TCM_INVALID_CDB_FIELD;
	}

	if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}

static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
	if (cdb[1] & 0x10) {
		/* see explanation in spc_emulate_modesense */
		if (!target_check_fua(dev)) {
			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
			       " does not advertise support for DPO\n", cdb[0]);
			return -EINVAL;
		}
	}
	if (cdb[1] & 0x8) {
		if (!target_check_fua(dev)) {
			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
			       " does not advertise support for FUA write\n",
			       cdb[0]);
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_FUA;
	}
	return 0;
}

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	cmd->protocol_data = ops;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
	case WRITE_VERIFY_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			if (sbc_check_dpofua(dev, cmd, cdb))
				return TCM_INVALID_CDB_FIELD;
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		if (!dev->dev_attrib.emulate_caw) {
			pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
					   dev->se_hba->backend->ops->name,
					   config_item_name(&dev->dev_group.cg_item),
					   dev->t10_wwn.unit_serial);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = get_unaligned_be32(&cdb[10]);
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpu) {
			pr_err("Got UNMAP, but backend device has"
			       " emulate_tpu disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = sbc_execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
	case VERIFY_16:
		size = 0;
		if (cdb[0] == VERIFY) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case START_STOP:
		size = 0;
		cmd->execute_cmd = sbc_emulate_startstop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
				"(lba %llu, sectors %u)\n",
				end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

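/*
 * Note on the check_lba label above: VERIFY and SYNCHRONIZE_CACHE carry an
 * LBA and sector count but do not set SCF_SCSI_DATA_CDB, so they jump
 * straight into the range check that the SCF_SCSI_DATA_CDB test would
 * otherwise skip for them.
 */
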
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);

static sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		if (range) {
			ret = ops->execute_unmap(cmd, lba, range);
			if (ret)
				goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

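/*
 * Layout of the UNMAP parameter list parsed above (see the UNMAP command
 * in SBC-3):
 *
 *   bytes 0-1: UNMAP DATA LENGTH (dl)
 *   bytes 2-3: UNMAP BLOCK DESCRIPTOR DATA LENGTH (bd_dl)
 *   bytes 4-7: reserved
 *   byte 8...: block descriptors, 16 bytes each -
 *              UNMAP LOGICAL BLOCK ADDRESS (be64) at offset 0,
 *              NUMBER OF LOGICAL BLOCKS (be32) at offset 8,
 *              4 reserved bytes.
 */
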
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg, *psg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;
	unsigned int block_size = dev->dev_attrib.block_size;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		for (j = 0; j < psg->length;
				j += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			if (offset >= dsg->length) {
				offset -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + j;
			avail = min(block_size, dsg->length - offset);
			crc = crc_t10dif(daddr + offset, avail);
			if (avail < block_size) {
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				offset = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, offset);
			} else {
				offset += block_size;
			}

			sdt->guard_tag = cpu_to_be16(crc);
			if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (cmd->data_direction == DMA_TO_DEVICE) ?
				 "WRITE" : "READ", (unsigned long long)sector,
				 sdt->guard_tag, sdt->app_tag,
				 be32_to_cpu(sdt->ref_tag));

			sector++;
		}

		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}
}

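/*
 * Each 8-byte PI tuple generated above follows the T10 DIF format of
 * struct t10_pi_tuple in <linux/t10-pi.h>:
 *
 *   __be16 guard_tag;   CRC16 (crc_t10dif) of the data block
 *   __be16 app_tag;     application tag, always 0 here
 *   __be32 ref_tag;     low 32 bits of the LBA for Type 1
 */
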
static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
		  __u16 crc, sector_t sector, unsigned int ei_lba)
{
	__be16 csum;

	if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
		goto check_ref;

	csum = cpu_to_be16(crc);

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
			" csum 0x%04x\n", (unsigned long long)sector,
			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

check_ref:
	if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
		return 0;

	if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}

void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		       struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	if (!sg)
		return;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			kunmap_atomic(addr - sg->offset - offset);

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
		}
		kunmap_atomic(paddr - psg->offset);
	}
}
EXPORT_SYMBOL(sbc_dif_copy_prot);

sense_reason_t
sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
	       unsigned int ei_lba, struct scatterlist *psg, int psg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i;
	sense_reason_t rc;
	int dsg_off = 0;
	unsigned int block_size = dev->dev_attrib.block_size;

	for (; psg && sector < start + sectors; psg = sg_next(psg)) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		for (i = psg_off; i < psg->length &&
				sector < start + sectors;
				i += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			if (dsg_off >= dsg->length) {
				dsg_off -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + i;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == T10_PI_APP_ESCAPE) {
				dsg_off += block_size;
				goto next;
			}

			avail = min(block_size, dsg->length - dsg_off);
			crc = crc_t10dif(daddr + dsg_off, avail);
			if (avail < block_size) {
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				dsg_off = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, dsg_off);
			} else {
				dsg_off += block_size;
			}

			rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
			if (rc) {
				kunmap_atomic(daddr - dsg->offset);
				kunmap_atomic(paddr - psg->offset);
				cmd->bad_sector = sector;
				return rc;
			}
next:
			sector++;
			ei_lba++;
		}

		psg_off = 0;
		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify);