/*******************************************************************************
 * Filename:  target_core_xcopy.c
 *
 * This file contains support for SPC-4 Extended-Copy offload with generic
 * TCM backends.
 *
 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
 *
 * Author:
 * Nicholas A. Bellinger <nab@daterainc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static struct workqueue_struct *xcopy_wq = NULL;

static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
        int off = 0;

        buf[off++] = (0x6 << 4);
        buf[off++] = 0x01;
        buf[off++] = 0x40;
        buf[off] = (0x5 << 4);

        spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
        return 0;
}
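
/*
 * Note (not from the original source): the bytes built above form the prefix
 * of an NAA IEEE Registered Extended (type 6h) designator: the 6h NAA nibble
 * followed by what appears to be the IEEE OUI 0x001405 used elsewhere by the
 * target core, with spc_parse_naa_6h_vendor_specific() filling in the vendor
 * specific identifier from the backend's unit serial.  Because the same bytes
 * are reported via the EVPD 0x83 device identification page, the memcmp()
 * based WWN matching below can associate descriptors with local se_devices.
 */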

static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
                                        bool src)
{
        struct se_device *se_dev;
        unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
        int rc;

        if (src)
                dev_wwn = &xop->dst_tid_wwn[0];
        else
                dev_wwn = &xop->src_tid_wwn[0];

        mutex_lock(&g_device_mutex);
        list_for_each_entry(se_dev, &g_device_list, g_dev_node) {

                if (!se_dev->dev_attrib.emulate_3pc)
                        continue;

                memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
                target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);

                rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
                if (rc != 0)
                        continue;

                if (src) {
                        xop->dst_dev = se_dev;
                        pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
                                " se_dev\n", xop->dst_dev);
                } else {
                        xop->src_dev = se_dev;
                        pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
                                " se_dev\n", xop->src_dev);
                }

                rc = target_depend_item(&se_dev->dev_group.cg_item);
                if (rc != 0) {
                        pr_err("configfs_depend_item attempt failed:"
                                " %d for se_dev: %p\n", rc, se_dev);
                        mutex_unlock(&g_device_mutex);
                        return rc;
                }

                pr_debug("Called configfs_depend_item for se_dev: %p"
                        " se_dev->se_dev_group: %p\n", se_dev,
                        &se_dev->dev_group);

                mutex_unlock(&g_device_mutex);
                return 0;
        }
        mutex_unlock(&g_device_mutex);

        pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
        return -EINVAL;
}

static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
                                unsigned char *p, bool src)
{
        unsigned char *desc = p;
        unsigned short ript;
        u8 desig_len;
        /*
         * Extract RELATIVE INITIATOR PORT IDENTIFIER
         */
        ript = get_unaligned_be16(&desc[2]);
        pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
        /*
         * Check for supported code set, association, and designator type
         */
        if ((desc[4] & 0x0f) != 0x1) {
                pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
                return -EINVAL;
        }
        if ((desc[5] & 0x30) != 0x00) {
                pr_err("XCOPY 0xe4: association other than LUN not supported\n");
                return -EINVAL;
        }
        if ((desc[5] & 0x0f) != 0x3) {
                pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
                        (desc[5] & 0x0f));
                return -EINVAL;
        }
        /*
         * Check for matching 16 byte length for NAA IEEE Registered Extended
         * Assigned designator
         */
        desig_len = desc[7];
        if (desig_len != 16) {
                pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
                return -EINVAL;
        }
        pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
        /*
         * Check for NAA IEEE Registered Extended Assigned header..
         */
        if ((desc[8] & 0xf0) != 0x60) {
                pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
                        (desc[8] & 0xf0));
                return -EINVAL;
        }

        if (src) {
                memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
                /*
                 * Determine if the source designator matches the local device
                 */
                if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
                                XCOPY_NAA_IEEE_REGEX_LEN)) {
                        xop->op_origin = XCOL_SOURCE_RECV_OP;
                        xop->src_dev = se_cmd->se_dev;
                        pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
                                " received xop\n", xop->src_dev);
                }
        } else {
                memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
                /*
                 * Determine if the destination designator matches the local device
                 */
                if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
                                XCOPY_NAA_IEEE_REGEX_LEN)) {
                        xop->op_origin = XCOL_DEST_RECV_OP;
                        xop->dst_dev = se_cmd->se_dev;
                        pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
                                " received xop\n", xop->dst_dev);
                }
        }

        return 0;
}
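
/*
 * For reference, a sketch of the identification descriptor target descriptor
 * (type code 0xe4) as consumed above; offsets match the parsing code and, to
 * the best of our reading, SPC-4:
 *
 *   byte 0      : descriptor type code (0xe4)
 *   bytes 2-3   : RELATIVE INITIATOR PORT IDENTIFIER
 *   byte 4      : code set (low nibble, must be 1h = binary)
 *   byte 5      : association (bits 5-4) and designator type (low nibble, 3h = NAA)
 *   byte 7      : designator length (must be 16 for NAA IEEE Registered Extended)
 *   bytes 8-23  : NAA IEEE Registered Extended designator (the WWN compared above)
 */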

static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
                                struct xcopy_op *xop, unsigned char *p,
                                unsigned short tdll)
{
        struct se_device *local_dev = se_cmd->se_dev;
        unsigned char *desc = p;
        int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
        unsigned short start = 0;
        bool src = true;

        if (offset != 0) {
                pr_err("XCOPY target descriptor list length is not a"
                        " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
                return -EINVAL;
        }
        if (tdll > 64) {
                pr_err("XCOPY target descriptor list supports a maximum of"
                        " two src/dest descriptors, tdll: %hu too large..\n", tdll);
                return -EINVAL;
        }
        /*
         * Generate an IEEE Registered Extended designator based upon the
         * se_device the XCOPY was received upon..
         */
        memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
        target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);

        while (start < tdll) {
                /*
                 * Check for the 0xe4 target descriptor type code, using
                 * VPD 0x83 WWN matching ..
                 */
                switch (desc[0]) {
                case 0xe4:
                        rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
                                                        &desc[0], src);
                        if (rc != 0)
                                goto out;
                        /*
                         * Assume target descriptors are in source -> destination order..
                         */
                        if (src)
                                src = false;
                        else
                                src = true;
                        start += XCOPY_TARGET_DESC_LEN;
                        desc += XCOPY_TARGET_DESC_LEN;
                        ret++;
                        break;
                default:
                        pr_err("XCOPY unsupported descriptor type code:"
                                " 0x%02x\n", desc[0]);
                        goto out;
                }
        }

        if (xop->op_origin == XCOL_SOURCE_RECV_OP)
                rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
        else
                rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);

        if (rc < 0)
                goto out;

        pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
                 xop->src_dev, &xop->src_tid_wwn[0]);
        pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
                 xop->dst_dev, &xop->dst_tid_wwn[0]);

        return ret;

out:
        return -EINVAL;
}

static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
                                        unsigned char *p)
{
        unsigned char *desc = p;
        int dc = (desc[1] & 0x02);
        unsigned short desc_len;

        desc_len = get_unaligned_be16(&desc[2]);
        if (desc_len != 0x18) {
                pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
                        " %hu\n", desc_len);
                return -EINVAL;
        }

        xop->stdi = get_unaligned_be16(&desc[4]);
        xop->dtdi = get_unaligned_be16(&desc[6]);
        pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
                desc_len, xop->stdi, xop->dtdi, dc);

        xop->nolb = get_unaligned_be16(&desc[10]);
        xop->src_lba = get_unaligned_be64(&desc[12]);
        xop->dst_lba = get_unaligned_be64(&desc[20]);
        pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
                xop->nolb, (unsigned long long)xop->src_lba,
                (unsigned long long)xop->dst_lba);

        if (dc != 0) {
                xop->dbl = (desc[29] & 0xff) << 16;
                xop->dbl |= (desc[30] & 0xff) << 8;
                xop->dbl |= desc[31] & 0xff;

                pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
        }
        return 0;
}
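
/*
 * For reference, the offsets of the block -> block segment descriptor
 * (type code 0x02) as parsed above:
 *
 *   bytes 2-3   : descriptor length (must be 0x18)
 *   bytes 4-5   : source target descriptor index (stdi)
 *   bytes 6-7   : destination target descriptor index (dtdi)
 *   bytes 10-11 : number of logical blocks to copy (nolb)
 *   bytes 12-19 : source LBA
 *   bytes 20-27 : destination LBA
 *   bytes 29-31 : destination block device length (dbl), read only when DC=1
 */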

static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
                                struct xcopy_op *xop, unsigned char *p,
                                unsigned int sdll)
{
        unsigned char *desc = p;
        unsigned int start = 0;
        int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;

        if (offset != 0) {
                pr_err("XCOPY segment descriptor list length is not a"
                        " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
                return -EINVAL;
        }

        while (start < sdll) {
                /*
                 * Check segment descriptor type code for block -> block
                 */
                switch (desc[0]) {
                case 0x02:
                        rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
                        if (rc < 0)
                                goto out;

                        ret++;
                        start += XCOPY_SEGMENT_DESC_LEN;
                        desc += XCOPY_SEGMENT_DESC_LEN;
                        break;
                default:
                        pr_err("XCOPY unsupported segment descriptor"
                                " type: 0x%02x\n", desc[0]);
                        goto out;
                }
        }

        return ret;

out:
        return -EINVAL;
}

/*
 * Start xcopy_pt ops
 */

struct xcopy_pt_cmd {
        bool remote_port;
        struct se_cmd se_cmd;
        struct xcopy_op *xcopy_op;
        struct completion xpt_passthrough_sem;
        unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};

struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;
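
/*
 * The tpg/nacl/sess triple above forms a minimal internal "xcopy-pt" nexus:
 * it never corresponds to a real fabric login, and exists only so the
 * internal READ/WRITE commands generated below can be submitted through the
 * normal se_cmd machinery.  target_xcopy_setup_pt() wires the three
 * structures together at init time.
 */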

static char *xcopy_pt_get_fabric_name(void)
{
        return "xcopy-pt";
}

static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
        return 0;
}

static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
        struct se_device *remote_dev;

        if (xop->op_origin == XCOL_SOURCE_RECV_OP)
                remote_dev = xop->dst_dev;
        else
                remote_dev = xop->src_dev;

        pr_debug("Calling configfs_undepend_item for"
                  " remote_dev: %p remote_dev->dev_group: %p\n",
                  remote_dev, &remote_dev->dev_group.cg_item);

        target_undepend_item(&remote_dev->dev_group.cg_item);
}

static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
{
        struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
                                struct xcopy_pt_cmd, se_cmd);

        kfree(xpt_cmd);
}

static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
{
        struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
                                struct xcopy_pt_cmd, se_cmd);

        complete(&xpt_cmd->xpt_passthrough_sem);
        return 0;
}

static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
{
        return 0;
}

static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}

static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
        return 0;
}

static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
{
        return 0;
}

static const struct target_core_fabric_ops xcopy_pt_tfo = {
        .get_fabric_name        = xcopy_pt_get_fabric_name,
        .get_cmd_state          = xcopy_pt_get_cmd_state,
        .release_cmd            = xcopy_pt_release_cmd,
        .check_stop_free        = xcopy_pt_check_stop_free,
        .write_pending          = xcopy_pt_write_pending,
        .write_pending_status   = xcopy_pt_write_pending_status,
        .queue_data_in          = xcopy_pt_queue_data_in,
        .queue_status           = xcopy_pt_queue_status,
};

/*
 * End xcopy_pt_ops
 */

int target_xcopy_setup_pt(void)
{
        xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
        if (!xcopy_wq) {
                pr_err("Unable to allocate xcopy_wq\n");
                return -ENOMEM;
        }

        memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
        INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
        INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
        INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);

        xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;

        memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
        INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
        INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
        memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
        INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
        INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
        INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
        spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);

        xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
        xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;

        xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
        xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;

        return 0;
}

void target_xcopy_release_pt(void)
{
        if (xcopy_wq)
                destroy_workqueue(xcopy_wq);
}

static void target_xcopy_setup_pt_port(
        struct xcopy_pt_cmd *xpt_cmd,
        struct xcopy_op *xop,
        bool remote_port)
{
        struct se_cmd *ec_cmd = xop->xop_se_cmd;
        struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;

        if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
                /*
                 * Honor destination port reservations for X-COPY PUSH emulation
                 * when the CDB is received on the local source port, and blocks
                 * are READ locally to be WRITTEN on the remote destination port.
                 */
                if (remote_port) {
                        xpt_cmd->remote_port = remote_port;
                } else {
                        pt_cmd->se_lun = ec_cmd->se_lun;
                        pt_cmd->se_dev = ec_cmd->se_dev;

                        pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
                                " %p\n", pt_cmd->se_dev);
                        pt_cmd->se_lun = ec_cmd->se_lun;
                        pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
                                pt_cmd->se_lun);
                }
        } else {
                /*
                 * Honor source port reservations for X-COPY PULL emulation
                 * when the CDB is received on the local destination port, and
                 * blocks are READ from the remote source port to be WRITTEN
                 * on the local destination port.
                 */
                if (remote_port) {
                        xpt_cmd->remote_port = remote_port;
                } else {
                        pt_cmd->se_lun = ec_cmd->se_lun;
                        pt_cmd->se_dev = ec_cmd->se_dev;

                        pr_debug("Honoring local DST port from ec_cmd->se_dev:"
                                " %p\n", pt_cmd->se_dev);
                        pt_cmd->se_lun = ec_cmd->se_lun;
                        pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
                                pt_cmd->se_lun);
                }
        }
}

static void target_xcopy_init_pt_lun(struct se_device *se_dev,
                struct se_cmd *pt_cmd, bool remote_port)
{
        /*
         * Don't allocate + init a pt_cmd->se_lun if honoring local port for
         * reservations.  The pt_cmd->se_lun pointer will be setup from within
         * target_xcopy_setup_pt_port()
         */
        if (remote_port) {
                pr_debug("Setup emulated se_dev: %p from se_dev\n",
                        pt_cmd->se_dev);
                pt_cmd->se_lun = &se_dev->xcopy_lun;
                pt_cmd->se_dev = se_dev;
        }

        pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}

static int target_xcopy_setup_pt_cmd(
        struct xcopy_pt_cmd *xpt_cmd,
        struct xcopy_op *xop,
        struct se_device *se_dev,
        unsigned char *cdb,
        bool remote_port,
        bool alloc_mem)
{
        struct se_cmd *cmd = &xpt_cmd->se_cmd;
        sense_reason_t sense_rc;
        int ret = 0, rc;
        /*
         * Setup LUN+port to honor reservations based upon xop->op_origin for
         * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
         */
        target_xcopy_init_pt_lun(se_dev, cmd, remote_port);

        xpt_cmd->xcopy_op = xop;
        target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);

        cmd->tag = 0;
        sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
        if (sense_rc) {
                ret = -EINVAL;
                goto out;
        }

        if (alloc_mem) {
                rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
                                      cmd->data_length, false, false);
                if (rc < 0) {
                        ret = rc;
                        goto out;
                }
                /*
                 * Set this bit so that transport_free_pages() allows the
                 * caller to release SGLs + physical memory allocated by
                 * transport_generic_get_mem()..
                 */
                cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
        } else {
                /*
                 * Here the previously allocated SGLs for the internal READ
                 * are mapped zero-copy to the internal WRITE.
                 */
                sense_rc = transport_generic_map_mem_to_cmd(cmd,
                                        xop->xop_data_sg, xop->xop_data_nents,
                                        NULL, 0);
                if (sense_rc) {
                        ret = -EINVAL;
                        goto out;
                }

                pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
                        " %u\n", cmd->t_data_sg, cmd->t_data_nents);
        }

        return 0;

out:
        return ret;
}

static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
{
        struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
        sense_reason_t sense_rc;

        sense_rc = transport_generic_new_cmd(se_cmd);
        if (sense_rc)
                return -EINVAL;

        if (se_cmd->data_direction == DMA_TO_DEVICE)
                target_execute_cmd(se_cmd);

        wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);

        pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
                        se_cmd->scsi_status);

        return (se_cmd->scsi_status) ? -EINVAL : 0;
}
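
/*
 * Rough shape of the passthrough completion path above:
 * transport_generic_new_cmd() allocates/maps the data buffer and, for the
 * READ case, starts execution itself; for DMA_TO_DEVICE the caller kicks off
 * execution via target_execute_cmd() since the write_pending stub above is a
 * no-op.  Completion arrives when xcopy_pt_check_stop_free() completes
 * xpt_passthrough_sem, and any non-zero SCSI status is collapsed to -EINVAL.
 */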

static int target_xcopy_read_source(
        struct se_cmd *ec_cmd,
        struct xcopy_op *xop,
        struct se_device *src_dev,
        sector_t src_lba,
        u32 src_sectors)
{
        struct xcopy_pt_cmd *xpt_cmd;
        struct se_cmd *se_cmd;
        u32 length = (src_sectors * src_dev->dev_attrib.block_size);
        int rc;
        unsigned char cdb[16];
        bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);

        xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
        if (!xpt_cmd) {
                pr_err("Unable to allocate xcopy_pt_cmd\n");
                return -ENOMEM;
        }
        init_completion(&xpt_cmd->xpt_passthrough_sem);
        se_cmd = &xpt_cmd->se_cmd;

        memset(&cdb[0], 0, 16);
        cdb[0] = READ_16;
        put_unaligned_be64(src_lba, &cdb[2]);
        put_unaligned_be32(src_sectors, &cdb[10]);
        pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
                (unsigned long long)src_lba, src_sectors, length);

        transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
                              DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
        xop->src_pt_cmd = xpt_cmd;

        rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
                                remote_port, true);
        if (rc < 0) {
                transport_generic_free_cmd(se_cmd, 0);
                return rc;
        }

        xop->xop_data_sg = se_cmd->t_data_sg;
        xop->xop_data_nents = se_cmd->t_data_nents;
        pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
                " memory\n", xop->xop_data_sg, xop->xop_data_nents);

        rc = target_xcopy_issue_pt_cmd(xpt_cmd);
        if (rc < 0) {
                transport_generic_free_cmd(se_cmd, 0);
                return rc;
        }
        /*
         * Clear off the allocated t_data_sg, that has been saved for
         * zero-copy WRITE submission reuse in struct xcopy_op..
         */
        se_cmd->t_data_sg = NULL;
        se_cmd->t_data_nents = 0;

        return 0;
}
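
/*
 * The SGL saved into xop->xop_data_sg above is reused zero-copy by
 * target_xcopy_write_destination(): the WRITE command maps the same pages
 * via transport_generic_map_mem_to_cmd() instead of allocating a second
 * buffer, and t_data_sg is cleared here so freeing the READ command does
 * not release memory the WRITE still needs.
 */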

static int target_xcopy_write_destination(
        struct se_cmd *ec_cmd,
        struct xcopy_op *xop,
        struct se_device *dst_dev,
        sector_t dst_lba,
        u32 dst_sectors)
{
        struct xcopy_pt_cmd *xpt_cmd;
        struct se_cmd *se_cmd;
        u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
        int rc;
        unsigned char cdb[16];
        bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);

        xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
        if (!xpt_cmd) {
                pr_err("Unable to allocate xcopy_pt_cmd\n");
                return -ENOMEM;
        }
        init_completion(&xpt_cmd->xpt_passthrough_sem);
        se_cmd = &xpt_cmd->se_cmd;

        memset(&cdb[0], 0, 16);
        cdb[0] = WRITE_16;
        put_unaligned_be64(dst_lba, &cdb[2]);
        put_unaligned_be32(dst_sectors, &cdb[10]);
        pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
                (unsigned long long)dst_lba, dst_sectors, length);

        transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
                              DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
        xop->dst_pt_cmd = xpt_cmd;

        rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
                                remote_port, false);
        if (rc < 0) {
                struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
                /*
                 * If the failure happened before the t_mem_list hand-off in
                 * target_xcopy_setup_pt_cmd(), reset memory + clear the flag
                 * so that the core releases this memory on error during
                 * X-COPY WRITE I/O.
                 */
                src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
                src_cmd->t_data_sg = xop->xop_data_sg;
                src_cmd->t_data_nents = xop->xop_data_nents;

                transport_generic_free_cmd(se_cmd, 0);
                return rc;
        }

        rc = target_xcopy_issue_pt_cmd(xpt_cmd);
        if (rc < 0) {
                se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
                transport_generic_free_cmd(se_cmd, 0);
                return rc;
        }

        return 0;
}

static void target_xcopy_do_work(struct work_struct *work)
{
        struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
        struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
        struct se_cmd *ec_cmd = xop->xop_se_cmd;
        sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
        unsigned int max_sectors;
        int rc;
        unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;

        end_lba = src_lba + nolb;
        /*
         * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
         * smallest max_sectors between src_dev + dst_dev
         */
        max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
                          dst_dev->dev_attrib.hw_max_sectors);
        max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);

        max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));

        pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
                        nolb, max_nolb, (unsigned long long)end_lba);
        pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
                        (unsigned long long)src_lba, (unsigned long long)dst_lba);

        while (src_lba < end_lba) {
                cur_nolb = min(nolb, max_nolb);

                pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
                        " cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);

                rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
                if (rc < 0)
                        goto out;

                src_lba += cur_nolb;
                pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
                                (unsigned long long)src_lba);

                pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
                        " cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);

                rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
                                                dst_lba, cur_nolb);
                if (rc < 0) {
                        transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
                        goto out;
                }

                dst_lba += cur_nolb;
                pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
                                (unsigned long long)dst_lba);

                copied_nolb += cur_nolb;
                nolb -= cur_nolb;

                transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
                xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

                transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
        }

        xcopy_pt_undepend_remotedev(xop);
        kfree(xop);

        pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
                (unsigned long long)src_lba, (unsigned long long)dst_lba);
        pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
                copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);

        pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
        target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
        return;

out:
        xcopy_pt_undepend_remotedev(xop);
        kfree(xop);

        pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
        ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
        target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
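
/*
 * Rough shape of the copy loop above: the total nolb from the segment
 * descriptor is split into chunks of at most max_sectors blocks (the smaller
 * of both devices' hw_max_sectors, further capped by XCOPY_MAX_SECTORS), and
 * each chunk is a synchronous internal READ_16 from the source followed by a
 * WRITE_16 to the destination reusing the same data SGL.
 */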

sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
        struct se_device *dev = se_cmd->se_dev;
        struct xcopy_op *xop = NULL;
        unsigned char *p = NULL, *seg_desc;
        unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
        sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
        int rc;
        unsigned short tdll;

        if (!dev->dev_attrib.emulate_3pc) {
                pr_err("EXTENDED_COPY operation explicitly disabled\n");
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        sa = se_cmd->t_task_cdb[1] & 0x1f;
        if (sa != 0x00) {
                pr_err("EXTENDED_COPY(LID4) not supported\n");
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
        if (!xop) {
                pr_err("Unable to allocate xcopy_op\n");
                return TCM_OUT_OF_RESOURCES;
        }
        xop->xop_se_cmd = se_cmd;

        p = transport_kmap_data_sg(se_cmd);
        if (!p) {
                pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
                kfree(xop);
                return TCM_OUT_OF_RESOURCES;
        }

        list_id = p[0];
        list_id_usage = (p[1] & 0x18) >> 3;

        /*
         * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
         */
        tdll = get_unaligned_be16(&p[2]);
        sdll = get_unaligned_be32(&p[8]);

        inline_dl = get_unaligned_be32(&p[12]);
        if (inline_dl != 0) {
                pr_err("XCOPY with non zero inline data length\n");
                goto out;
        }

        pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
                " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
                tdll, sdll, inline_dl);

        rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
        if (rc <= 0)
                goto out;

        if (xop->src_dev->dev_attrib.block_size !=
            xop->dst_dev->dev_attrib.block_size) {
                pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
                       " block_size: %u currently unsupported\n",
                       xop->src_dev->dev_attrib.block_size,
                       xop->dst_dev->dev_attrib.block_size);
                xcopy_pt_undepend_remotedev(xop);
                ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                goto out;
        }

        pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
                                rc * XCOPY_TARGET_DESC_LEN);
        seg_desc = &p[16];
        seg_desc += (rc * XCOPY_TARGET_DESC_LEN);

        rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
        if (rc <= 0) {
                xcopy_pt_undepend_remotedev(xop);
                goto out;
        }
        transport_kunmap_data_sg(se_cmd);

        pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
                                rc * XCOPY_SEGMENT_DESC_LEN);

        INIT_WORK(&xop->xop_work, target_xcopy_do_work);
        queue_work(xcopy_wq, &xop->xop_work);
        return TCM_NO_SENSE;

out:
        if (p)
                transport_kunmap_data_sg(se_cmd);
        kfree(xop);
        return ret;
}
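
/*
 * For reference, the EXTENDED_COPY (LID1) parameter list header consumed by
 * target_do_xcopy() above, at the offsets the code reads:
 *
 *   byte 0      : LIST IDENTIFIER
 *   byte 1      : LIST ID USAGE (bits 4-3)
 *   bytes 2-3   : TARGET DESCRIPTOR LIST LENGTH (tdll)
 *   bytes 8-11  : SEGMENT DESCRIPTOR LIST LENGTH (sdll)
 *   bytes 12-15 : INLINE DATA LENGTH (must be zero here)
 *   byte 16...  : target descriptors, immediately followed by segment descriptors
 */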

static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
        unsigned char *p;

        p = transport_kmap_data_sg(se_cmd);
        if (!p) {
                pr_err("transport_kmap_data_sg failed in"
                       " target_rcr_operating_parameters\n");
                return TCM_OUT_OF_RESOURCES;
        }

        if (se_cmd->data_length < 54) {
                pr_err("Receive Copy Results Op Parameters length"
                       " too small: %u\n", se_cmd->data_length);
                transport_kunmap_data_sg(se_cmd);
                return TCM_INVALID_CDB_FIELD;
        }
        /*
         * Set SNLID=1 (Supports no List ID)
         */
        p[4] = 0x1;
        /*
         * MAXIMUM TARGET DESCRIPTOR COUNT
         */
        put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
        /*
         * MAXIMUM SEGMENT DESCRIPTOR COUNT
         */
        put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
        /*
         * MAXIMUM DESCRIPTOR LIST LENGTH
         */
        put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
        /*
         * MAXIMUM SEGMENT LENGTH
         */
        put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
        /*
         * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
         */
        put_unaligned_be32(0x0, &p[20]);
        /*
         * HELD DATA LIMIT
         */
        put_unaligned_be32(0x0, &p[24]);
        /*
         * MAXIMUM STREAM DEVICE TRANSFER SIZE
         */
        put_unaligned_be32(0x0, &p[28]);
        /*
         * TOTAL CONCURRENT COPIES
         */
        put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
        /*
         * MAXIMUM CONCURRENT COPIES
         */
        p[36] = RCR_OP_MAX_CONCURR_COPIES;
        /*
         * DATA SEGMENT GRANULARITY (log 2)
         */
        p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
        /*
         * INLINE DATA GRANULARITY (log 2)
         */
        p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
        /*
         * HELD DATA GRANULARITY
         */
        p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
        /*
         * IMPLEMENTED DESCRIPTOR LIST LENGTH
         */
        p[43] = 0x2;
        /*
         * List of implemented descriptor type codes (ordered)
         */
        p[44] = 0x02; /* Copy Block to Block device */
        p[45] = 0xe4; /* Identification descriptor target descriptor */

        /*
         * AVAILABLE DATA (n-3)
         */
        put_unaligned_be32(42, &p[0]);

        transport_kunmap_data_sg(se_cmd);
        target_complete_cmd(se_cmd, GOOD);

        return TCM_NO_SENSE;
}

sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
{
        unsigned char *cdb = &se_cmd->t_task_cdb[0];
        int sa = (cdb[1] & 0x1f), list_id = cdb[2];
        sense_reason_t rc = TCM_NO_SENSE;

        pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
                " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);

        if (list_id != 0) {
                pr_err("Receive Copy Results with non zero list identifier"
                       " not supported\n");
                return TCM_INVALID_CDB_FIELD;
        }

        switch (sa) {
        case RCR_SA_OPERATING_PARAMETERS:
                rc = target_rcr_operating_parameters(se_cmd);
                break;
        case RCR_SA_COPY_STATUS:
        case RCR_SA_RECEIVE_DATA:
        case RCR_SA_FAILED_SEGMENT_DETAILS:
        default:
                pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
                return TCM_INVALID_CDB_FIELD;
        }

        return rc;
}