drivers/target/tcm_fc/tfc_cmd.c

/*
 * Copyright (c) 2010 Cisco Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* XXX TBD some includes may be extraneous */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/target_core_tmr.h>
#include <target/configfs_macros.h>

#include "tcm_fc.h"

/*
 * Dump cmd state for debugging.
 */
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	struct fc_exch *ep;
	struct fc_seq *sp;
	struct se_cmd *se_cmd;
	struct scatterlist *sg;
	int count;

	se_cmd = &cmd->se_cmd;
	pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
		 caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
	pr_debug("%s: cmd %p cdb %p\n", caller, cmd, cmd->cdb);
	pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);

	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
		 caller, cmd, se_cmd->t_data_nents,
		 se_cmd->data_length, se_cmd->se_cmd_flags);

	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
		pr_debug("%s: cmd %p sg %p page %p len 0x%x off 0x%x\n",
			 caller, cmd, sg, sg_page(sg), sg->length, sg->offset);

	sp = cmd->seq;
	if (sp) {
		ep = fc_seq_exch(sp);
		pr_debug("%s: cmd %p sid %x did %x ox_id %x rx_id %x seq_id %x e_stat %x\n",
			 caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
			 sp->id, ep->esb_stat);
	}
	print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
		       16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}

static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
	struct ft_tpg *tpg = sess->tport->tpg;
	struct se_queue_obj *qobj = &tpg->qobj;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
	atomic_inc(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	wake_up_process(tpg->thread);
}
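
/*
 * Dequeue the next command from the tpg's queue object, or return NULL if
 * the queue is empty.  This is the consumer side of ft_queue_cmd() above,
 * and runs only in the per-tpg processing thread (ft_thread).
 */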

static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
{
	unsigned long flags;
	struct se_queue_req *qr;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}
	qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list);
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
	return container_of(qr, struct ft_cmd, se_req);
}
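
/*
 * Release all state held by a command: the received frame and its
 * sequence, the session reference taken at lookup, and the cmd itself.
 */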

static void ft_free_cmd(struct ft_cmd *cmd)
{
	struct fc_frame *fp;
	struct fc_lport *lport;

	if (!cmd)
		return;
	fp = cmd->req_frame;
	lport = fr_dev(fp);
	if (fr_seq(fp))
		lport->tt.seq_release(fr_seq(fp));
	fc_frame_free(fp);
	ft_sess_put(cmd->sess);	/* undo get from lookup at recv */
	kfree(cmd);
}

void ft_release_cmd(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	ft_free_cmd(cmd);
}

void ft_check_stop_free(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0, 0);
}
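
/*
 * Teardown normally runs ft_check_stop_free() ->
 * transport_generic_free_cmd() -> ft_release_cmd() -> ft_free_cmd(), so
 * the fabric state is released together with the se_cmd.
 */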

/*
 * Send the SCSI status response, with optional sense data and residuals.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;

	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		/* XXX shouldn't just drop it - requeue and retry? */
		return 0;
	}
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	/*
	 * Test underflow and overflow with one mask.  Usually both are off.
	 * Bidirectional commands are not handled yet.
	 */
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	/*
	 * Send response.
	 */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	lport->tt.seq_send(lport, cmd->seq, fp);
	lport->tt.exch_done(cmd->seq);
	return 0;
}
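
/*
 * Residual example (illustrative only): for a READ whose allocation
 * length is 8192 bytes but where the backend returns only 4096, the core
 * is expected to set SCF_UNDERFLOW_BIT and residual_count = 4096, so the
 * response above carries FCP_RESID_UNDER with fr_resid = 4096.
 */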

int ft_write_pending_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return cmd->write_data_len != se_cmd->data_length;
}

/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/*
		 * The target is the exchange responder, sending XFER_RDY
		 * to the exchange initiator.
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
				/*
				 * cmd may have been broken up into multiple
				 * tasks. Link their sgs together so we can
				 * operate on them all at once.
				 */
				transport_do_task_sg_chain(se_cmd);
				cmd->sg = se_cmd->t_tasks_sg_chained;
				cmd->sg_cnt = se_cmd->t_tasks_sg_chained_no;
			}
			if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
							    cmd->sg,
							    cmd->sg_cnt))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
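
/*
 * Note on the ddp_target() setup above: when the LLD supports offload for
 * this exchange (xid within the lro_xid range), it places incoming write
 * data for the exchange directly into the command's scatterlist, so the
 * write-data receive path can skip copying frames the hardware has
 * already placed.
 */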

u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return fc_seq_exch(cmd->seq)->rxid;
}

int ft_get_cmd_state(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return cmd->state;
}

int ft_is_state_remove(struct se_cmd *se_cmd)
{
	return 0;	/* XXX TBD */
}
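
/*
 * The exported functions above are fabric callbacks invoked by the target
 * core.  A sketch of how they are wired up (the actual table lives in
 * tcm_fc's configfs setup; field names assumed from this kernel's
 * struct target_core_fabric_ops):
 *
 *	.release_cmd		= ft_release_cmd,
 *	.queue_status		= ft_queue_status,
 *	.write_pending		= ft_write_pending,
 *	.write_pending_status	= ft_write_pending_status,
 *	.get_task_tag		= ft_get_task_tag,
 *	.get_cmd_state		= ft_get_cmd_state,
 */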

/*
 * FC sequence response handler for follow-on sequences (data) and aborts.
 */
static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct ft_cmd *cmd = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		/* XXX need to find cmd if queued */
		cmd->se_cmd.t_state = TRANSPORT_REMOVE;
		cmd->seq = NULL;
		transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
		return;
	}

	fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_SOL_DATA:	/* write data */
		ft_recv_write_data(cmd, fp);
		break;
	case FC_RCTL_DD_UNSOL_CTL:	/* command */
	case FC_RCTL_DD_SOL_CTL:	/* transfer ready */
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	default:
		pr_debug("%s: unhandled frame r_ctl %x\n",
			 __func__, fh->fh_r_ctl);
		ft_invl_hw_context(cmd);
		fc_frame_free(fp);
		transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
		break;
	}
}
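
/*
 * ft_recv_seq() is registered per exchange via seq_set_resp() in
 * ft_send_cmd(), so only frames for commands this module has already
 * started processing arrive here.
 */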

/*
 * Send an FCP response including SCSI status and optional FCP rsp_code.
 * status is SAM_STAT_GOOD (zero) iff code is valid.
 * This is used in error cases, such as allocation failures.
 */
static void ft_send_resp_status(struct fc_lport *lport,
				const struct fc_frame *rx_fp,
				u32 status, enum fcp_resp_rsp_codes code)
{
	struct fc_frame *fp;
	struct fc_seq *sp;
	const struct fc_frame_header *fh;
	size_t len;
	struct fcp_resp_with_ext *fcp;
	struct fcp_resp_rsp_info *info;

	fh = fc_frame_header_get(rx_fp);
	pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
		 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
	len = sizeof(*fcp);
	if (status == SAM_STAT_GOOD)
		len += sizeof(*info);
	fp = fc_frame_alloc(lport, len);
	if (!fp)
		return;
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = status;
	if (status == SAM_STAT_GOOD) {
		fcp->ext.fr_rsp_len = htonl(sizeof(*info));
		fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
		info = (struct fcp_resp_rsp_info *)(fcp + 1);
		info->rsp_code = code;
	}

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
	sp = fr_seq(fp);
	if (sp)
		lport->tt.seq_send(lport, sp, fp);
	else
		lport->tt.frame_send(lport, fp);
}
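
/*
 * Per FCP, the rsp_code lives in the FCP_RSP_INFO field, which is present
 * only when FCP_RSP_LEN_VAL is set; that is why the rsp_info is attached
 * above only when status is SAM_STAT_GOOD and the code conveys the result.
 */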

/*
 * Send error or task management response.
 */
static void ft_send_resp_code(struct ft_cmd *cmd,
			      enum fcp_resp_rsp_codes code)
{
	ft_send_resp_status(cmd->sess->tport->lport,
			    cmd->req_frame, SAM_STAT_GOOD, code);
}

/*
 * Send error or task management response.
 * Always frees the cmd and associated state.
 */
static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
				       enum fcp_resp_rsp_codes code)
{
	ft_send_resp_code(cmd, code);
	ft_free_cmd(cmd);
}

/*
 * Handle Task Management Request.
 */
static void ft_send_tm(struct ft_cmd *cmd)
{
	struct se_tmr_req *tmr;
	struct fcp_cmnd *fcp;
	struct ft_sess *sess;
	u8 tm_func;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		tm_func = TMR_LUN_RESET;
		break;
	case FCP_TMF_TGT_RESET:
		tm_func = TMR_TARGET_WARM_RESET;
		break;
	case FCP_TMF_CLR_TASK_SET:
		tm_func = TMR_CLEAR_TASK_SET;
		break;
	case FCP_TMF_ABT_TASK_SET:
		tm_func = TMR_ABORT_TASK_SET;
		break;
	case FCP_TMF_CLR_ACA:
		tm_func = TMR_CLEAR_ACA;
		break;
	default:
		/*
		 * FCP4r01 indicates having a combination of
		 * tm_flags set is invalid.
		 */
		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
		ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
		return;
	}

	pr_debug("alloc tm cmd fn %d\n", tm_func);
	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
	if (!tmr) {
		pr_debug("alloc failed\n");
		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
		return;
	}
	cmd->se_cmd.se_tmr_req = tmr;

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
		if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
			/*
			 * Clean up the newly allocated TMR request, since
			 * the TMR cannot be handled when the LUN lookup
			 * fails.
			 */
			pr_debug("Failed to get LUN for TMR func %d, se_cmd %p, unpacked_lun %d\n",
				 tm_func, &cmd->se_cmd, cmd->lun);
			ft_dump_cmd(cmd, __func__);
			sess = cmd->sess;
			transport_send_check_condition_and_sense(&cmd->se_cmd,
				cmd->se_cmd.scsi_sense_reason, 0);
			transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
			ft_sess_put(sess);
			return;
		}
		break;
	case FCP_TMF_TGT_RESET:
	case FCP_TMF_CLR_TASK_SET:
	case FCP_TMF_ABT_TASK_SET:
	case FCP_TMF_CLR_ACA:
		break;
	default:
		return;
	}
	transport_generic_handle_tmr(&cmd->se_cmd);
}
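
/*
 * Only LUN RESET needs the LUN resolved before the TMR is handed to the
 * core; the other functions act on the whole target or task set rather
 * than a single LUN.
 */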

/*
 * Send status from completed task management request.
 */
int ft_queue_tm_resp(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
	enum fcp_resp_rsp_codes code;

	switch (tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		code = FCP_TMF_CMPL;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
		code = FCP_TMF_INVALID_LUN;
		break;
	case TMR_FUNCTION_REJECTED:
		code = FCP_TMF_REJECTED;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
	case TMR_TASK_STILL_ALLEGIANT:
	case TMR_TASK_FAILOVER_NOT_SUPPORTED:
	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
	case TMR_FUNCTION_AUTHORIZATION_FAILED:
	default:
		code = FCP_TMF_FAILED;
		break;
	}
	pr_debug("tmr fn %d resp %d fcp code %d\n",
		 tmr->function, tmr->response, code);
	ft_send_resp_code(cmd, code);
	return 0;
}

/*
 * Handle incoming FCP command.
 */
static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
	struct ft_cmd *cmd;
	struct fc_lport *lport = sess->tport->lport;

	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
	if (!cmd)
		goto busy;
	cmd->sess = sess;
	cmd->seq = lport->tt.seq_assign(lport, fp);
	if (!cmd->seq) {
		kfree(cmd);
		goto busy;
	}
	cmd->req_frame = fp;		/* hold frame during cmd */
	ft_queue_cmd(sess, cmd);
	return;

busy:
	pr_debug("cmd or seq allocation failure - sending BUSY\n");
	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
	fc_frame_free(fp);
	ft_sess_put(sess);		/* undo get from lookup */
}
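
/*
 * GFP_ATOMIC is used above because this runs in the lport's frame receive
 * path, which may execute in softirq context where sleeping allocations
 * are not allowed.
 */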

/*
 * Handle incoming FCP frame.
 * Caller has verified that the frame is type FCP.
 */
void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_UNSOL_CMD:	/* command */
		ft_recv_cmd(sess, fp);
		break;
	case FC_RCTL_DD_SOL_DATA:	/* write data */
	case FC_RCTL_DD_UNSOL_CTL:
	case FC_RCTL_DD_SOL_CTL:
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	case FC_RCTL_ELS4_REQ:		/* SRR, perhaps */
	default:
		pr_debug("%s: unhandled frame r_ctl %x\n",
			 __func__, fh->fh_r_ctl);
		fc_frame_free(fp);
		ft_sess_put(sess);	/* undo get from lookup */
		break;
	}
}

/*
 * Send new command to target.
 */
static void ft_send_cmd(struct ft_cmd *cmd)
{
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct se_cmd *se_cmd;
	struct fcp_cmnd *fcp;
	int data_dir;
	u32 data_len;
	int task_attr;
	int ret;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;	/* not handling longer CDBs yet */

	if (fcp->fc_tm_flags) {
		task_attr = FCP_PTA_SIMPLE;
		data_dir = DMA_NONE;
		data_len = 0;
	} else {
		switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
		case 0:
			data_dir = DMA_NONE;
			break;
		case FCP_CFL_RDDATA:
			data_dir = DMA_FROM_DEVICE;
			break;
		case FCP_CFL_WRDATA:
			data_dir = DMA_TO_DEVICE;
			break;
		case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
			goto err;	/* TBD not supported by tcm_fc yet */
		}
		/*
		 * Locate the SAM Task Attr from fc_pri_ta
		 */
		switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
		case FCP_PTA_HEADQ:
			task_attr = MSG_HEAD_TAG;
			break;
		case FCP_PTA_ORDERED:
			task_attr = MSG_ORDERED_TAG;
			break;
		case FCP_PTA_ACA:
			task_attr = MSG_ACA_TAG;
			break;
		case FCP_PTA_SIMPLE: /* Fallthrough */
		default:
			task_attr = MSG_SIMPLE_TAG;
		}
	}

	data_len = ntohl(fcp->fc_dl);
	cmd->cdb = fcp->fc_cdb;

	se_cmd = &cmd->se_cmd;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod
	 * infrastructure
	 */
	transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
			      data_len, data_dir, task_attr,
			      &cmd->ft_sense_buffer[0]);
	/*
	 * Check for FCP task management flags
	 */
	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);

	cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
	ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
	if (ret < 0) {
		ft_dump_cmd(cmd, __func__);
		transport_send_check_condition_and_sense(&cmd->se_cmd,
			cmd->se_cmd.scsi_sense_reason, 0);
		return;
	}

	ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);

	pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
	ft_dump_cmd(cmd, __func__);

	if (ret == -ENOMEM) {
		transport_send_check_condition_and_sense(se_cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0, 0);
		return;
	}
	if (ret == -EINVAL) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			ft_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
				se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0, 0);
		return;
	}
	transport_generic_handle_cdb(se_cmd);
	return;

err:
	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
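
/*
 * New commands enter via ft_recv_cmd(), are queued to the tpg's thread by
 * ft_queue_cmd(), and reach ft_send_cmd() through ft_exec_req() below
 * while still in FC_CMD_ST_NEW state.
 */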

/*
 * Handle request in the command thread.
 */
static void ft_exec_req(struct ft_cmd *cmd)
{
	pr_debug("cmd state %x\n", cmd->state);
	switch (cmd->state) {
	case FC_CMD_ST_NEW:
		ft_send_cmd(cmd);
		break;
	default:
		break;
	}
}

/*
 * Processing thread.
 * Currently one thread per tpg.
 */
int ft_thread(void *arg)
{
	struct ft_tpg *tpg = arg;
	struct se_queue_obj *qobj = &tpg->qobj;
	struct ft_cmd *cmd;

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
		if (kthread_should_stop())
			goto out;

		cmd = ft_dequeue_cmd(qobj);
		if (cmd)
			ft_exec_req(cmd);
	}

out:
	return 0;
}