/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 *
 * (c) Copyright 2007-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/list.h>
#include <linux/percpu_ida.h>
#include <net/ipv6.h>		/* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
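
/*
 * Debug helper: hex-dumps 'len' bytes of 'buff' via pr_debug(),
 * printing an offset marker at the start of each 16-byte row.
 */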
#define PRINT_BUFF(buff, len)						\
{									\
	u32 zzz;							\
									\
	pr_debug("%d:\n", __LINE__);					\
	for (zzz = 0; zzz < (len); zzz++) {				\
		if (zzz % 16 == 0)					\
			pr_debug("%4i: ", zzz);				\
		pr_debug("%02x ", (unsigned char) (buff)[zzz]);		\
	}								\
}

extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;

/*
 * Called with cmd->r2t_lock held.
 */
int iscsit_add_r2t_to_list(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 xfer_len,
	int recovery,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
	if (!r2t) {
		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
		return -1;
	}
	INIT_LIST_HEAD(&r2t->r2t_list);

	r2t->recovery_r2t = recovery;
	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
	r2t->offset = offset;
	r2t->xfer_len = xfer_len;
	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
	spin_unlock_bh(&cmd->r2t_lock);

	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

	spin_lock_bh(&cmd->r2t_lock);
	return 0;
}

struct iscsi_r2t *iscsit_get_r2t_for_eos(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 length)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if ((r2t->offset <= offset) &&
		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate R2T for Offset: %u, Length:"
		" %u\n", offset, length);
	return NULL;
}

struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (!r2t->sent_r2t) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate next R2T to send for ITT:"
		" 0x%08x.\n", cmd->init_task_tag);
	return NULL;
}

/*
 * Called with cmd->r2t_lock held.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
	list_del(&r2t->r2t_list);
	kmem_cache_free(lio_r2t_cache, r2t);
}

void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t, *r2t_tmp;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
		iscsit_free_r2t(r2t, cmd);
	spin_unlock_bh(&cmd->r2t_lock);
}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
 */
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
	struct iscsi_cmd *cmd;
	struct se_session *se_sess = conn->sess->se_sess;
	int size, tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
	if (tag < 0)
		return NULL;

	size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
	memset(cmd, 0, size);

	cmd->se_cmd.map_tag = tag;
	cmd->conn = conn;
	cmd->data_direction = DMA_NONE;
	INIT_LIST_HEAD(&cmd->i_conn_node);
	INIT_LIST_HEAD(&cmd->datain_list);
	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
	spin_lock_init(&cmd->datain_lock);
	spin_lock_init(&cmd->dataout_timeout_lock);
	spin_lock_init(&cmd->istate_lock);
	spin_lock_init(&cmd->error_lock);
	spin_lock_init(&cmd->r2t_lock);
	timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);

	return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);

struct iscsi_seq *iscsit_get_seq_holder_for_datain(
	struct iscsi_cmd *cmd,
	u32 seq_send_order)
{
	u32 i;

	for (i = 0; i < cmd->seq_count; i++)
		if (cmd->seq_list[i].seq_send_order == seq_send_order)
			return &cmd->seq_list[i];

	return NULL;
}

struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
{
	u32 i;

	if (!cmd->seq_list) {
		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
		return NULL;
	}

	for (i = 0; i < cmd->seq_count; i++) {
		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
			continue;
		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
			cmd->seq_send_order++;
			return &cmd->seq_list[i];
		}
	}

	return NULL;
}

struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
	struct iscsi_cmd *cmd,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (r2t->r2t_sn == r2t_sn) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	return NULL;
}
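
/*
 * Note: the CmdSN comparisons below use iscsi_sna_gt(), i.e. wrap-aware
 * serial number arithmetic, so a CmdSN window that crosses the 32-bit
 * wrap point is still ordered correctly.
 */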
static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
	u32 max_cmdsn;
	int ret;

	/*
	 * This is the proper method of checking received CmdSN against
	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
	 * of order CmdSNs due to multiple connection sessions and/or
	 * batch jobs.
	 */
	max_cmdsn = atomic_read(&sess->max_cmd_sn);
	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
		pr_err("Received CmdSN: 0x%08x is greater than"
		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
		ret = CMDSN_MAXCMDSN_OVERRUN;

	} else if (cmdsn == sess->exp_cmd_sn) {
		sess->exp_cmd_sn++;
		pr_debug("Received CmdSN matches ExpCmdSN,"
			 " incremented ExpCmdSN to: 0x%08x\n",
			 sess->exp_cmd_sn);
		ret = CMDSN_NORMAL_OPERATION;

	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
		pr_debug("Received CmdSN: 0x%08x is greater"
			 " than ExpCmdSN: 0x%08x, not acknowledging.\n",
			 cmdsn, sess->exp_cmd_sn);
		ret = CMDSN_HIGHER_THAN_EXP;

	} else {
		pr_err("Received CmdSN: 0x%08x is less than"
		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
		       sess->exp_cmd_sn);
		ret = CMDSN_LOWER_THAN_EXP;
	}

	return ret;
}

/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 */
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			unsigned char *buf, __be32 cmdsn)
{
	int ret, cmdsn_ret;
	bool reject = false;
	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

	mutex_lock(&conn->sess->cmdsn_mutex);

	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
	switch (cmdsn_ret) {
	case CMDSN_NORMAL_OPERATION:
		ret = iscsit_execute_cmd(cmd, 0);
		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
			iscsit_execute_ooo_cmdsns(conn->sess);
		else if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
		}
		break;
	case CMDSN_HIGHER_THAN_EXP:
		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
		if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
			break;
		}
		ret = CMDSN_HIGHER_THAN_EXP;
		break;
	case CMDSN_LOWER_THAN_EXP:
	case CMDSN_MAXCMDSN_OVERRUN:
	default:
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		/*
		 * Existing callers for iscsit_sequence_cmd() will silently
		 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
		 * return for CMDSN_MAXCMDSN_OVERRUN as well..
		 */
		ret = CMDSN_LOWER_THAN_EXP;
		break;
	}
	mutex_unlock(&conn->sess->cmdsn_mutex);

	if (reject)
		iscsit_reject_cmd(cmd, reason, buf);

	return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
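
/*
 * Validates an unsolicited Data-Out burst: rejects it outright when
 * InitialR2T=Yes, and checks the accumulated first burst length against
 * FirstBurstLength and (on the final PDU) the expected transfer length.
 */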
int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (conn->sess->sess_ops->InitialR2T) {
		pr_err("Received unexpected unsolicited data"
			" while InitialR2T=Yes, protocol error.\n");
		transport_send_check_condition_and_sense(se_cmd,
				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
		return -1;
	}

	if ((cmd->first_burst_len + payload_length) >
	     conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
		return 0;

	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
	    ((cmd->first_burst_len + payload_length) !=
	      conn->sess->sess_ops->FirstBurstLength)) {
		pr_err("Unsolicited non-immediate data received %u"
			" does not equal FirstBurstLength: %u, and does"
			" not equal ExpXferLen %u.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength,
			cmd->se_cmd.data_length);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}
	return 0;
}

struct iscsi_cmd *iscsit_find_cmd_from_itt(
	struct iscsi_conn *conn,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
		init_task_tag, conn->cid);
	return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt);

struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
	struct iscsi_conn *conn,
	itt_t init_task_tag,
	u32 length)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
			continue;
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
		" dumping payload\n", init_task_tag, conn->cid);
	if (length)
		iscsit_dump_data_payload(conn, length, 1);

	return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);

struct iscsi_cmd *iscsit_find_cmd_from_ttt(
	struct iscsi_conn *conn,
	u32 targ_xfer_tag)
{
	struct iscsi_cmd *cmd = NULL;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->targ_xfer_tag == targ_xfer_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
		targ_xfer_tag, conn->cid);
	return NULL;
}

int iscsit_find_cmd_for_recovery(
	struct iscsi_session *sess,
	struct iscsi_cmd **cmd_ptr,
	struct iscsi_conn_recovery **cr_ptr,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list.
	 * If init_task_tag matches the command is still alligent.
	 */
	spin_lock(&sess->cr_i_lock);
	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_i_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return -2;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list.
	 * If init_task_tag matches the command is ready to be reassigned.
	 */
	spin_lock(&sess->cr_a_lock);
	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_a_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return 0;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_a_lock);

	return -1;
}
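
/*
 * Each connection keeps two per-connection work queues: the immediate queue
 * and the response queue. wake_up(&conn->queues_wq) below nudges the thread
 * that services them (presumably the per-connection TX thread).
 */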
void iscsit_add_cmd_to_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->immed_queue_lock);
	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
	atomic_inc(&cmd->immed_queue_count);
	atomic_set(&conn->check_immediate_queue, 1);
	spin_unlock_bh(&conn->immed_queue_lock);

	wake_up(&conn->queues_wq);
}
EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);

struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->immed_queue_lock);
	if (list_empty(&conn->immed_queue_list)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return NULL;
	}
	qr = list_first_entry(&conn->immed_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->immed_queue_count);
	spin_unlock_bh(&conn->immed_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	if (!atomic_read(&cmd->immed_queue_count)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->immed_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	if (atomic_read(&cmd->immed_queue_count)) {
		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->immed_queue_count));
	}
}

int iscsit_add_cmd_to_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->response_queue_lock);
	list_add_tail(&qr->qr_list, &conn->response_queue_list);
	atomic_inc(&cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	wake_up(&conn->queues_wq);
	return 0;
}

struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->response_queue_lock);
	if (list_empty(&conn->response_queue_list)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return NULL;
	}

	qr = list_first_entry(&conn->response_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->response_queue_lock);
	if (!atomic_read(&cmd->response_queue_count)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				 qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->response_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);

	if (atomic_read(&cmd->response_queue_count)) {
		pr_err("ITT: 0x%08x response_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->response_queue_count));
	}
}
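
/*
 * Returns true only when both the immediate and response queues are empty;
 * each list is checked under its own lock.
 */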
bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
{
	bool empty;

	spin_lock_bh(&conn->immed_queue_lock);
	empty = list_empty(&conn->immed_queue_list);
	spin_unlock_bh(&conn->immed_queue_lock);

	if (!empty)
		return empty;

	spin_lock_bh(&conn->response_queue_lock);
	empty = list_empty(&conn->response_queue_list);
	spin_unlock_bh(&conn->response_queue_lock);

	return empty;
}

void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->immed_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	spin_lock_bh(&conn->response_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				 qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->response_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);
}

void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
	struct iscsi_session *sess;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	WARN_ON(!list_empty(&cmd->i_conn_node));

	if (cmd->conn)
		sess = cmd->conn->sess;
	else
		sess = cmd->sess;

	BUG_ON(!sess || !sess->se_sess);

	kfree(cmd->buf_ptr);
	kfree(cmd->pdu_list);
	kfree(cmd->seq_list);
	kfree(cmd->tmr_req);
	kfree(cmd->iov_data);
	kfree(cmd->text_in_ptr);

	percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
}
EXPORT_SYMBOL(iscsit_release_cmd);

void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
	struct iscsi_conn *conn = cmd->conn;

	WARN_ON(!list_empty(&cmd->i_conn_node));

	if (cmd->data_direction == DMA_TO_DEVICE) {
		iscsit_stop_dataout_timer(cmd);
		iscsit_free_r2ts_from_list(cmd);
	}
	if (cmd->data_direction == DMA_FROM_DEVICE)
		iscsit_free_all_datain_reqs(cmd);

	if (conn && check_queues) {
		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
		iscsit_remove_cmd_from_response_queue(cmd, conn);
	}

	if (conn && conn->conn_transport->iscsit_release_cmd)
		conn->conn_transport->iscsit_release_cmd(conn, cmd);
}
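
/*
 * iscsit_free_cmd() releases through transport_generic_free_cmd() when a
 * se_cmd has been set up (cmd->se_cmd.se_tfo != NULL); otherwise the iSCSI
 * descriptor is released directly via iscsit_release_cmd().
 */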
void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
	struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
	int rc;

	__iscsit_free_cmd(cmd, shutdown);
	if (se_cmd) {
		rc = transport_generic_free_cmd(se_cmd, shutdown);
		if (!rc && shutdown && se_cmd->se_sess) {
			__iscsit_free_cmd(cmd, shutdown);
			target_put_sess_cmd(se_cmd);
		}
	} else {
		iscsit_release_cmd(cmd);
	}
}
EXPORT_SYMBOL(iscsit_free_cmd);

int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	if (sess->session_usage_count != 0) {
		sess->session_waiting_on_uc = 1;
		spin_unlock_bh(&sess->session_usage_lock);
		if (in_interrupt())
			return 2;

		wait_for_completion(&sess->session_waiting_on_uc_comp);
		return 1;
	}
	spin_unlock_bh(&sess->session_usage_lock);

	return 0;
}

void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count--;

	if (!sess->session_usage_count && sess->session_waiting_on_uc)
		complete(&sess->session_waiting_on_uc_comp);

	spin_unlock_bh(&sess->session_usage_lock);
}

void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count++;
	spin_unlock_bh(&sess->session_usage_lock);
}

struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if ((conn->cid == cid) &&
		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
			iscsit_inc_conn_usage_count(conn);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if (conn->cid == cid) {
			iscsit_inc_conn_usage_count(conn);
			spin_lock(&conn->state_lock);
			atomic_set(&conn->connection_wait_rcfr, 1);
			spin_unlock(&conn->state_lock);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	if (conn->conn_usage_count != 0) {
		conn->conn_waiting_on_uc = 1;
		spin_unlock_bh(&conn->conn_usage_lock);

		wait_for_completion(&conn->conn_waiting_on_uc_comp);
		return;
	}
	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count--;

	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
		complete(&conn->conn_waiting_on_uc_comp);

	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count++;
	spin_unlock_bh(&conn->conn_usage_lock);
}
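
/*
 * NOPIN handling: the nopin_timer periodically queues a NopIN to the
 * initiator, while the nopin_response_timer fails the connection if no
 * response to a NopIN arrives in time.
 */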
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
	u8 state;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
				ISTATE_SEND_NOPIN_NO_RESPONSE;
	cmd->init_task_tag = RESERVED_ITT;
	cmd->targ_xfer_tag = (want_response) ?
			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (want_response)
		iscsit_start_nopin_response_timer(conn);
	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

	return 0;
}

void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
	struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}

	pr_debug("Did not receive response to NOPIN on CID: %hu on"
		" SID: %u, failing connection.\n", conn->cid,
		conn->sess->sid);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	{
	struct iscsi_portal_group *tpg = conn->sess->tpg;
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock_bh(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
			conn->sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
			ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		atomic_long_inc(&conn->sess->conn_timeout_errors);
		spin_unlock_bh(&tiqn->sess_err_stats.lock);
	}
	}

	iscsit_cause_connection_reinstatement(conn, 0);
	iscsit_dec_conn_usage_count(conn);
}

void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	mod_timer(&conn->nopin_response_timer,
		  (get_jiffies_64() + na->nopin_response_timeout * HZ));
	spin_unlock_bh(&conn->nopin_timer_lock);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
	mod_timer(&conn->nopin_response_timer,
		  jiffies + na->nopin_response_timeout * HZ);

	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
		" seconds\n", conn->cid, na->nopin_response_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_response_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_handle_nopin_timeout(struct timer_list *t)
{
	struct iscsi_conn *conn = from_timer(conn, t, nopin_timer);

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	iscsit_add_nopin(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
		return;

	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
}

void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

int iscsit_send_tx_data(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int use_misc)
{
	int tx_sent, tx_size;
	u32 iov_count;
	struct kvec *iov;

send_data:
	tx_size = cmd->tx_size;

	if (!use_misc) {
		iov = &cmd->iov_data[0];
		iov_count = cmd->iov_data_count;
	} else {
		iov = &cmd->iov_misc[0];
		iov_count = cmd->iov_misc_count;
	}

	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
	if (tx_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_data;
		} else
			return -1;
	}
	cmd->tx_size = 0;

	return 0;
}
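
/*
 * Zero-copy TX path: the iSCSI header (plus header digest, if negotiated)
 * goes out through tx_data(), the payload pages are pushed with
 * sock->ops->sendpage(), and any padding/data digest is sent from the tail
 * of cmd->iov_data[].
 */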
int iscsit_fe_sendpage_sg(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct scatterlist *sg = cmd->first_data_sg;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent, iov_off;

send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	if (conn->conn_ops->HeaderDigest)
		tx_hdr_size += ISCSI_CRC_LEN;

	iov.iov_base = cmd->pdu;
	iov.iov_len = tx_hdr_size;

	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
	if (tx_hdr_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_hdr;
		}
		return -1;
	}

	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	/*
	 * Set iov_off used by padding and data digest tx_data() calls below
	 * in order to determine proper offset into cmd->iov_data[]
	 */
	if (conn->conn_ops->DataDigest) {
		data_len -= ISCSI_CRC_LEN;
		if (cmd->padding)
			iov_off = (cmd->iov_data_count - 2);
		else
			iov_off = (cmd->iov_data_count - 1);
	} else {
		iov_off = (cmd->iov_data_count - 1);
	}
	/*
	 * Perform sendpage() for each page in the scatterlist
	 */
	while (data_len) {
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
send_pg:
		tx_sent = conn->sock->ops->sendpage(conn->sock,
				sg_page(sg), sg->offset + offset, sub_len, 0);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
				pr_err("tcp_sendpage() returned"
					" -EAGAIN\n");
				goto send_pg;
			}

			pr_err("tcp_sendpage() failure: %d\n",
				tx_sent);
			return -1;
		}

		data_len -= sub_len;
		offset = 0;
		sg = sg_next(sg);
	}

send_padding:
	if (cmd->padding) {
		struct kvec *iov_p = &cmd->iov_data[iov_off++];

		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		if (cmd->padding != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_padding;
			}
			return -1;
		}
	}

send_datacrc:
	if (conn->conn_ops->DataDigest) {
		struct kvec *iov_d = &cmd->iov_data[iov_off];

		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		if (ISCSI_CRC_LEN != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_datacrc;
			}
			return -1;
		}
	}

	return 0;
}

/*
 * This function is used mainly for sending an ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs with the
 * errors set in status_class and status_detail.
 *
 * Parameters:	iSCSI Connection, Status Class, Status Detail.
 * Returns:	0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
	struct iscsi_login_rsp *hdr;
	struct iscsi_login *login = conn->conn_login;

	login->login_failed = 1;
	iscsit_collect_login_stats(conn, status_class, status_detail);

	memset(&login->rsp[0], 0, ISCSI_HDR_LEN);

	hdr			= (struct iscsi_login_rsp *)&login->rsp[0];
	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
	hdr->status_class	= status_class;
	hdr->status_detail	= status_detail;
	hdr->itt		= conn->login_itt;

	return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}

void iscsit_print_session_params(struct iscsi_session *sess)
{
	struct iscsi_conn *conn;

	pr_debug("-----------------------------[Session Params for"
		" SID: %u]-----------------------------\n", sess->sid);
	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
		iscsi_dump_conn_ops(conn->conn_ops);
	spin_unlock_bh(&sess->conn_lock);

	iscsi_dump_sess_ops(sess->sess_ops);
}
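
/*
 * rx_data()/tx_data() below wrap sock_recvmsg()/sock_sendmsg() with a kvec
 * iov_iter and loop until msg_data_left() reports the full transfer is done,
 * returning the byte count or the negative error from the socket call.
 */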
static int iscsit_do_rx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, rx_loop = 0, total_rx = 0;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&msg, 0, sizeof(struct msghdr));
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
		      count->iov, count->iov_count, data);

	while (msg_data_left(&msg)) {
		rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
		if (rx_loop <= 0) {
			pr_debug("rx_loop: %d total_rx: %d\n",
				rx_loop, total_rx);
			return rx_loop;
		}
		total_rx += rx_loop;
		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
			rx_loop, total_rx, data);
	}

	return total_rx;
}

int rx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_RX_DATA;

	return iscsit_do_rx_data(conn, &c);
}

int tx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct msghdr msg;
	int total_tx = 0;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	if (data <= 0) {
		pr_err("Data length is: %d\n", data);
		return -1;
	}

	memset(&msg, 0, sizeof(struct msghdr));

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
		      iov, iov_count, data);

	while (msg_data_left(&msg)) {
		int tx_loop = sock_sendmsg(conn->sock, &msg);
		if (tx_loop <= 0) {
			pr_debug("tx_loop: %d total_tx %d\n",
				tx_loop, total_tx);
			return tx_loop;
		}
		total_tx += tx_loop;
		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
			tx_loop, total_tx, data);
	}

	return total_tx;
}
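
/*
 * Accounts a login attempt in the owning tiqn's login_stats, bucketed by
 * iSCSI status class/detail; failed logins also record the initiator name,
 * address family, sockaddr and a timestamp.
 */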
void iscsit_collect_login_stats(
	struct iscsi_conn *conn,
	u8 status_class,
	u8 status_detail)
{
	struct iscsi_param *intrname = NULL;
	struct iscsi_tiqn *tiqn;
	struct iscsi_login_stats *ls;

	tiqn = iscsit_snmp_get_tiqn(conn);
	if (!tiqn)
		return;

	ls = &tiqn->login_stats;

	spin_lock(&ls->lock);
	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
		ls->accepts++;
	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
		ls->redirects++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
		ls->authenticate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
		ls->authorize_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
		ls->negotiate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
	} else {
		ls->other_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
	}

	/* Save initiator name, ip address and time, if it is a failed login */
	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
		if (conn->param_list)
			intrname = iscsi_find_param_from_key(INITIATORNAME,
							     conn->param_list);
		strlcpy(ls->last_intr_fail_name,
			(intrname ? intrname->value : "Unknown"),
			sizeof(ls->last_intr_fail_name));

		ls->last_intr_fail_ip_family = conn->login_family;

		ls->last_intr_fail_sockaddr = conn->login_sockaddr;
		ls->last_fail_time = get_jiffies_64();
	}

	spin_unlock(&ls->lock);
}

struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
	struct iscsi_portal_group *tpg;

	if (!conn || !conn->sess)
		return NULL;

	tpg = conn->sess->tpg;
	if (!tpg)
		return NULL;

	return tpg->tpg_tiqn;
}