// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 *
 * (c) Copyright 2007-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 ******************************************************************************/

#include <linux/list.h>
#include <linux/sched/signal.h>
#include <net/ipv6.h>         /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"

extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;
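
/*
 * Called with cmd->r2t_lock held.  Allocate and queue a new R2T descriptor
 * for @cmd, then schedule an ISTATE_SEND_R2T state on the connection's
 * immediate queue; the lock is dropped and re-acquired around the queueing.
 */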
int iscsit_add_r2t_to_list(
        struct iscsit_cmd *cmd,
        u32 offset,
        u32 xfer_len,
        int recovery,
        u32 r2t_sn)
{
        struct iscsi_r2t *r2t;

        lockdep_assert_held(&cmd->r2t_lock);

        WARN_ON_ONCE((s32)xfer_len < 0);

        r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
        if (!r2t) {
                pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
                return -1;
        }
        INIT_LIST_HEAD(&r2t->r2t_list);

        r2t->recovery_r2t = recovery;
        r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
        r2t->offset = offset;
        r2t->xfer_len = xfer_len;
        list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
        spin_unlock_bh(&cmd->r2t_lock);

        iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

        spin_lock_bh(&cmd->r2t_lock);
        return 0;
}
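
/*
 * Find the outstanding R2T whose range fully covers [offset, offset + length),
 * so that an existing R2T covering that range can be re-used by the
 * error-recovery paths.
 */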
struct iscsi_r2t *iscsit_get_r2t_for_eos(
        struct iscsit_cmd *cmd,
        u32 offset,
        u32 length)
{
        struct iscsi_r2t *r2t;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
                if ((r2t->offset <= offset) &&
                    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
                        spin_unlock_bh(&cmd->r2t_lock);
                        return r2t;
                }
        }
        spin_unlock_bh(&cmd->r2t_lock);

        pr_err("Unable to locate R2T for Offset: %u, Length:"
                        " %u\n", offset, length);
        return NULL;
}

struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *cmd)
{
        struct iscsi_r2t *r2t;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
                if (!r2t->sent_r2t) {
                        spin_unlock_bh(&cmd->r2t_lock);
                        return r2t;
                }
        }
        spin_unlock_bh(&cmd->r2t_lock);

        pr_err("Unable to locate next R2T to send for ITT:"
                        " 0x%08x.\n", cmd->init_task_tag);
        return NULL;
}

void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsit_cmd *cmd)
{
        lockdep_assert_held(&cmd->r2t_lock);

        list_del(&r2t->r2t_list);
        kmem_cache_free(lio_r2t_cache, r2t);
}

void iscsit_free_r2ts_from_list(struct iscsit_cmd *cmd)
{
        struct iscsi_r2t *r2t, *r2t_tmp;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
                iscsit_free_r2t(r2t, cmd);
        spin_unlock_bh(&cmd->r2t_lock);
}
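
/*
 * Sleep on the session tag pool until a tag becomes available or a signal is
 * pending.  Returns the allocated tag, or -1 if the caller may not sleep
 * (TASK_RUNNING) or was interrupted before a tag could be taken.
 */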
static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
{
        int tag = -1;
        DEFINE_SBQ_WAIT(wait);
        struct sbq_wait_state *ws;
        struct sbitmap_queue *sbq;

        if (state == TASK_RUNNING)
                return tag;

        sbq = &se_sess->sess_tag_pool;
        ws = &sbq->ws[0];
        for (;;) {
                sbitmap_prepare_to_wait(sbq, ws, &wait, state);
                if (signal_pending_state(state, current))
                        break;
                tag = sbitmap_queue_get(sbq, cpup);
                if (tag >= 0)
                        break;
                schedule();
        }

        sbitmap_finish_wait(sbq, ws, &wait);
        return tag;
}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
 */
struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *conn, int state)
{
        struct iscsit_cmd *cmd;
        struct se_session *se_sess = conn->sess->se_sess;
        int size, tag, cpu;

        tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
        if (tag < 0) {
                tag = iscsit_wait_for_tag(se_sess, state, &cpu);
                if (tag < 0)
                        return NULL;
        }

        size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
        cmd = (struct iscsit_cmd *)(se_sess->sess_cmd_map + (tag * size));
        memset(cmd, 0, size);

        cmd->se_cmd.map_tag = tag;
        cmd->se_cmd.map_cpu = cpu;
        cmd->conn = conn;
        cmd->data_direction = DMA_NONE;
        INIT_LIST_HEAD(&cmd->i_conn_node);
        INIT_LIST_HEAD(&cmd->datain_list);
        INIT_LIST_HEAD(&cmd->cmd_r2t_list);
        spin_lock_init(&cmd->datain_lock);
        spin_lock_init(&cmd->dataout_timeout_lock);
        spin_lock_init(&cmd->istate_lock);
        spin_lock_init(&cmd->error_lock);
        spin_lock_init(&cmd->r2t_lock);
        timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);

        return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);
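
/*
 * Lookup helpers for the per-command sequence and R2T descriptors built by
 * the seq/pdu list code in iscsi_target_seq_pdu_list.c.
 */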
struct iscsi_seq *iscsit_get_seq_holder_for_datain(
        struct iscsit_cmd *cmd,
        u32 seq_send_order)
{
        u32 i;

        for (i = 0; i < cmd->seq_count; i++)
                if (cmd->seq_list[i].seq_send_order == seq_send_order)
                        return &cmd->seq_list[i];

        return NULL;
}

struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *cmd)
{
        u32 i;

        if (!cmd->seq_list) {
                pr_err("struct iscsit_cmd->seq_list is NULL!\n");
                return NULL;
        }

        for (i = 0; i < cmd->seq_count; i++) {
                if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
                        continue;
                if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
                        cmd->seq_send_order++;
                        return &cmd->seq_list[i];
                }
        }

        return NULL;
}

struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
        struct iscsit_cmd *cmd,
        u32 r2t_sn)
{
        struct iscsi_r2t *r2t;

        spin_lock_bh(&cmd->r2t_lock);
        list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
                if (r2t->r2t_sn == r2t_sn) {
                        spin_unlock_bh(&cmd->r2t_lock);
                        return r2t;
                }
        }
        spin_unlock_bh(&cmd->r2t_lock);

        return NULL;
}

static inline int iscsit_check_received_cmdsn(struct iscsit_session *sess, u32 cmdsn)
{
        u32 max_cmdsn;
        int ret;

        /*
         * This is the proper method of checking received CmdSN against
         * ExpCmdSN and MaxCmdSN values, as well as accounting for out
         * of order CmdSNs due to multiple connection sessions and/or
         * CRC failures.
         */
        max_cmdsn = atomic_read(&sess->max_cmd_sn);
        if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
                pr_err("Received CmdSN: 0x%08x is greater than"
                       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
                ret = CMDSN_MAXCMDSN_OVERRUN;

        } else if (cmdsn == sess->exp_cmd_sn) {
                sess->exp_cmd_sn++;
                pr_debug("Received CmdSN matches ExpCmdSN,"
                         " incremented ExpCmdSN to: 0x%08x\n",
                         sess->exp_cmd_sn);
                ret = CMDSN_NORMAL_OPERATION;

        } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
                pr_debug("Received CmdSN: 0x%08x is greater"
                         " than ExpCmdSN: 0x%08x, not acknowledging.\n",
                         cmdsn, sess->exp_cmd_sn);
                ret = CMDSN_HIGHER_THAN_EXP;

        } else {
                pr_err("Received CmdSN: 0x%08x is less than"
                       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
                       sess->exp_cmd_sn);
                ret = CMDSN_LOWER_THAN_EXP;
        }

        return ret;
}
/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 */
int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                        unsigned char *buf, __be32 cmdsn)
{
        int ret, cmdsn_ret;
        bool reject = false;
        u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

        mutex_lock(&conn->sess->cmdsn_mutex);

        cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
        switch (cmdsn_ret) {
        case CMDSN_NORMAL_OPERATION:
                ret = iscsit_execute_cmd(cmd, 0);
                if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
                        iscsit_execute_ooo_cmdsns(conn->sess);
                else if (ret < 0) {
                        reject = true;
                        ret = CMDSN_ERROR_CANNOT_RECOVER;
                }
                break;
        case CMDSN_HIGHER_THAN_EXP:
                ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
                if (ret < 0) {
                        reject = true;
                        ret = CMDSN_ERROR_CANNOT_RECOVER;
                        break;
                }
                ret = CMDSN_HIGHER_THAN_EXP;
                break;
        case CMDSN_LOWER_THAN_EXP:
        case CMDSN_MAXCMDSN_OVERRUN:
        default:
                cmd->i_state = ISTATE_REMOVE;
                iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
                /*
                 * Existing callers for iscsit_sequence_cmd() will silently
                 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
                 * return for CMDSN_MAXCMDSN_OVERRUN as well.
                 */
                ret = CMDSN_LOWER_THAN_EXP;
                break;
        }
        mutex_unlock(&conn->sess->cmdsn_mutex);

        if (reject)
                iscsit_reject_cmd(cmd, reason, buf);

        return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
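
/*
 * Validate an unsolicited Data-Out burst against the negotiated InitialR2T
 * and FirstBurstLength parameters, returning -1 and sending a
 * CHECK_CONDITION if the initiator has violated either one.
 */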
int iscsit_check_unsolicited_dataout(struct iscsit_cmd *cmd, unsigned char *buf)
{
        struct iscsit_conn *conn = cmd->conn;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct iscsi_data *hdr = (struct iscsi_data *) buf;
        u32 payload_length = ntoh24(hdr->dlength);

        if (conn->sess->sess_ops->InitialR2T) {
                pr_err("Received unexpected unsolicited data"
                        " while InitialR2T=Yes, protocol error.\n");
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
                return -1;
        }

        if ((cmd->first_burst_len + payload_length) >
             conn->sess->sess_ops->FirstBurstLength) {
                pr_err("Total %u bytes exceeds FirstBurstLength: %u"
                        " for this Unsolicited DataOut Burst.\n",
                        (cmd->first_burst_len + payload_length),
                         conn->sess->sess_ops->FirstBurstLength);
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_INCORRECT_AMOUNT_OF_DATA, 0);
                return -1;
        }

        if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
                return 0;

        if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
            ((cmd->first_burst_len + payload_length) !=
              conn->sess->sess_ops->FirstBurstLength)) {
                pr_err("Unsolicited non-immediate data received %u"
                        " does not equal FirstBurstLength: %u, and does"
                        " not equal ExpXferLen %u.\n",
                        (cmd->first_burst_len + payload_length),
                        conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_INCORRECT_AMOUNT_OF_DATA, 0);
                return -1;
        }
        return 0;
}

struct iscsit_cmd *iscsit_find_cmd_from_itt(
        struct iscsit_conn *conn,
        itt_t init_task_tag)
{
        struct iscsit_cmd *cmd;

        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                if (cmd->init_task_tag == init_task_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
                }
        }
        spin_unlock_bh(&conn->cmd_lock);

        pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
                        init_task_tag, conn->cid);
        return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt);

struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(
        struct iscsit_conn *conn,
        itt_t init_task_tag,
        u32 length)
{
        struct iscsit_cmd *cmd;

        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
                        continue;
                if (cmd->init_task_tag == init_task_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
                }
        }
        spin_unlock_bh(&conn->cmd_lock);

        pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
                        " dumping payload\n", init_task_tag, conn->cid);
        if (length)
                iscsit_dump_data_payload(conn, length, 1);

        return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);

struct iscsit_cmd *iscsit_find_cmd_from_ttt(
        struct iscsit_conn *conn,
        u32 targ_xfer_tag)
{
        struct iscsit_cmd *cmd = NULL;

        spin_lock_bh(&conn->cmd_lock);
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                if (cmd->targ_xfer_tag == targ_xfer_tag) {
                        spin_unlock_bh(&conn->cmd_lock);
                        return cmd;
                }
        }
        spin_unlock_bh(&conn->cmd_lock);

        pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
                        targ_xfer_tag, conn->cid);
        return NULL;
}

int iscsit_find_cmd_for_recovery(
        struct iscsit_session *sess,
        struct iscsit_cmd **cmd_ptr,
        struct iscsi_conn_recovery **cr_ptr,
        itt_t init_task_tag)
{
        struct iscsit_cmd *cmd = NULL;
        struct iscsi_conn_recovery *cr;
        /*
         * Scan through the inactive connection recovery list's command list.
         * If init_task_tag matches, the command is still waiting in
         * connection recovery.
         */
        spin_lock(&sess->cr_i_lock);
        list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
                spin_lock(&cr->conn_recovery_cmd_lock);
                list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
                        if (cmd->init_task_tag == init_task_tag) {
                                spin_unlock(&cr->conn_recovery_cmd_lock);
                                spin_unlock(&sess->cr_i_lock);

                                *cr_ptr = cr;
                                *cmd_ptr = cmd;
                                return -2;
                        }
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
        }
        spin_unlock(&sess->cr_i_lock);
        /*
         * Scan through the active connection recovery list's command list.
         * If init_task_tag matches, the command is ready to be reassigned.
         */
        spin_lock(&sess->cr_a_lock);
        list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
                spin_lock(&cr->conn_recovery_cmd_lock);
                list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
                        if (cmd->init_task_tag == init_task_tag) {
                                spin_unlock(&cr->conn_recovery_cmd_lock);
                                spin_unlock(&sess->cr_a_lock);

                                *cr_ptr = cr;
                                *cmd_ptr = cmd;
                                return 0;
                        }
                }
                spin_unlock(&cr->conn_recovery_cmd_lock);
        }
        spin_unlock(&sess->cr_a_lock);

        return -1;
}
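
/*
 * Queue @cmd on the connection's immediate queue in the given state and wake
 * up the TX thread sleeping on conn->queues_wq.
 */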
void iscsit_add_cmd_to_immediate_queue(
        struct iscsit_cmd *cmd,
        struct iscsit_conn *conn,
        u8 state)
{
        struct iscsi_queue_req *qr;

        qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
        if (!qr) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_queue_req\n");
                return;
        }
        INIT_LIST_HEAD(&qr->qr_list);
        qr->cmd = cmd;
        qr->state = state;

        spin_lock_bh(&conn->immed_queue_lock);
        list_add_tail(&qr->qr_list, &conn->immed_queue_list);
        atomic_inc(&cmd->immed_queue_count);
        atomic_set(&conn->check_immediate_queue, 1);
        spin_unlock_bh(&conn->immed_queue_lock);

        wake_up(&conn->queues_wq);
}
EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);

struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *conn)
{
        struct iscsi_queue_req *qr;

        spin_lock_bh(&conn->immed_queue_lock);
        if (list_empty(&conn->immed_queue_list)) {
                spin_unlock_bh(&conn->immed_queue_lock);
                return NULL;
        }
        qr = list_first_entry(&conn->immed_queue_list,
                              struct iscsi_queue_req, qr_list);

        list_del(&qr->qr_list);
        if (qr->cmd)
                atomic_dec(&qr->cmd->immed_queue_count);
        spin_unlock_bh(&conn->immed_queue_lock);

        return qr;
}

static void iscsit_remove_cmd_from_immediate_queue(
        struct iscsit_cmd *cmd,
        struct iscsit_conn *conn)
{
        struct iscsi_queue_req *qr, *qr_tmp;

        spin_lock_bh(&conn->immed_queue_lock);
        if (!atomic_read(&cmd->immed_queue_count)) {
                spin_unlock_bh(&conn->immed_queue_lock);
                return;
        }

        list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
                if (qr->cmd != cmd)
                        continue;

                atomic_dec(&qr->cmd->immed_queue_count);
                list_del(&qr->qr_list);
                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->immed_queue_lock);

        if (atomic_read(&cmd->immed_queue_count)) {
                pr_err("ITT: 0x%08x immed_queue_count: %d\n",
                        cmd->init_task_tag,
                        atomic_read(&cmd->immed_queue_count));
        }
}

int iscsit_add_cmd_to_response_queue(
        struct iscsit_cmd *cmd,
        struct iscsit_conn *conn,
        u8 state)
{
        struct iscsi_queue_req *qr;

        qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
        if (!qr) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_queue_req\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&qr->qr_list);
        qr->cmd = cmd;
        qr->state = state;

        spin_lock_bh(&conn->response_queue_lock);
        list_add_tail(&qr->qr_list, &conn->response_queue_list);
        atomic_inc(&cmd->response_queue_count);
        spin_unlock_bh(&conn->response_queue_lock);

        wake_up(&conn->queues_wq);
        return 0;
}

struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *conn)
{
        struct iscsi_queue_req *qr;

        spin_lock_bh(&conn->response_queue_lock);
        if (list_empty(&conn->response_queue_list)) {
                spin_unlock_bh(&conn->response_queue_lock);
                return NULL;
        }

        qr = list_first_entry(&conn->response_queue_list,
                              struct iscsi_queue_req, qr_list);

        list_del(&qr->qr_list);
        if (qr->cmd)
                atomic_dec(&qr->cmd->response_queue_count);
        spin_unlock_bh(&conn->response_queue_lock);

        return qr;
}

static void iscsit_remove_cmd_from_response_queue(
        struct iscsit_cmd *cmd,
        struct iscsit_conn *conn)
{
        struct iscsi_queue_req *qr, *qr_tmp;

        spin_lock_bh(&conn->response_queue_lock);
        if (!atomic_read(&cmd->response_queue_count)) {
                spin_unlock_bh(&conn->response_queue_lock);
                return;
        }

        list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
                                qr_list) {
                if (qr->cmd != cmd)
                        continue;

                atomic_dec(&qr->cmd->response_queue_count);
                list_del(&qr->qr_list);
                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->response_queue_lock);

        if (atomic_read(&cmd->response_queue_count)) {
                pr_err("ITT: 0x%08x response_queue_count: %d\n",
                        cmd->init_task_tag,
                        atomic_read(&cmd->response_queue_count));
        }
}

bool iscsit_conn_all_queues_empty(struct iscsit_conn *conn)
{
        bool empty;

        spin_lock_bh(&conn->immed_queue_lock);
        empty = list_empty(&conn->immed_queue_list);
        spin_unlock_bh(&conn->immed_queue_lock);

        if (!empty)
                return empty;

        spin_lock_bh(&conn->response_queue_lock);
        empty = list_empty(&conn->response_queue_list);
        spin_unlock_bh(&conn->response_queue_lock);

        return empty;
}

void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *conn)
{
        struct iscsi_queue_req *qr, *qr_tmp;

        spin_lock_bh(&conn->immed_queue_lock);
        list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
                list_del(&qr->qr_list);
                if (qr->cmd)
                        atomic_dec(&qr->cmd->immed_queue_count);

                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->immed_queue_lock);

        spin_lock_bh(&conn->response_queue_lock);
        list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
                        qr_list) {
                list_del(&qr->qr_list);
                if (qr->cmd)
                        atomic_dec(&qr->cmd->response_queue_count);

                kmem_cache_free(lio_qr_cache, qr);
        }
        spin_unlock_bh(&conn->response_queue_lock);
}
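
/*
 * Release the per-command allocations and return the command's tag to the
 * session tag pool.
 */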
void iscsit_release_cmd(struct iscsit_cmd *cmd)
{
        struct iscsit_session *sess;
        struct se_cmd *se_cmd = &cmd->se_cmd;

        WARN_ON(!list_empty(&cmd->i_conn_node));

        if (cmd->conn)
                sess = cmd->conn->sess;
        else
                sess = cmd->sess;

        BUG_ON(!sess || !sess->se_sess);

        kfree(cmd->buf_ptr);
        kfree(cmd->pdu_list);
        kfree(cmd->seq_list);
        kfree(cmd->tmr_req);
        kfree(cmd->overflow_buf);
        kfree(cmd->iov_data);
        kfree(cmd->text_in_ptr);

        target_free_tag(sess->se_sess, se_cmd);
}
EXPORT_SYMBOL(iscsit_release_cmd);

void __iscsit_free_cmd(struct iscsit_cmd *cmd, bool check_queues)
{
        struct iscsit_conn *conn = cmd->conn;

        WARN_ON(!list_empty(&cmd->i_conn_node));

        if (cmd->data_direction == DMA_TO_DEVICE) {
                iscsit_stop_dataout_timer(cmd);
                iscsit_free_r2ts_from_list(cmd);
        }
        if (cmd->data_direction == DMA_FROM_DEVICE)
                iscsit_free_all_datain_reqs(cmd);

        if (conn && check_queues) {
                iscsit_remove_cmd_from_immediate_queue(cmd, conn);
                iscsit_remove_cmd_from_response_queue(cmd, conn);
        }

        if (conn && conn->conn_transport->iscsit_unmap_cmd)
                conn->conn_transport->iscsit_unmap_cmd(conn, cmd);
}

void iscsit_free_cmd(struct iscsit_cmd *cmd, bool shutdown)
{
        struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
        int rc;

        WARN_ON(!list_empty(&cmd->i_conn_node));

        __iscsit_free_cmd(cmd, shutdown);
        if (se_cmd) {
                rc = transport_generic_free_cmd(se_cmd, shutdown);
                if (!rc && shutdown && se_cmd->se_sess) {
                        __iscsit_free_cmd(cmd, shutdown);
                        target_put_sess_cmd(se_cmd);
                }
        } else {
                iscsit_release_cmd(cmd);
        }
}
EXPORT_SYMBOL(iscsit_free_cmd);
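
/*
 * The session and connection usage counters below keep a session or
 * connection pinned while another context still references it; the
 * *_check_*_usage_count() variants wait for the count to reach zero before
 * teardown continues.
 */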
bool iscsit_check_session_usage_count(struct iscsit_session *sess,
                                      bool can_sleep)
{
        spin_lock_bh(&sess->session_usage_lock);
        if (sess->session_usage_count != 0) {
                sess->session_waiting_on_uc = 1;
                spin_unlock_bh(&sess->session_usage_lock);
                if (!can_sleep)
                        return true;

                wait_for_completion(&sess->session_waiting_on_uc_comp);
                return false;
        }
        spin_unlock_bh(&sess->session_usage_lock);

        return false;
}

void iscsit_dec_session_usage_count(struct iscsit_session *sess)
{
        spin_lock_bh(&sess->session_usage_lock);
        sess->session_usage_count--;

        if (!sess->session_usage_count && sess->session_waiting_on_uc)
                complete(&sess->session_waiting_on_uc_comp);

        spin_unlock_bh(&sess->session_usage_lock);
}

void iscsit_inc_session_usage_count(struct iscsit_session *sess)
{
        spin_lock_bh(&sess->session_usage_lock);
        sess->session_usage_count++;
        spin_unlock_bh(&sess->session_usage_lock);
}

struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *sess, u16 cid)
{
        struct iscsit_conn *conn;

        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
                if ((conn->cid == cid) &&
                    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
                        iscsit_inc_conn_usage_count(conn);
                        spin_unlock_bh(&sess->conn_lock);
                        return conn;
                }
        }
        spin_unlock_bh(&sess->conn_lock);

        return NULL;
}

struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *sess, u16 cid)
{
        struct iscsit_conn *conn;

        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
                if (conn->cid == cid) {
                        iscsit_inc_conn_usage_count(conn);
                        spin_lock(&conn->state_lock);
                        atomic_set(&conn->connection_wait_rcfr, 1);
                        spin_unlock(&conn->state_lock);
                        spin_unlock_bh(&sess->conn_lock);
                        return conn;
                }
        }
        spin_unlock_bh(&sess->conn_lock);

        return NULL;
}

void iscsit_check_conn_usage_count(struct iscsit_conn *conn)
{
        spin_lock_bh(&conn->conn_usage_lock);
        if (conn->conn_usage_count != 0) {
                conn->conn_waiting_on_uc = 1;
                spin_unlock_bh(&conn->conn_usage_lock);

                wait_for_completion(&conn->conn_waiting_on_uc_comp);
                return;
        }
        spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_dec_conn_usage_count(struct iscsit_conn *conn)
{
        spin_lock_bh(&conn->conn_usage_lock);
        conn->conn_usage_count--;

        if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
                complete(&conn->conn_waiting_on_uc_comp);

        spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_inc_conn_usage_count(struct iscsit_conn *conn)
{
        spin_lock_bh(&conn->conn_usage_lock);
        conn->conn_usage_count++;
        spin_unlock_bh(&conn->conn_usage_lock);
}
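
/*
 * Allocate and queue a NopIN PDU, optionally requesting a NopOut response
 * from the initiator and arming the NOPIN response timer.
 */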
static int iscsit_add_nopin(struct iscsit_conn *conn, int want_response)
{
        u8 state;
        struct iscsit_cmd *cmd;

        cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
        if (!cmd)
                return -1;

        cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
        state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
                        ISTATE_SEND_NOPIN_NO_RESPONSE;
        cmd->init_task_tag = RESERVED_ITT;
        cmd->targ_xfer_tag = (want_response) ?
                             session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);

        if (want_response)
                iscsit_start_nopin_response_timer(conn);
        iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

        return 0;
}

void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
        struct iscsit_conn *conn = from_timer(conn, t, nopin_response_timer);
        struct iscsit_session *sess = conn->sess;

        iscsit_inc_conn_usage_count(conn);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                iscsit_dec_conn_usage_count(conn);
                return;
        }

        pr_err("Did not receive response to NOPIN on CID: %hu, failing"
                " connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
                conn->cid, sess->sess_ops->InitiatorName, sess->isid,
                sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
        conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);

        iscsit_fill_cxn_timeout_err_stats(sess);
        iscsit_cause_connection_reinstatement(conn, 0);
        iscsit_dec_conn_usage_count(conn);
}

void iscsit_mod_nopin_response_timer(struct iscsit_conn *conn)
{
        struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }

        mod_timer(&conn->nopin_response_timer,
                (get_jiffies_64() + na->nopin_response_timeout * HZ));
        spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_start_nopin_response_timer(struct iscsit_conn *conn)
{
        struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }

        conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
        conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
        mod_timer(&conn->nopin_response_timer,
                  jiffies + na->nopin_response_timeout * HZ);

        pr_debug("Started NOPIN Response Timer on CID: %d to %u"
                " seconds\n", conn->cid, na->nopin_response_timeout);
        spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_response_timer(struct iscsit_conn *conn)
{
        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }
        conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
        spin_unlock_bh(&conn->nopin_timer_lock);

        del_timer_sync(&conn->nopin_response_timer);

        spin_lock_bh(&conn->nopin_timer_lock);
        conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);
}
void iscsit_handle_nopin_timeout(struct timer_list *t)
{
        struct iscsit_conn *conn = from_timer(conn, t, nopin_timer);

        iscsit_inc_conn_usage_count(conn);

        spin_lock_bh(&conn->nopin_timer_lock);
        if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                iscsit_dec_conn_usage_count(conn);
                return;
        }
        conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);

        iscsit_add_nopin(conn, 1);
        iscsit_dec_conn_usage_count(conn);
}

void __iscsit_start_nopin_timer(struct iscsit_conn *conn)
{
        struct iscsit_session *sess = conn->sess;
        struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

        lockdep_assert_held(&conn->nopin_timer_lock);

        /*
         * NOPIN timeout is disabled.
         */
        if (!na->nopin_timeout)
                return;

        if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
                return;

        conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
        conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
        mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);

        pr_debug("Started NOPIN Timer on CID: %d at %u second"
                " interval\n", conn->cid, na->nopin_timeout);
}

void iscsit_start_nopin_timer(struct iscsit_conn *conn)
{
        spin_lock_bh(&conn->nopin_timer_lock);
        __iscsit_start_nopin_timer(conn);
        spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_timer(struct iscsit_conn *conn)
{
        spin_lock_bh(&conn->nopin_timer_lock);
        if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
                spin_unlock_bh(&conn->nopin_timer_lock);
                return;
        }
        conn->nopin_timer_flags |= ISCSI_TF_STOP;
        spin_unlock_bh(&conn->nopin_timer_lock);

        del_timer_sync(&conn->nopin_timer);

        spin_lock_bh(&conn->nopin_timer_lock);
        conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_login_timeout(struct timer_list *t)
{
        struct iscsit_conn *conn = from_timer(conn, t, login_timer);
        struct iscsi_login *login = conn->login;

        pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");

        spin_lock_bh(&conn->login_timer_lock);
        login->login_failed = 1;

        if (conn->login_kworker) {
                pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
                         conn->login_kworker->comm, conn->login_kworker->pid);
                send_sig(SIGINT, conn->login_kworker, 1);
        } else {
                schedule_delayed_work(&conn->login_work, 0);
        }
        spin_unlock_bh(&conn->login_timer_lock);
}

void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr)
{
        pr_debug("Login timer started\n");

        conn->login_kworker = kthr;
        mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
}

int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr)
{
        struct iscsi_login *login = conn->login;
        int ret = 0;

        spin_lock_bh(&conn->login_timer_lock);
        if (login->login_failed) {
                /* The timer has already expired */
                ret = -1;
        } else {
                conn->login_kworker = kthr;
        }
        spin_unlock_bh(&conn->login_timer_lock);

        return ret;
}

void iscsit_stop_login_timer(struct iscsit_conn *conn)
{
        pr_debug("Login timer stopped\n");
        timer_delete_sync(&conn->login_timer);
}
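
/*
 * Transmit either cmd->iov_data or cmd->iov_misc over the connection socket,
 * retrying on -EAGAIN until the full tx_size has been sent.
 */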
int iscsit_send_tx_data(
        struct iscsit_cmd *cmd,
        struct iscsit_conn *conn,
        int use_misc)
{
        int tx_sent, tx_size;
        u32 iov_count;
        struct kvec *iov;

send_data:
        tx_size = cmd->tx_size;

        if (!use_misc) {
                iov = &cmd->iov_data[0];
                iov_count = cmd->iov_data_count;
        } else {
                iov = &cmd->iov_misc[0];
                iov_count = cmd->iov_misc_count;
        }

        tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
        if (tx_size != tx_sent) {
                if (tx_sent == -EAGAIN) {
                        pr_err("tx_data() returned -EAGAIN\n");
                        goto send_data;
                } else
                        return -1;
        }
        cmd->tx_size = 0;

        return 0;
}
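
/*
 * Zero-copy transmit path: send the iSCSI header with tx_data(), then splice
 * each scatterlist page into the socket via MSG_SPLICE_PAGES, followed by any
 * padding and data digest from cmd->iov_data[].
 */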
int iscsit_fe_sendpage_sg(
        struct iscsit_cmd *cmd,
        struct iscsit_conn *conn)
{
        struct scatterlist *sg = cmd->first_data_sg;
        struct bio_vec bvec;
        struct msghdr msghdr = { .msg_flags = MSG_SPLICE_PAGES, };
        struct kvec iov;
        u32 tx_hdr_size, data_len;
        u32 offset = cmd->first_data_sg_off;
        int tx_sent, iov_off;

send_hdr:
        tx_hdr_size = ISCSI_HDR_LEN;
        if (conn->conn_ops->HeaderDigest)
                tx_hdr_size += ISCSI_CRC_LEN;

        iov.iov_base = cmd->pdu;
        iov.iov_len = tx_hdr_size;

        tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
        if (tx_hdr_size != tx_sent) {
                if (tx_sent == -EAGAIN) {
                        pr_err("tx_data() returned -EAGAIN\n");
                        goto send_hdr;
                }
                return -1;
        }

        data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
        /*
         * Set iov_off used by padding and data digest tx_data() calls below
         * in order to determine proper offset into cmd->iov_data[]
         */
        if (conn->conn_ops->DataDigest) {
                data_len -= ISCSI_CRC_LEN;
                if (cmd->padding)
                        iov_off = (cmd->iov_data_count - 2);
                else
                        iov_off = (cmd->iov_data_count - 1);
        } else {
                iov_off = (cmd->iov_data_count - 1);
        }
        /*
         * Perform sendpage() for each page in the scatterlist
         */
        while (data_len) {
                u32 space = (sg->length - offset);
                u32 sub_len = min_t(u32, data_len, space);
send_pg:
                bvec_set_page(&bvec, sg_page(sg), sub_len, sg->offset + offset);
                iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, sub_len);

                tx_sent = conn->sock->ops->sendmsg(conn->sock, &msghdr,
                                                   sub_len);
                if (tx_sent != sub_len) {
                        if (tx_sent == -EAGAIN) {
                                pr_err("sendmsg/splice returned -EAGAIN\n");
                                goto send_pg;
                        }

                        pr_err("sendmsg/splice failure: %d\n", tx_sent);
                        return -1;
                }

                data_len -= sub_len;
                offset = 0;
                sg = sg_next(sg);
        }

send_padding:
        if (cmd->padding) {
                struct kvec *iov_p = &cmd->iov_data[iov_off++];

                tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
                if (cmd->padding != tx_sent) {
                        if (tx_sent == -EAGAIN) {
                                pr_err("tx_data() returned -EAGAIN\n");
                                goto send_padding;
                        }
                        return -1;
                }
        }

send_datacrc:
        if (conn->conn_ops->DataDigest) {
                struct kvec *iov_d = &cmd->iov_data[iov_off];

                tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
                if (ISCSI_CRC_LEN != tx_sent) {
                        if (tx_sent == -EAGAIN) {
                                pr_err("tx_data() returned -EAGAIN\n");
                                goto send_datacrc;
                        }
                        return -1;
                }
        }

        return 0;
}
/*
 * This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs, with the
 * errors set in status_class and status_detail.
 *
 * Parameters:  iSCSI Connection, Status Class, Status Detail.
 * Returns:     0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsit_conn *conn, u8 status_class, u8 status_detail)
{
        struct iscsi_login_rsp *hdr;
        struct iscsi_login *login = conn->conn_login;

        login->login_failed = 1;
        iscsit_collect_login_stats(conn, status_class, status_detail);

        memset(&login->rsp[0], 0, ISCSI_HDR_LEN);

        hdr = (struct iscsi_login_rsp *)&login->rsp[0];
        hdr->opcode = ISCSI_OP_LOGIN_RSP;
        hdr->status_class = status_class;
        hdr->status_detail = status_detail;
        hdr->itt = conn->login_itt;

        return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}

void iscsit_print_session_params(struct iscsit_session *sess)
{
        struct iscsit_conn *conn;

        pr_debug("-----------------------------[Session Params for"
                " SID: %u]-----------------------------\n", sess->sid);
        spin_lock_bh(&sess->conn_lock);
        list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
                iscsi_dump_conn_ops(conn->conn_ops);
        spin_unlock_bh(&sess->conn_lock);

        iscsi_dump_sess_ops(sess->sess_ops);
}

int rx_data(
        struct iscsit_conn *conn,
        struct kvec *iov,
        int iov_count,
        int data)
{
        int rx_loop = 0, total_rx = 0;
        struct msghdr msg;

        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;

        memset(&msg, 0, sizeof(struct msghdr));
        iov_iter_kvec(&msg.msg_iter, ITER_DEST, iov, iov_count, data);

        while (msg_data_left(&msg)) {
                rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
                if (rx_loop <= 0) {
                        pr_debug("rx_loop: %d total_rx: %d\n",
                                rx_loop, total_rx);
                        return rx_loop;
                }
                total_rx += rx_loop;
                pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
                                rx_loop, total_rx, data);
        }

        return total_rx;
}

int tx_data(
        struct iscsit_conn *conn,
        struct kvec *iov,
        int iov_count,
        int data)
{
        struct msghdr msg;
        int total_tx = 0;

        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;

        if (data <= 0) {
                pr_err("Data length is: %d\n", data);
                return -1;
        }

        memset(&msg, 0, sizeof(struct msghdr));

        iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, iov_count, data);

        while (msg_data_left(&msg)) {
                int tx_loop = sock_sendmsg(conn->sock, &msg);

                if (tx_loop <= 0) {
                        pr_debug("tx_loop: %d total_tx %d\n",
                                tx_loop, total_tx);
                        return tx_loop;
                }
                total_tx += tx_loop;
                pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
                                tx_loop, total_tx, data);
        }

        return total_tx;
}
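
/*
 * Update the per-tiqn login statistics for a login attempt, recording the
 * initiator name, address family, sockaddr and time of the last failure.
 */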
void iscsit_collect_login_stats(
        struct iscsit_conn *conn,
        u8 status_class,
        u8 status_detail)
{
        struct iscsi_param *intrname = NULL;
        struct iscsi_tiqn *tiqn;
        struct iscsi_login_stats *ls;

        tiqn = iscsit_snmp_get_tiqn(conn);
        if (!tiqn)
                return;

        ls = &tiqn->login_stats;

        spin_lock(&ls->lock);
        if (status_class == ISCSI_STATUS_CLS_SUCCESS)
                ls->accepts++;
        else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
                ls->redirects++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
        } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
                 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
                ls->authenticate_fails++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
        } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
                 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
                ls->authorize_fails++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
        } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
                 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
                ls->negotiate_fails++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
        } else {
                ls->other_fails++;
                ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
        }

        /* Save initiator name, ip address and time, if it is a failed login */
        if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
                if (conn->param_list)
                        intrname = iscsi_find_param_from_key(INITIATORNAME,
                                                             conn->param_list);
                strscpy(ls->last_intr_fail_name,
                        (intrname ? intrname->value : "Unknown"),
                        sizeof(ls->last_intr_fail_name));

                ls->last_intr_fail_ip_family = conn->login_family;

                ls->last_intr_fail_sockaddr = conn->login_sockaddr;
                ls->last_fail_time = get_jiffies_64();
        }

        spin_unlock(&ls->lock);
}
struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *conn)
{
        struct iscsi_portal_group *tpg;

        if (!conn)
                return NULL;

        tpg = conn->tpg;
        if (!tpg)
                return NULL;

        if (!tpg->tpg_tiqn)
                return NULL;

        return tpg->tpg_tiqn;
}
void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess)
{
        struct iscsi_portal_group *tpg = sess->tpg;
        struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

        if (!tiqn)
                return;

        spin_lock_bh(&tiqn->sess_err_stats.lock);
        strscpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
                        sess->sess_ops->InitiatorName,
                        sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
        tiqn->sess_err_stats.last_sess_failure_type =
                        ISCSI_SESS_ERR_CXN_TIMEOUT;
        tiqn->sess_err_stats.cxn_timeout_errors++;
        atomic_long_inc(&sess->conn_timeout_errors);
        spin_unlock_bh(&tiqn->sess_err_stats.lock);
}