/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)

static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);

static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		isert_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

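/*
 * Select the least-loaded completion context for a new connection:
 * device_list_mutex serializes the per-context active_qps accounting
 * shared between isert_comp_get() and isert_comp_put().
 */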
static struct isert_comp *
isert_comp_get(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct isert_comp *comp;
	int i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	mutex_unlock(&device_list_mutex);

	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);

	return comp;
}

static void
isert_comp_put(struct isert_comp *comp)
{
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);
}

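/*
 * Build the RC QP for a connection: both send and recv work runs on the
 * chosen completion context's CQ, and IB_QP_CREATE_SIGNATURE_EN is
 * requested when the device can offload T10-PI signature handover.
 */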
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct isert_comp *comp,
		struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->device;
	struct ib_qp_init_attr attr;
	int ret;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	attr.cap.max_send_sge = device->dev_attr.max_sge;
	isert_conn->max_sge = min(device->dev_attr.max_sge,
				  device->dev_attr.max_sge_rd);
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ERR_PTR(ret);
	}

	return cma_id->qp;
}

static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_comp *comp;
	int ret;

	comp = isert_comp_get(isert_conn);
	isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto err;
	}

	return 0;
err:
	isert_comp_put(comp);
	return ret;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	isert_dbg("event: %d\n", e->event);
}

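/*
 * Allocate and DMA-map the ring of RX descriptors that backs the recv
 * queue; on a mapping failure, every descriptor mapped so far is unwound
 * before the ring itself is freed.
 */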
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->rx_descs)
		goto fail;

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	return 0;

dma_map_fail:
	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
fail:
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);

	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}

static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);

static void
isert_free_comps(struct isert_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq) {
			cancel_work_sync(&comp->work);
			ib_destroy_cq(comp->cq);
		}
	}
	kfree(device->comps);
}

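/*
 * One completion context is created per CQ, capped by ISERT_MAX_CQ, the
 * number of online CPUs and the device's completion vectors; each CQ is
 * sized to min(ISER_MAX_CQ_LEN, attr->max_cqe).
 */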
static int
isert_alloc_comps(struct isert_device *device,
		  struct ib_device_attr *attr)
{
	int i, max_cqe, ret = 0;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));

	isert_info("Using %d CQs, %s supports %d vectors, "
		   "Fast registration %d pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,
		   device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

	max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);

	for (i = 0; i < device->comps_used; i++) {
		struct ib_cq_init_attr cq_attr = {};
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		INIT_WORK(&comp->work, isert_cq_work);
		cq_attr.cqe = max_cqe;
		cq_attr.comp_vector = i;
		comp->cq = ib_create_cq(device->ib_device,
					isert_cq_callback,
					isert_cq_event_callback,
					(void *)comp,
					&cq_attr);
		if (IS_ERR(comp->cq)) {
			isert_err("Unable to allocate cq\n");
			ret = PTR_ERR(comp->cq);
			comp->cq = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;
out_cq:
	isert_free_comps(device);
	return ret;
}

static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device_attr *dev_attr;
	int ret;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(device->ib_device, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	ret = isert_alloc_comps(device, dev_attr);
	if (ret)
		return ret;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		goto out_cq;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	return 0;

out_cq:
	isert_free_comps(device);
	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	isert_info("device %p\n", device);

	ib_dealloc_pd(device->pd);
	isert_free_comps(device);
}

static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->fr_pool))
		return;

	isert_info("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->fr_pool, list) {
		list_del(&fr_desc->list);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->fr_pool_size)
		isert_warn("Pool still has %d regions registered\n",
			   isert_conn->fr_pool_size - i);
}

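/*
 * Allocate the protection-information context for a fast registration
 * descriptor: one MR for the protection (DIF) buffer and one signature
 * enabled MR that covers data plus protection.
 */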
static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
{
	struct pi_context *pi_ctx;
	int ret;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
		isert_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				      ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		isert_err("Failed to allocate prot frmr err=%ld\n",
			  PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_pi_ctx;
	}
	desc->ind |= ISERT_PROT_KEY_VALID;

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		isert_err("Failed to allocate signature enabled mr err=%ld\n",
			  PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
	}

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(pi_ctx->prot_mr);
err_pi_ctx:
	kfree(pi_ctx);

	return ret;
}

static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				       ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		isert_err("Failed to allocate data frmr err=%ld\n",
			  PTR_ERR(fr_desc->data_mr));
		return PTR_ERR(fr_desc->data_mr);
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	isert_dbg("Created fr_desc %p\n", fr_desc);

	return 0;
}

static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			isert_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   device->pd, fr_desc);
		if (ret) {
			isert_err("Failed to create fastreg descriptor err=%d\n",
				  ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		isert_conn->fr_pool_size++;
	}

	isert_dbg("Creating conn %p fastreg pool size=%d",
		  isert_conn, isert_conn->fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}

static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	spin_lock_init(&isert_conn->pool_lock);
	INIT_LIST_HEAD(&isert_conn->fr_pool);
	INIT_WORK(&isert_conn->release_work, isert_release_work);
}

static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_buf);
}

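/*
 * The login buffer is a single allocation carved into two regions: the
 * first ISCSI_DEF_MAX_RECV_SEG_LEN bytes receive login requests and the
 * remaining ISER_RX_LOGIN_SIZE bytes stage login responses, each with
 * its own DMA mapping and direction.
 */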
static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
		return -ENOMEM;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;

	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		  isert_conn->login_buf, isert_conn->login_req_buf,
		  isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	return 0;

out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
	return ret;
}

static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		  cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->mutex);
	list_add_tail(&isert_conn->node, &isert_np->accepted);
	mutex_unlock(&isert_np->mutex);

	return 0;

out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	if (device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);
	}

	if (isert_conn->login_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	kfree(isert_conn);
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_UP;
	kref_get(&isert_conn->kref);
	mutex_unlock(&isert_conn->mutex);

	mutex_lock(&isert_np->mutex);
	list_move_tail(&isert_conn->node, &isert_np->pending);
	mutex_unlock(&isert_np->mutex);

	isert_info("np %p: Allow accept_np to continue\n", isert_np);
	up(&isert_np->sem);
}

static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		isert_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->cm_id);
		if (err)
			isert_warn("Failed rdma_disconnect isert_conn %p\n",
				   isert_conn);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->cm_id));
			isert_np->cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	bool terminating = false;

	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_info("conn %p completing wait\n", isert_conn);
	complete(&isert_conn->wait);

	if (terminating)
		goto out;

	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_conn->node)) {
		list_del_init(&isert_conn->node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->mutex);

out:
	return 0;
}

static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	list_del_init(&isert_conn->node);
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}

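/*
 * Post @count recv work requests as one chained list: each rx_wr.next
 * points at the following entry and the last is NULL-terminated, so
 * ib_post_recv() can take the whole batch in a single call.
 */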
static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[i];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
	}
	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
			   &rx_wr_failed);
	if (ret) {
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	}

	return ret;
}

static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_recv_wr *rx_wr_failed, rx_wr;
	int ret;

	rx_wr.wr_id = (uintptr_t)rx_desc;
	rx_wr.sg_list = &rx_desc->rx_sg;
	rx_wr.num_sge = 1;
	rx_wr.next = NULL;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
	if (ret) {
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	= NULL;
	send_wr.wr_id	= (uintptr_t)tx_desc;
	send_wr.sg_list	= tx_desc->tx_sg;
	send_wr.num_sge	= tx_desc->num_sge;
	send_wr.opcode	= IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		  sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		isert_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	return ret;
}

static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr	= isert_conn->login_rsp_dma;
		tx_dsg->length	= length;
		tx_dsg->lkey	= isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					isert_err("Conn: %p failed to create"
						  " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recvm(isert_conn,
					       ISERT_QP_MAX_RECV_DTOS);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

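/*
 * Copy a received login request PDU into the iscsi_login context; the
 * first request additionally seeds the negotiation state (CID, TSIH,
 * CmdSN, version range) from the PDU header.
 */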
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;
	isert_cmd->rx_desc = rx_desc;

	return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	if (imm_data_len != data_len) {
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    &rx_desc->data[0], imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
	} else {
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);
	}

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len, cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer before hand
	 */
	rc = isert_post_recv(isert_conn, rx_desc);
	if (rc) {
		isert_err("ib_post_recv failed with %d\n", rc);
		return rc;
	}

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			isert_err("Unable to allocate text_in of payload_length: %u\n",
				  payload_length);
			return -ENOMEM;
		}
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
		else
			cmd = isert_allocate_cmd(conn, rx_desc);

		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);
}

static void
isert_rcv_completion(struct iser_rx_desc *desc,
		     struct isert_conn *isert_conn,
		     u32 xfer_len)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			  rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			  rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		  hdr->opcode, hdr->itt, hdr->flags,
		  (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
}

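/*
 * DMA-map (part of) a command's scatterlist for an RDMA operation.  The
 * byte offset is translated into a starting sg entry, and both nents and
 * length are clamped to what a single ISCSI_ISER_SG_TABLESIZE
 * registration can cover.
 */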
static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			     DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
			    ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
			  PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		isert_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}

static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->rdma_wr) {
		isert_dbg("Cmd %p free send_wr\n", isert_cmd);
		kfree(wr->rdma_wr);
		wr->rdma_wr = NULL;
	}

	if (wr->ib_sge) {
		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}

static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->fr_desc) {
		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		spin_lock_bh(&isert_conn->pool_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_bh(&isert_conn->pool_lock);
		wr->fr_desc = NULL;
	}

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	wr->ib_sge = NULL;
	wr->rdma_wr = NULL;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->device;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				  cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}

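/*
 * Query the signature MR for a T10-PI error after an RDMA completion.
 * The interval used to convert the reported error offset into a sector
 * is the logical block size plus the 8-byte protection information tuple.
 */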
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}

static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	wr->rdma_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->rdma_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		target_put_sess_cmd(se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}

static void
isert_snd_completion(struct iser_tx_desc *tx_desc,
		     struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);

	switch (wr->iser_ib_op) {
	case ISER_IB_SEND:
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}

/**
 * is_isert_tx_desc() - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @isert_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
	void *start = isert_conn->rx_descs;
	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}

static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
	if (wc->wr_id == ISER_BEACON_WRID) {
		isert_info("conn %p completing wait_comp_err\n",
			   isert_conn);
		complete(&isert_conn->wait_comp_err);
	} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
		struct ib_device *ib_dev = isert_conn->cm_id->device;
		struct isert_cmd *isert_cmd;
		struct iser_tx_desc *desc;

		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
		isert_cmd = desc->isert_cmd;
		if (!isert_cmd)
			isert_unmap_tx_desc(desc, ib_dev);
		else
			isert_completion_put(desc, isert_cmd, ib_dev, true);
	} else {
		isert_conn->post_recv_buf_count--;
		if (!isert_conn->post_recv_buf_count)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}
}

static void
isert_handle_wc(struct ib_wc *wc)
{
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	isert_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
		} else {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			isert_snd_completion(tx_desc, isert_conn);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			isert_err("%s (%d): wr id %llx vend_err %x\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id, wc->vendor_err);
		else
			isert_dbg("%s (%d): wr id %llx\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
	}
}

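/*
 * CQ polling runs in workqueue context and is bounded by a 65536
 * completion budget per run so that one busy CQ cannot monopolize the
 * worker; the CQ is re-armed after the loop drains or the budget is hit.
 */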
static void
isert_cq_work(struct work_struct *work)
{
	enum { isert_poll_budget = 65536 };
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       work);
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			isert_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= isert_poll_budget)
			break;
	}

	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_callback(struct ib_cq *cq, void *context)
{
	struct isert_comp *comp = context;

	queue_work(isert_comp_wq, &comp->work);
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
	if (ret) {
		isert_err("ib_post_recv failed with %d\n", ret);
		return ret;
	}

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}

	return ret;
}

*conn
, struct iscsi_cmd
*cmd
)
2153 struct isert_cmd
*isert_cmd
= iscsit_priv_cmd(cmd
);
2154 struct isert_conn
*isert_conn
= conn
->context
;
2155 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
2156 struct iscsi_scsi_rsp
*hdr
= (struct iscsi_scsi_rsp
*)
2157 &isert_cmd
->tx_desc
.iscsi_header
;
2159 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
2160 iscsit_build_rsp_pdu(cmd
, conn
, true, hdr
);
2161 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
2163 * Attach SENSE DATA payload to iSCSI Response PDU
2165 if (cmd
->se_cmd
.sense_buffer
&&
2166 ((cmd
->se_cmd
.se_cmd_flags
& SCF_TRANSPORT_TASK_SENSE
) ||
2167 (cmd
->se_cmd
.se_cmd_flags
& SCF_EMULATED_TASK_SENSE
))) {
2168 struct isert_device
*device
= isert_conn
->device
;
2169 struct ib_device
*ib_dev
= device
->ib_device
;
2170 struct ib_sge
*tx_dsg
= &isert_cmd
->tx_desc
.tx_sg
[1];
2171 u32 padding
, pdu_len
;
2173 put_unaligned_be16(cmd
->se_cmd
.scsi_sense_length
,
2175 cmd
->se_cmd
.scsi_sense_length
+= sizeof(__be16
);
2177 padding
= -(cmd
->se_cmd
.scsi_sense_length
) & 3;
2178 hton24(hdr
->dlength
, (u32
)cmd
->se_cmd
.scsi_sense_length
);
2179 pdu_len
= cmd
->se_cmd
.scsi_sense_length
+ padding
;
2181 isert_cmd
->pdu_buf_dma
= ib_dma_map_single(ib_dev
,
2182 (void *)cmd
->sense_buffer
, pdu_len
,
2185 isert_cmd
->pdu_buf_len
= pdu_len
;
2186 tx_dsg
->addr
= isert_cmd
->pdu_buf_dma
;
2187 tx_dsg
->length
= pdu_len
;
2188 tx_dsg
->lkey
= device
->pd
->local_dma_lkey
;
2189 isert_cmd
->tx_desc
.num_sge
= 2;
2192 isert_init_send_wr(isert_conn
, isert_cmd
, send_wr
);
2194 isert_dbg("Posting SCSI Response\n");
2196 return isert_post_response(isert_conn
, isert_cmd
);
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}

static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}

static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
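/*
 * Build one RDMA work request with up to max_sge SGEs taken from the TCM
 * scatterlist, starting @offset bytes into the payload. Returns the
 * number of SGEs consumed so the caller can advance its ib_sge cursor.
 */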
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	rdma_wr->wr.sg_list = ib_sge;
	rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
			  "page_off: %u\n",
			  (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = device->pd->local_dma_lkey;

		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		if (!data_left)
			break;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	rdma_wr->wr.num_sge = ++i;
	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);

	return rdma_wr->wr.num_sge;
}
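/*
 * DMA-map path (no memory registration): the payload is described
 * directly by per-page SGEs, split across DIV_ROUND_UP(nents, max_sge)
 * chained RDMA work requests of at most max_sge * PAGE_SIZE bytes each.
 */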
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_rdma_wr *rdma_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num,
			      GFP_KERNEL);
	if (!wr->rdma_wr) {
		isert_dbg("Unable to allocate wr->rdma_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->rdma_wr_num; i++) {
		rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i];
		data_len = min(data_left, rdma_write_max);

		rdma_wr->wr.send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
			rdma_wr->remote_addr = isert_cmd->read_va + offset;
			rdma_wr->rkey = isert_cmd->read_stag;
			if (i + 1 == wr->rdma_wr_num)
				rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
			else
				rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
		} else {
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
			rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
			rdma_wr->rkey = isert_cmd->write_stag;
			if (i + 1 == wr->rdma_wr_num)
				rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
			else
				rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 rdma_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}
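/*
 * Prepare a local-invalidate work request for @mr and bump its key, so
 * the rkey handed out by the next registration differs from the one just
 * invalidated.
 */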
static inline void
isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	/* Bump the key */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
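/*
 * Register @mem with a fast registration MR. A buffer with a single DMA
 * entry is passed through using the local DMA lkey without registering
 * anything; otherwise the descriptor's MR is (re)registered, preceded by
 * a local invalidate when the key is still marked valid from a previous
 * I/O.
 */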
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_mr *mr;
	struct ib_reg_wr reg_wr;
	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
	int ret, n;

	if (mem->dma_nents == 1) {
		sge->lkey = device->pd->local_dma_lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
			  sge->addr, sge->length, sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID)
		/* Registering data buffer */
		mr = fr_desc->data_mr;
	else
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;

	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE);
	if (unlikely(n != mem->nents)) {
		isert_err("failed to map mr sg (%d/%d)\n",
			  n, mem->nents);
		return n < 0 ? n : -EINVAL;
	}

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	reg_wr.wr.next = NULL;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
	reg_wr.wr.send_flags = 0;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = mr;
	reg_wr.key = mr->lkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &reg_wr.wr;
	else
		wr->next = &reg_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = mr->iova;
	sge->length = mr->length;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}
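/*
 * Map the target-core protection operation onto the two signature
 * domains: "mem" describes the local buffer, "wire" what travels on the
 * network. INSERT/STRIP leave one side unprotected (IB_SIG_TYPE_NONE);
 * PASS keeps DIF on both sides.
 */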
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}
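/*
 * Register the signature MR covering the data (and, when present,
 * protection) buffers of a T10-PI command. The check_mask built above
 * appears to use 0xc0 for the guard tag, 0x30 for the reference tag and
 * 0x0f for the low (apptag) nibble, which is also keyed off
 * TARGET_DIF_CHECK_REFTAG since no separate apptag check is wired up
 * here.
 */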
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_sig_handover_wr sig_wr;
	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.wr.num_sge = 1;
	sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.sig_attrs = &sig_attrs;
	sig_wr.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr.wr;
	else
		wr->next = &sig_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		  rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}
static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}
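/*
 * Registration path (FRWR): collapse the payload into a single SGE backed
 * by a fast-registered MR, so one RDMA work request suffices regardless
 * of scatterlist length. T10-PI commands additionally register protection
 * and signature MRs via isert_handle_prot_cmd().
 */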
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_rdma_wr *rdma_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		fr_desc = list_first_entry(&isert_conn->fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->rdma_wr_num = 1;
	memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr));
	wr->rdma_wr = &wr->s_rdma_wr;
	wr->isert_cmd = isert_cmd;

	rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr;
	rdma_wr->wr.sg_list = &wr->s_ib_sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		rdma_wr->remote_addr = isert_cmd->read_va;
		rdma_wr->rkey = isert_cmd->read_stag;
		rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
					 0 : IB_SEND_SIGNALED;
	} else {
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = isert_cmd->write_va;
		rdma_wr->rkey = isert_cmd->write_stag;
		rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}
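/*
 * Transmit Data-In: post the RDMA_WRITE chain toward the initiator. For
 * non-PI commands the SCSI response send_wr is linked behind the last
 * RDMA_WRITE so both are posted with a single ib_post_send(); for PI
 * commands the response is presumably sent later from the completion
 * path, after the signature status has been checked.
 */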
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
		wr->rdma_wr_num += 1;

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}
	}

	rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
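/*
 * Create, bind and listen on an rdma_cm listener for the network portal.
 * The returned cm_id dispatches connection events to isert_cma_handler()
 * with the isert_np as its context.
 */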
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure that
	 * there are no hanging connections that completed RDMA
	 * establishment but didn't start the iscsi login process. Work
	 * around this by cleaning up whatever piled up in the accepted
	 * and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	wait_for_completion(&isert_conn->wait);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}
static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}
static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;

	isert_info("conn %p\n", isert_conn);

	init_completion(&isert_conn->wait_comp_err);
	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
	/* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->wait_comp_err);
}
/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited dataout
 * @conn:    iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * dataout messages. We must put the extra reference on those
 * before blocking on target_wait_for_sess_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	/*
	 * Only wait for wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_wait4flush(isert_conn);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_wait4flush(isert_conn);
	isert_put_conn(isert_conn);
}
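/*
 * iscsit_transport ops wired into the generic iSCSI target: PDU-level
 * callbacks map onto the iSER send/RDMA primitives implemented above.
 */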
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
static int __init
isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}
static void __exit
isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);