/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "ib_isert.h"

#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
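
/*
 * isert_comp_wq runs deferred completion work for control PDUs;
 * isert_release_wq runs deferred connection release, so final
 * teardown happens outside CM/CQ callback context.
 */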
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);
static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}
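
/*
 * Pick the completion context (CQ + vector) with the fewest active
 * QPs, under device_list_mutex, so connections spread evenly across
 * the device's completion vectors.
 */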
static struct isert_comp *
isert_comp_get(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct isert_comp *comp;
	int i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	mutex_unlock(&device_list_mutex);

	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);

	return comp;
}
static void
isert_comp_put(struct isert_comp *comp)
{
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);
}
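
/*
 * Build the RC QP on the connection's cm_id. Send/recv queue depths
 * are sized from ISERT_QP_MAX_REQ_DTOS/ISERT_QP_MAX_RECV_DTOS, and
 * signature (T10-PI) offload is requested when the device supports it.
 */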
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct isert_comp *comp,
		struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->device;
	struct ib_qp_init_attr attr;
	int ret;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
	isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
				  device->ib_device->attrs.max_sge_rd);
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ERR_PTR(ret);
	}

	return cma_id->qp;
}
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_comp *comp;
	int ret;

	comp = isert_comp_get(isert_conn);
	isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto err;
	}

	return 0;
err:
	isert_comp_put(comp);
	return ret;
}
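
/*
 * Allocate and DMA-map the fixed pool of RX descriptors that back the
 * QP receive queue; on a mapping failure, unwind only the descriptors
 * mapped so far.
 */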
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->rx_descs)
		goto fail;

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
		rx_desc->rx_cqe.done = isert_recv_done;
	}

	return 0;

dma_map_fail:
	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
fail:
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
	return -ENOMEM;
}
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}
static void
isert_free_comps(struct isert_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_free_cq(comp->cq);
	}
	kfree(device->comps);
}
static int
isert_alloc_comps(struct isert_device *device)
{
	int i, max_cqe, ret = 0;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));

	isert_info("Using %d CQs, %s supports %d vectors support "
		   "Fast registration %d pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,
		   device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

	max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
				       IB_POLL_WORKQUEUE);
		if (IS_ERR(comp->cq)) {
			isert_err("Unable to allocate cq\n");
			ret = PTR_ERR(comp->cq);
			comp->cq = NULL;
			goto out_cq;
		}
	}

	return 0;
out_cq:
	isert_free_comps(device);
	return ret;
}
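
/*
 * Per-ib_device setup: select fast-registration (FRWR) vs. plain
 * dma_map based RDMA handlers from the device capability flags, then
 * allocate the completion contexts and the PD.
 */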
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	int ret;

	isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);

	/* assign function handlers */
	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    ib_dev->attrs.device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	ret = isert_alloc_comps(device);
	if (ret)
		goto out;

	device->pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		goto out_cq;
	}

	/* Check signature cap */
	device->pi_capable = ib_dev->attrs.device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	return 0;

out_cq:
	isert_free_comps(device);
out:
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
	isert_info("device %p\n", device);

	ib_dealloc_pd(device->pd);
	isert_free_comps(device);
}
static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
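
/*
 * isert_device instances are shared across connections and cached on
 * device_list keyed by node_guid; lookup takes a reference, allocating
 * and initializing a new entry on first use.
 */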
static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}
static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->fr_pool))
		return;

	isert_info("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->fr_pool, list) {
		list_del(&fr_desc->list);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->fr_pool_size)
		isert_warn("Pool still has %d regions registered\n",
			isert_conn->fr_pool_size - i);
}
static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
{
	struct pi_context *pi_ctx;
	int ret;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
		isert_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				      ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		isert_err("Failed to allocate prot frmr err=%ld\n",
			  PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_pi_ctx;
	}
	desc->ind |= ISERT_PROT_KEY_VALID;

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		isert_err("Failed to allocate signature enabled mr err=%ld\n",
			  PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
	}

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(pi_ctx->prot_mr);
err_pi_ctx:
	kfree(pi_ctx);

	return ret;
}
static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				       ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		isert_err("Failed to allocate data frmr err=%ld\n",
			  PTR_ERR(fr_desc->data_mr));
		return PTR_ERR(fr_desc->data_mr);
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	isert_dbg("Created fr_desc %p\n", fr_desc);

	return 0;
}
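
/*
 * Pre-allocate fast-registration descriptors, sized from the session
 * tag count, so command processing never has to allocate MRs in the
 * I/O path.
 */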
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			isert_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   device->pd, fr_desc);
		if (ret) {
			isert_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		isert_conn->fr_pool_size++;
	}

	isert_dbg("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}
static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	spin_lock_init(&isert_conn->pool_lock);
	INIT_LIST_HEAD(&isert_conn->fr_pool);
	INIT_WORK(&isert_conn->release_work, isert_release_work);
}
static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	kfree(isert_conn->login_rsp_buf);

	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_req_buf);
}
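
/*
 * The login phase uses a dedicated pair of DMA-mapped buffers: one
 * mapped FROM_DEVICE for inbound login requests and one TO_DEVICE for
 * outbound login responses.
 */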
static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
			GFP_KERNEL);
	if (!isert_conn->login_req_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
		return -ENOMEM;
	}

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				isert_conn->login_req_buf,
				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;
		goto out_free_login_req_buf;
	}

	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
	if (!isert_conn->login_rsp_buf) {
		isert_err("Unable to allocate isert_conn->login_rspbuf\n");
		ret = -ENOMEM;
		goto out_unmap_login_req_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					isert_conn->login_rsp_buf,
					ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_free_login_rsp_buf;
	}

	return 0;

out_free_login_rsp_buf:
	kfree(isert_conn->login_rsp_buf);
out_unmap_login_req_buf:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
out_free_login_req_buf:
	kfree(isert_conn->login_req_buf);

	return ret;
}
static void
isert_set_nego_params(struct isert_conn *isert_conn,
		      struct rdma_conn_param *param)
{
	struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
				attr->max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	if (param->private_data) {
		u8 flags = *(u8 *)param->private_data;

		/*
		 * use remote invalidation if both the initiator
		 * and the HCA support it
		 */
		isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
					(attr->device_cap_flags &
					 IB_DEVICE_MEM_MGT_EXTENSIONS);
		if (isert_conn->snd_w_inv)
			isert_info("Using remote invalidation\n");
	}
}
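
/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocates the isert_conn,
 * login buffers, device resources and QP, then accepts the connection
 * and queues it on isert_np->accepted for the iscsi_np login thread.
 */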
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	isert_set_nego_params(isert_conn, &event->param.conn);

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_login_post_recv(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->mutex);
	list_add_tail(&isert_conn->node, &isert_np->accepted);
	mutex_unlock(&isert_np->mutex);

	return 0;

out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	if (device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);
	}

	if (isert_conn->login_req_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	kfree(isert_conn);
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_UP;
	kref_get(&isert_conn->kref);
	mutex_unlock(&isert_conn->mutex);

	mutex_lock(&isert_np->mutex);
	list_move_tail(&isert_conn->node, &isert_np->pending);
	mutex_unlock(&isert_np->mutex);

	isert_info("np %p: Allow accept_np to continue\n", isert_np);
	up(&isert_np->sem);
}
static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}
static void
isert_handle_unbound_conn(struct isert_conn *isert_conn)
{
	struct isert_np *isert_np = isert_conn->cm_id->context;

	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_conn->node)) {
		/*
		 * This means iscsi doesn't know this connection
		 * so schedule a cleanup ourselves
		 */
		list_del_init(&isert_conn->node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->mutex);
}
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	if (isert_conn->state >= ISER_CONN_TERMINATING)
		return;

	isert_info("Terminating conn %p state %d\n",
		   isert_conn, isert_conn->state);
	isert_conn->state = ISER_CONN_TERMINATING;
	err = rdma_disconnect(isert_conn->cm_id);
	if (err)
		isert_warn("Failed rdma_disconnect isert_conn %p\n",
			   isert_conn);
}
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->cm_id));
			isert_np->cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		isert_conn_terminate(isert_conn);
		ib_drain_qp(isert_conn->qp);
		isert_handle_unbound_conn(isert_conn);
		break;
	case ISER_CONN_BOUND:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
	mutex_unlock(&isert_conn->mutex);

	return 0;
}
static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	list_del_init(&isert_conn->node);
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
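
/*
 * RX queue replenishment: isert_post_recvm() chains `count` recv WRs
 * into one ib_post_recv() call; isert_post_recv() reposts a single
 * descriptor.
 */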
static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[i];

		rx_wr->wr_cqe = &rx_desc->rx_cqe;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
	}
	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
			   &rx_wr_failed);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}
static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_recv_wr *rx_wr_failed, rx_wr;
	int ret;

	rx_wr.wr_cqe = &rx_desc->rx_cqe;
	rx_wr.sg_list = &rx_desc->rx_sg;
	rx_wr.num_sge = 1;
	rx_wr.next = NULL;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}
static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	tx_desc->tx_cqe.done = isert_login_send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &tx_desc->tx_cqe;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISCSI_CTRL;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
			ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->iser_ib_op = ISER_IB_SEND;
	tx_desc->tx_cqe.done = isert_send_done;
	send_wr->wr_cqe = &tx_desc->tx_cqe;

	if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
		send_wr->opcode  = IB_WR_SEND_WITH_INV;
		send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
	} else {
		send_wr->opcode = IB_WR_SEND;
	}

	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}
static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_PAYLOAD_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
	if (ret)
		isert_err("ib_post_recv() failed: %d\n", ret);

	return ret;
}
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					isert_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recvm(isert_conn,
					       ISERT_QP_MAX_RECV_DTOS);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		ret = isert_login_post_recv(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_login_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;
	isert_cmd->rx_desc = rx_desc;

	return cmd;
}
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	if (imm_data_len != data_len) {
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    &rx_desc->data[0], imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
	} else {
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);
	}

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len,  cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer before hand
	 */
	rc = isert_post_recv(isert_conn, rx_desc);
	if (rc) {
		isert_err("ib_post_recv failed with %d\n", rc);
		return rc;
	}

	return 0;
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			isert_err("Unable to allocate text_in of payload_length: %u\n",
				  payload_length);
			return -ENOMEM;
		}
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
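
/*
 * Demultiplex a received iSCSI PDU by opcode. The read/write STags and
 * VAs recovered from the iSER header are stashed in the isert_cmd for
 * the later RDMA transfer; inv_rkey records which remote key the
 * initiator expects invalidated when send-with-invalidate is in use.
 */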
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;
		isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
		else
			cmd = isert_allocate_cmd(conn, rx_desc);

		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}
static void
isert_print_wc(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		isert_err("%s failure: %s (%d) vend_err %x\n", type,
			  ib_wc_status_msg(wc->status), wc->status,
			  wc->vendor_err);
	else
		isert_dbg("%s failure: %s (%d)\n", type,
			  ib_wc_status_msg(wc->status), wc->status);
}
static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "recv");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
		 (int)(wc->byte_len - ISER_HEADERS_LEN));

	switch (iser_ctrl->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_ctrl->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_ctrl->read_stag);
			read_va = be64_to_cpu(iser_ctrl->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_ctrl->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_ctrl->write_stag);
			write_va = be64_to_cpu(iser_ctrl->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);

	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login recv");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;

	if (isert_conn->conn) {
		struct iscsi_login *login = isert_conn->conn->conn_login;

		if (login && !login->first_request)
			isert_rx_login_req(isert_conn);
	}

	mutex_lock(&isert_conn->mutex);
	complete(&isert_conn->login_req_comp);
	mutex_unlock(&isert_conn->mutex);

	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
					  ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
					PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		isert_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}

static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));
}
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	isert_dbg("Cmd %p\n", isert_cmd);

	if (isert_cmd->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &isert_cmd->data);
	}

	if (isert_cmd->rdma_wr) {
		isert_dbg("Cmd %p free send_wr\n", isert_cmd);
		kfree(isert_cmd->rdma_wr);
		isert_cmd->rdma_wr = NULL;
	}

	if (isert_cmd->ib_sge) {
		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
		kfree(isert_cmd->ib_sge);
		isert_cmd->ib_sge = NULL;
	}
}

static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	isert_dbg("Cmd %p\n", isert_cmd);

	if (isert_cmd->fr_desc) {
		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
		if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
			isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		spin_lock_bh(&isert_conn->pool_lock);
		list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_bh(&isert_conn->pool_lock);
		isert_cmd->fr_desc = NULL;
	}

	if (isert_cmd->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &isert_cmd->data);
	}

	isert_cmd->ib_sge = NULL;
	isert_cmd->rdma_wr = NULL;
}
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->device;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
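
/*
 * After a signature-enabled transfer, query the signature MR and map
 * any T10-PI guard/ref-tag/app-tag failure to the matching TCM sense
 * code, computing the offending sector from the reported offset.
 */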
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}
static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma write");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(cmd,
				isert_cmd->fr_desc->pi_ctx->sig_mr);
		isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	isert_cmd->rdma_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
}
static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma read");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    isert_cmd->fr_desc->pi_ctx->sig_mr);
		isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = isert_cmd->data.len;
	isert_cmd->rdma_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		target_put_sess_cmd(se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		break;
	}
}
static void
isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
}
static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (isert_cmd->iscsi_cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
	case ISTATE_SEND_LOGOUTRSP:
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	default:
		isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
		break;
	}
}
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
	if (ret) {
		isert_err("ib_post_recv failed with %d\n", ret);
		return ret;
	}

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
}
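
/*
 * Queue an iSCSI SCSI Response PDU; sense data, when present, is
 * attached as a second SGE pointing at a freshly DMA-mapped pdu_buf.
 */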
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
	tx_dsg->length	= ISCSI_HDR_LEN;
	tx_dsg->lkey	= device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
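
/*
 * Send a Text Response PDU. iscsit_build_text_rsp() returns the payload
 * length; a non-zero payload from cmd->buf_ptr is DMA-mapped and
 * attached as a second SGE of the TX descriptor.
 */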
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= txt_rsp_len;
		tx_dsg->lkey	= device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
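
/*
 * Fill the ib_sge array of one RDMA work request from the se_cmd
 * scatterlist, starting at byte 'offset' and covering at most data_left
 * bytes and max_sge entries. Returns the number of SGEs consumed.
 */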
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	rdma_wr->wr.sg_list = ib_sge;
	rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;

	/*
	 * Perform mapping of TCM scatterlist memory into ib_sge dma_addrs.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
			  "page_off: %u\n",
			  (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = device->pd->local_dma_lkey;

		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		if (!data_left)
			break;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	rdma_wr->wr.num_sge = ++i;
	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);

	return rdma_wr->wr.num_sge;
}
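
/*
 * Non-offload RDMA path: DMA-map the command's data scatterlist and
 * build a chain of RDMA WRITE/READ work requests referencing it
 * directly, splitting at max_sge entries per work request. For a
 * write, the last work request is chained to the response send_wr.
 */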
static int
isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = conn->context;
	struct isert_data_buf *data = &isert_cmd->data;
	struct ib_rdma_wr *rdma_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
			cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, isert_cmd->iser_ib_op,
				 &isert_cmd->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	isert_cmd->ib_sge = ib_sge;

	isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) *
				     isert_cmd->rdma_wr_num, GFP_KERNEL);
	if (!isert_cmd->rdma_wr) {
		isert_dbg("Unable to allocate isert_cmd->rdma_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < isert_cmd->rdma_wr_num; i++) {
		rdma_wr = &isert_cmd->rdma_wr[i];
		data_len = min(data_left, rdma_write_max);

		rdma_wr->wr.send_flags = 0;
		if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
			isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;

			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
			rdma_wr->remote_addr = isert_cmd->read_va + offset;
			rdma_wr->rkey = isert_cmd->read_stag;
			if (i + 1 == isert_cmd->rdma_wr_num)
				rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
			else
				rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
		} else {
			isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;

			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
			rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
			rdma_wr->rkey = isert_cmd->write_stag;
			if (i + 1 == isert_cmd->rdma_wr_num)
				rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
			else
				rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 rdma_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}
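
/*
 * Prepare a local-invalidate work request for a previously registered
 * rkey, then advance the MR key so the next registration uses a fresh
 * rkey value.
 */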
static inline void
isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->wr_cqe = NULL;
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	/* Bump the key */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
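
/*
 * Register a data or protection buffer with a fast-registration MR.
 * A single-SGE mapping short-circuits to the local DMA lkey. If the
 * descriptor's key is still valid from a previous use, a LOCAL_INV is
 * chained in front of the IB_WR_REG_MR.
 */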
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_mr *mr;
	struct ib_reg_wr reg_wr;
	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
	int ret, n;

	if (mem->dma_nents == 1) {
		sge->lkey = device->pd->local_dma_lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
			  sge->addr, sge->length, sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID)
		/* Registering data buffer */
		mr = fr_desc->data_mr;
	else
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;

	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE);
	if (unlikely(n != mem->nents)) {
		isert_err("failed to map mr sg (%d/%d)\n",
			  n, mem->nents);
		return n < 0 ? n : -EINVAL;
	}

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	reg_wr.wr.next = NULL;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_cqe = NULL;
	reg_wr.wr.send_flags = 0;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = mr;
	reg_wr.key = mr->lkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;

	if (!wr)
		wr = &reg_wr.wr;
	else
		wr->next = &reg_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = mr->iova;
	sge->length = mr->length;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}
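
/*
 * Describe one T10-DIF signature domain (wire or memory) for the
 * signature MR: CRC block guard, the backend block size as the
 * protection interval, and the command's reference tag seed.
 */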
static void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}

static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}
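
/*
 * Register the signature MR that links the DATA (and, when present,
 * PROT) registrations into one signature-enabled region, described by
 * the sig_attrs derived from the command's protection operation.
 */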
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct isert_cmd *isert_cmd,
		 struct fast_reg_descriptor *fr_desc)
{
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	struct ib_sig_handover_wr sig_wr;
	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		return ret;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr.wr_cqe = NULL;
	sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
	sig_wr.wr.num_sge = 1;
	sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.sig_attrs = &sig_attrs;
	sig_wr.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.prot = &isert_cmd->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr.wr;
	else
		wr->next = &sig_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	isert_cmd->ib_sg[SIG].addr = 0;
	isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
		  isert_cmd->ib_sg[SIG].lkey);

	return 0;
}
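
/*
 * Set up protection information offload for a command: allocate the
 * pi_context on first use, map and fast-register the protection
 * scatterlist when one exists, then register the signature MR.
 */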
static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd)
{
	struct isert_device *device = isert_conn->device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!isert_cmd->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(isert_cmd->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0,
					 isert_cmd->iser_ib_op,
					 &isert_cmd->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
					&isert_cmd->prot,
					ISERT_PROT_KEY_VALID,
					&isert_cmd->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	isert_cmd->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &isert_cmd->prot);

	return ret;
}
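
/*
 * Offload RDMA path: DMA-map the data and, when more than one SGE or
 * PI is involved, register it through a fast-registration descriptor
 * taken from the connection pool, so a single work request with one
 * SGE covers the whole transfer.
 */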
static int
isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_rdma_wr *rdma_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
			cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, isert_cmd->iser_ib_op,
				 &isert_cmd->data);
	if (ret)
		return ret;

	if (isert_cmd->data.dma_nents != 1 ||
	    isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		fr_desc = list_first_entry(&isert_conn->fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
		isert_cmd->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data,
				ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd);
		if (ret)
			goto unmap_cmd;

		ib_sg = &isert_cmd->ib_sg[SIG];
	} else {
		ib_sg = &isert_cmd->ib_sg[DATA];
	}

	memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg));
	isert_cmd->ib_sge = &isert_cmd->s_ib_sge;
	isert_cmd->rdma_wr_num = 1;
	memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr));
	isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr;

	rdma_wr = &isert_cmd->s_rdma_wr;
	rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
	if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;

		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		rdma_wr->remote_addr = isert_cmd->read_va;
		rdma_wr->rkey = isert_cmd->read_stag;
		rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;

		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = isert_cmd->write_va;
		rdma_wr->rkey = isert_cmd->write_stag;
		rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &isert_cmd->data);

	return ret;
}
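
/*
 * Queue Data-In: post the RDMA WRITE chain for the read payload. For
 * non-PI commands the SCSI response is built here and chained behind
 * the last RDMA WRITE; for PI commands the response is sent separately
 * once the signature check has completed.
 */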
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(isert_cmd, conn);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
		isert_cmd->rdma_wr_num += 1;

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}
	}

	rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}
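
/*
 * Request Data-Out: post an RDMA READ chain to pull the write payload
 * from the initiator's advertised buffer.
 */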
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
	isert_cmd->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(isert_cmd, conn);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
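
/*
 * Create the listening RDMA CM ID for a network portal: bind it to the
 * portal's sockaddr and start listening for connection requests, which
 * are delivered to isert_cma_handler().
 */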
static struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
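
/*
 * Accept the RDMA connection, advertising iSER capabilities in the
 * private data: zero-based virtual addressing is never used, and
 * Send-with-Invalidate only when the initiator negotiated it.
 */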
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;
	struct iser_cm_hdr rsp_hdr;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	memset(&rsp_hdr, 0, sizeof(rsp_hdr));
	rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
	if (!isert_conn->snd_w_inv)
		rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
	cp.private_data = (void *)&rsp_hdr;
	cp.private_data_len = sizeof(rsp_hdr);

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}

static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	isert_conn->state = ISER_CONN_BOUND;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure
	 * that we don't have hanging connections that completed
	 * RDMA establishment but didn't start the iscsi login
	 * process. So work around this by cleaning up whatever piled
	 * up in the accepted and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}

static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}

static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}
/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited dataout
 * @conn:    iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * dataout messages. We must put the extra reference on those
 * before blocking on target_wait_for_sess_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}
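
/*
 * Quiesce a connection before it is freed: terminate it, drain the QP
 * so all posted work requests complete, drop commands still waiting
 * for unsolicited Data-Out, wait for outstanding commands and a posted
 * logout response, then hand the final release to isert_release_wq.
 */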
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	ib_drain_qp(isert_conn->qp);
	isert_put_conn(isert_conn);
}

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};

static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);
);