/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "ib_isert.h"
#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN \
	((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);
static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}
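
/*
 * Pick the completion context with the fewest active QPs so that new
 * connections are spread evenly across the available CQs/vectors.
 */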
static struct isert_comp *
isert_comp_get(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct isert_comp *comp;
	int i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	mutex_unlock(&device_list_mutex);

	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);

	return comp;
}
static void
isert_comp_put(struct isert_comp *comp)
{
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);
}
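
/*
 * Create the RC QP for a connection. Send/recv queue depths come from
 * the DTO limits, plus one extra WR so ib_drain_qp() has room for its
 * drain work request, and signature offload is requested when the
 * device is T10-PI capable.
 */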
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct isert_comp *comp,
		struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->device;
	struct ib_qp_init_attr attr;
	int ret;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
	attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ERR_PTR(ret);
	}

	return cma_id->qp;
}
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_comp *comp;
	int ret;

	comp = isert_comp_get(isert_conn);
	isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto err;
	}

	return 0;
err:
	isert_comp_put(comp);
	return ret;
}
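
/*
 * Allocate and DMA-map the per-connection ring of RX descriptors and
 * point each descriptor's CQE at isert_recv_done(). On a mapping
 * failure, every descriptor mapped so far is unwound.
 */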
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->rx_descs)
		return -ENOMEM;

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
		rx_desc->rx_cqe.done = isert_recv_done;
	}

	return 0;

dma_map_fail:
	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
	return -ENOMEM;
}
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}
static void
isert_free_comps(struct isert_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_free_cq(comp->cq);
	}
	kfree(device->comps);
}
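
/*
 * Allocate one completion context per vector, bounded by the number of
 * online CPUs and the device's completion vector count. Each context
 * gets a workqueue-polled CQ sized to ISER_MAX_CQ_LEN, capped by the
 * device's max_cqe.
 */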
static int
isert_alloc_comps(struct isert_device *device)
{
	int i, max_cqe, ret = 0;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));

	isert_info("Using %d CQs, %s supports %d vectors\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps)
		return -ENOMEM;

	max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
				       IB_POLL_WORKQUEUE);
		if (IS_ERR(comp->cq)) {
			isert_err("Unable to allocate cq\n");
			ret = PTR_ERR(comp->cq);
			comp->cq = NULL;
			goto out_cq;
		}
	}

	return 0;
out_cq:
	isert_free_comps(device);
	return ret;
}
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	int ret;

	isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);

	ret = isert_alloc_comps(device);
	if (ret)
		goto out;

	device->pd = ib_alloc_pd(ib_dev, 0);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		goto out_cq;
	}

	/* Check signature cap */
	device->pi_capable = ib_dev->attrs.device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	return 0;

out_cq:
	isert_free_comps(device);
out:
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
	isert_info("device %p\n", device);

	ib_dealloc_pd(device->pd);
	isert_free_comps(device);
}
static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
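
/*
 * Look up (by node GUID) or create the isert_device backing the CM
 * id's IB device, taking a reference under device_list_mutex.
 */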
static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}
static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_waitqueue_head(&isert_conn->rem_wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	INIT_WORK(&isert_conn->release_work, isert_release_work);
}
static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	kfree(isert_conn->login_rsp_buf);

	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_req_buf);
}
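
/*
 * Allocate and DMA-map the single-shot login request/response buffers
 * used during login, before the full RX descriptor ring exists.
 */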
static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
			GFP_KERNEL);
	if (!isert_conn->login_req_buf)
		return -ENOMEM;

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				isert_conn->login_req_buf,
				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;
		goto out_free_login_req_buf;
	}

	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
	if (!isert_conn->login_rsp_buf) {
		ret = -ENOMEM;
		goto out_unmap_login_req_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					isert_conn->login_rsp_buf,
					ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_free_login_rsp_buf;
	}

	return 0;

out_free_login_rsp_buf:
	kfree(isert_conn->login_rsp_buf);
out_unmap_login_req_buf:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
out_free_login_req_buf:
	kfree(isert_conn->login_req_buf);
	return ret;
}
static void
isert_set_nego_params(struct isert_conn *isert_conn,
		      struct rdma_conn_param *param)
{
	struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
				attr->max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	if (param->private_data) {
		u8 flags = *(u8 *)param->private_data;

		/*
		 * use remote invalidation if both the initiator
		 * and the HCA support it
		 */
		isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
					  (attr->device_cap_flags &
					   IB_DEVICE_MEM_MGT_EXTENSIONS);
		if (isert_conn->snd_w_inv)
			isert_info("Using remote invalidation\n");
	}
}
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	isert_set_nego_params(isert_conn, &event->param.conn);

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_login_post_recv(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->mutex);
	list_add_tail(&isert_conn->node, &isert_np->accepted);
	mutex_unlock(&isert_np->mutex);

	return 0;

out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id &&
	    !isert_conn->dev_removed)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);
	}

	if (isert_conn->login_req_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	if (isert_conn->dev_removed)
		wake_up_interruptible(&isert_conn->rem_wait);
	else
		kfree(isert_conn);
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_UP;
	kref_get(&isert_conn->kref);
	mutex_unlock(&isert_conn->mutex);

	mutex_lock(&isert_np->mutex);
	list_move_tail(&isert_conn->node, &isert_np->pending);
	mutex_unlock(&isert_np->mutex);

	isert_info("np %p: Allow accept_np to continue\n", isert_np);
	up(&isert_np->sem);
}
static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}
static void
isert_handle_unbound_conn(struct isert_conn *isert_conn)
{
	struct isert_np *isert_np = isert_conn->cm_id->context;

	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_conn->node)) {
		/*
		 * This means iscsi doesn't know this connection
		 * so schedule a cleanup ourselves
		 */
		list_del_init(&isert_conn->node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->mutex);
}
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	if (isert_conn->state >= ISER_CONN_TERMINATING)
		return;

	isert_info("Terminating conn %p state %d\n",
		   isert_conn, isert_conn->state);
	isert_conn->state = ISER_CONN_TERMINATING;
	err = rdma_disconnect(isert_conn->cm_id);
	if (err)
		isert_warn("Failed rdma_disconnect isert_conn %p\n",
			   isert_conn);
}
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->cm_id));
			isert_np->cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		isert_conn_terminate(isert_conn);
		ib_drain_qp(isert_conn->qp);
		isert_handle_unbound_conn(isert_conn);
		break;
	case ISER_CONN_BOUND:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
	mutex_unlock(&isert_conn->mutex);

	return 0;
}
static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	ib_drain_qp(isert_conn->qp);
	list_del_init(&isert_conn->node);
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_conn = cma_id->qp->qp_context;
		isert_conn->dev_removed = true;
		isert_disconnected_handler(cma_id, event->event);
		wait_event_interruptible(isert_conn->rem_wait,
					 isert_conn->state == ISER_CONN_DOWN);
		kfree(isert_conn);
		/*
		 * return non-zero from the callback to destroy
		 * the rdma cm id
		 */
		return 1;
	case RDMA_CM_EVENT_REJECTED:
		isert_info("Connection rejected: %s\n",
			   rdma_reject_msg(cma_id, event->status));
		/* fall through */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
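
/*
 * Post 'count' receive WRs as a single chained list; the final
 * rx_wr->next is cleared to terminate the chain before posting.
 */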
static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[i];

		rx_wr->wr_cqe = &rx_desc->rx_cqe;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_desc->in_use = false;
	}
	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
			   &rx_wr_failed);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}
static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_recv_wr *rx_wr_failed, rx_wr;
	int ret;

	if (!rx_desc->in_use) {
		/*
		 * if the descriptor is not in-use we already reposted it
		 * for recv, so just silently return
		 */
		return 0;
	}

	rx_desc->in_use = false;
	rx_wr.wr_cqe = &rx_desc->rx_cqe;
	rx_wr.sg_list = &rx_desc->rx_sg;
	rx_wr.num_sge = 1;
	rx_wr.next = NULL;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}
static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	tx_desc->tx_cqe.done = isert_login_send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &tx_desc->tx_cqe;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISCSI_CTRL;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
			ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	tx_desc->tx_cqe.done = isert_send_done;
	send_wr->wr_cqe = &tx_desc->tx_cqe;

	if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
		send_wr->opcode  = IB_WR_SEND_WITH_INV;
		send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
	} else {
		send_wr->opcode = IB_WR_SEND;
	}

	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}
static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_PAYLOAD_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
	if (ret)
		isert_err("ib_post_recv() failed: %d\n", ret);

	return ret;
}
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recvm(isert_conn,
					       ISERT_QP_MAX_RECV_DTOS);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		ret = isert_login_post_recv(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_login_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;
	isert_cmd->rx_desc = rx_desc;

	return cmd;
}
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	if (imm_data_len != data_len) {
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    &rx_desc->data[0], imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
	} else {
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);
	}

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len,  cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer before hand
	 */
	rc = isert_post_recv(isert_conn, rx_desc);
	if (rc) {
		isert_err("ib_post_recv failed with %d\n", rc);
		return rc;
	}
	return 0;
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in)
			return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
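
/*
 * Demultiplex a received iSCSI PDU by opcode. The iSER read/write
 * STags and VAs recovered from the iSER header are stashed in the
 * isert_cmd for later rdma_rw use, and the remote-invalidation rkey
 * is taken from whichever STag is present.
 */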
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;
		isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
		else
			cmd = isert_allocate_cmd(conn, rx_desc);

		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}
static void
isert_print_wc(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		isert_err("%s failure: %s (%d) vend_err %x\n", type,
			  ib_wc_status_msg(wc->status), wc->status,
			  wc->vendor_err);
	else
		isert_dbg("%s failure: %s (%d)\n", type,
			  ib_wc_status_msg(wc->status), wc->status);
}
static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "recv");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		return;
	}

	rx_desc->in_use = true;

	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
		 (int)(wc->byte_len - ISER_HEADERS_LEN));

	switch (iser_ctrl->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_ctrl->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_ctrl->read_stag);
			read_va = be64_to_cpu(iser_ctrl->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_ctrl->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_ctrl->write_stag);
			write_va = be64_to_cpu(iser_ctrl->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);

	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login recv");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;

	if (isert_conn->conn) {
		struct iscsi_login *login = isert_conn->conn->conn_login;

		if (login && !login->first_request)
			isert_rx_login_req(isert_conn);
	}

	mutex_lock(&isert_conn->mutex);
	complete(&isert_conn->login_req_comp);
	mutex_unlock(&isert_conn->mutex);

	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
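
/*
 * Tear down the rdma_rw context for a command, using the signature
 * variant when T10-PI offload was in effect for this command.
 */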
static void
isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);

	if (!cmd->rw.nr_ops)
		return;

	if (isert_prot_cmd(conn, se_cmd)) {
		rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
				conn->cm_id->port_num, se_cmd->t_data_sg,
				se_cmd->t_data_nents, se_cmd->t_prot_sg,
				se_cmd->t_prot_nents, dir);
	} else {
		rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
	}

	cmd->rw.nr_ops = 0;
}
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/* fall through */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
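
/*
 * Query the signature MR for T10-PI errors and translate them into
 * target-core sense codes. block_size + 8 accounts for the 8-byte PI
 * tuple per block when converting the error offset into a bad sector.
 */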
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}
static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma write");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);

	if (ret) {
		/*
		 * transport_generic_request_failure() expects to have
		 * plus two references to handle queue-full, so re-add
		 * one here as target-core will have already dropped
		 * it after the first isert_put_datain() callback.
		 */
		kref_get(&cmd->cmd_kref);
		transport_generic_request_failure(cmd, cmd->pi_err);
	} else {
		/*
		 * XXX: isert_put_response() failure is not retried.
		 */
		ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
		if (ret)
			pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
	}
}
static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma read");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	iscsit_stop_dataout_timer(cmd);

	if (isert_prot_cmd(isert_conn, se_cmd))
		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
	cmd->write_data_done = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		/*
		 * transport_generic_request_failure() will drop the extra
		 * se_cmd->cmd_kref reference after T10-PI error, and handle
		 * any non-zero ->queue_status() callback error retries.
		 */
		transport_generic_request_failure(se_cmd, se_cmd->pi_err);
	} else {
		target_execute_cmd(se_cmd);
	}
}
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
		/* fall through */
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
static void
isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
}
static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (isert_cmd->iscsi_cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
	case ISTATE_SEND_LOGOUTRSP:
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	default:
		isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
		break;
	}
}
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
	if (ret) {
		isert_err("ib_post_recv failed with %d\n", ret);
		return ret;
	}

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
}
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
		return -ENOMEM;
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}
static inline int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	sig_attrs->check_mask =
	       (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) |
	       (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
	return 0;
}
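
/*
 * Initialize (once per command) and post the rdma_rw context. A
 * reversed direction of DMA_FROM_DEVICE means Data-Out, so the
 * initiator's write VA/STag are used and the transfer resumes at
 * write_data_done; Data-In uses the read VA/STag. T10-PI commands go
 * through rdma_rw_ctx_signature_init() with the attributes built above.
 */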
static int
isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
		       struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	u8 port_num = conn->cm_id->port_num;
	u64 addr;
	u32 rkey, offset;
	int ret;

	if (cmd->ctx_init_done)
		goto rdma_ctx_post;

	if (dir == DMA_FROM_DEVICE) {
		addr = cmd->write_va;
		rkey = cmd->write_stag;
		offset = cmd->iscsi_cmd->write_data_done;
	} else {
		addr = cmd->read_va;
		rkey = cmd->read_stag;
		offset = 0;
	}

	if (isert_prot_cmd(conn, se_cmd)) {
		struct ib_sig_attrs sig_attrs;

		ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
		if (ret)
			return ret;

		WARN_ON_ONCE(offset);
		ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				se_cmd->t_prot_sg, se_cmd->t_prot_nents,
				&sig_attrs, addr, rkey, dir);
	} else {
		ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				offset, addr, rkey, dir);
	}

	if (ret < 0) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
		return ret;
	}

	cmd->ctx_init_done = true;

rdma_ctx_post:
	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
	if (ret < 0)
		isert_err("Cmd: %p failed to post RDMA res\n", cmd);
	return ret;
}
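
/*
 * Push Data-In to the initiator with RDMA_WRITE. Without T10-PI the
 * SCSI response PDU is built up front and chained behind the RDMA_WRITE
 * work requests, so a single send covers both; with PI the response is
 * deferred to isert_rdma_write_done().
 */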
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_cqe *cqe = NULL;
	struct ib_send_wr *chain_wr = NULL;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
		cqe = &isert_cmd->tx_desc.tx_cqe;
	} else {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}

		chain_wr = &isert_cmd->tx_desc.send_wr;
	}

	rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
		  isert_cmd, rc);
	return rc;
}
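
/*
 * Pull (solicited or recovery) Data-Out from the initiator with
 * RDMA_READ; isert_rdma_read_done() runs once the read completes.
 */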
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);

	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
	ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
				     &isert_cmd->tx_desc.tx_cqe, NULL);

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
		  isert_cmd, ret);
	return ret;
}
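
/*
 * Immediate-queue callback from the iscsi_target_mod TX thread:
 * ISTATE_REMOVE drops the command, NOPIN-want-response goes out inline.
 */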
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
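
/*
 * Response-queue callback: dispatch each response state to the
 * matching isert_put_*() helper.
 */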
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
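
/*
 * Create an rdma_cm listener bound to the network portal's sockaddr;
 * returns the listening cm_id or an ERR_PTR.
 */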
static struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
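
/*
 * Allocate the isert network portal context and start its rdma_cm
 * listener. The semaphore starts at zero, pairing with the
 * down_interruptible() in isert_accept_np() below.
 */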
static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np)
		return -ENOMEM;

	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
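
/*
 * Accept the rdma_cm connection. The private-data iSER CM header tells
 * the initiator that zero-based VAs are not used and, unless
 * send-with-invalidate was negotiated, that Send w/ Invalidate is not
 * used either.
 */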
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;
	struct iser_cm_hdr rsp_hdr;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	memset(&rsp_hdr, 0, sizeof(rsp_hdr));
	rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
	if (!isert_conn->snd_w_inv)
		rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
	cp.private_data = (void *)&rsp_hdr;
	cp.private_data_len = sizeof(rsp_hdr);

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}
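
/*
 * Login RX callback: sleep until a login request PDU has been received.
 * Only the first request is processed inline here; for later PDUs
 * conn->login_work does the work and this callback is effectively a
 * NOP (see the in-function comment).
 */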
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}
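
/*
 * iscsi_np accept callback: wait for a pending isert connection, bind
 * it to the new iscsi_conn, and publish the peer/local addresses taken
 * from the rdma_cm route by isert_set_conn_info() above.
 */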
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
			struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	isert_conn->state = ISER_CONN_BOUND;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure
	 * that at this point we don't have hanging connections that
	 * completed RDMA establishment but didn't start iscsi login
	 * process. So work-around this by cleaning up whatever piled
	 * up in accepted and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
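
/*
 * Deferred connection teardown, run from isert_release_wq: mark the
 * connection DOWN and drop the final reference.
 */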
static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}
static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}
/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited dataout
 * @conn:    iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * dataout messages. We must put the extra reference on those
 * before blocking on target_wait_for_sess_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}
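
/*
 * Quiesce the connection before release: terminate the rdma_cm side,
 * drain the QP so in-flight work requests complete, drop commands
 * still waiting for unsolicited Data-Out, then wait for outstanding
 * se_cmd references and for any posted logout response to complete.
 */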
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	ib_drain_qp(isert_conn->qp);
	isert_put_conn(isert_conn);
}
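
/*
 * RX is entirely completion-driven in isert, so the transport RX
 * thread has nothing to poll; park it on a private completion that is
 * never signalled. The interruptible wait lets the thread be woken by
 * a signal at connection teardown.
 */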
static void isert_get_rx_pdu(struct iscsi_conn *conn)
{
	struct completion comp;

	init_completion(&comp);

	wait_for_completion_interruptible(&comp);
}
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.rdma_shutdown		= true,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_rx_pdu	= isert_get_rx_pdu,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);