// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "ib_isert.h"

static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static int isert_sg_tablesize_set(const char *val,
				  const struct kernel_param *kp);
static const struct kernel_param_ops sg_tablesize_ops = {
	.set = isert_sg_tablesize_set,
	.get = param_get_int,
};

static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
MODULE_PARM_DESC(sg_tablesize,
		 "Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_login_wq;
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

static int
isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd);
static int
isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);
static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);

static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < ISCSI_ISER_MIN_SG_TABLESIZE ||
	    n > ISCSI_ISER_MAX_SG_TABLESIZE)
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		break;
	}
}

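/*
 * Allocate a completion queue from the shared CQ pool and create the RC
 * QP for this connection.  The CQ is sized for the maximum number of
 * send and receive work requests that can be posted on the QP, plus two
 * spare entries, so completions are never dropped for lack of CQ space.
 */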
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct rdma_cm_id *cma_id)
{
	u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_qp_init_attr attr;
	int ret, factor;

	isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);
	if (IS_ERR(isert_conn->cq)) {
		isert_err("Unable to allocate cq\n");
		ret = PTR_ERR(isert_conn->cq);
		return ERR_PTR(ret);
	}
	isert_conn->cq_size = cq_size;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = isert_conn->cq;
	attr.recv_cq = isert_conn->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num,
				   isert_sg_tablesize);
	attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor;
	attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
		return ERR_PTR(ret);
	}

	return cma_id->qp;
}

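/*
 * Allocate and DMA-map the ring of receive descriptors used during the
 * full-feature phase.  On a mapping failure, every descriptor mapped so
 * far is unwound before the array is freed.
 */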
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS,
				       sizeof(struct iser_rx_desc),
				       GFP_KERNEL);
	if (!isert_conn->rx_descs)
		return -ENOMEM;

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
					     ISER_RX_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
		rx_desc->rx_cqe.done = isert_recv_done;
	}

	return 0;

dma_map_fail:
	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}

static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	int ret;

	isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n",
		  ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);

	device->pd = ib_alloc_pd(ib_dev, 0);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		return ret;
	}

	/* Check signature cap */
	if (ib_dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)
		device->pi_capable = true;
	else
		device->pi_capable = false;

	return 0;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	isert_info("device %p\n", device);

	ib_dealloc_pd(device->pd);
}

static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

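/*
 * Look up (or create) the isert_device for the CM id's IB device.
 * Devices are cached on device_list keyed by node GUID, so connections
 * arriving on the same HCA share one PD and its pi_capable state.
 */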
static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_waitqueue_head(&isert_conn->rem_wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	INIT_WORK(&isert_conn->release_work, isert_release_work);
}

static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	kfree(isert_conn->login_rsp_buf);

	ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
			    ISER_RX_SIZE, DMA_FROM_DEVICE);
	kfree(isert_conn->login_desc);
}

static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
			GFP_KERNEL);
	if (!isert_conn->login_desc)
		return -ENOMEM;

	isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
				isert_conn->login_desc->buf,
				ISER_RX_SIZE, DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
	if (ret) {
		isert_err("login_desc dma mapping error: %d\n", ret);
		isert_conn->login_desc->dma_addr = 0;
		goto out_free_login_desc;
	}

	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
	if (!isert_conn->login_rsp_buf) {
		ret = -ENOMEM;
		goto out_unmap_login_desc;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					isert_conn->login_rsp_buf,
					ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_free_login_rsp_buf;
	}

	return 0;

out_free_login_rsp_buf:
	kfree(isert_conn->login_rsp_buf);
out_unmap_login_desc:
	ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
			    ISER_RX_SIZE, DMA_FROM_DEVICE);
out_free_login_desc:
	kfree(isert_conn->login_desc);
	return ret;
}

static void
isert_set_nego_params(struct isert_conn *isert_conn,
		      struct rdma_conn_param *param)
{
	struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
					    attr->max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	if (param->private_data) {
		u8 flags = *(u8 *)param->private_data;

		/*
		 * use remote invalidation if both the initiator
		 * and the HCA support it
		 */
		isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
					(attr->device_cap_flags &
					 IB_DEVICE_MEM_MGT_EXTENSIONS);
		if (isert_conn->snd_w_inv)
			isert_info("Using remote invalidation\n");
	}
}

static void
isert_destroy_qp(struct isert_conn *isert_conn)
{
	ib_destroy_qp(isert_conn->qp);
	ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
}

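/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: set up the per-connection
 * state (login buffers, QP, first login recv) and accept the CM
 * connection.  Any failure unwinds in reverse order and rejects the
 * connect request.
 */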
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out;
	}
	isert_conn->device = device;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out_conn_dev;

	isert_set_nego_params(isert_conn, &event->param.conn);

	isert_conn->qp = isert_create_qp(isert_conn, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto out_rsp_dma_map;
	}

	ret = isert_login_post_recv(isert_conn);
	if (ret)
		goto out_destroy_qp;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_destroy_qp;

	mutex_lock(&isert_np->mutex);
	list_add_tail(&isert_conn->node, &isert_np->accepted);
	mutex_unlock(&isert_np->mutex);

	return 0;

out_destroy_qp:
	isert_destroy_qp(isert_conn);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out_conn_dev:
	isert_device_put(device);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id &&
	    !isert_conn->dev_removed)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp)
		isert_destroy_qp(isert_conn);

	if (isert_conn->login_desc)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	if (isert_conn->dev_removed)
		wake_up_interruptible(&isert_conn->rem_wait);
	else
		kfree(isert_conn);
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_UP;
	kref_get(&isert_conn->kref);
	mutex_unlock(&isert_conn->mutex);

	mutex_lock(&isert_np->mutex);
	list_move_tail(&isert_conn->node, &isert_np->pending);
	mutex_unlock(&isert_np->mutex);

	isert_info("np %p: Allow accept_np to continue\n", isert_np);
	up(&isert_np->sem);
}

static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}

static void
isert_handle_unbound_conn(struct isert_conn *isert_conn)
{
	struct isert_np *isert_np = isert_conn->cm_id->context;

	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_conn->node)) {
		/*
		 * This means iscsi doesn't know this connection
		 * so schedule a cleanup ourselves
		 */
		list_del_init(&isert_conn->node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->mutex);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	if (isert_conn->state >= ISER_CONN_TERMINATING)
		return;

	isert_info("Terminating conn %p state %d\n",
		   isert_conn, isert_conn->state);
	isert_conn->state = ISER_CONN_TERMINATING;
	err = rdma_disconnect(isert_conn->cm_id);
	if (err)
		isert_warn("Failed rdma_disconnect isert_conn %p\n",
			   isert_conn);
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->cm_id));
			isert_np->cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		isert_conn_terminate(isert_conn);
		ib_drain_qp(isert_conn->qp);
		isert_handle_unbound_conn(isert_conn);
		break;
	case ISER_CONN_BOUND:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
	mutex_unlock(&isert_conn->mutex);

	return 0;
}

static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	ib_drain_qp(isert_conn->qp);

	mutex_lock(&isert_np->mutex);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:   /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_conn = cma_id->qp->qp_context;
		isert_conn->dev_removed = true;
		isert_disconnected_handler(cma_id, event->event);
		wait_event_interruptible(isert_conn->rem_wait,
					 isert_conn->state == ISER_CONN_DOWN);
		kfree(isert_conn);
		/*
		 * return non-zero from the callback to destroy
		 * the rdma cm id
		 */
		return 1;
	case RDMA_CM_EVENT_REJECTED:
		isert_info("Connection rejected: %s\n",
			   rdma_reject_msg(cma_id, event->status));
		fallthrough;
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}

static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr;
	int i, ret;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[i];

		rx_wr->wr_cqe = &rx_desc->rx_cqe;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_desc->in_use = false;
	}
	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}

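/*
 * Repost a single rx descriptor once its PDU has been handled.  The
 * full ring is primed by isert_post_recvm() when login completes.
 */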
static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_recv_wr rx_wr;
	int ret;

	if (!rx_desc->in_use) {
		/*
		 * if the descriptor is not in-use we already reposted it
		 * for recv, so just silently return
		 */
		return 0;
	}

	rx_desc->in_use = false;
	rx_wr.wr_cqe = &rx_desc->rx_cqe;
	rx_wr.sg_list = &rx_desc->rx_sg;
	rx_wr.num_sge = 1;
	rx_wr.next = NULL;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}

static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	tx_desc->tx_cqe.done = isert_login_send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &tx_desc->tx_cqe;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}

static void
__isert_create_send_desc(struct isert_device *device,
			 struct iser_tx_desc *tx_desc)
{
	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISCSI_CTRL;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	__isert_create_send_desc(device, tx_desc);
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
			ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	tx_desc->tx_cqe.done = isert_send_done;
	send_wr->wr_cqe = &tx_desc->tx_cqe;

	if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
		send_wr->opcode = IB_WR_SEND_WITH_INV;
		send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
	} else {
		send_wr->opcode = IB_WR_SEND;
	}

	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_desc->dma_addr +
		isert_get_hdr_offset(isert_conn->login_desc);
	sge.length = ISER_RX_PAYLOAD_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
	if (ret)
		isert_err("ib_post_recv() failed: %d\n", ret);

	return ret;
}

static int
isert_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	__isert_create_send_desc(device, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recvm(isert_conn,
					       ISERT_QP_MAX_RECV_DTOS);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		ret = isert_login_post_recv(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_login_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

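/*
 * Process a received login request PDU out of the dedicated login
 * descriptor: copy the header and key=value payload into iscsi_login,
 * then complete login_comp for the first request (the login thread
 * waits on it) or kick the login work for subsequent ones.
 */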
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = isert_conn->login_desc;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsit_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
				login_req->flags);
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, isert_get_data(rx_desc), size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	queue_delayed_work(isert_login_wq, &conn->login_work, 0);
}

static struct iscsit_cmd
*isert_allocate_cmd(struct iscsit_conn *conn, struct iser_rx_desc *rx_desc)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsit_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsit_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsit_cmd = cmd;
	isert_cmd->rx_desc = rx_desc;

	return cmd;
}

static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsit_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsit_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	if (imm_data_len != data_len) {
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    isert_get_data(rx_desc), imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
	} else {
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
			   imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);
	}

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && !dump_payload && unsol_data)
		iscsit_set_unsolicited_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsit_conn *conn = isert_conn->conn;
	struct iscsit_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len, cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, isert_get_data(rx_desc), unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer before hand
	 */
	return isert_post_recv(isert_conn, rx_desc);
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsit_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsit_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in)
			return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
	struct iscsit_conn *conn = isert_conn->conn;
	struct iscsit_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;
		isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
		else
			cmd = isert_allocate_cmd(conn, rx_desc);

		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_print_wc(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		isert_err("%s failure: %s (%d) vend_err %x\n", type,
			  ib_wc_status_msg(wc->status), wc->status,
			  wc->vendor_err);
	else
		isert_dbg("%s failure: %s (%d)\n", type,
			  ib_wc_status_msg(wc->status), wc->status);
}

static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
	struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
	struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "recv");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		return;
	}

	rx_desc->in_use = true;

	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
			ISER_RX_SIZE, DMA_FROM_DEVICE);

	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
		 (int)(wc->byte_len - ISER_HEADERS_LEN));

	switch (iser_ctrl->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_ctrl->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_ctrl->read_stag);
			read_va = be64_to_cpu(iser_ctrl->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_ctrl->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_ctrl->write_stag);
			write_va = be64_to_cpu(iser_ctrl->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);

	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
			ISER_RX_SIZE, DMA_FROM_DEVICE);
}

static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login recv");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
			ISER_RX_SIZE, DMA_FROM_DEVICE);

	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;

	if (isert_conn->conn) {
		struct iscsi_login *login = isert_conn->conn->conn_login;

		if (login && !login->first_request)
			isert_rx_login_req(isert_conn);
	}

	mutex_lock(&isert_conn->mutex);
	complete(&isert_conn->login_req_comp);
	mutex_unlock(&isert_conn->mutex);

	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
				ISER_RX_SIZE, DMA_FROM_DEVICE);
}

static void
isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
	struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);

	if (!cmd->rw.nr_ops)
		return;

	if (isert_prot_cmd(conn, se_cmd)) {
		rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
				conn->cm_id->port_num, se_cmd->t_data_sg,
				se_cmd->t_data_nents, se_cmd->t_prot_sg,
				se_cmd->t_prot_nents, dir);
	} else {
		rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
	}

	cmd->rw.nr_ops = 0;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsit_conn *conn = isert_conn->conn;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		fallthrough;
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}

static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->sense_info,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}

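/*
 * Completion of the RDMA_WRITE posted by isert_put_datain() for a
 * T10-PI command: check the signature MR status and tear down the rw
 * context, then either fail the command on a PI error or send the
 * SCSI response.
 */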
static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct se_cmd *cmd = &isert_cmd->iscsit_cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma write");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);

	if (ret) {
		/*
		 * transport_generic_request_failure() expects to have
		 * plus two references to handle queue-full, so re-add
		 * one here as target-core will have already dropped
		 * it after the first isert_put_datain() callback.
		 */
		kref_get(&cmd->cmd_kref);
		transport_generic_request_failure(cmd, cmd->pi_err);
	} else {
		/*
		 * XXX: isert_put_response() failure is not retried.
		 */
		ret = isert_put_response(isert_conn->conn, isert_cmd->iscsit_cmd);
		if (ret)
			pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
	}
}

static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma read");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	iscsit_stop_dataout_timer(cmd);

	if (isert_prot_cmd(isert_conn, se_cmd))
		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
	cmd->write_data_done = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	/*
	 * transport_generic_request_failure() will drop the extra
	 * se_cmd->cmd_kref reference after T10-PI error, and handle
	 * any non-zero ->queue_status() callback error retries.
	 */
	if (ret)
		transport_generic_request_failure(se_cmd, se_cmd->pi_err);
	else
		target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
		fallthrough;
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
}

static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (isert_cmd->iscsit_cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
	case ISTATE_SEND_LOGOUTRSP:
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		break;
	default:
		isert_cmd->iscsit_cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
		break;
	}
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	int ret;

	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
	if (ret)
		return ret;

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
}

static int
isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static void
isert_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}

static enum target_prot_op
isert_get_sup_prot_ops(struct iscsit_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}

static int
isert_put_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
		return -ENOMEM;
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;

	return 0;
}

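/*
 * ctx_init_done lets a command whose rdma_rw context is already set up
 * (e.g. a DataOut sequence being reposted) skip straight to the post,
 * avoiding re-initialization of the context.
 */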
static int
isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
		       struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	u8 port_num = conn->cm_id->port_num;
	u64 addr;
	u32 rkey, offset;
	int ret;

	if (cmd->ctx_init_done)
		goto rdma_ctx_post;

	if (dir == DMA_FROM_DEVICE) {
		addr = cmd->write_va;
		rkey = cmd->write_stag;
		offset = cmd->iscsit_cmd->write_data_done;
	} else {
		addr = cmd->read_va;
		rkey = cmd->read_stag;
		offset = 0;
	}

	if (isert_prot_cmd(conn, se_cmd)) {
		struct ib_sig_attrs sig_attrs;

		ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
		if (ret)
			return ret;

		WARN_ON_ONCE(offset);
		ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				se_cmd->t_prot_sg, se_cmd->t_prot_nents,
				&sig_attrs, addr, rkey, dir);
	} else {
		ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				offset, addr, rkey, dir);
	}

	if (ret < 0) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
		return ret;
	}

	cmd->ctx_init_done = true;

rdma_ctx_post:
	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
	if (ret < 0)
		isert_err("Cmd: %p failed to post RDMA res\n", cmd);
	return ret;
}

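/*
 * Queue Data-IN: RDMA_WRITE the payload to the initiator. For non-PI
 * commands the SCSI response PDU is chained behind the write; for PI
 * commands the response is deferred to isert_rdma_write_done().
 */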
static int
isert_put_datain(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_cqe *cqe = NULL;
	struct ib_send_wr *chain_wr = NULL;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
		cqe = &isert_cmd->tx_desc.tx_cqe;
	} else {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc)
			return rc;

		chain_wr = &isert_cmd->tx_desc.send_wr;
	}

	rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
		  isert_cmd, rc);
	return rc;
}

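/*
 * Queue Data-OUT retrieval: RDMA_READ the remaining write payload from
 * the initiator; isert_rdma_read_done() resumes command processing.
 */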
static int
isert_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd, bool recovery)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);

	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
	ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
				     &isert_cmd->tx_desc.tx_cqe, NULL);

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
		  isert_cmd, ret);
	return ret;
}

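/*
 * Handle iscsit immediate-queue states: ISTATE_REMOVE drops the command,
 * ISTATE_SEND_NOPIN_WANT_RESPONSE emits a NOPIN that solicits a reply.
 */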
static int
isert_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

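/*
 * Handle iscsit response-queue states by building and posting the
 * matching response PDU (logout, nopin, TM, reject, text or SCSI status).
 */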
static int
isert_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

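/*
 * Create, bind and listen on an rdma_cm ID for the portal address in
 * isert_np->np. Returns the listening ID or an ERR_PTR on failure.
 */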
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	/*
	 * Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
	ret = rdma_set_afonly(id, 1);
	if (ret) {
		isert_err("rdma_set_afonly() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

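/*
 * Allocate the per-portal isert_np context and start the rdma_cm
 * listener; called when an iSER network portal is brought up.
 */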
static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np)
		return -ENOMEM;

	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from
	 * the iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

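/*
 * Accept the incoming RDMA connection. The private data carries the iSER
 * CM header: zero-based VAs are never used, and send-with-invalidate is
 * flagged as unused unless the peer negotiated it (snd_w_inv).
 */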
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;
	struct iser_cm_hdr rsp_hdr;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	memset(&rsp_hdr, 0, sizeof(rsp_hdr));
	rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
	if (!isert_conn->snd_w_inv)
		rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
	cp.private_data = (void *)&rsp_hdr;
	cp.private_data_len = sizeof(rsp_hdr);

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}

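/*
 * Login RX path: wait for the login_req completion that isert_rx_login_req()
 * fires when a login PDU arrives. Only the first PDU is processed from
 * here; later ones are handled from the CQ handler via login_work.
 */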
static int
isert_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick queue_delayed_work(isert_login_wq, &conn->login_work) as
	 * the packet is received, which turns this callback from
	 * iscsi_target_do_login_rx() into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}

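/*
 * Called from the iscsit login thread: sleep on the np semaphore until a
 * new connection request has been queued, then bind the first pending
 * isert_conn to this iscsit_conn. Bails out if the np thread is resetting.
 */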
static int
isert_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	isert_conn->state = ISER_CONN_BOUND;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

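/*
 * Tear down the listener. Connections that completed RDMA establishment
 * but never reached login may still sit on the accepted/pending lists;
 * collect and release them under the np mutex.
 */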
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;
	LIST_HEAD(drop_conn_list);

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: There is no good way to ensure that no connections are
	 * left hanging here that completed RDMA establishment but never
	 * started the iscsi login process. Work around this by cleaning
	 * up whatever piled up on the accepted and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending, node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			list_move_tail(&isert_conn->node, &drop_conn_list);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted, node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			list_move_tail(&isert_conn->node, &drop_conn_list);
		}
	}
	mutex_unlock(&isert_np->mutex);

	list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
		list_del_init(&isert_conn->node);
		isert_connect_release(isert_conn);
	}

	np->np_context = NULL;
	kfree(isert_np);
}

static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsit_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}

static void
isert_wait4cmds(struct iscsit_conn *conn)
{
	isert_info("iscsit_conn %p\n", conn);

	if (conn->sess) {
		target_stop_cmd_counter(conn->cmd_cnt);
		target_wait_for_cmds(conn->cmd_cnt);
	}
}

/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited dataout
 * @conn: iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * dataout messages. We must put the extra reference on those
 * before blocking on target_wait_for_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsit_conn *conn)
{
	struct iscsit_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}

static void isert_wait_conn(struct iscsit_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsit_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	ib_drain_qp(isert_conn->qp);
	isert_put_conn(isert_conn);
}

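/*
 * The iscsit RX thread has no PDU to fetch here: iSER receive processing
 * is driven entirely by CQ completions (isert_recv_done). Park the thread
 * on a private completion that is only ever interrupted by a signal when
 * the connection is torn down.
 */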
static void isert_get_rx_pdu(struct iscsit_conn *conn)
{
	struct completion comp;

	init_completion(&comp);

	wait_for_completion_interruptible(&comp);
}

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.rdma_shutdown		= true,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_rx_pdu	= isert_get_rx_pdu,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};

static int __init isert_init(void)
{
	int ret;

	isert_login_wq = alloc_workqueue("isert_login_wq", 0, 0);
	if (!isert_login_wq) {
		isert_err("Unable to allocate isert_login_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_login_wq;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);
destroy_login_wq:
	destroy_workqueue(isert_login_wq);

	return ret;
}

static void __exit isert_exit(void)
{
	flush_workqueue(isert_login_wq);
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
	destroy_workqueue(isert_login_wq);
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);