/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "ib_isert.h"

#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN \
	((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)

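/*
 * Note: the CQ lengths above are sized for the worst case of all
 * ISERT_MAX_CONN connections posting their full RX and TX work-request
 * quotas at once on a shared completion queue.
 */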
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);
static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);

static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}

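/*
 * Pick the least-loaded completion context (CQ + vector) on this device;
 * active_qps is the per-CQ load metric and is protected by
 * device_list_mutex.
 */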
static struct isert_comp *
isert_comp_get(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct isert_comp *comp;
	int i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	mutex_unlock(&device_list_mutex);

	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);

	return comp;
}

static void
isert_comp_put(struct isert_comp *comp)
{
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);
}

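/*
 * Note: the +1 on max_send_wr/max_recv_wr below leaves headroom for the
 * drain work requests that ib_drain_qp() posts during connection teardown.
 */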
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct isert_comp *comp,
		struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->device;
	struct ib_qp_init_attr attr;
	int ret;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
	attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ERR_PTR(ret);
	}

	return cma_id->qp;
}

static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_comp *comp;
	int ret;

	comp = isert_comp_get(isert_conn);
	isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto err;
	}

	return 0;
err:
	isert_comp_put(comp);
	return ret;
}

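/*
 * Allocate and DMA-map one iser_rx_desc per receive work request; each
 * descriptor is a single SGE covering the iSER/iSCSI headers plus any
 * immediate data (ISER_RX_PAYLOAD_SIZE).
 */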
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS,
				       sizeof(struct iser_rx_desc),
				       GFP_KERNEL);
	if (!isert_conn->rx_descs)
		return -ENOMEM;

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
		rx_desc->rx_cqe.done = isert_recv_done;
	}

	return 0;

dma_map_fail:
	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}

static void
isert_free_comps(struct isert_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_free_cq(comp->cq);
	}
	kfree(device->comps);
}

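/*
 * Spread completion handling across at most ISERT_MAX_CQ contexts,
 * bounded by the number of online CPUs and the device's completion
 * vectors; each CQ is clamped to the device's max_cqe.
 */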
static int
isert_alloc_comps(struct isert_device *device)
{
	int i, max_cqe, ret = 0;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));

	isert_info("Using %d CQs, %s supports %d vectors support "
		   "pi_capable %d\n",
		   device->comps_used, dev_name(&device->ib_device->dev),
		   device->ib_device->num_comp_vectors,
		   device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps)
		return -ENOMEM;

	max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
				       IB_POLL_WORKQUEUE);
		if (IS_ERR(comp->cq)) {
			isert_err("Unable to allocate cq\n");
			ret = PTR_ERR(comp->cq);
			comp->cq = NULL;
			goto out_cq;
		}
	}

	return 0;
out_cq:
	isert_free_comps(device);
	return ret;
}

static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	int ret;

	isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n",
		  ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);

	ret = isert_alloc_comps(device);
	if (ret)
		goto out;

	device->pd = ib_alloc_pd(ib_dev, 0);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		goto out_cq;
	}

	/* Check signature cap */
	device->pi_capable = ib_dev->attrs.device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	return 0;

out_cq:
	isert_free_comps(device);
out:
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	isert_info("device %p\n", device);

	ib_dealloc_pd(device->pd);
	isert_free_comps(device);
}

static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

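/*
 * Look up (or create) the refcounted isert_device for this CM id,
 * matching on the underlying ib_device node GUID under device_list_mutex.
 */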
static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_waitqueue_head(&isert_conn->rem_wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	INIT_WORK(&isert_conn->release_work, isert_release_work);
}

static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	kfree(isert_conn->login_rsp_buf);

	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_req_buf);
}

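/*
 * The login request/response buffers are a dedicated pair used only
 * during the iSCSI login phase, before the per-connection rx descriptor
 * ring is allocated.
 */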
static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
					    GFP_KERNEL);
	if (!isert_conn->login_req_buf)
		return -ENOMEM;

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				isert_conn->login_req_buf,
				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;
		goto out_free_login_req_buf;
	}

	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
	if (!isert_conn->login_rsp_buf) {
		ret = -ENOMEM;
		goto out_unmap_login_req_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					isert_conn->login_rsp_buf,
					ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_free_login_rsp_buf;
	}

	return 0;

out_free_login_rsp_buf:
	kfree(isert_conn->login_rsp_buf);
out_unmap_login_req_buf:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
out_free_login_req_buf:
	kfree(isert_conn->login_req_buf);
	return ret;
}

static void
isert_set_nego_params(struct isert_conn *isert_conn,
		      struct rdma_conn_param *param)
{
	struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
					    attr->max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	if (param->private_data) {
		u8 flags = *(u8 *)param->private_data;

		/*
		 * use remote invalidation if both the initiator
		 * and the HCA support it
		 */
		isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
					(attr->device_cap_flags &
					 IB_DEVICE_MEM_MGT_EXTENSIONS);
		if (isert_conn->snd_w_inv)
			isert_info("Using remote invalidation\n");
	}
}

static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	isert_set_nego_params(isert_conn, &event->param.conn);

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_login_post_recv(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->mutex);
	list_add_tail(&isert_conn->node, &isert_np->accepted);
	mutex_unlock(&isert_np->mutex);

	return 0;

out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id &&
	    !isert_conn->dev_removed)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);
	}

	if (isert_conn->login_req_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	if (isert_conn->dev_removed)
		wake_up_interruptible(&isert_conn->rem_wait);
	else
		kfree(isert_conn);
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_UP;
	kref_get(&isert_conn->kref);
	mutex_unlock(&isert_conn->mutex);

	mutex_lock(&isert_np->mutex);
	list_move_tail(&isert_conn->node, &isert_np->pending);
	mutex_unlock(&isert_np->mutex);

	isert_info("np %p: Allow accept_np to continue\n", isert_np);
	up(&isert_np->sem);
}

static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}

static void
isert_handle_unbound_conn(struct isert_conn *isert_conn)
{
	struct isert_np *isert_np = isert_conn->cm_id->context;

	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_conn->node)) {
		/*
		 * This means iscsi doesn't know this connection
		 * so schedule a cleanup ourselves
		 */
		list_del_init(&isert_conn->node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->mutex);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	if (isert_conn->state >= ISER_CONN_TERMINATING)
		return;

	isert_info("Terminating conn %p state %d\n",
		   isert_conn, isert_conn->state);
	isert_conn->state = ISER_CONN_TERMINATING;
	err = rdma_disconnect(isert_conn->cm_id);
	if (err)
		isert_warn("Failed rdma_disconnect isert_conn %p\n",
			   isert_conn);
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->cm_id));
			isert_np->cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		isert_conn_terminate(isert_conn);
		ib_drain_qp(isert_conn->qp);
		isert_handle_unbound_conn(isert_conn);
		break;
	case ISER_CONN_BOUND:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
	mutex_unlock(&isert_conn->mutex);

	return 0;
}

static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	ib_drain_qp(isert_conn->qp);
	list_del_init(&isert_conn->node);
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event->event);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_conn = cma_id->qp->qp_context;
		isert_conn->dev_removed = true;
		isert_disconnected_handler(cma_id, event->event);
		wait_event_interruptible(isert_conn->rem_wait,
					 isert_conn->state == ISER_CONN_DOWN);
		kfree(isert_conn);
		/*
		 * return non-zero from the callback to destroy
		 * the rdma cm id
		 */
		return 1;
	case RDMA_CM_EVENT_REJECTED:
		isert_info("Connection rejected: %s\n",
			   rdma_reject_msg(cma_id, event->status));
		/* fall through */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}

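/*
 * Post 'count' receive work requests as a single chained list; used to
 * fill the whole receive ring when the connection enters full-feature
 * phase.
 */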
static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr;
	int i, ret;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[i];

		rx_wr->wr_cqe = &rx_desc->rx_cqe;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_desc->in_use = false;
	}
	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}

static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_recv_wr rx_wr;
	int ret;

	if (!rx_desc->in_use) {
		/*
		 * if the descriptor is not in-use we already reposted it
		 * for recv, so just silently return
		 */
		return 0;
	}

	rx_desc->in_use = false;
	rx_wr.wr_cqe = &rx_desc->rx_cqe;
	rx_wr.sg_list = &rx_desc->rx_sg;
	rx_wr.num_sge = 1;
	rx_wr.next = NULL;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
	if (ret)
		isert_err("ib_post_recv() failed with ret: %d\n", ret);

	return ret;
}

static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	tx_desc->tx_cqe.done = isert_login_send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &tx_desc->tx_cqe;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}

static void
__isert_create_send_desc(struct isert_device *device,
			 struct iser_tx_desc *tx_desc)
{
	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISCSI_CTRL;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	__isert_create_send_desc(device, tx_desc);
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
			ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}

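/*
 * Use SEND_WITH_INV when remote invalidation was negotiated (see
 * isert_set_nego_params()), so the rkey the initiator advertised for
 * this command is invalidated as part of the response send.
 */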
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	tx_desc->tx_cqe.done = isert_send_done;
	send_wr->wr_cqe = &tx_desc->tx_cqe;

	if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
		send_wr->opcode  = IB_WR_SEND_WITH_INV;
		send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
	} else {
		send_wr->opcode = IB_WR_SEND;
	}

	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_PAYLOAD_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
	if (ret)
		isert_err("ib_post_recv() failed: %d\n", ret);

	return ret;
}

static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	__isert_create_send_desc(device, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recvm(isert_conn,
					       ISERT_QP_MAX_RECV_DTOS);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		ret = isert_login_post_recv(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_login_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;
	isert_cmd->rx_desc = rx_desc;

	return cmd;
}

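/*
 * Immediate data arrives inline in the rx descriptor.  When it covers
 * the whole transfer, the se_cmd sgl is pointed directly at the rx
 * buffer (SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC); otherwise it is copied
 * into the command's allocated sgl.
 */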
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	if (imm_data_len != data_len) {
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    &rx_desc->data[0], imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
	} else {
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);
	}

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len,  cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer before hand
	 */
	rc = isert_post_recv(isert_conn, rx_desc);
	if (rc) {
		isert_err("ib_post_recv failed with %d\n", rc);
		return rc;
	}

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in)
			return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;
		isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
		else
			cmd = isert_allocate_cmd(conn, rx_desc);

		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_print_wc(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		isert_err("%s failure: %s (%d) vend_err %x\n", type,
			  ib_wc_status_msg(wc->status), wc->status,
			  wc->vendor_err);
	else
		isert_dbg("%s failure: %s (%d)\n", type,
			  ib_wc_status_msg(wc->status), wc->status);
}

static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "recv");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		return;
	}

	rx_desc->in_use = true;

	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
		 (int)(wc->byte_len - ISER_HEADERS_LEN));

	switch (iser_ctrl->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_ctrl->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_ctrl->read_stag);
			read_va = be64_to_cpu(iser_ctrl->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_ctrl->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_ctrl->write_stag);
			write_va = be64_to_cpu(iser_ctrl->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);

	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}

static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login recv");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;

	if (isert_conn->conn) {
		struct iscsi_login *login = isert_conn->conn->conn_login;

		if (login && !login->first_request)
			isert_rx_login_req(isert_conn);
	}

	mutex_lock(&isert_conn->mutex);
	complete(&isert_conn->login_req_comp);
	mutex_unlock(&isert_conn->mutex);

	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}

static void
isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);

	if (!cmd->rw.nr_ops)
		return;

	if (isert_prot_cmd(conn, se_cmd)) {
		rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
				conn->cm_id->port_num, se_cmd->t_data_sg,
				se_cmd->t_data_nents, se_cmd->t_prot_sg,
				se_cmd->t_prot_nents, dir);
	} else {
		rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
	}

	cmd->rw.nr_ops = 0;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/* fall through */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}

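/*
 * After a signature-enabled RDMA completes, query the signature MR and
 * translate any T10-PI guard/app-tag/ref-tag error into the matching
 * TCM sense code, computing the failing sector from the reported offset.
 */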
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}

static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma write");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);

	if (ret) {
		/*
		 * transport_generic_request_failure() expects to have
		 * plus two references to handle queue-full, so re-add
		 * one here as target-core will have already dropped
		 * it after the first isert_put_datain() callback.
		 */
		kref_get(&cmd->cmd_kref);
		transport_generic_request_failure(cmd, cmd->pi_err);
	} else {
		/*
		 * XXX: isert_put_response() failure is not retried.
		 */
		ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
		if (ret)
			pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
	}
}

static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct isert_device *device = isert_conn->device;
	struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	int ret = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "rdma read");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(desc, isert_cmd, device->ib_device, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	iscsit_stop_dataout_timer(cmd);

	if (isert_prot_cmd(isert_conn, se_cmd))
		ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
	cmd->write_data_done = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	/*
	 * transport_generic_request_failure() will drop the extra
	 * se_cmd->cmd_kref reference after T10-PI error, and handle
	 * any non-zero ->queue_status() callback error retries.
	 */
	if (ret)
		transport_generic_request_failure(se_cmd, se_cmd->pi_err);
	else
		target_execute_cmd(se_cmd);
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
		/* fall through */
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "login send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
}

static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct isert_conn *isert_conn = wc->qp->qp_context;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
	struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		isert_print_wc(wc, "send");
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
		return;
	}

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (isert_cmd->iscsi_cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
	case ISTATE_SEND_LOGOUTRSP:
	case ISTATE_SEND_REJECT:
	case ISTATE_SEND_TEXTRSP:
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	default:
		isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
		break;
	}
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	int ret;

	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
	if (ret) {
		isert_err("ib_post_recv failed with %d\n", ret);
		return ret;
	}

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}

	return ret;
}

static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);
	isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}

static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}

static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
		return -ENOMEM;
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
			return -ENOMEM;

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
	if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
	return 0;
}

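/*
 * Initialize (once per command) and post an rdma_rw context.  For
 * DMA_FROM_DEVICE (a SCSI WRITE, i.e. an RDMA READ from the initiator's
 * buffer) the context targets the remote write VA/STag and resumes at
 * write_data_done; otherwise it RDMA WRITEs to the read VA/STag.  The
 * ctx_init_done flag lets a later call (e.g. Data-OUT recovery) re-post
 * the same context without re-initializing it.
 */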
static int
isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
		       struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
	enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
	u8 port_num = conn->cm_id->port_num;
	u64 addr;
	u32 rkey, offset;
	int ret;

	if (cmd->ctx_init_done)
		goto rdma_ctx_post;

	if (dir == DMA_FROM_DEVICE) {
		addr = cmd->write_va;
		rkey = cmd->write_stag;
		offset = cmd->iscsi_cmd->write_data_done;
	} else {
		addr = cmd->read_va;
		rkey = cmd->read_stag;
		offset = 0;
	}

	if (isert_prot_cmd(conn, se_cmd)) {
		struct ib_sig_attrs sig_attrs;

		ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
		if (ret)
			return ret;

		WARN_ON_ONCE(offset);
		ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				se_cmd->t_prot_sg, se_cmd->t_prot_nents,
				&sig_attrs, addr, rkey, dir);
	} else {
		ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
				se_cmd->t_data_sg, se_cmd->t_data_nents,
				offset, addr, rkey, dir);
	}

	if (ret < 0) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
		return ret;
	}

	cmd->ctx_init_done = true;

rdma_ctx_post:
	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
	if (ret < 0)
		isert_err("Cmd: %p failed to post RDMA res\n", cmd);
	return ret;
}

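/*
 * Queue Data-IN: RDMA WRITE the READ payload to the initiator.  Without
 * PI the SCSI response PDU is built here and chained behind the RDMA
 * WRITE; with PI only the tx_cqe completion handler is armed, deferring
 * the response PDU to isert_rdma_write_done().
 */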
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_cqe *cqe = NULL;
	struct ib_send_wr *chain_wr = NULL;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
		cqe = &isert_cmd->tx_desc.tx_cqe;
	} else {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}

		chain_wr = &isert_cmd->tx_desc.send_wr;
	}

	rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
	isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
		  isert_cmd, rc);
	return rc;
}

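/* Re-post the RDMA READ context to pull (more) Data-OUT from the initiator. */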
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);

	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
	ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
				     &isert_cmd->tx_desc.tx_cqe, NULL);

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
		  isert_cmd, ret);
	return ret;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

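/*
 * Create, bind and listen on an RDMA CM ID for this network portal.
 * rdma_listen() is called with a backlog of 0; incoming connect requests
 * are queued on isert_np->pending until isert_accept_np() consumes them.
 */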
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np)
		return -ENOMEM;

	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from the
	 * iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

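/*
 * Accept the CM connection request, advertising in the private iSER CM
 * header that zero-based VAs are not used, and that Send-with-Invalidate
 * is not used unless the initiator negotiated it.
 */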
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;
	struct iser_cm_hdr rsp_hdr;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	memset(&rsp_hdr, 0, sizeof(rsp_hdr));
	rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
	if (!isert_conn->snd_w_inv)
		rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
	cp.private_data = (void *)&rsp_hdr;
	cp.private_data_len = sizeof(rsp_hdr);

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}

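/*
 * Login RX path, called from the iscsi_target login thread: wait for the
 * RX completion path to signal login_req_comp, then (for the first PDU
 * only) copy the received request into the login buffer and wait on
 * login_comp before login->req is processed.
 */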
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}

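/*
 * Sleep on isert_np->sem until a new connection is queued on the pending
 * list (the CM handler ups the semaphore), then bind it to the incoming
 * iscsi_conn and move it to ISER_CONN_BOUND.
 */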
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	isert_conn->state = ISER_CONN_BOUND;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure that
	 * there are no hanging connections that completed RDMA
	 * establishment but didn't start the iscsi login process. So
	 * work around this by cleaning up whatever piled up in the
	 * accepted and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending, node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted, node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}

static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}

static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}

/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited Data-OUT
 * @conn: iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * Data-OUT messages. We must put the extra reference on those
 * before blocking on target_wait_for_sess_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}

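/*
 * Ordered connection teardown: terminate the CM connection under the
 * mutex, drain the QP so every posted work request has completed, drop
 * commands still waiting for unsolicited Data-OUT, wait for outstanding
 * se_cmds and a possible logout response, then defer the final put to
 * isert_release_wq.
 */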
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	ib_drain_qp(isert_conn->qp);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	ib_drain_qp(isert_conn->qp);
	isert_put_conn(isert_conn);
}

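/*
 * In full feature phase RX descriptors complete through isert_recv_done(),
 * so the iscsi_target rx thread has no PDUs to fetch here.  Park it on a
 * local completion that is never completed; the interruptible wait only
 * returns once the thread is signalled during connection teardown.
 */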
static void isert_get_rx_pdu(struct iscsi_conn *conn)
{
	struct completion comp;

	init_completion(&comp);

	wait_for_completion_interruptible(&comp);
}

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.rdma_shutdown		= true,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_rx_pdu	= isert_get_rx_pdu,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};

static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);