/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"
#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
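/*
 * Completion handling is deferred out of the CQ interrupt callbacks:
 * isert_rx_wq runs the per-CQ RX work items, while isert_comp_wq runs
 * the TX completion and control-PDU completion work items.
 */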
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}
static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
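/*
 * QP setup: pick the least loaded RX/TX CQ pair for this device (tracked
 * in cq_active_qps[] under device_list_mutex) and size the send queue
 * SGEs to devattr.max_sge - 2 as a work-around for RDMA_READ limits.
 */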
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	int ret, index, min_index = 0;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READ..
	 */
	attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ret;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}
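/*
 * RX descriptor pool: each connection pre-allocates ISERT_QP_MAX_RECV_DTOS
 * iser_rx_desc buffers, DMA-maps each one for ISER_RX_PAYLOAD_SIZE and
 * points its single receive SGE at the mapping using the connection's
 * local DMA MR lkey.  On a mapping failure everything mapped so far is
 * unwound before the descriptor array is freed.
 */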
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}
static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);
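/*
 * Per ib_device resources: the number of RX/TX CQ pairs is bounded by the
 * number of online CPUs, the device's completion vectors and ISERT_MAX_CQ.
 * Devices advertising IB_DEVICE_MEM_MGT_EXTENSIONS use the fast
 * registration path (isert_reg_rdma/isert_unreg_rdma); all others fall
 * back to plain DMA mapping via isert_map_rdma/isert_unmap_cmd.
 */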
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr;
	int ret = 0, i, j;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors support "
		 "Fast registration %d\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors, device->use_fastreg);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	kfree(device->cq_desc);

	return ret;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	kfree(device->cq_desc);
}
static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
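/*
 * isert_device structs are shared between connections on the same HCA:
 * lookups match on ib_device->node_guid under device_list_mutex and take
 * a reference, and isert_device_try_release() tears down the IB resources
 * once the last reference is dropped.
 */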
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}
static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_fr_pool))
		return;

	pr_debug("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_fr_pool_size)
		pr_warn("Pool still has %d regions registered\n",
			isert_conn->conn_fr_pool_size - i);
}
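/*
 * Each fast_reg_descriptor bundles an ib_fast_reg_page_list and a fast
 * registration MR, both sized for ISCSI_ISER_SG_TABLESIZE pages.
 */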
static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		pr_err("Failed to allocate data frpl err=%ld\n",
		       PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		pr_err("Failed to allocate data frmr err=%ld\n",
		       PTR_ERR(fr_desc->data_mr));
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		return PTR_ERR(fr_desc->data_mr);
	}
	pr_debug("Create fr_desc %p page_list %p\n",
		 fr_desc, fr_desc->data_frpl->page_list);

	fr_desc->valid = true;

	return 0;
}
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	int i, ret;

	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
	isert_conn->conn_fr_pool_size = 0;
	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			pr_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   isert_conn->conn_pd, fr_desc);
		if (ret) {
			pr_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		isert_conn->conn_fr_pool_size++;
	}

	pr_debug("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->conn_fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}
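/*
 * rdma_cm CONNECT_REQUEST handling: allocate the isert_conn, carve the
 * login buffer into a DMA_FROM_DEVICE request half and a DMA_TO_DEVICE
 * response half, set up the PD/MR (plus the fastreg pool when supported)
 * and the QP, then queue the connection on isert_np->np_accept_list and
 * wake any waiter on np_accept_wq.
 */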
474 isert_connect_request(struct rdma_cm_id
*cma_id
, struct rdma_cm_event
*event
)
476 struct iscsi_np
*np
= cma_id
->context
;
477 struct isert_np
*isert_np
= np
->np_context
;
478 struct isert_conn
*isert_conn
;
479 struct isert_device
*device
;
480 struct ib_device
*ib_dev
= cma_id
->device
;
483 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
484 cma_id
, cma_id
->context
);
486 isert_conn
= kzalloc(sizeof(struct isert_conn
), GFP_KERNEL
);
488 pr_err("Unable to allocate isert_conn\n");
491 isert_conn
->state
= ISER_CONN_INIT
;
492 INIT_LIST_HEAD(&isert_conn
->conn_accept_node
);
493 init_completion(&isert_conn
->conn_login_comp
);
494 init_waitqueue_head(&isert_conn
->conn_wait
);
495 init_waitqueue_head(&isert_conn
->conn_wait_comp_err
);
496 kref_init(&isert_conn
->conn_kref
);
497 kref_get(&isert_conn
->conn_kref
);
498 mutex_init(&isert_conn
->conn_mutex
);
499 mutex_init(&isert_conn
->conn_comp_mutex
);
500 spin_lock_init(&isert_conn
->conn_lock
);
502 cma_id
->context
= isert_conn
;
503 isert_conn
->conn_cm_id
= cma_id
;
504 isert_conn
->responder_resources
= event
->param
.conn
.responder_resources
;
505 isert_conn
->initiator_depth
= event
->param
.conn
.initiator_depth
;
506 pr_debug("Using responder_resources: %u initiator_depth: %u\n",
507 isert_conn
->responder_resources
, isert_conn
->initiator_depth
);
509 isert_conn
->login_buf
= kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN
+
510 ISER_RX_LOGIN_SIZE
, GFP_KERNEL
);
511 if (!isert_conn
->login_buf
) {
512 pr_err("Unable to allocate isert_conn->login_buf\n");
517 isert_conn
->login_req_buf
= isert_conn
->login_buf
;
518 isert_conn
->login_rsp_buf
= isert_conn
->login_buf
+
519 ISCSI_DEF_MAX_RECV_SEG_LEN
;
520 pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
521 isert_conn
->login_buf
, isert_conn
->login_req_buf
,
522 isert_conn
->login_rsp_buf
);
524 isert_conn
->login_req_dma
= ib_dma_map_single(ib_dev
,
525 (void *)isert_conn
->login_req_buf
,
526 ISCSI_DEF_MAX_RECV_SEG_LEN
, DMA_FROM_DEVICE
);
528 ret
= ib_dma_mapping_error(ib_dev
, isert_conn
->login_req_dma
);
530 pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
532 isert_conn
->login_req_dma
= 0;
536 isert_conn
->login_rsp_dma
= ib_dma_map_single(ib_dev
,
537 (void *)isert_conn
->login_rsp_buf
,
538 ISER_RX_LOGIN_SIZE
, DMA_TO_DEVICE
);
540 ret
= ib_dma_mapping_error(ib_dev
, isert_conn
->login_rsp_dma
);
542 pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
544 isert_conn
->login_rsp_dma
= 0;
545 goto out_req_dma_map
;
548 device
= isert_device_find_by_ib_dev(cma_id
);
549 if (IS_ERR(device
)) {
550 ret
= PTR_ERR(device
);
551 goto out_rsp_dma_map
;
554 isert_conn
->conn_device
= device
;
555 isert_conn
->conn_pd
= ib_alloc_pd(isert_conn
->conn_device
->ib_device
);
556 if (IS_ERR(isert_conn
->conn_pd
)) {
557 ret
= PTR_ERR(isert_conn
->conn_pd
);
558 pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
563 isert_conn
->conn_mr
= ib_get_dma_mr(isert_conn
->conn_pd
,
564 IB_ACCESS_LOCAL_WRITE
);
565 if (IS_ERR(isert_conn
->conn_mr
)) {
566 ret
= PTR_ERR(isert_conn
->conn_mr
);
567 pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
572 if (device
->use_fastreg
) {
573 ret
= isert_conn_create_fastreg_pool(isert_conn
);
575 pr_err("Conn: %p failed to create fastreg pool\n",
581 ret
= isert_conn_setup_qp(isert_conn
, cma_id
);
585 mutex_lock(&isert_np
->np_accept_mutex
);
586 list_add_tail(&isert_np
->np_accept_list
, &isert_conn
->conn_accept_node
);
587 mutex_unlock(&isert_np
->np_accept_mutex
);
589 pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np
);
590 wake_up(&isert_np
->np_accept_wq
);
594 if (device
->use_fastreg
)
595 isert_conn_free_fastreg_pool(isert_conn
);
597 ib_dereg_mr(isert_conn
->conn_mr
);
599 ib_dealloc_pd(isert_conn
->conn_pd
);
601 isert_device_try_release(device
);
603 ib_dma_unmap_single(ib_dev
, isert_conn
->login_rsp_dma
,
604 ISER_RX_LOGIN_SIZE
, DMA_TO_DEVICE
);
606 ib_dma_unmap_single(ib_dev
, isert_conn
->login_req_dma
,
607 ISCSI_DEF_MAX_RECV_SEG_LEN
, DMA_FROM_DEVICE
);
609 kfree(isert_conn
->login_buf
);
616 isert_connect_release(struct isert_conn
*isert_conn
)
618 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
619 struct isert_device
*device
= isert_conn
->conn_device
;
622 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
624 if (device
&& device
->use_fastreg
)
625 isert_conn_free_fastreg_pool(isert_conn
);
627 if (isert_conn
->conn_qp
) {
628 cq_index
= ((struct isert_cq_desc
*)
629 isert_conn
->conn_qp
->recv_cq
->cq_context
)->cq_index
;
630 pr_debug("isert_connect_release: cq_index: %d\n", cq_index
);
631 isert_conn
->conn_device
->cq_active_qps
[cq_index
]--;
633 rdma_destroy_qp(isert_conn
->conn_cm_id
);
636 isert_free_rx_descriptors(isert_conn
);
637 rdma_destroy_id(isert_conn
->conn_cm_id
);
639 ib_dereg_mr(isert_conn
->conn_mr
);
640 ib_dealloc_pd(isert_conn
->conn_pd
);
642 if (isert_conn
->login_buf
) {
643 ib_dma_unmap_single(ib_dev
, isert_conn
->login_rsp_dma
,
644 ISER_RX_LOGIN_SIZE
, DMA_TO_DEVICE
);
645 ib_dma_unmap_single(ib_dev
, isert_conn
->login_req_dma
,
646 ISCSI_DEF_MAX_RECV_SEG_LEN
,
648 kfree(isert_conn
->login_buf
);
653 isert_device_try_release(device
);
655 pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
659 isert_connected_handler(struct rdma_cm_id
*cma_id
)
665 isert_release_conn_kref(struct kref
*kref
)
667 struct isert_conn
*isert_conn
= container_of(kref
,
668 struct isert_conn
, conn_kref
);
670 pr_debug("Calling isert_connect_release for final kref %s/%d\n",
671 current
->comm
, current
->pid
);
673 isert_connect_release(isert_conn
);
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
683 isert_disconnect_work(struct work_struct
*work
)
685 struct isert_conn
*isert_conn
= container_of(work
,
686 struct isert_conn
, conn_logout_work
);
688 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
689 mutex_lock(&isert_conn
->conn_mutex
);
690 isert_conn
->state
= ISER_CONN_DOWN
;
692 if (isert_conn
->post_recv_buf_count
== 0 &&
693 atomic_read(&isert_conn
->post_send_buf_count
) == 0) {
694 pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
695 mutex_unlock(&isert_conn
->conn_mutex
);
698 if (!isert_conn
->conn_cm_id
) {
699 mutex_unlock(&isert_conn
->conn_mutex
);
700 isert_put_conn(isert_conn
);
703 if (!isert_conn
->logout_posted
) {
704 pr_debug("Calling rdma_disconnect for !logout_posted from"
705 " isert_disconnect_work\n");
706 rdma_disconnect(isert_conn
->conn_cm_id
);
707 mutex_unlock(&isert_conn
->conn_mutex
);
708 iscsit_cause_connection_reinstatement(isert_conn
->conn
, 0);
711 mutex_unlock(&isert_conn
->conn_mutex
);
714 wake_up(&isert_conn
->conn_wait
);
715 isert_put_conn(isert_conn
);
719 isert_disconnected_handler(struct rdma_cm_id
*cma_id
)
721 struct isert_conn
*isert_conn
= (struct isert_conn
*)cma_id
->context
;
723 INIT_WORK(&isert_conn
->conn_logout_work
, isert_disconnect_work
);
724 schedule_work(&isert_conn
->conn_logout_work
);
728 isert_cma_handler(struct rdma_cm_id
*cma_id
, struct rdma_cm_event
*event
)
732 pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
733 event
->event
, event
->status
, cma_id
->context
, cma_id
);
735 switch (event
->event
) {
736 case RDMA_CM_EVENT_CONNECT_REQUEST
:
737 pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
738 ret
= isert_connect_request(cma_id
, event
);
740 case RDMA_CM_EVENT_ESTABLISHED
:
741 pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
742 isert_connected_handler(cma_id
);
744 case RDMA_CM_EVENT_DISCONNECTED
:
745 pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
746 isert_disconnected_handler(cma_id
);
748 case RDMA_CM_EVENT_DEVICE_REMOVAL
:
749 case RDMA_CM_EVENT_ADDR_CHANGE
:
751 case RDMA_CM_EVENT_CONNECT_ERROR
:
753 pr_err("Unknown RDMA CMA event: %d\n", event
->event
);
758 pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
767 isert_post_recv(struct isert_conn
*isert_conn
, u32 count
)
769 struct ib_recv_wr
*rx_wr
, *rx_wr_failed
;
771 unsigned int rx_head
= isert_conn
->conn_rx_desc_head
;
772 struct iser_rx_desc
*rx_desc
;
774 for (rx_wr
= isert_conn
->conn_rx_wr
, i
= 0; i
< count
; i
++, rx_wr
++) {
775 rx_desc
= &isert_conn
->conn_rx_descs
[rx_head
];
776 rx_wr
->wr_id
= (unsigned long)rx_desc
;
777 rx_wr
->sg_list
= &rx_desc
->rx_sg
;
779 rx_wr
->next
= rx_wr
+ 1;
780 rx_head
= (rx_head
+ 1) & (ISERT_QP_MAX_RECV_DTOS
- 1);
784 rx_wr
->next
= NULL
; /* mark end of work requests list */
786 isert_conn
->post_recv_buf_count
+= count
;
787 ret
= ib_post_recv(isert_conn
->conn_qp
, isert_conn
->conn_rx_wr
,
790 pr_err("ib_post_recv() failed with ret: %d\n", ret
);
791 isert_conn
->post_recv_buf_count
-= count
;
793 pr_debug("isert_post_recv(): Posted %d RX buffers\n", count
);
794 isert_conn
->conn_rx_desc_head
= rx_head
;
800 isert_post_send(struct isert_conn
*isert_conn
, struct iser_tx_desc
*tx_desc
)
802 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
803 struct ib_send_wr send_wr
, *send_wr_failed
;
806 ib_dma_sync_single_for_device(ib_dev
, tx_desc
->dma_addr
,
807 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
810 send_wr
.wr_id
= (unsigned long)tx_desc
;
811 send_wr
.sg_list
= tx_desc
->tx_sg
;
812 send_wr
.num_sge
= tx_desc
->num_sge
;
813 send_wr
.opcode
= IB_WR_SEND
;
814 send_wr
.send_flags
= IB_SEND_SIGNALED
;
816 atomic_inc(&isert_conn
->post_send_buf_count
);
818 ret
= ib_post_send(isert_conn
->conn_qp
, &send_wr
, &send_wr_failed
);
820 pr_err("ib_post_send() failed, ret: %d\n", ret
);
821 atomic_dec(&isert_conn
->post_send_buf_count
);
828 isert_create_send_desc(struct isert_conn
*isert_conn
,
829 struct isert_cmd
*isert_cmd
,
830 struct iser_tx_desc
*tx_desc
)
832 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
834 ib_dma_sync_single_for_cpu(ib_dev
, tx_desc
->dma_addr
,
835 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
837 memset(&tx_desc
->iser_header
, 0, sizeof(struct iser_hdr
));
838 tx_desc
->iser_header
.flags
= ISER_VER
;
840 tx_desc
->num_sge
= 1;
841 tx_desc
->isert_cmd
= isert_cmd
;
843 if (tx_desc
->tx_sg
[0].lkey
!= isert_conn
->conn_mr
->lkey
) {
844 tx_desc
->tx_sg
[0].lkey
= isert_conn
->conn_mr
->lkey
;
845 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc
);
850 isert_init_tx_hdrs(struct isert_conn
*isert_conn
,
851 struct iser_tx_desc
*tx_desc
)
853 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
856 dma_addr
= ib_dma_map_single(ib_dev
, (void *)tx_desc
,
857 ISER_HEADERS_LEN
, DMA_TO_DEVICE
);
858 if (ib_dma_mapping_error(ib_dev
, dma_addr
)) {
859 pr_err("ib_dma_mapping_error() failed\n");
863 tx_desc
->dma_addr
= dma_addr
;
864 tx_desc
->tx_sg
[0].addr
= tx_desc
->dma_addr
;
865 tx_desc
->tx_sg
[0].length
= ISER_HEADERS_LEN
;
866 tx_desc
->tx_sg
[0].lkey
= isert_conn
->conn_mr
->lkey
;
868 pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
869 " lkey: 0x%08x\n", tx_desc
->tx_sg
[0].addr
,
870 tx_desc
->tx_sg
[0].length
, tx_desc
->tx_sg
[0].lkey
);
876 isert_init_send_wr(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
,
877 struct ib_send_wr
*send_wr
, bool coalesce
)
879 struct iser_tx_desc
*tx_desc
= &isert_cmd
->tx_desc
;
881 isert_cmd
->rdma_wr
.iser_ib_op
= ISER_IB_SEND
;
882 send_wr
->wr_id
= (unsigned long)&isert_cmd
->tx_desc
;
883 send_wr
->opcode
= IB_WR_SEND
;
884 send_wr
->sg_list
= &tx_desc
->tx_sg
[0];
885 send_wr
->num_sge
= isert_cmd
->tx_desc
.num_sge
;
887 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
888 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
890 mutex_lock(&isert_conn
->conn_comp_mutex
);
892 ++isert_conn
->conn_comp_batch
< ISERT_COMP_BATCH_COUNT
) {
893 llist_add(&tx_desc
->comp_llnode
, &isert_conn
->conn_comp_llist
);
894 mutex_unlock(&isert_conn
->conn_comp_mutex
);
897 isert_conn
->conn_comp_batch
= 0;
898 tx_desc
->comp_llnode_batch
= llist_del_all(&isert_conn
->conn_comp_llist
);
899 mutex_unlock(&isert_conn
->conn_comp_mutex
);
901 send_wr
->send_flags
= IB_SEND_SIGNALED
;
905 isert_rdma_post_recvl(struct isert_conn
*isert_conn
)
907 struct ib_recv_wr rx_wr
, *rx_wr_fail
;
911 memset(&sge
, 0, sizeof(struct ib_sge
));
912 sge
.addr
= isert_conn
->login_req_dma
;
913 sge
.length
= ISER_RX_LOGIN_SIZE
;
914 sge
.lkey
= isert_conn
->conn_mr
->lkey
;
916 pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
917 sge
.addr
, sge
.length
, sge
.lkey
);
919 memset(&rx_wr
, 0, sizeof(struct ib_recv_wr
));
920 rx_wr
.wr_id
= (unsigned long)isert_conn
->login_req_buf
;
921 rx_wr
.sg_list
= &sge
;
924 isert_conn
->post_recv_buf_count
++;
925 ret
= ib_post_recv(isert_conn
->conn_qp
, &rx_wr
, &rx_wr_fail
);
927 pr_err("ib_post_recv() failed: %d\n", ret
);
928 isert_conn
->post_recv_buf_count
--;
931 pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
936 isert_put_login_tx(struct iscsi_conn
*conn
, struct iscsi_login
*login
,
939 struct isert_conn
*isert_conn
= conn
->context
;
940 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
941 struct iser_tx_desc
*tx_desc
= &isert_conn
->conn_login_tx_desc
;
944 isert_create_send_desc(isert_conn
, NULL
, tx_desc
);
946 memcpy(&tx_desc
->iscsi_header
, &login
->rsp
[0],
947 sizeof(struct iscsi_hdr
));
949 isert_init_tx_hdrs(isert_conn
, tx_desc
);
952 struct ib_sge
*tx_dsg
= &tx_desc
->tx_sg
[1];
954 ib_dma_sync_single_for_cpu(ib_dev
, isert_conn
->login_rsp_dma
,
955 length
, DMA_TO_DEVICE
);
957 memcpy(isert_conn
->login_rsp_buf
, login
->rsp_buf
, length
);
959 ib_dma_sync_single_for_device(ib_dev
, isert_conn
->login_rsp_dma
,
960 length
, DMA_TO_DEVICE
);
962 tx_dsg
->addr
= isert_conn
->login_rsp_dma
;
963 tx_dsg
->length
= length
;
964 tx_dsg
->lkey
= isert_conn
->conn_mr
->lkey
;
965 tx_desc
->num_sge
= 2;
967 if (!login
->login_failed
) {
968 if (login
->login_complete
) {
969 ret
= isert_alloc_rx_descriptors(isert_conn
);
973 ret
= isert_post_recv(isert_conn
, ISERT_MIN_POSTED_RX
);
977 isert_conn
->state
= ISER_CONN_UP
;
981 ret
= isert_rdma_post_recvl(isert_conn
);
986 ret
= isert_post_send(isert_conn
, tx_desc
);
994 isert_rx_login_req(struct iser_rx_desc
*rx_desc
, int rx_buflen
,
995 struct isert_conn
*isert_conn
)
997 struct iscsi_conn
*conn
= isert_conn
->conn
;
998 struct iscsi_login
*login
= conn
->conn_login
;
1002 pr_err("conn->conn_login is NULL\n");
1007 if (login
->first_request
) {
1008 struct iscsi_login_req
*login_req
=
1009 (struct iscsi_login_req
*)&rx_desc
->iscsi_header
;
1011 * Setup the initial iscsi_login values from the leading
1012 * login request PDU.
1014 login
->leading_connection
= (!login_req
->tsih
) ? 1 : 0;
1015 login
->current_stage
=
1016 (login_req
->flags
& ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK
)
1018 login
->version_min
= login_req
->min_version
;
1019 login
->version_max
= login_req
->max_version
;
1020 memcpy(login
->isid
, login_req
->isid
, 6);
1021 login
->cmd_sn
= be32_to_cpu(login_req
->cmdsn
);
1022 login
->init_task_tag
= login_req
->itt
;
1023 login
->initial_exp_statsn
= be32_to_cpu(login_req
->exp_statsn
);
1024 login
->cid
= be16_to_cpu(login_req
->cid
);
1025 login
->tsih
= be16_to_cpu(login_req
->tsih
);
1028 memcpy(&login
->req
[0], (void *)&rx_desc
->iscsi_header
, ISCSI_HDR_LEN
);
1030 size
= min(rx_buflen
, MAX_KEY_VALUE_PAIRS
);
1031 pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
1032 size
, rx_buflen
, MAX_KEY_VALUE_PAIRS
);
1033 memcpy(login
->req_buf
, &rx_desc
->data
[0], size
);
1035 if (login
->first_request
) {
1036 complete(&isert_conn
->conn_login_comp
);
1039 schedule_delayed_work(&conn
->login_work
, 0);
1042 static struct iscsi_cmd
1043 *isert_allocate_cmd(struct iscsi_conn
*conn
)
1045 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1046 struct isert_cmd
*isert_cmd
;
1047 struct iscsi_cmd
*cmd
;
1049 cmd
= iscsit_allocate_cmd(conn
, TASK_INTERRUPTIBLE
);
1051 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1054 isert_cmd
= iscsit_priv_cmd(cmd
);
1055 isert_cmd
->conn
= isert_conn
;
1056 isert_cmd
->iscsi_cmd
= cmd
;
1062 isert_handle_scsi_cmd(struct isert_conn
*isert_conn
,
1063 struct isert_cmd
*isert_cmd
, struct iscsi_cmd
*cmd
,
1064 struct iser_rx_desc
*rx_desc
, unsigned char *buf
)
1066 struct iscsi_conn
*conn
= isert_conn
->conn
;
1067 struct iscsi_scsi_req
*hdr
= (struct iscsi_scsi_req
*)buf
;
1068 struct scatterlist
*sg
;
1069 int imm_data
, imm_data_len
, unsol_data
, sg_nents
, rc
;
1070 bool dump_payload
= false;
1072 rc
= iscsit_setup_scsi_cmd(conn
, cmd
, buf
);
1076 imm_data
= cmd
->immediate_data
;
1077 imm_data_len
= cmd
->first_burst_len
;
1078 unsol_data
= cmd
->unsolicited_data
;
1080 rc
= iscsit_process_scsi_cmd(conn
, cmd
, hdr
);
1083 } else if (rc
> 0) {
1084 dump_payload
= true;
1091 sg
= &cmd
->se_cmd
.t_data_sg
[0];
1092 sg_nents
= max(1UL, DIV_ROUND_UP(imm_data_len
, PAGE_SIZE
));
1094 pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
1095 sg
, sg_nents
, &rx_desc
->data
[0], imm_data_len
);
1097 sg_copy_from_buffer(sg
, sg_nents
, &rx_desc
->data
[0], imm_data_len
);
1099 cmd
->write_data_done
+= imm_data_len
;
1101 if (cmd
->write_data_done
== cmd
->se_cmd
.data_length
) {
1102 spin_lock_bh(&cmd
->istate_lock
);
1103 cmd
->cmd_flags
|= ICF_GOT_LAST_DATAOUT
;
1104 cmd
->i_state
= ISTATE_RECEIVED_LAST_DATAOUT
;
1105 spin_unlock_bh(&cmd
->istate_lock
);
1109 rc
= iscsit_sequence_cmd(conn
, cmd
, buf
, hdr
->cmdsn
);
1111 if (!rc
&& dump_payload
== false && unsol_data
)
1112 iscsit_set_unsoliticed_dataout(cmd
);
1118 isert_handle_iscsi_dataout(struct isert_conn
*isert_conn
,
1119 struct iser_rx_desc
*rx_desc
, unsigned char *buf
)
1121 struct scatterlist
*sg_start
;
1122 struct iscsi_conn
*conn
= isert_conn
->conn
;
1123 struct iscsi_cmd
*cmd
= NULL
;
1124 struct iscsi_data
*hdr
= (struct iscsi_data
*)buf
;
1125 u32 unsol_data_len
= ntoh24(hdr
->dlength
);
1126 int rc
, sg_nents
, sg_off
, page_off
;
1128 rc
= iscsit_check_dataout_hdr(conn
, buf
, &cmd
);
1134 * FIXME: Unexpected unsolicited_data out
1136 if (!cmd
->unsolicited_data
) {
1137 pr_err("Received unexpected solicited data payload\n");
1142 pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
1143 unsol_data_len
, cmd
->write_data_done
, cmd
->se_cmd
.data_length
);
1145 sg_off
= cmd
->write_data_done
/ PAGE_SIZE
;
1146 sg_start
= &cmd
->se_cmd
.t_data_sg
[sg_off
];
1147 sg_nents
= max(1UL, DIV_ROUND_UP(unsol_data_len
, PAGE_SIZE
));
1148 page_off
= cmd
->write_data_done
% PAGE_SIZE
;
1150 * FIXME: Non page-aligned unsolicited_data out
1153 pr_err("Received unexpected non-page aligned data payload\n");
1157 pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
1158 sg_start
, sg_off
, sg_nents
, &rx_desc
->data
[0], unsol_data_len
);
1160 sg_copy_from_buffer(sg_start
, sg_nents
, &rx_desc
->data
[0],
1163 rc
= iscsit_check_dataout_payload(cmd
, hdr
, false);
1171 isert_handle_nop_out(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
,
1172 struct iscsi_cmd
*cmd
, struct iser_rx_desc
*rx_desc
,
1175 struct iscsi_conn
*conn
= isert_conn
->conn
;
1176 struct iscsi_nopout
*hdr
= (struct iscsi_nopout
*)buf
;
1179 rc
= iscsit_setup_nop_out(conn
, cmd
, hdr
);
1183 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1186 return iscsit_process_nop_out(conn
, cmd
, hdr
);
1190 isert_handle_text_cmd(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
,
1191 struct iscsi_cmd
*cmd
, struct iser_rx_desc
*rx_desc
,
1192 struct iscsi_text
*hdr
)
1194 struct iscsi_conn
*conn
= isert_conn
->conn
;
1195 u32 payload_length
= ntoh24(hdr
->dlength
);
1197 unsigned char *text_in
;
1199 rc
= iscsit_setup_text_cmd(conn
, cmd
, hdr
);
1203 text_in
= kzalloc(payload_length
, GFP_KERNEL
);
1205 pr_err("Unable to allocate text_in of payload_length: %u\n",
1209 cmd
->text_in_ptr
= text_in
;
1211 memcpy(cmd
->text_in_ptr
, &rx_desc
->data
[0], payload_length
);
1213 return iscsit_process_text_cmd(conn
, cmd
, hdr
);
1217 isert_rx_opcode(struct isert_conn
*isert_conn
, struct iser_rx_desc
*rx_desc
,
1218 uint32_t read_stag
, uint64_t read_va
,
1219 uint32_t write_stag
, uint64_t write_va
)
1221 struct iscsi_hdr
*hdr
= &rx_desc
->iscsi_header
;
1222 struct iscsi_conn
*conn
= isert_conn
->conn
;
1223 struct iscsi_session
*sess
= conn
->sess
;
1224 struct iscsi_cmd
*cmd
;
1225 struct isert_cmd
*isert_cmd
;
1227 u8 opcode
= (hdr
->opcode
& ISCSI_OPCODE_MASK
);
1229 if (sess
->sess_ops
->SessionType
&&
1230 (!(opcode
& ISCSI_OP_TEXT
) || !(opcode
& ISCSI_OP_LOGOUT
))) {
1231 pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1232 " ignoring\n", opcode
);
1237 case ISCSI_OP_SCSI_CMD
:
1238 cmd
= isert_allocate_cmd(conn
);
1242 isert_cmd
= iscsit_priv_cmd(cmd
);
1243 isert_cmd
->read_stag
= read_stag
;
1244 isert_cmd
->read_va
= read_va
;
1245 isert_cmd
->write_stag
= write_stag
;
1246 isert_cmd
->write_va
= write_va
;
1248 ret
= isert_handle_scsi_cmd(isert_conn
, isert_cmd
, cmd
,
1249 rx_desc
, (unsigned char *)hdr
);
1251 case ISCSI_OP_NOOP_OUT
:
1252 cmd
= isert_allocate_cmd(conn
);
1256 isert_cmd
= iscsit_priv_cmd(cmd
);
1257 ret
= isert_handle_nop_out(isert_conn
, isert_cmd
, cmd
,
1258 rx_desc
, (unsigned char *)hdr
);
1260 case ISCSI_OP_SCSI_DATA_OUT
:
1261 ret
= isert_handle_iscsi_dataout(isert_conn
, rx_desc
,
1262 (unsigned char *)hdr
);
1264 case ISCSI_OP_SCSI_TMFUNC
:
1265 cmd
= isert_allocate_cmd(conn
);
1269 ret
= iscsit_handle_task_mgt_cmd(conn
, cmd
,
1270 (unsigned char *)hdr
);
1272 case ISCSI_OP_LOGOUT
:
1273 cmd
= isert_allocate_cmd(conn
);
1277 ret
= iscsit_handle_logout_cmd(conn
, cmd
, (unsigned char *)hdr
);
1279 wait_for_completion_timeout(&conn
->conn_logout_comp
,
1280 SECONDS_FOR_LOGOUT_COMP
*
1284 cmd
= isert_allocate_cmd(conn
);
1288 isert_cmd
= iscsit_priv_cmd(cmd
);
1289 ret
= isert_handle_text_cmd(isert_conn
, isert_cmd
, cmd
,
1290 rx_desc
, (struct iscsi_text
*)hdr
);
1293 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode
);
1302 isert_rx_do_work(struct iser_rx_desc
*rx_desc
, struct isert_conn
*isert_conn
)
1304 struct iser_hdr
*iser_hdr
= &rx_desc
->iser_header
;
1305 uint64_t read_va
= 0, write_va
= 0;
1306 uint32_t read_stag
= 0, write_stag
= 0;
1309 switch (iser_hdr
->flags
& 0xF0) {
1311 if (iser_hdr
->flags
& ISER_RSV
) {
1312 read_stag
= be32_to_cpu(iser_hdr
->read_stag
);
1313 read_va
= be64_to_cpu(iser_hdr
->read_va
);
1314 pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
1315 read_stag
, (unsigned long long)read_va
);
1317 if (iser_hdr
->flags
& ISER_WSV
) {
1318 write_stag
= be32_to_cpu(iser_hdr
->write_stag
);
1319 write_va
= be64_to_cpu(iser_hdr
->write_va
);
1320 pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
1321 write_stag
, (unsigned long long)write_va
);
1324 pr_debug("ISER ISCSI_CTRL PDU\n");
1327 pr_err("iSER Hello message\n");
1330 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr
->flags
);
1334 rc
= isert_rx_opcode(isert_conn
, rx_desc
,
1335 read_stag
, read_va
, write_stag
, write_va
);
1339 isert_rx_completion(struct iser_rx_desc
*desc
, struct isert_conn
*isert_conn
,
1340 unsigned long xfer_len
)
1342 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1343 struct iscsi_hdr
*hdr
;
1345 int rx_buflen
, outstanding
;
1347 if ((char *)desc
== isert_conn
->login_req_buf
) {
1348 rx_dma
= isert_conn
->login_req_dma
;
1349 rx_buflen
= ISER_RX_LOGIN_SIZE
;
1350 pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1353 rx_dma
= desc
->dma_addr
;
1354 rx_buflen
= ISER_RX_PAYLOAD_SIZE
;
1355 pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1359 ib_dma_sync_single_for_cpu(ib_dev
, rx_dma
, rx_buflen
, DMA_FROM_DEVICE
);
1361 hdr
= &desc
->iscsi_header
;
1362 pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1363 hdr
->opcode
, hdr
->itt
, hdr
->flags
,
1364 (int)(xfer_len
- ISER_HEADERS_LEN
));
1366 if ((char *)desc
== isert_conn
->login_req_buf
)
1367 isert_rx_login_req(desc
, xfer_len
- ISER_HEADERS_LEN
,
1370 isert_rx_do_work(desc
, isert_conn
);
1372 ib_dma_sync_single_for_device(ib_dev
, rx_dma
, rx_buflen
,
1375 isert_conn
->post_recv_buf_count
--;
1376 pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
1377 isert_conn
->post_recv_buf_count
);
1379 if ((char *)desc
== isert_conn
->login_req_buf
)
1382 outstanding
= isert_conn
->post_recv_buf_count
;
1383 if (outstanding
+ ISERT_MIN_POSTED_RX
<= ISERT_QP_MAX_RECV_DTOS
) {
1384 int err
, count
= min(ISERT_QP_MAX_RECV_DTOS
- outstanding
,
1385 ISERT_MIN_POSTED_RX
);
1386 err
= isert_post_recv(isert_conn
, count
);
1388 pr_err("isert_post_recv() count: %d failed, %d\n",
1395 isert_unmap_cmd(struct isert_cmd
*isert_cmd
, struct isert_conn
*isert_conn
)
1397 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1398 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1400 pr_debug("isert_unmap_cmd: %p\n", isert_cmd
);
1402 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd
);
1403 ib_dma_unmap_sg(ib_dev
, wr
->sge
, wr
->num_sge
,
1404 (wr
->iser_ib_op
== ISER_IB_RDMA_WRITE
) ?
1405 DMA_TO_DEVICE
: DMA_FROM_DEVICE
);
1410 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd
);
1416 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd
);
1423 isert_unreg_rdma(struct isert_cmd
*isert_cmd
, struct isert_conn
*isert_conn
)
1425 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1426 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1427 LIST_HEAD(unmap_list
);
1429 pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd
);
1432 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
1433 isert_cmd
, wr
->fr_desc
);
1434 spin_lock_bh(&isert_conn
->conn_lock
);
1435 list_add_tail(&wr
->fr_desc
->list
, &isert_conn
->conn_fr_pool
);
1436 spin_unlock_bh(&isert_conn
->conn_lock
);
1441 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd
);
1442 ib_dma_unmap_sg(ib_dev
, wr
->sge
, wr
->num_sge
,
1443 (wr
->iser_ib_op
== ISER_IB_RDMA_WRITE
) ?
1444 DMA_TO_DEVICE
: DMA_FROM_DEVICE
);
1453 isert_put_cmd(struct isert_cmd
*isert_cmd
)
1455 struct iscsi_cmd
*cmd
= isert_cmd
->iscsi_cmd
;
1456 struct isert_conn
*isert_conn
= isert_cmd
->conn
;
1457 struct iscsi_conn
*conn
= isert_conn
->conn
;
1458 struct isert_device
*device
= isert_conn
->conn_device
;
1460 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd
);
1462 switch (cmd
->iscsi_opcode
) {
1463 case ISCSI_OP_SCSI_CMD
:
1464 spin_lock_bh(&conn
->cmd_lock
);
1465 if (!list_empty(&cmd
->i_conn_node
))
1466 list_del(&cmd
->i_conn_node
);
1467 spin_unlock_bh(&conn
->cmd_lock
);
1469 if (cmd
->data_direction
== DMA_TO_DEVICE
)
1470 iscsit_stop_dataout_timer(cmd
);
1472 device
->unreg_rdma_mem(isert_cmd
, isert_conn
);
1473 transport_generic_free_cmd(&cmd
->se_cmd
, 0);
1475 case ISCSI_OP_SCSI_TMFUNC
:
1476 spin_lock_bh(&conn
->cmd_lock
);
1477 if (!list_empty(&cmd
->i_conn_node
))
1478 list_del(&cmd
->i_conn_node
);
1479 spin_unlock_bh(&conn
->cmd_lock
);
1481 transport_generic_free_cmd(&cmd
->se_cmd
, 0);
1483 case ISCSI_OP_REJECT
:
1484 case ISCSI_OP_NOOP_OUT
:
1486 spin_lock_bh(&conn
->cmd_lock
);
1487 if (!list_empty(&cmd
->i_conn_node
))
1488 list_del(&cmd
->i_conn_node
);
1489 spin_unlock_bh(&conn
->cmd_lock
);
1492 * Handle special case for REJECT when iscsi_add_reject*() has
1493 * overwritten the original iscsi_opcode assignment, and the
1494 * associated cmd->se_cmd needs to be released.
1496 if (cmd
->se_cmd
.se_tfo
!= NULL
) {
1497 pr_debug("Calling transport_generic_free_cmd from"
1498 " isert_put_cmd for 0x%02x\n",
1500 transport_generic_free_cmd(&cmd
->se_cmd
, 0);
1507 iscsit_release_cmd(cmd
);
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd);
}
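/*
 * RDMA_READ completion: all outstanding Data-Out has now been pulled from
 * the initiator, so stop the dataout timer, release the RDMA mapping via
 * device->unreg_rdma_mem(), mark the last Data-Out as received and hand
 * the se_cmd to target_execute_cmd().
 */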
1539 isert_completion_rdma_read(struct iser_tx_desc
*tx_desc
,
1540 struct isert_cmd
*isert_cmd
)
1542 struct isert_rdma_wr
*wr
= &isert_cmd
->rdma_wr
;
1543 struct iscsi_cmd
*cmd
= isert_cmd
->iscsi_cmd
;
1544 struct se_cmd
*se_cmd
= &cmd
->se_cmd
;
1545 struct isert_conn
*isert_conn
= isert_cmd
->conn
;
1546 struct isert_device
*device
= isert_conn
->conn_device
;
1548 iscsit_stop_dataout_timer(cmd
);
1549 device
->unreg_rdma_mem(isert_cmd
, isert_conn
);
1550 cmd
->write_data_done
= wr
->cur_rdma_length
;
1552 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd
);
1553 spin_lock_bh(&cmd
->istate_lock
);
1554 cmd
->cmd_flags
|= ICF_GOT_LAST_DATAOUT
;
1555 cmd
->i_state
= ISTATE_RECEIVED_LAST_DATAOUT
;
1556 spin_unlock_bh(&cmd
->istate_lock
);
1558 target_execute_cmd(se_cmd
);
1562 isert_do_control_comp(struct work_struct
*work
)
1564 struct isert_cmd
*isert_cmd
= container_of(work
,
1565 struct isert_cmd
, comp_work
);
1566 struct isert_conn
*isert_conn
= isert_cmd
->conn
;
1567 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1568 struct iscsi_cmd
*cmd
= isert_cmd
->iscsi_cmd
;
1570 switch (cmd
->i_state
) {
1571 case ISTATE_SEND_TASKMGTRSP
:
1572 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1574 atomic_dec(&isert_conn
->post_send_buf_count
);
1575 iscsit_tmr_post_handler(cmd
, cmd
->conn
);
1577 cmd
->i_state
= ISTATE_SENT_STATUS
;
1578 isert_completion_put(&isert_cmd
->tx_desc
, isert_cmd
, ib_dev
);
1580 case ISTATE_SEND_REJECT
:
1581 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1582 atomic_dec(&isert_conn
->post_send_buf_count
);
1584 cmd
->i_state
= ISTATE_SENT_STATUS
;
1585 isert_completion_put(&isert_cmd
->tx_desc
, isert_cmd
, ib_dev
);
1587 case ISTATE_SEND_LOGOUTRSP
:
1588 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1590 * Call atomic_dec(&isert_conn->post_send_buf_count)
1591 * from isert_free_conn()
1593 isert_conn
->logout_posted
= true;
1594 iscsit_logout_post_handler(cmd
, cmd
->conn
);
1596 case ISTATE_SEND_TEXTRSP
:
1597 atomic_dec(&isert_conn
->post_send_buf_count
);
1598 cmd
->i_state
= ISTATE_SENT_STATUS
;
1599 isert_completion_put(&isert_cmd
->tx_desc
, isert_cmd
, ib_dev
);
1602 pr_err("Unknown do_control_comp i_state %d\n", cmd
->i_state
);
1609 isert_response_completion(struct iser_tx_desc
*tx_desc
,
1610 struct isert_cmd
*isert_cmd
,
1611 struct isert_conn
*isert_conn
,
1612 struct ib_device
*ib_dev
)
1614 struct iscsi_cmd
*cmd
= isert_cmd
->iscsi_cmd
;
1616 if (cmd
->i_state
== ISTATE_SEND_TASKMGTRSP
||
1617 cmd
->i_state
== ISTATE_SEND_LOGOUTRSP
||
1618 cmd
->i_state
== ISTATE_SEND_REJECT
||
1619 cmd
->i_state
== ISTATE_SEND_TEXTRSP
) {
1620 isert_unmap_tx_desc(tx_desc
, ib_dev
);
1622 INIT_WORK(&isert_cmd
->comp_work
, isert_do_control_comp
);
1623 queue_work(isert_comp_wq
, &isert_cmd
->comp_work
);
1626 atomic_dec(&isert_conn
->post_send_buf_count
);
1628 cmd
->i_state
= ISTATE_SENT_STATUS
;
1629 isert_completion_put(tx_desc
, isert_cmd
, ib_dev
);
1633 __isert_send_completion(struct iser_tx_desc
*tx_desc
,
1634 struct isert_conn
*isert_conn
)
1636 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1637 struct isert_cmd
*isert_cmd
= tx_desc
->isert_cmd
;
1638 struct isert_rdma_wr
*wr
;
1641 atomic_dec(&isert_conn
->post_send_buf_count
);
1642 isert_unmap_tx_desc(tx_desc
, ib_dev
);
1645 wr
= &isert_cmd
->rdma_wr
;
1647 switch (wr
->iser_ib_op
) {
1649 pr_err("isert_send_completion: Got ISER_IB_RECV\n");
1653 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1654 isert_response_completion(tx_desc
, isert_cmd
,
1655 isert_conn
, ib_dev
);
1657 case ISER_IB_RDMA_WRITE
:
1658 pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1661 case ISER_IB_RDMA_READ
:
1662 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1664 atomic_dec(&isert_conn
->post_send_buf_count
);
1665 isert_completion_rdma_read(tx_desc
, isert_cmd
);
1668 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr
->iser_ib_op
);
1675 isert_send_completion(struct iser_tx_desc
*tx_desc
,
1676 struct isert_conn
*isert_conn
)
1678 struct llist_node
*llnode
= tx_desc
->comp_llnode_batch
;
1679 struct iser_tx_desc
*t
;
1681 * Drain coalesced completion llist starting from comp_llnode_batch
1682 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
1685 t
= llist_entry(llnode
, struct iser_tx_desc
, comp_llnode
);
1686 llnode
= llist_next(llnode
);
1687 __isert_send_completion(t
, isert_conn
);
1689 __isert_send_completion(tx_desc
, isert_conn
);
1693 isert_cq_comp_err(struct iser_tx_desc
*tx_desc
, struct isert_conn
*isert_conn
)
1695 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1698 struct isert_cmd
*isert_cmd
= tx_desc
->isert_cmd
;
1701 isert_unmap_tx_desc(tx_desc
, ib_dev
);
1703 isert_completion_put(tx_desc
, isert_cmd
, ib_dev
);
1706 if (isert_conn
->post_recv_buf_count
== 0 &&
1707 atomic_read(&isert_conn
->post_send_buf_count
) == 0) {
1708 pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
1709 pr_debug("Calling wake_up from isert_cq_comp_err\n");
1711 mutex_lock(&isert_conn
->conn_mutex
);
1712 if (isert_conn
->state
!= ISER_CONN_DOWN
)
1713 isert_conn
->state
= ISER_CONN_TERMINATING
;
1714 mutex_unlock(&isert_conn
->conn_mutex
);
1716 wake_up(&isert_conn
->conn_wait_comp_err
);
1721 isert_cq_tx_work(struct work_struct
*work
)
1723 struct isert_cq_desc
*cq_desc
= container_of(work
,
1724 struct isert_cq_desc
, cq_tx_work
);
1725 struct isert_device
*device
= cq_desc
->device
;
1726 int cq_index
= cq_desc
->cq_index
;
1727 struct ib_cq
*tx_cq
= device
->dev_tx_cq
[cq_index
];
1728 struct isert_conn
*isert_conn
;
1729 struct iser_tx_desc
*tx_desc
;
1732 while (ib_poll_cq(tx_cq
, 1, &wc
) == 1) {
1733 tx_desc
= (struct iser_tx_desc
*)(unsigned long)wc
.wr_id
;
1734 isert_conn
= wc
.qp
->qp_context
;
1736 if (wc
.status
== IB_WC_SUCCESS
) {
1737 isert_send_completion(tx_desc
, isert_conn
);
1739 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1740 pr_debug("TX wc.status: 0x%08x\n", wc
.status
);
1741 pr_debug("TX wc.vendor_err: 0x%08x\n", wc
.vendor_err
);
1742 atomic_dec(&isert_conn
->post_send_buf_count
);
1743 isert_cq_comp_err(tx_desc
, isert_conn
);
1747 ib_req_notify_cq(tx_cq
, IB_CQ_NEXT_COMP
);
static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}
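/*
 * The CQ completion callbacks run in interrupt context, so they only queue
 * the matching cq_rx_work/cq_tx_work item; the work functions drain the CQ
 * with ib_poll_cq() and re-arm it with ib_req_notify_cq().
 */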
1759 isert_cq_rx_work(struct work_struct
*work
)
1761 struct isert_cq_desc
*cq_desc
= container_of(work
,
1762 struct isert_cq_desc
, cq_rx_work
);
1763 struct isert_device
*device
= cq_desc
->device
;
1764 int cq_index
= cq_desc
->cq_index
;
1765 struct ib_cq
*rx_cq
= device
->dev_rx_cq
[cq_index
];
1766 struct isert_conn
*isert_conn
;
1767 struct iser_rx_desc
*rx_desc
;
1769 unsigned long xfer_len
;
1771 while (ib_poll_cq(rx_cq
, 1, &wc
) == 1) {
1772 rx_desc
= (struct iser_rx_desc
*)(unsigned long)wc
.wr_id
;
1773 isert_conn
= wc
.qp
->qp_context
;
1775 if (wc
.status
== IB_WC_SUCCESS
) {
1776 xfer_len
= (unsigned long)wc
.byte_len
;
1777 isert_rx_completion(rx_desc
, isert_conn
, xfer_len
);
1779 pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1780 if (wc
.status
!= IB_WC_WR_FLUSH_ERR
) {
1781 pr_debug("RX wc.status: 0x%08x\n", wc
.status
);
1782 pr_debug("RX wc.vendor_err: 0x%08x\n",
1785 isert_conn
->post_recv_buf_count
--;
1786 isert_cq_comp_err(NULL
, isert_conn
);
1790 ib_req_notify_cq(rx_cq
, IB_CQ_NEXT_COMP
);
static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}
1802 isert_post_response(struct isert_conn
*isert_conn
, struct isert_cmd
*isert_cmd
)
1804 struct ib_send_wr
*wr_failed
;
1807 atomic_inc(&isert_conn
->post_send_buf_count
);
1809 ret
= ib_post_send(isert_conn
->conn_qp
, &isert_cmd
->tx_desc
.send_wr
,
1812 pr_err("ib_post_send failed with %d\n", ret
);
1813 atomic_dec(&isert_conn
->post_send_buf_count
);
1820 isert_put_response(struct iscsi_conn
*conn
, struct iscsi_cmd
*cmd
)
1822 struct isert_cmd
*isert_cmd
= iscsit_priv_cmd(cmd
);
1823 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1824 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1825 struct iscsi_scsi_rsp
*hdr
= (struct iscsi_scsi_rsp
*)
1826 &isert_cmd
->tx_desc
.iscsi_header
;
1828 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1829 iscsit_build_rsp_pdu(cmd
, conn
, true, hdr
);
1830 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1832 * Attach SENSE DATA payload to iSCSI Response PDU
1834 if (cmd
->se_cmd
.sense_buffer
&&
1835 ((cmd
->se_cmd
.se_cmd_flags
& SCF_TRANSPORT_TASK_SENSE
) ||
1836 (cmd
->se_cmd
.se_cmd_flags
& SCF_EMULATED_TASK_SENSE
))) {
1837 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1838 struct ib_sge
*tx_dsg
= &isert_cmd
->tx_desc
.tx_sg
[1];
1839 u32 padding
, pdu_len
;
1841 put_unaligned_be16(cmd
->se_cmd
.scsi_sense_length
,
1843 cmd
->se_cmd
.scsi_sense_length
+= sizeof(__be16
);
1845 padding
= -(cmd
->se_cmd
.scsi_sense_length
) & 3;
1846 hton24(hdr
->dlength
, (u32
)cmd
->se_cmd
.scsi_sense_length
);
1847 pdu_len
= cmd
->se_cmd
.scsi_sense_length
+ padding
;
1849 isert_cmd
->pdu_buf_dma
= ib_dma_map_single(ib_dev
,
1850 (void *)cmd
->sense_buffer
, pdu_len
,
1853 isert_cmd
->pdu_buf_len
= pdu_len
;
1854 tx_dsg
->addr
= isert_cmd
->pdu_buf_dma
;
1855 tx_dsg
->length
= pdu_len
;
1856 tx_dsg
->lkey
= isert_conn
->conn_mr
->lkey
;
1857 isert_cmd
->tx_desc
.num_sge
= 2;
1860 isert_init_send_wr(isert_conn
, isert_cmd
, send_wr
, true);
1862 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1864 return isert_post_response(isert_conn
, isert_cmd
);
1868 isert_put_nopin(struct iscsi_cmd
*cmd
, struct iscsi_conn
*conn
,
1869 bool nopout_response
)
1871 struct isert_cmd
*isert_cmd
= iscsit_priv_cmd(cmd
);
1872 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1873 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1875 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1876 iscsit_build_nopin_rsp(cmd
, conn
, (struct iscsi_nopin
*)
1877 &isert_cmd
->tx_desc
.iscsi_header
,
1879 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1880 isert_init_send_wr(isert_conn
, isert_cmd
, send_wr
, false);
1882 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1884 return isert_post_response(isert_conn
, isert_cmd
);
1888 isert_put_logout_rsp(struct iscsi_cmd
*cmd
, struct iscsi_conn
*conn
)
1890 struct isert_cmd
*isert_cmd
= iscsit_priv_cmd(cmd
);
1891 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1892 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1894 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1895 iscsit_build_logout_rsp(cmd
, conn
, (struct iscsi_logout_rsp
*)
1896 &isert_cmd
->tx_desc
.iscsi_header
);
1897 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1898 isert_init_send_wr(isert_conn
, isert_cmd
, send_wr
, false);
1900 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1902 return isert_post_response(isert_conn
, isert_cmd
);
1906 isert_put_tm_rsp(struct iscsi_cmd
*cmd
, struct iscsi_conn
*conn
)
1908 struct isert_cmd
*isert_cmd
= iscsit_priv_cmd(cmd
);
1909 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1910 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1912 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1913 iscsit_build_task_mgt_rsp(cmd
, conn
, (struct iscsi_tm_rsp
*)
1914 &isert_cmd
->tx_desc
.iscsi_header
);
1915 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1916 isert_init_send_wr(isert_conn
, isert_cmd
, send_wr
, false);
1918 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1920 return isert_post_response(isert_conn
, isert_cmd
);
1924 isert_put_reject(struct iscsi_cmd
*cmd
, struct iscsi_conn
*conn
)
1926 struct isert_cmd
*isert_cmd
= iscsit_priv_cmd(cmd
);
1927 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1928 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1929 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1930 struct ib_sge
*tx_dsg
= &isert_cmd
->tx_desc
.tx_sg
[1];
1931 struct iscsi_reject
*hdr
=
1932 (struct iscsi_reject
*)&isert_cmd
->tx_desc
.iscsi_header
;
1934 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1935 iscsit_build_reject(cmd
, conn
, hdr
);
1936 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1938 hton24(hdr
->dlength
, ISCSI_HDR_LEN
);
1939 isert_cmd
->pdu_buf_dma
= ib_dma_map_single(ib_dev
,
1940 (void *)cmd
->buf_ptr
, ISCSI_HDR_LEN
,
1942 isert_cmd
->pdu_buf_len
= ISCSI_HDR_LEN
;
1943 tx_dsg
->addr
= isert_cmd
->pdu_buf_dma
;
1944 tx_dsg
->length
= ISCSI_HDR_LEN
;
1945 tx_dsg
->lkey
= isert_conn
->conn_mr
->lkey
;
1946 isert_cmd
->tx_desc
.num_sge
= 2;
1948 isert_init_send_wr(isert_conn
, isert_cmd
, send_wr
, false);
1950 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1952 return isert_post_response(isert_conn
, isert_cmd
);
1956 isert_put_text_rsp(struct iscsi_cmd
*cmd
, struct iscsi_conn
*conn
)
1958 struct isert_cmd
*isert_cmd
= iscsit_priv_cmd(cmd
);
1959 struct isert_conn
*isert_conn
= (struct isert_conn
*)conn
->context
;
1960 struct ib_send_wr
*send_wr
= &isert_cmd
->tx_desc
.send_wr
;
1961 struct iscsi_text_rsp
*hdr
=
1962 (struct iscsi_text_rsp
*)&isert_cmd
->tx_desc
.iscsi_header
;
1966 isert_create_send_desc(isert_conn
, isert_cmd
, &isert_cmd
->tx_desc
);
1967 rc
= iscsit_build_text_rsp(cmd
, conn
, hdr
);
1972 isert_init_tx_hdrs(isert_conn
, &isert_cmd
->tx_desc
);
1975 struct ib_device
*ib_dev
= isert_conn
->conn_cm_id
->device
;
1976 struct ib_sge
*tx_dsg
= &isert_cmd
->tx_desc
.tx_sg
[1];
1977 void *txt_rsp_buf
= cmd
->buf_ptr
;
1979 isert_cmd
->pdu_buf_dma
= ib_dma_map_single(ib_dev
,
1980 txt_rsp_buf
, txt_rsp_len
, DMA_TO_DEVICE
);
1982 isert_cmd
->pdu_buf_len
= txt_rsp_len
;
1983 tx_dsg
->addr
= isert_cmd
->pdu_buf_dma
;
1984 tx_dsg
->length
= txt_rsp_len
;
1985 tx_dsg
->lkey
= isert_conn
->conn_mr
->lkey
;
1986 isert_cmd
->tx_desc
.num_sge
= 2;
1988 isert_init_send_wr(isert_conn
, isert_cmd
, send_wr
, false);
1990 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1992 return isert_post_response(isert_conn
, isert_cmd
);
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
		/* Only the first SGE carries the initial page offset */
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}
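/*
 * reg_rdma_mem() backend using the local DMA lkey: DMA-map the command's
 * scatterlist and build a chain of RDMA_WRITE (Data-In) or RDMA_READ
 * (Data-Out) work requests covering the remaining transfer length.
 */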
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	struct scatterlist *sg_start;
	u32 sg_off = 0, sg_nents;
	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, count, i, ib_sge_cnt;

	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		data_left = se_cmd->data_length;
	} else {
		sg_off = cmd->write_data_done / PAGE_SIZE;
		data_left = se_cmd->data_length - cmd->write_data_done;
		offset = cmd->write_data_done;
		isert_cmd->tx_desc.isert_cmd = isert_cmd;
	}

	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = se_cmd->t_data_nents - sg_off;

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
		return -EINVAL;
	}
	wr->num_sge = sg_nents;
	wr->cur_rdma_length = data_left;
	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, count, sg_start, sg_nents, data_left);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);
	return ret;
}
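/*
 * Pack the DMA-mapped scatterlist into a fast registration page list,
 * merging contiguous entries into chunks and emitting one PAGE_SIZE
 * aligned address per page.  Returns the number of pages written.
 */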
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
			 i, (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
				 n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}
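/*
 * Register the current page list with an IB_WR_FAST_REG_MR work request,
 * chaining an IB_WR_LOCAL_INV first when the descriptor still holds a
 * valid rkey, then point @ib_sge at the newly registered region.
 */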
static int
isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
		  unsigned int data_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;
	u8 key;

	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
	page_off = offset % PAGE_SIZE;

	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
		 fr_desc, sg_nents, offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
					     &fr_desc->data_frpl->page_list[0]);

	if (!fr_desc->valid) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(fr_desc->data_mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start =
		fr_desc->data_frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = data_len;
	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->valid = false;

	ib_sge->lkey = fr_desc->data_mr->lkey;
	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
	ib_sge->length = data_len;

	pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
		 ib_sge->addr, ib_sge->length, ib_sge->lkey);

	return ret;
}
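/*
 * reg_rdma_mem() backend using fast registration: a single SGE/WR covers
 * the transfer, either through the local DMA lkey when one mapped entry
 * suffices, or through a fast_reg_descriptor taken from conn_fr_pool.
 */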
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	struct scatterlist *sg_start;
	struct fast_reg_descriptor *fr_desc;
	u32 sg_off = 0, sg_nents;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int ret = 0, count;
	unsigned long flags;

	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		data_left = se_cmd->data_length;
	} else {
		offset = cmd->write_data_done;
		sg_off = offset / PAGE_SIZE;
		data_left = se_cmd->data_length - cmd->write_data_done;
		isert_cmd->tx_desc.isert_cmd = isert_cmd;
	}

	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = se_cmd->t_data_nents - sg_off;

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
		return -EINVAL;
	}
	wr->num_sge = sg_nents;
	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, count, sg_start, sg_nents, data_left);

	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
	ib_sge = &wr->s_ib_sge;
	wr->ib_sge = ib_sge;

	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = 0;
		send_wr->next = &isert_cmd->tx_desc.send_wr;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	data_len = min(data_left, rdma_write_max);
	wr->cur_rdma_length = data_len;

	/* if there is a single dma entry, dma mr is sufficient */
	if (count == 1) {
		ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
		ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
		ib_sge->lkey = isert_conn->conn_mr->lkey;
		wr->fr_desc = NULL;
	} else {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;

		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
					ib_sge, sg_nents, offset, data_len);
		if (ret) {
			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
			goto unmap_sg;
		}
	}

	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);
	return ret;
}
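/*
 * queue_data_in() callback: register the Data-In payload, then post the
 * RDMA_WRITE work requests with the SCSI response send chained behind them.
 */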
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
		 isert_cmd, se_cmd->data_length);
	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	/*
	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd,
			   &isert_cmd->tx_desc.send_wr, true);

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
		 isert_cmd);

	return 1;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		 isert_cmd);

	return 0;
}
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
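/*
 * Allocate the iSER network portal context, then create, bind and listen
 * on an RDMA CM ID for the sockaddr passed down from configfs.
 */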
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	init_waitqueue_head(&isert_np->np_accept_wq);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(isert_lid)) {
		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
		       PTR_ERR(isert_lid));
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	ret = rdma_bind_addr(isert_lid, sa);
	if (ret) {
		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;
	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

	return 0;

out_lid:
	rdma_destroy_id(isert_lid);
out:
	kfree(isert_np);
	return ret;
}
static int
isert_check_accept_queue(struct isert_np *isert_np)
{
	int empty;

	mutex_lock(&isert_np->np_accept_mutex);
	empty = list_empty(&isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	return empty;
}
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.responder_resources = isert_conn->responder_resources;
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
	return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}
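/*
 * Wait for a new connection on the np_accept_list, post the initial login
 * receive buffer, complete rdma_accept() and record the peer/local address
 * information on the iscsi_conn.
 */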
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = wait_event_interruptible(isert_np->np_accept_wq,
			!isert_check_accept_queue(isert_np) ||
			np->np_thread_state == ISCSI_NP_THREAD_RESET);
	if (max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		return ret;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		return ret;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}
static int isert_check_state(struct isert_conn *isert_conn, int state)
{
	int ret;

	mutex_lock(&isert_conn->conn_mutex);
	ret = (isert_conn->state == state);
	mutex_unlock(&isert_conn->conn_mutex);

	return ret;
}
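/*
 * Connection teardown from the iSCSI target core: issue rdma_disconnect()
 * if needed and wait for the connection to reach ISER_CONN_DOWN before
 * dropping the final reference via isert_put_conn().
 */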
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_free_conn: Starting \n");
	/*
	 * Decrement post_send_buf_count for special case when called
	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
	 */
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->logout_posted)
		atomic_dec(&isert_conn->post_send_buf_count);

	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
		rdma_disconnect(isert_conn->conn_cm_id);
	}
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_UP) {
		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
			 isert_conn->state);
		mutex_unlock(&isert_conn->conn_mutex);

		wait_event(isert_conn->conn_wait_comp_err,
			   (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));

		wait_event(isert_conn->conn_wait,
			   (isert_check_state(isert_conn, ISER_CONN_DOWN)));

		isert_put_conn(isert_conn);
		return;
	}
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
		 isert_conn->state);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_event(isert_conn->conn_wait,
		   (isert_check_state(isert_conn, ISER_CONN_DOWN)));

	isert_put_conn(isert_conn);
}
static struct iscsit_transport iser_target_transport = {
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
};
static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}
static void __exit isert_exit(void)
{
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);