/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static struct kmem_cache *isert_cmd_cache;
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}
static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
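
/*
 * Create the RC QP for a new connection, binding its send/recv CQs to
 * the least-loaded CQ pair (cq_active_qps is balanced under
 * device_list_mutex).
 */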
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	struct ib_device_attr devattr;
	int ret, index, min_index = 0;

	memset(&devattr, 0, sizeof(struct ib_device_attr));
	ret = isert_query_device(cma_id->device, &devattr);
	if (ret)
		return ret;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READ..
	 */
	attr.cap.max_send_sge = devattr.max_sge - 2;
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ret;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}
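
/*
 * Allocate and DMA-map the fixed ring of ISERT_QP_MAX_RECV_DTOS RX
 * descriptors used to post receive buffers for inbound iSCSI PDUs.
 * On a mapping failure, everything mapped so far is unwound.
 */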
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}

static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_callback(struct ib_cq *, void *);
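
/*
 * Set up the per-ib_device resources shared by all connections on that
 * device: one RX/TX CQ pair per completion vector (capped by
 * num_online_cpus() and ISERT_MAX_CQ), plus the PD and local DMA MR.
 */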
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	int ret = 0, i, j;

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	device->dev_pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->dev_pd)) {
		ret = PTR_ERR(device->dev_pd);
		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
		goto out_cq_desc;
	}

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i]))
			goto out_cq;

		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i]))
			goto out_cq;

		if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
			goto out_cq;

		if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
			goto out_cq;
	}

	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->dev_mr)) {
		ret = PTR_ERR(device->dev_mr);
		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
		goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	ib_dealloc_pd(device->dev_pd);

out_cq_desc:
	kfree(device->cq_desc);

	return ret;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	ib_dereg_mr(device->dev_mr);
	ib_dealloc_pd(device->dev_pd);
	kfree(device->cq_desc);
}
static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}
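
/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: allocate the isert_conn,
 * DMA-map the login request/response buffers, look up (or create) the
 * isert_device, create the QP, and queue the connection for accept.
 */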
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iscsi_np *np = cma_id->context;
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_waitqueue_head(&isert_conn->conn_wait);
	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	kref_get(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);

	cma_id->context = isert_conn;
	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = device->dev_pd;
	isert_conn->conn_mr = device->dev_mr;

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
	wake_up(&isert_np->np_accept_wq);
	return 0;

out_conn_dev:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		isert_conn->conn_device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(isert_conn->conn_cm_id);
	}

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	return;
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
static void
isert_disconnect_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
				struct isert_conn, conn_logout_work);

	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
		mutex_unlock(&isert_conn->conn_mutex);
		goto wake_up;
	}
	if (!isert_conn->conn_cm_id) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	if (!isert_conn->logout_posted) {
		pr_debug("Calling rdma_disconnect for !logout_posted from"
			 " isert_disconnect_work\n");
		rdma_disconnect(isert_conn->conn_cm_id);
		mutex_unlock(&isert_conn->conn_mutex);
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		goto wake_up;
	}
	mutex_unlock(&isert_conn->conn_mutex);

wake_up:
	wake_up(&isert_conn->conn_wait);
	isert_put_conn(isert_conn);
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
	schedule_work(&isert_conn->conn_logout_work);

	return 0;
}
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
		ret = isert_connect_request(cma_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
		isert_disconnected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	default:
		pr_err("Unknown RDMA CMA event: %d\n", event->event);
		break;
	}

	if (ret != 0) {
		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
		       event->event, ret);
		dump_stack();
	}

	return ret;
}
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
				&rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	= NULL;
	send_wr.wr_id	= (unsigned long)tx_desc;
	send_wr.sg_list	= tx_desc->tx_sg;
	send_wr.num_sge	= tx_desc->num_sge;
	send_wr.opcode	= IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey	= isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}
static void
isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
{
	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->send_flags = IB_SEND_SIGNALED;
	send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
}
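
/*
 * Post the single login receive buffer used before the normal RX
 * descriptor ring is allocated at login completion.
 */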
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
}
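
/*
 * Transmit a login response PDU. Once the login completes, allocate
 * the RX ring and pre-post ISERT_MIN_POSTED_RX buffers before marking
 * the connection ISER_CONN_UP; otherwise re-post the login buffer.
 */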
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr	= isert_conn->login_rsp_dma;
		tx_dsg->length	= length;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			isert_conn->state = ISER_CONN_UP;
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
		   struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	if (!login) {
		pr_err("conn->conn_login is NULL\n");
		dump_stack();
		return;
	}

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	complete(&isert_conn->conn_login_comp);
}
static void
isert_release_cmd(struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
						   iscsi_cmd);

	pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);

	kfree(cmd->buf_ptr);
	kfree(cmd->tmr_req);

	kmem_cache_free(isert_cmd_cache, isert_cmd);
}

static struct iscsi_cmd
*isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;

	isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
	if (!isert_cmd) {
		pr_err("Unable to allocate isert_cmd\n");
		return NULL;
	}
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;

	return &isert_cmd->iscsi_cmd;
}
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
		      unsigned char *buf)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);

	return 0;
}
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iser_rx_desc *rx_desc, struct iscsi_text *hdr)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
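
/*
 * Demultiplex an inbound iSCSI PDU by opcode and hand it to the
 * matching iscsi-target entry point, stashing the remote R-Key/VA
 * pair from the iSER header for later RDMA on SCSI commands.
 */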
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	case ISCSI_OP_TEXT:
		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}
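
/*
 * RX completion path: distinguish login traffic from normal PDUs by
 * buffer address, sync the DMA buffer, process the PDU, and replenish
 * the receive queue when it drops below the posting threshold.
 */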
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");

	if (wr->sge) {
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	kfree(wr->send_wr);
	wr->send_wr = NULL;

	kfree(isert_cmd->ib_sge);
	isert_cmd->ib_sge = NULL;
}
static void
isert_put_cmd(struct isert_cmd *isert_cmd)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE)
			iscsit_stop_dataout_timer(cmd);

		isert_unmap_cmd(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		isert_release_cmd(cmd);
		break;
	}
}
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd);
}
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;

	iscsit_stop_dataout_timer(cmd);

	if (wr->sge) {
		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
		wr->sge = NULL;
	}

	if (isert_cmd->ib_sge) {
		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
		kfree(isert_cmd->ib_sge);
		isert_cmd->ib_sge = NULL;
	}

	cmd->write_data_done = se_cmd->data_length;

	pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	target_execute_cmd(se_cmd);
}
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
		/*
		 * Call atomic_dec(&isert_conn->post_send_buf_count)
		 * from isert_free_conn()
		 */
		isert_conn->logout_posted = true;
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		atomic_dec(&isert_conn->post_send_buf_count);
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}
	atomic_dec(&isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev);
}
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		dump_stack();
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}
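
/*
 * Flush/error completion handling: release the TX descriptor (if any)
 * and, once no RX or TX work remains posted, move the connection to
 * ISER_CONN_TERMINATING and wake the comp_err waiter.
 */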
static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	if (tx_desc) {
		struct isert_cmd *isert_cmd = tx_desc->isert_cmd;

		if (!isert_cmd)
			isert_unmap_tx_desc(tx_desc, ib_dev);
		else
			isert_completion_put(tx_desc, isert_cmd, ib_dev);
	}

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
		pr_debug("Calling wake_up from isert_cq_comp_err\n");

		mutex_lock(&isert_conn->conn_mutex);
		if (isert_conn->state != ISER_CONN_DOWN)
			isert_conn->state = ISER_CONN_TERMINATING;
		mutex_unlock(&isert_conn->conn_mutex);

		wake_up(&isert_conn->conn_wait_comp_err);
	}
}
static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
			atomic_dec(&isert_conn->post_send_buf_count);
			isert_cq_comp_err(tx_desc, isert_conn);
		}
	}

	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}
static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			if (wc.status != IB_WC_WR_FLUSH_ERR) {
				pr_debug("RX wc.status: 0x%08x\n", wc.status);
				pr_debug("RX wc.vendor_err: 0x%08x\n",
					 wc.vendor_err);
			}
			isert_conn->post_recv_buf_count--;
			isert_cq_comp_err(NULL, isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		pr_err("ib_post_send failed with %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
		return ret;
	}
	return ret;
}
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	   ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
	tx_dsg->length	= ISCSI_HDR_LEN;
	tx_dsg->lkey	= isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= txt_rsp_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
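
/*
 * Populate one RDMA work request worth of ib_sge entries from the TCM
 * scatterlist starting at 'offset', limited by max_sge; returns the
 * number of SGEs consumed so the caller can chunk large transfers.
 */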
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u\n",
			 ib_sge->addr, ib_sge->length);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
					struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *ib_sge;
	struct scatterlist *sg;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;

	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);

	sg = &se_cmd->t_data_sg[0];
	sg_nents = se_cmd->t_data_nents;

	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map put_datain SGs\n");
		return -EINVAL;
	}
	wr->sge = sg;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
		 count, sg, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate datain ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
		 ib_sge, se_cmd->t_data_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
		pr_err("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	cmd->stat_sn = conn->stat_sn++;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	data_left = se_cmd->data_length;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->send_flags = 0;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		if (i + 1 == wr->send_wr_num)
			send_wr->next = &isert_cmd->tx_desc.send_wr;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		data_left -= data_len;
	}
	/*
	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
	return 1;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	return ret;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
					struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_sge *ib_sge;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, sg_nents, page_off, va_offset = 0;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, i, ib_sge_cnt;

	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
		 se_cmd->data_length, cmd->write_data_done);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	page_off = cmd->write_data_done % PAGE_SIZE;

	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
		 sg_off, sg_start, page_off);

	data_left = se_cmd->data_length - cmd->write_data_done;
	sg_nents = se_cmd->t_data_nents - sg_off;

	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
		 data_left, sg_nents);

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map get_dataout SGs\n");
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
		 count, sg_start, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate dataout ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
		 ib_sge, sg_nents);

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	wr->iser_ib_op = ISER_IB_RDMA_READ;
	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	offset = cmd->write_data_done;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		if (i + 1 == wr->send_wr_num)
			send_wr->send_flags = IB_SEND_SIGNALED;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	atomic_inc(&isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_dec(&isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	return ret;
}
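
/*
 * The two queue callbacks below are invoked by the iscsi_target core with
 * an ISTATE_* code for the PDU to be sent; each simply dispatches to the
 * matching isert_put_*() builder and rejects unknown states with -EINVAL.
 */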
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret = 0;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
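
/*
 * isert_setup_np() below is the RDMA-CM analogue of the setup a TCP network
 * portal performs on a socket: rdma_create_id() plays the role of socket(),
 * rdma_bind_addr() of bind(), and rdma_listen() of listen(). Incoming
 * connect requests are then delivered asynchronously via isert_cma_handler().
 */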
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	init_waitqueue_head(&isert_np->np_accept_wq);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
				IB_QPT_RC);
	if (IS_ERR(isert_lid)) {
		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
		       PTR_ERR(isert_lid));
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	ret = rdma_bind_addr(isert_lid, sa);
	if (ret) {
		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;
	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

	return 0;

out_lid:
	rdma_destroy_id(isert_lid);
out:
	kfree(isert_np);
	return ret;
}
static int
isert_check_accept_queue(struct isert_np *isert_np)
{
	int empty;

	mutex_lock(&isert_np->np_accept_mutex);
	empty = list_empty(&isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	return empty;
}
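
/*
 * In isert_rdma_accept() below, rnr_retry_count = 7 is the IBTA-defined
 * special value meaning "retry indefinitely" on Receiver-Not-Ready NAKs,
 * so the connection survives transient RECV buffer shortages.
 */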
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.responder_resources = isert_conn->responder_resources;
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}
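
/*
 * Login RX is event-driven for iSER: rather than reading from a socket,
 * isert_get_login_rx() blocks until the RX completion path signals
 * conn_login_comp for the received login PDU.
 */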
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);

	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
	return 0;
}
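
/*
 * isert_set_conn_info() below fills in the login/local address strings from
 * the rdma_route endpoints using the kernel printk extensions %pI4
 * (dotted-quad IPv4) and %pI6c (RFC 5952 compressed IPv6).
 */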
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}
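
/*
 * isert_accept_np() sleeps until isert_cma_handler() queues a new isert_conn
 * on np_accept_list (or the NP thread is reset); max_accept bounds the number
 * of consecutive empty-list wakeups tolerated before giving up with -ENODEV.
 */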
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = wait_event_interruptible(isert_np->np_accept_wq,
		   !isert_check_accept_queue(isert_np) ||
		   np->np_thread_state == ISCSI_NP_THREAD_RESET);
	if (max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		return ret;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		return ret;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}
static int isert_check_state(struct isert_conn *isert_conn, int state)
{
	int ret;

	mutex_lock(&isert_conn->conn_mutex);
	ret = (isert_conn->state == state);
	mutex_unlock(&isert_conn->conn_mutex);

	return ret;
}
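
/*
 * Connection teardown below depends on how far the connection progressed:
 * an ISER_CONN_INIT connection just drops its reference, while one that
 * reached full feature phase (ISER_CONN_UP) first waits for the completion
 * error and disconnect paths to move the state to TERMINATING and DOWN.
 */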
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_free_conn: Starting\n");
	/*
	 * Decrement post_send_buf_count for special case when called
	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
	 */
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->logout_posted)
		atomic_dec(&isert_conn->post_send_buf_count);

	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
		pr_debug("Calling rdma_disconnect from isert_free_conn\n");
		rdma_disconnect(isert_conn->conn_cm_id);
	}
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_UP) {
		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
			 isert_conn->state);
		mutex_unlock(&isert_conn->conn_mutex);

		wait_event(isert_conn->conn_wait_comp_err,
			  (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));

		wait_event(isert_conn->conn_wait,
			  (isert_check_state(isert_conn, ISER_CONN_DOWN)));

		isert_put_conn(isert_conn);
		return;
	}
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}
	pr_debug("isert_free_conn: wait_event conn_wait %d\n",
		 isert_conn->state);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_event(isert_conn->conn_wait,
		  (isert_check_state(isert_conn, ISER_CONN_DOWN)));

	isert_put_conn(isert_conn);
}
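
/*
 * Registration glue: this table plugs the iSER verbs implementation into the
 * generic iscsit transport API, so the iscsi_target core calls back into the
 * isert_* handlers above for portal setup, login, and data-path operations.
 */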
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_alloc_cmd	= isert_alloc_cmd,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
};
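
/*
 * Module init allocates resources in dependency order (RX workqueue,
 * completion workqueue, command cache) before registering the transport;
 * isert_exit() releases them again at unload.
 */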
static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
			sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
			0, NULL);
	if (!isert_cmd_cache) {
		pr_err("Unable to create isert_cmd_cache\n");
		ret = -ENOMEM;
		goto destroy_tx_cq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

destroy_tx_cq:
	destroy_workqueue(isert_comp_wq);
destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}
static void __exit isert_exit(void)
{
	kmem_cache_destroy(isert_cmd_cache);
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);