/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"
#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
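
/*
 * Quick sanity sketch of the CQ sizing above (the ISERT_QP_MAX_*_DTOS
 * constants live in ib_isert.h): each completion queue is provisioned
 * for the worst case of all ISERT_MAX_CONN (8) connections keeping
 * their full descriptor rings in flight at once.  The result is still
 * clamped against dev_attr->max_cqe reported by the HCA in
 * isert_create_device_ib_res() below.
 */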
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		    struct isert_rdma_wr *wr);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}
static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	int ret, index, min_index = 0;

	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		goto err;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
err:
	mutex_lock(&device_list_mutex);
	device->cq_active_qps[min_index]--;
	mutex_unlock(&device_list_mutex);

	return ret;
}
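
/*
 * Note on the selection loop above: new QPs are spread across the
 * per-device CQs least-loaded first.  cq_active_qps[] is protected by
 * device_list_mutex, and the counter taken here is dropped again in
 * the error path above and in isert_connect_release() on teardown.
 */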
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}
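
/*
 * The descriptors above form a simple ring: isert_post_recv() walks
 * conn_rx_descs starting at conn_rx_desc_head and wraps with a
 * power-of-two mask, so each descriptor keeps its DMA mapping for the
 * lifetime of the connection and is only unmapped again in
 * isert_free_rx_descriptors().
 */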
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}
static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr;
	int ret = 0, i, j;
	int max_rx_cqe, max_tx_cqe;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		device->use_frwr = 1;
		device->reg_rdma_mem = isert_reg_rdma_frwr;
		device->unreg_rdma_mem = isert_unreg_rdma_frwr;
	} else {
		device->use_frwr = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors, device->use_frwr);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	device->dev_pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->dev_pd)) {
		ret = PTR_ERR(device->dev_pd);
		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
		goto out_cq_desc;
	}

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						max_rx_cqe, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						max_tx_cqe, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->dev_mr)) {
		ret = PTR_ERR(device->dev_mr);
		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
		goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	ib_dealloc_pd(device->dev_pd);

out_cq_desc:
	kfree(device->cq_desc);

	return ret;
}
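
/*
 * The reg_rdma_mem/unreg_rdma_mem hooks chosen above give the rest of
 * the driver a single call-site per direction, e.g. (sketch):
 *
 *	ret = device->reg_rdma_mem(conn, cmd, wr);
 *	...
 *	device->unreg_rdma_mem(isert_cmd, isert_conn);
 *
 * dispatching to the fast registration (FRWR) path when the HCA
 * advertises IB_DEVICE_MEM_MGT_EXTENSIONS, and to the plain dma_mr
 * scatterlist mapping path otherwise.
 */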
static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	ib_dereg_mr(device->dev_mr);
	ib_dealloc_pd(device->dev_pd);
	kfree(device->cq_desc);
}
static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}
static void
isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_frwr_pool))
		return;

	pr_debug("Freeing conn %p frwr pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_frwr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_frwr_pool_size)
		pr_warn("Pool still has %d regions registered\n",
			isert_conn->conn_frwr_pool_size - i);
}
static int
isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->conn_frwr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			pr_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		fr_desc->data_frpl =
			ib_alloc_fast_reg_page_list(device->ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(fr_desc->data_frpl)) {
			pr_err("Failed to allocate fr_pg_list err=%ld\n",
			       PTR_ERR(fr_desc->data_frpl));
			ret = PTR_ERR(fr_desc->data_frpl);
			goto err;
		}

		fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
						ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(fr_desc->data_mr)) {
			pr_err("Failed to allocate frmr err=%ld\n",
			       PTR_ERR(fr_desc->data_mr));
			ret = PTR_ERR(fr_desc->data_mr);
			ib_free_fast_reg_page_list(fr_desc->data_frpl);
			goto err;
		}
		pr_debug("Create fr_desc %p page_list %p\n",
			 fr_desc, fr_desc->data_frpl->page_list);

		fr_desc->valid = true;
		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
		isert_conn->conn_frwr_pool_size++;
	}

	pr_debug("Creating conn %p frwr pool size=%d",
		 isert_conn, isert_conn->conn_frwr_pool_size);

	return 0;

err:
	isert_conn_free_frwr_pool(isert_conn);
	return ret;
}
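
/*
 * Worked example for the tag_num arithmetic above, assuming a session
 * queue_depth of 32 and ISCSIT_MIN_TAGS <= 32:
 *
 *	tag_num = (32 * 2) + ISCSIT_EXTRA_TAGS
 *
 * i.e. two fast registration descriptors per outstanding tag plus
 * slack, mirroring the tag pool sized in iscsi_target_locate_portal().
 */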
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	spin_lock_init(&isert_conn->conn_lock);
	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
	INIT_WORK(&isert_conn->release_work, isert_release_work);

	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = device->dev_pd;
	isert_conn->conn_mr = device->dev_mr;

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
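
/*
 * Layout of the single login_buf allocation above (sketch):
 *
 *	login_buf
 *	+-----------------------------+--------------------+
 *	| login_req_buf               | login_rsp_buf      |
 *	| ISCSI_DEF_MAX_RECV_SEG_LEN  | ISER_RX_LOGIN_SIZE |
 *	+-----------------------------+--------------------+
 *
 * The request half is mapped DMA_FROM_DEVICE and the response half
 * DMA_TO_DEVICE, which is why the error unwind above unmaps the two
 * halves with matching directions.
 */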
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (device && device->use_frwr)
		isert_conn_free_frwr_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->conn_cm_id)
		rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		mutex_lock(&device_list_mutex);
		isert_conn->conn_device->cq_active_qps[cq_index]--;
		mutex_unlock(&device_list_mutex);

		ib_destroy_qp(isert_conn->conn_qp);
	}

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	pr_info("conn %p\n", isert_conn);

	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
		pr_warn("conn %p connect_release is running\n", isert_conn);
		return;
	}

	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
		isert_conn->state = ISER_CONN_UP;
	mutex_unlock(&isert_conn->conn_mutex);
}
static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with conn_mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
		/*
		 * No flush completions will occur as we didn't
		 * get to ISER_CONN_FULL_FEATURE yet, complete
		 * to allow teardown progress.
		 */
		complete(&isert_conn->conn_wait_comp_err);
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		pr_info("Terminating conn %p state %d\n",
			isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->conn_cm_id);
		if (err)
			pr_warn("Failed rdma_disconnect isert_conn %p\n",
				isert_conn);
		break;
	default:
		pr_warn("conn %p terminating in state %d\n",
			isert_conn, isert_conn->state);
	}
}
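
/*
 * State transitions driven by isert_conn_terminate() (sketch):
 *
 *	UP ------------> TERMINATING  (+ complete conn_wait_comp_err,
 *	                               since no flush errors arrive yet)
 *	FULL_FEATURE --> TERMINATING  (rdma_disconnect starts teardown)
 *	TERMINATING ---> no-op        (safe to call repeatedly)
 */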
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	pr_debug("isert np %p, handling event %d\n", isert_np, event);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			pr_err("isert np %p setup id failed: %ld\n",
			       isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		pr_err("isert np %p Unexpected event %d\n",
		       isert_np, event);
	}

	return -1;
}
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	bool terminating = false;

	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->conn_mutex);
	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("conn %p completing conn_wait\n", isert_conn);
	complete(&isert_conn->conn_wait);

	if (terminating)
		goto out;

	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_conn->conn_accept_node)) {
		list_del_init(&isert_conn->conn_accept_node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->np_accept_mutex);

out:
	return 0;
}
static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_conn->conn_cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
			       event->event, ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}
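
/*
 * The wrap above, (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1), only
 * works because ISERT_QP_MAX_RECV_DTOS is a power of two; the mask is
 * equivalent to (rx_head + 1) % ISERT_QP_MAX_RECV_DTOS.
 */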
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	= NULL;
	send_wr.wr_id	= (unsigned long)tx_desc;
	send_wr.sg_list	= tx_desc->tx_sg;
	send_wr.num_sge	= tx_desc->num_sge;
	send_wr.opcode	= IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
			ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}
static void
isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
{
	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->send_flags = IB_SEND_SIGNALED;
	send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
}
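
/*
 * Note: wr_id carries a pointer to the iser_tx_desc embedded in
 * isert_cmd, so isert_cq_tx_work() can recover the descriptor from a
 * completion with a plain cast:
 *
 *	tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
 */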
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
}
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr	= isert_conn->login_rsp_dma;
		tx_dsg->length	= length;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (isert_conn->conn_device->use_frwr) {
				ret = isert_conn_create_frwr_pool(isert_conn);
				if (ret) {
					pr_err("Conn: %p failed to create"
					       " frwr_pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->conn_mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->conn_mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	pr_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
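
/*
 * The current_stage extraction above follows RFC 3720: the CSG field
 * occupies bits 2-3 of the login request flags byte, hence the mask
 * with ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK followed by the shift
 * right by two.
 */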
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, gfp);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		goto sequence_cmd;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */
	return iscsit_process_nop_out(conn, cmd, hdr);
}
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						(unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	case ISCSI_OP_TEXT:
		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}
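
/*
 * The ISER_RSV/ISER_WSV bits parsed above advertise the initiator's
 * remote read/write stags and virtual addresses; isert_rx_opcode()
 * stashes them in isert_cmd, where they later become the remote
 * targets for RDMA_WRITE (Data-In) and RDMA_READ (Data-Out).
 */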
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->conn_mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->conn_mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
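
/*
 * Replenish arithmetic above (sketch): with ISERT_QP_MAX_RECV_DTOS
 * receive slots and ISERT_MIN_POSTED_RX as the low-water mark, once
 * outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS the
 * ring is topped back up in ISERT_MIN_POSTED_RX-sized batches.
 */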
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
	if (wr->sge) {
		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);
		wr->sge = NULL;
	}

	if (wr->send_wr) {
		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}
static void
isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	LIST_HEAD(unmap_list);

	pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);

	if (wr->fr_desc) {
		pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
			 isert_cmd, wr->fr_desc);
		spin_lock_bh(&isert_conn->conn_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
		spin_unlock_bh(&isert_conn->conn_lock);
		wr->fr_desc = NULL;
	}

	if (wr->sge) {
		pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE);
		wr->sge = NULL;
	}

	wr->ib_sge = NULL;
	wr->send_wr = NULL;
}
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->cur_rdma_length;
	wr->send_wr_num = 0;

	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	target_execute_cmd(se_cmd);
}
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		atomic_dec(&isert_conn->post_send_buf_count);
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}
	atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
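
/*
 * Credit accounting note: responses that complete an RDMA_WRITE
 * sequence were posted as wr->send_wr_num RDMA work requests plus one
 * IB_WR_SEND, so the atomic_sub(wr->send_wr_num + 1, ...) above
 * retires the whole chain in one step; the control-PDU cases are
 * decremented one-by-one in isert_do_control_comp() instead.
 */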
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		dump_stack();
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		dump_stack();
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}
static void
isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;

	if (!isert_cmd)
		isert_unmap_tx_desc(tx_desc, ib_dev);
	else
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
}
static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	if (isert_conn->post_recv_buf_count)
		return;

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}

	while (atomic_read(&isert_conn->post_send_buf_count))
		msleep(3000);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);

	complete(&isert_conn->conn_wait_comp_err);
}
static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);

			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
				atomic_dec(&isert_conn->post_send_buf_count);
				isert_cq_tx_comp_err(tx_desc, isert_conn);
			}
		}
	}

	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}
static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}
static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			if (wc.status != IB_WC_WR_FLUSH_ERR) {
				pr_debug("RX wc.status: 0x%08x\n", wc.status);
				pr_debug("RX wc.vendor_err: 0x%08x\n",
					 wc.vendor_err);
			}
			isert_conn->post_recv_buf_count--;
			isert_cq_rx_comp_err(isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}
static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		pr_err("ib_post_send failed with %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
		return ret;
	}
	return ret;
}
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
	tx_dsg->length	= ISCSI_HDR_LEN;
	tx_dsg->lkey	= isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}

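/*
 * Map the TCM scatterlist for RDMA_WRITE (Data-In) or RDMA_READ
 * (Data-Out), splitting the payload into one ib_send_wr per
 * max_sge * PAGE_SIZE chunk. The final RDMA_WRITE is chained to the
 * SCSI response send_wr, while the final RDMA_READ is posted signaled
 * so its completion can finish Data-Out processing.
 */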
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	struct scatterlist *sg_start;
	u32 sg_off = 0, sg_nents;
	u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, count, i, ib_sge_cnt;

	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		data_left = se_cmd->data_length;
	} else {
		sg_off = cmd->write_data_done / PAGE_SIZE;
		data_left = se_cmd->data_length - cmd->write_data_done;
		offset = cmd->write_data_done;
		isert_cmd->tx_desc.isert_cmd = isert_cmd;
	}

	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = se_cmd->t_data_nents - sg_off;

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	wr->cur_rdma_length = data_left;
	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, count, sg_start, sg_nents, data_left);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);
	return ret;
}

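/*
 * Translate a DMA-mapped scatterlist into a fast registration page
 * list, coalescing contiguous SG entries and emitting one PAGE_MASK
 * aligned address per page of each coalesced chunk. Returns the
 * number of pages written into fr_pl.
 */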
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
			 i, (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
				 n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}

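/*
 * Register the command's data buffer with an IB_WR_FAST_REG_MR work
 * request. If the descriptor's key is no longer valid, an
 * IB_WR_LOCAL_INV is chained in front of it; both are posted with
 * ISER_FASTREG_LI_WRID so they can be told apart from normal TX
 * completions.
 */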
static int
isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
		  struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
		  struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, page_off;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	u8 key;
	int ret, sg_nents, pagelist_len;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
			 ISCSI_ISER_SG_TABLESIZE);
	page_off = offset % PAGE_SIZE;

	pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
		 isert_cmd, fr_desc, sg_nents, sg_off, offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
					     &fr_desc->data_frpl->page_list[0]);

	if (!fr_desc->valid) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(fr_desc->data_mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start =
		fr_desc->data_frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = data_len;
	fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->valid = false;

	ib_sge->lkey = fr_desc->data_mr->lkey;
	ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
	ib_sge->length = data_len;

	pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
		 ib_sge->addr, ib_sge->length, ib_sge->lkey);

	return ret;
}

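/*
 * FRWR variant of the RDMA setup, describing the transfer with a
 * single ib_sge: a single DMA entry can use the connection's DMA MR
 * lkey directly, otherwise a fast_reg_descriptor is taken from
 * conn_frwr_pool and registered via isert_fast_reg_mr().
 */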
static int
isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		    struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	struct scatterlist *sg_start;
	struct fast_reg_descriptor *fr_desc;
	u32 sg_off = 0, sg_nents;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int ret = 0, count;
	unsigned long flags;

	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		data_left = se_cmd->data_length;
	} else {
		sg_off = cmd->write_data_done / PAGE_SIZE;
		data_left = se_cmd->data_length - cmd->write_data_done;
		offset = cmd->write_data_done;
		isert_cmd->tx_desc.isert_cmd = isert_cmd;
	}

	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = se_cmd->t_data_nents - sg_off;

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
			      (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Cmd: %p unable to map SGs\n", isert_cmd);
		return -EINVAL;
	}
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, count, sg_start, sg_nents, data_left);

	memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
	ib_sge = &wr->s_ib_sge;
	wr->ib_sge = ib_sge;

	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = 0;
		send_wr->next = &isert_cmd->tx_desc.send_wr;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	data_len = min(data_left, rdma_write_max);
	wr->cur_rdma_length = data_len;

	/* if there is a single dma entry, dma mr is sufficient */
	if (count == 1) {
		ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
		ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
		ib_sge->lkey = isert_conn->conn_mr->lkey;
		wr->fr_desc = NULL;
	} else {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;

		ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
					ib_sge, offset, data_len);
		if (ret) {
			list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
			goto unmap_sg;
		}
	}

	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
			(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);
	return ret;
}

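/*
 * Queue Data-In: the RDMA_WRITE work request chain carries the SCSI
 * response send_wr at its tail, so payload and status are posted with
 * a single ib_post_send() call.
 */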
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
		 isert_cmd, se_cmd->data_length);
	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	/*
	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
		 isert_cmd);

	return 1;
}

static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		 isert_cmd);

	return 0;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

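/*
 * Create, bind and listen on the RDMA CM ID used to accept incoming
 * iSER connections for a network portal.
 */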
static struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	pr_debug("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		pr_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.responder_resources = isert_conn->responder_resources;
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		pr_err("isert_conn %p interrupted before got login req\n",
		       isert_conn);
		return ret;
	}
	INIT_COMPLETION(isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	pr_info("before conn_login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}

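/*
 * Accept path for new iSER connections: the RDMA CM connect handler
 * queues the connection on np_accept_list and ups np_sem; this handler
 * sleeps on the semaphore until a connection arrives or the np thread
 * is being reset.
 */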
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = down_interruptible(&isert_np->np_sem);
	if (ret || max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("np_thread_state %d for isert_accept_np\n",
			 np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
				      struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}

static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	pr_info("Starting release conn %p\n", isert_conn);

	wait_for_completion(&isert_conn->conn_wait);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->conn_mutex);

	pr_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_wait_conn: Starting\n");

	mutex_lock(&isert_conn->conn_mutex);
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	wait_for_completion(&isert_conn->conn_wait_comp_err);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_put_conn(isert_conn);
}

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
};

static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		pr_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);
destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);