/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in6.h>
#include <linux/llist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>

#include "isert_proto.h"
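/*
 * Worst case, every connection's descriptors complete on the same CQ, so
 * each CQ is sized for the per-connection descriptor maximum multiplied
 * by the supported connection count.
 */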
#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_rx_wq;
static struct workqueue_struct *isert_comp_wq;
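/*
 * RX completions are processed on isert_rx_wq; TX and control-PDU
 * completion work runs on isert_comp_wq.
 */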
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	pr_err("isert_qp_event_callback event: %d\n", e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
		break;
	default:
		break;
	}
}
static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		pr_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
		    u8 protection)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	int ret, index, min_index = 0;
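	/*
	 * Spread QPs across completion vectors: pick the CQ pair that is
	 * currently serving the fewest active QPs.
	 */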
	mutex_lock(&device_list_mutex);
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = device->dev_tx_cq[min_index];
	attr.recv_cq = device->dev_rx_cq[min_index];
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READ..
	 */
	attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (protection)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
		 cma_id->device);
	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
		 isert_conn->conn_pd->device);

	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
	if (ret) {
		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ret;
	}
	isert_conn->conn_qp = cma_id->qp;
	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;
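	/*
	 * Each descriptor is DMA-mapped once here and reused for every
	 * repost; only dma_sync calls are needed around CPU access.
	 */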
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = isert_conn->conn_mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;
	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	return -ENOMEM;
}
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}
static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr;
	int ret = 0, i, j;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
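	/*
	 * Create one RX/TX CQ pair per completion vector, capped by the
	 * number of online CPUs and ISERT_MAX_CQ.
	 */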
	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors support "
		 "Fast registration %d pi_capable %d\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors, device->use_fastreg,
		 device->pi_capable);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				  device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;

out_cq:
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	kfree(device->cq_desc);

	return ret;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
	struct isert_cq_desc *cq_desc;
	int i;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc = &device->cq_desc[i];

		cancel_work_sync(&cq_desc->cq_rx_work);
		cancel_work_sync(&cq_desc->cq_tx_work);
		ib_destroy_cq(device->dev_rx_cq[i]);
		ib_destroy_cq(device->dev_tx_cq[i]);
		device->dev_rx_cq[i] = NULL;
		device->dev_tx_cq[i] = NULL;
	}

	kfree(device->cq_desc);
}
static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
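/*
 * isert_device instances are shared by all connections on the same HCA,
 * matched by node_guid and refcounted under device_list_mutex.
 */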
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	mutex_unlock(&device_list_mutex);

	return device;
}
static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_fr_pool))
		return;

	pr_debug("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_fr_pool_size)
		pr_warn("Pool still has %d regions registered\n",
			isert_conn->conn_fr_pool_size - i);
}
static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc, u8 protection)
{
	int ret;

	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		pr_err("Failed to allocate data frpl err=%ld\n",
		       PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		pr_err("Failed to allocate data frmr err=%ld\n",
		       PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);
		goto err_data_frpl;
	}
	pr_debug("Create fr_desc %p page_list %p\n",
		 fr_desc, fr_desc->data_frpl->page_list);
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	if (protection) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct pi_context *pi_ctx;

		fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
		if (!fr_desc->pi_ctx) {
			pr_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto err_data_mr;
		}
		pi_ctx = fr_desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			pr_err("Failed to allocate prot frpl err=%ld\n",
			       PTR_ERR(pi_ctx->prot_frpl));
			ret = PTR_ERR(pi_ctx->prot_frpl);
			goto err_pi_ctx;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_mr)) {
			pr_err("Failed to allocate prot frmr err=%ld\n",
			       PTR_ERR(pi_ctx->prot_mr));
			ret = PTR_ERR(pi_ctx->prot_mr);
			goto err_prot_frpl;
		}
		fr_desc->ind |= ISERT_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			pr_err("Failed to allocate signature enabled mr err=%ld\n",
			       PTR_ERR(pi_ctx->sig_mr));
			ret = PTR_ERR(pi_ctx->sig_mr);
			goto err_prot_mr;
		}
		fr_desc->ind |= ISERT_SIG_KEY_VALID;
	}
	fr_desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
err_prot_frpl:
	ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
err_pi_ctx:
	kfree(fr_desc->pi_ctx);
err_data_mr:
	ib_dereg_mr(fr_desc->data_mr);
err_data_frpl:
	ib_free_fast_reg_page_list(fr_desc->data_frpl);

	return ret;
}
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->conn_fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			pr_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   isert_conn->conn_pd, fr_desc,
					   pi_support);
		if (ret) {
			pr_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		isert_conn->conn_fr_pool_size++;
	}

	pr_debug("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->conn_fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iscsi_np *np = cma_id->context;
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;
	u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	kref_init(&isert_conn->conn_kref);
	kref_get(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	spin_lock_init(&isert_conn->conn_lock);
	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);

	cma_id->context = isert_conn;
	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);
	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
	if (IS_ERR(isert_conn->conn_pd)) {
		ret = PTR_ERR(isert_conn->conn_pd);
		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_pd;
	}

	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		ret = PTR_ERR(isert_conn->conn_mr);
		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
		       isert_conn, ret);
		goto out_mr;
	}

	if (pi_support && !device->pi_capable) {
		pr_err("Protection information requested but not supported\n");
		ret = -EINVAL;
		goto out_mr;
	}

	ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
	wake_up(&isert_np->np_accept_wq);
	return 0;

out_conn_dev:
	ib_dereg_mr(isert_conn->conn_mr);
out_mr:
	ib_dealloc_pd(isert_conn->conn_pd);
out_pd:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (device && device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	if (isert_conn->conn_qp) {
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		isert_conn->conn_device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(isert_conn->conn_cm_id);
	}

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	ib_dereg_mr(isert_conn->conn_mr);
	ib_dealloc_pd(isert_conn->conn_pd);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	return;
}
static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
		 current->comm, current->pid);

	isert_connect_release(isert_conn);
}
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
static void
isert_disconnect_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
				struct isert_conn, conn_logout_work);

	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state == ISER_CONN_UP)
		isert_conn->state = ISER_CONN_TERMINATING;

	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		mutex_unlock(&isert_conn->conn_mutex);
		goto wake_up;
	}
	if (!isert_conn->conn_cm_id) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}

	if (!isert_conn->logout_posted) {
		pr_debug("Calling rdma_disconnect for !logout_posted from"
			 " isert_disconnect_work\n");
		rdma_disconnect(isert_conn->conn_cm_id);
		mutex_unlock(&isert_conn->conn_mutex);
		iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
		goto wake_up;
	}
	mutex_unlock(&isert_conn->conn_mutex);

wake_up:
	complete(&isert_conn->conn_wait);
	isert_put_conn(isert_conn);
}
static void
isert_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
	schedule_work(&isert_conn->conn_logout_work);
}
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
		ret = isert_connect_request(cma_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
		isert_disconnected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	default:
		pr_err("Unknown RDMA CMA event: %d\n", event->event);
		break;
	}

	if (ret != 0) {
		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
		       event->event, ret);
	}

	return ret;
}
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;
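	/*
	 * Chain count work requests together; rx_head wraps with a mask,
	 * which assumes ISERT_QP_MAX_RECV_DTOS is a power of two.
	 */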
	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		pr_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (unsigned long)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret) {
		pr_err("ib_post_send() failed, ret: %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		pr_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;

	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);

	return 0;
}
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr, bool coalesce)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	/*
	 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
	 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
	 */
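	/*
	 * Unsignaled descriptors are parked on conn_comp_llist; when the
	 * batch fills, the whole list is handed to the signaled descriptor
	 * via comp_llnode_batch and drained in isert_send_completion().
	 */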
	mutex_lock(&isert_conn->conn_mutex);
	if (coalesce && isert_conn->state == ISER_CONN_UP &&
	    ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
		tx_desc->llnode_active = true;
		llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	isert_conn->conn_comp_batch = 0;
	tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
	mutex_unlock(&isert_conn->conn_mutex);

	send_wr->send_flags = IB_SEND_SIGNALED;
}
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_mr->lkey;

	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
		 sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		pr_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
	return ret;
}
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->conn_mr->lkey;
		tx_desc->num_sge = 2;
	}
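	/*
	 * On the final login response, stand up the full-feature-phase
	 * resources (fastreg pool, RX descriptor ring) before marking the
	 * connection ISER_CONN_UP.
	 */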
	if (!login->login_failed) {
		if (login->login_complete) {
			if (isert_conn->conn_device->use_fastreg) {
				u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;

				ret = isert_conn_create_fastreg_pool(isert_conn,
								     pi_support);
				if (ret) {
					pr_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			isert_conn->state = ISER_CONN_UP;
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
static void
isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
		   struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	if (!login) {
		pr_err("conn->conn_login is NULL\n");
		return;
	}

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		 sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);

	return 0;
}
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		pr_err("Received unexpected solicited data payload\n");
		return -1;
	}

	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		pr_err("Received unexpected non-page aligned data payload\n");
		return -1;
	}
	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	text_in = kzalloc(payload_length, GFP_KERNEL);
	if (!text_in) {
		pr_err("Unable to allocate text_in of payload_length: %u\n",
		       payload_length);
		return -ENOMEM;
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (sess->sess_ops->SessionType &&
	    (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
		       " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (ret > 0)
			wait_for_completion_timeout(&conn->conn_logout_comp,
						    SECONDS_FOR_LOGOUT_COMP *
						    HZ);
		break;
	case ISCSI_OP_TEXT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		break;
	}

	return ret;
}
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
				 read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			pr_debug("ISER_WSV: write_stag: 0x%08x write_va: 0x%16llx\n",
				 write_stag, (unsigned long long)write_va);
		}

		pr_debug("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		pr_err("iSER Hello message\n");
		break;
	default:
		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    unsigned long xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf)
		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
				   isert_conn);
	else
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
		 isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;
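	/*
	 * Replenish the RX ring in ISERT_MIN_POSTED_RX-sized batches once
	 * the outstanding count drops below the high-water mark.
	 */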
	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				     ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			pr_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
			    ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
			  PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		pr_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}
static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));
}
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	pr_debug("isert_unmap_cmd: %p\n", isert_cmd);

	if (wr->data.sg) {
		pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->send_wr) {
		pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	LIST_HEAD(unmap_list);

	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);

	if (wr->fr_desc) {
		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
			 isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		spin_lock_bh(&isert_conn->conn_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_bh(&isert_conn->conn_lock);
		wr->fr_desc = NULL;
	}

	if (wr->data.sg) {
		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}
}
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;

	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			pr_debug("Calling transport_generic_free_cmd from"
				 " isert_put_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/* fall through */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
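		/*
		 * Each block on the wire carries an extra 8-byte T10 DIF
		 * tuple, so include it when converting the error offset
		 * into a bad sector number.
		 */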
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		pr_err("isert: PI error found type %d at sector 0x%llx "
		       "expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type,
		       (unsigned long long)se_cmd->bad_sector,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}
static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	wr->send_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

	pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		target_execute_cmd(se_cmd);
}
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

		atomic_dec(&isert_conn->post_send_buf_count);
		iscsit_tmr_post_handler(cmd, cmd->conn);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_REJECT:
		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
		atomic_dec(&isert_conn->post_send_buf_count);

		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
		/*
		 * Call atomic_dec(&isert_conn->post_send_buf_count)
		 * from isert_wait_conn()
		 */
		isert_conn->logout_posted = true;
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		atomic_dec(&isert_conn->post_send_buf_count);
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
		break;
	default:
		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
		break;
	}
}
static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	/*
	 * If send_wr_num is 0 we already got the RDMA completion and
	 * cleared the accounting, so just decrement for this response
	 * post; otherwise the response is included in send_wr_num, so
	 * subtract the whole batch.
	 */
	if (wr->send_wr_num)
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	else
		atomic_dec(&isert_conn->post_send_buf_count);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
static void
__isert_send_completion(struct iser_tx_desc *tx_desc,
			struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		atomic_dec(&isert_conn->post_send_buf_count);
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	switch (wr->iser_ib_op) {
	case ISER_IB_RECV:
		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
		break;
	case ISER_IB_SEND:
		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
		break;
	}
}
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct llist_node *llnode = tx_desc->comp_llnode_batch;
	struct iser_tx_desc *t;
	/*
	 * Drain coalesced completion llist starting from comp_llnode_batch
	 * setup in isert_init_send_wr(), and then complete trailing tx_desc.
	 */
	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		__isert_send_completion(t, isert_conn);
	}
	__isert_send_completion(tx_desc, isert_conn);
}
static void
isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
{
	struct llist_node *llnode;
	struct isert_rdma_wr *wr;
	struct iser_tx_desc *t;

	mutex_lock(&isert_conn->conn_mutex);
	llnode = llist_del_all(&isert_conn->conn_comp_llist);
	isert_conn->conn_comp_batch = 0;
	mutex_unlock(&isert_conn->conn_mutex);

	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		wr = &t->isert_cmd->rdma_wr;

		/*
		 * If send_wr_num is 0 we already got the RDMA completion
		 * and cleared the accounting, so just decrement for the
		 * response post; otherwise the response is included in
		 * send_wr_num, so subtract the whole batch.
		 */
		if (wr->send_wr_num)
			atomic_sub(wr->send_wr_num,
				   &isert_conn->post_send_buf_count);
		else
			atomic_dec(&isert_conn->post_send_buf_count);

		isert_completion_put(t, t->isert_cmd, ib_dev, true);
	}
}
static void
isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct llist_node *llnode = tx_desc->comp_llnode_batch;
	struct isert_rdma_wr *wr;
	struct iser_tx_desc *t;

	while (llnode) {
		t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
		llnode = llist_next(llnode);
		wr = &t->isert_cmd->rdma_wr;

		/*
		 * If send_wr_num is 0 we already got the RDMA completion
		 * and cleared the accounting, so just decrement for the
		 * response post; otherwise the response is included in
		 * send_wr_num, so subtract the whole batch.
		 */
		if (wr->send_wr_num)
			atomic_sub(wr->send_wr_num,
				   &isert_conn->post_send_buf_count);
		else
			atomic_dec(&isert_conn->post_send_buf_count);

		isert_completion_put(t, t->isert_cmd, ib_dev, true);
	}
	tx_desc->comp_llnode_batch = NULL;

	if (!isert_cmd)
		isert_unmap_tx_desc(tx_desc, ib_dev);
	else
		isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
}
static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_conn *conn = isert_conn->conn;

	if (isert_conn->post_recv_buf_count)
		return;

	isert_cq_drain_comp_llist(isert_conn, ib_dev);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}

	while (atomic_read(&isert_conn->post_send_buf_count))
		msleep(3000);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->conn_mutex);

	complete(&isert_conn->conn_wait_comp_err);
}
static void
isert_cq_tx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
				struct isert_cq_desc, cq_tx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct ib_wc wc;

	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			isert_send_completion(tx_desc, isert_conn);
		} else {
			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			pr_debug("TX wc.status: 0x%08x\n", wc.status);
			pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);

			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
				if (tx_desc->llnode_active)
					continue;

				atomic_dec(&isert_conn->post_send_buf_count);
				isert_cq_tx_comp_err(tx_desc, isert_conn);
			}
		}
	}
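	/*
	 * Re-arm the CQ only after the polling loop has drained it; the
	 * next completion then triggers a fresh callback.
	 */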
	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
}
static void
isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}
static void
isert_cq_rx_work(struct work_struct *work)
{
	struct isert_cq_desc *cq_desc = container_of(work,
			struct isert_cq_desc, cq_rx_work);
	struct isert_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
	struct isert_conn *isert_conn;
	struct iser_rx_desc *rx_desc;
	struct ib_wc wc;
	unsigned long xfer_len;

	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		isert_conn = wc.qp->qp_context;

		if (wc.status == IB_WC_SUCCESS) {
			xfer_len = (unsigned long)wc.byte_len;
			isert_rx_completion(rx_desc, isert_conn, xfer_len);
		} else {
			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
			if (wc.status != IB_WC_WR_FLUSH_ERR) {
				pr_debug("RX wc.status: 0x%08x\n", wc.status);
				pr_debug("RX wc.vendor_err: 0x%08x\n",
					 wc.vendor_err);
			}
			isert_conn->post_recv_buf_count--;
			isert_cq_rx_comp_err(isert_conn);
		}
	}

	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
}
static void
isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;

	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}
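
/*
 * Common helper to post a single IB_WR_SEND carrying an iSCSI response PDU.
 * post_send_buf_count is bumped before the post and rolled back on failure,
 * since the error paths above use it to decide when the QP has drained.
 */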
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	atomic_inc(&isert_conn->post_send_buf_count);

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		pr_err("ib_post_send failed with %d\n", ret);
		atomic_dec(&isert_conn->post_send_buf_count);
	}

	return ret;
}
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
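
/*
 * Called when a command is aborted: unlink it from the per-connection list,
 * stop a pending Data-Out timer for WRITEs, and release its RDMA resources
 * via the device-specific unreg_rdma_mem() callback.
 */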
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;

	if (device->pi_capable)
		return TARGET_PROT_ALL;

	return TARGET_PROT_NORMAL;
}
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
	tx_dsg->length	= ISCSI_HDR_LEN;
	tx_dsg->lkey	= isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= txt_rsp_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);

	pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
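
/*
 * Fill one RDMA WR with up to max_sge ib_sge entries taken from the TCM
 * scatterlist starting at byte 'offset', clipping the total to 'data_left'.
 * Returns the number of scatterlist entries consumed so the caller can
 * advance its ib_sge cursor.
 */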
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n",
			 ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}
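
/*
 * Non-fastreg RDMA path: DMA-map the payload and split it into
 * DIV_ROUND_UP(nents, max_sge) chained RDMA WRs of at most
 * max_sge * PAGE_SIZE bytes each. For RDMA_WRITE the final WR is chained
 * to the response SEND so both go out with a single ib_post_send().
 */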
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}
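
/*
 * Build the fast-registration page list: coalesce DMA-contiguous
 * scatterlist entries into chunks, then emit one PAGE_SIZE-aligned address
 * per page of each chunk. Returns the number of pages written to fr_pl.
 */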
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
			 i, (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
				 n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}
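
/*
 * Register 'mem' with a fast-registration MR. A single-entry buffer is
 * described directly with the global conn_mr lkey. Otherwise, when the
 * descriptor's key is no longer valid, an IB_WR_LOCAL_INV is chained in
 * front of the IB_WR_FAST_REG_MR and the rkey is bumped so the stale key
 * cannot be reused; both WRs carry ISER_FASTREG_LI_WRID and are ignored
 * by the TX error path.
 */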
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;
	u8 key;

	if (mem->dma_nents == 1) {
		sge->lkey = isert_conn->conn_mr->lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
			 __func__, __LINE__, sge->addr, sge->length,
			 sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID) {
		/* Registering data buffer */
		mr = fr_desc->data_mr;
		frpl = fr_desc->data_frpl;
	} else {
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;
		frpl = fr_desc->pi_ctx->prot_frpl;
	}

	page_off = mem->offset % PAGE_SIZE;

	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
		 fr_desc, mem->nents, mem->offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
					     &frpl->page_list[0]);

	if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = mem->len;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + page_off;
	sge->length = mem->len;

	pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n",
		 __func__, __LINE__, sge->addr, sge->length,
		 sge->lkey);

	return ret;
}
static inline enum ib_t10_dif_type
se2ib_prot_type(enum target_prot_type prot_type)
{
	switch (prot_type) {
	case TARGET_DIF_TYPE0_PROT:
		return IB_T10DIF_NONE;
	case TARGET_DIF_TYPE1_PROT:
		return IB_T10DIF_TYPE1;
	case TARGET_DIF_TYPE2_PROT:
		return IB_T10DIF_TYPE2;
	case TARGET_DIF_TYPE3_PROT:
		return IB_T10DIF_TYPE3;
	default:
		return IB_T10DIF_NONE;
	}
}
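
/*
 * Translate the se_cmd protection operation into memory/wire domain
 * signature attributes: INSERT/STRIP operations carry T10-DIF in only one
 * domain, while PASS operations validate it in both. The PI interval is
 * the backend block size in every case.
 */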
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);

	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->mem.sig.dif.pi_interval =
				se_cmd->se_dev->dev_attrib.block_size;
	sig_attrs->wire.sig.dif.pi_interval =
				se_cmd->se_dev->dev_attrib.block_size;

	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
		sig_attrs->wire.sig.dif.type = ib_prot_type;
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->mem.sig.dif.type = ib_prot_type;
		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		sig_attrs->mem.sig.dif.type = ib_prot_type;
		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
		sig_attrs->wire.sig.dif.type = ib_prot_type;
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
		break;
	default:
		pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}
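
/*
 * Bind the data (and optional protection) registrations under a signature
 * MR with IB_WR_REG_SIG_MR. The resulting sig_sge spans data_length, plus
 * prot_length whenever protection guards actually travel on the wire.
 */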
static int
isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
		 struct fast_reg_descriptor *fr_desc,
		 struct ib_sge *data_sge, struct ib_sge *prot_sge,
		 struct ib_sge *sig_sge)
{
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;
	u32 key;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = data_sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = prot_sge;

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		pr_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	sig_sge->lkey = pi_ctx->sig_mr->lkey;
	sig_sge->addr = 0;
	sig_sge->length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		sig_sge->length += se_cmd->prot_length;

	pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		 sig_sge->addr, sig_sge->length,
		 sig_sge->lkey);
err:
	return ret;
}
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_sge data_sge;
	struct ib_send_wr *send_wr;
	struct fast_reg_descriptor *fr_desc = NULL;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 ||
	    se_cmd->prot_op != TARGET_PROT_NORMAL) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &data_sge);
	if (ret)
		goto unmap_cmd;

	if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
		struct ib_sge prot_sge, sig_sge;

		if (se_cmd->t_prot_sg) {
			ret = isert_map_data_buf(isert_conn, isert_cmd,
						 se_cmd->t_prot_sg,
						 se_cmd->t_prot_nents,
						 se_cmd->prot_length,
						 0, wr->iser_ib_op, &wr->prot);
			if (ret)
				goto unmap_cmd;

			ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
						ISERT_PROT_KEY_VALID, &prot_sge);
			if (ret)
				goto unmap_prot_cmd;
		}

		ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
				       &data_sge, &prot_sge, &sig_sge);
		if (ret)
			goto unmap_prot_cmd;

		fr_desc->ind |= ISERT_PROTECTED;
		memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
	} else
		memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));

	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;
unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);
unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}
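
/*
 * Queue Data-IN: register/map the payload for RDMA_WRITE, and in the
 * TARGET_PROT_NORMAL case chain the SCSI response SEND behind the last
 * RDMA_WRITE WR so the initiator sees data before status. With PI enabled
 * the RDMA_WRITE is signaled instead and the response is deferred to the
 * RDMA completion path.
 */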
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
		 isert_cmd, se_cmd->data_length);
	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr, true);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}

	if (se_cmd->prot_op == TARGET_PROT_NORMAL)
		pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			 "READ\n", isert_cmd);
	else
		pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			 isert_cmd);

	return 1;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		 isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}
	pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		 isert_cmd);

	return 0;
}
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		pr_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
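
/*
 * Network portal setup: allocate the isert_np context, then create, bind
 * and listen on an RDMA CM ID for the portal address handed down from
 * iscsi_target_configfs.c. The listener is stashed in isert_np->np_cm_id
 * and the context in np->np_context for isert_accept_np()/isert_free_np().
 */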
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	init_waitqueue_head(&isert_np->np_accept_wq);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
				IB_QPT_RC);
	if (IS_ERR(isert_lid)) {
		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
		       PTR_ERR(isert_lid));
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	ret = rdma_bind_addr(isert_lid, sa);
	if (ret) {
		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;
	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

	return 0;

out_lid:
	rdma_destroy_id(isert_lid);
out:
	kfree(isert_np);
	return ret;
}
static int
isert_check_accept_queue(struct isert_np *isert_np)
{
	int empty;

	mutex_lock(&isert_np->np_accept_mutex);
	empty = list_empty(&isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	return empty;
}
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.responder_resources = isert_conn->responder_resources;
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		pr_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");

	return 0;
}
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
	return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}
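
/*
 * Login accept path: sleep until a pending isert_conn shows up on
 * np_accept_list (or the NP thread is reset), pop the oldest entry, post
 * the initial login receive buffer, and complete the rdma_accept()
 * handshake before recording the connection's address details via
 * isert_set_conn_info().
 */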
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = wait_event_interruptible(isert_np->np_accept_wq,
		   !isert_check_accept_queue(isert_np) ||
		   np->np_thread_state == ISCSI_NP_THREAD_RESET);
	if (max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
			struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		return ret;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		return ret;

	isert_set_conn_info(np, conn, isert_conn);

	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_wait_conn: Starting\n");
	/*
	 * Decrement post_send_buf_count for special case when called
	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
	 */
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->logout_posted)
		atomic_dec(&isert_conn->post_send_buf_count);

	if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
		rdma_disconnect(isert_conn->conn_cm_id);
	}
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	if (isert_conn->state == ISER_CONN_UP)
		isert_conn->state = ISER_CONN_TERMINATING;
	mutex_unlock(&isert_conn->conn_mutex);

	wait_for_completion(&isert_conn->conn_wait_comp_err);

	wait_for_completion(&isert_conn->conn_wait);
}
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_put_conn(isert_conn);
}
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
static int __init isert_init(void)
{
	int ret;

	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
	if (!isert_rx_wq) {
		pr_err("Unable to allocate isert_rx_wq\n");
		return -ENOMEM;
	}

	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
	if (!isert_comp_wq) {
		pr_err("Unable to allocate isert_comp_wq\n");
		ret = -ENOMEM;
		goto destroy_rx_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
	return 0;

destroy_rx_wq:
	destroy_workqueue(isert_rx_wq);
	return ret;
}
static void __exit isert_exit(void)
{
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);