/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"
/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_IN].data_len, Protection size
 *  is stored in task->prot[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(mem_reg->rkey);
	hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, mem_reg->rkey,
		 (unsigned long long)mem_reg->sge.addr);

	return 0;
}
/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_OUT].data_len, Protection size
 *  is stored at task->prot[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_out,
					     ISER_DIR_OUT,
					     DMA_TO_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT,
				buf_out->data_len == imm_sz);
	if (err) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags |= ISER_WSV;
		if (buf_out->data_len > imm_sz) {
			hdr->write_stag = cpu_to_be32(mem_reg->rkey);
			hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
		}

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
			 task->itt, mem_reg->rkey,
			 (unsigned long long)mem_reg->sge.addr, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr = mem_reg->sge.addr;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey = mem_reg->sge.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}
/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *iser_conn,
				  struct iser_tx_desc *tx_desc)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISER_VER;
	tx_desc->num_sge = 1;
}
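
/*
 * Unmap and free the login request/response buffers of a connection.
 * Safe to call again after the buffers were released, since the
 * pointers are cleared once freed.
 */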
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	if (!desc->req)
		return;

	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(desc->req);
	kfree(desc->rsp);

	/* make sure we never redo any unmapping */
	desc->req = NULL;
	desc->rsp = NULL;
}
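
/*
 * Allocate the login request/response buffers and DMA-map them for the
 * login exchange. On failure, everything mapped or allocated so far is
 * rolled back and -ENOMEM is returned.
 */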
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
	if (!desc->req)
		return -ENOMEM;

	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
					  ISCSI_DEF_MAX_RECV_SEG_LEN,
					  DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				 desc->req_dma))
		goto free_req;

	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!desc->rsp)
		goto unmap_req;

	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
					  ISER_RX_LOGIN_SIZE,
					  DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				 desc->rsp_dma))
		goto free_rsp;

	return 0;

free_rsp:
	kfree(desc->rsp);
unmap_req:
	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_TO_DEVICE);
free_req:
	kfree(desc->req);

	return -ENOMEM;
}
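
/**
 * iser_alloc_rx_descriptors - allocate and DMA-map the receive ring
 * @iser_conn: iSER connection context
 * @session: iSCSI session whose cmds_max (a power of two) sizes the ring
 *
 * Sets up the fast registration resources and login buffers, then
 * allocates one ISER_RX_PAYLOAD_SIZE receive descriptor per command
 * slot and maps it for DMA. min_posted_rx, a quarter of the ring, is
 * the batch threshold used when re-posting receive buffers. All
 * partially built state is unwound on failure.
 */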
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
					   iser_conn->pages_per_mr))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
					    sizeof(struct iser_rx_desc),
					    GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;
		rx_desc->cqe.done = iser_task_rsp;
		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	device->reg_ops->free_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}
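
/*
 * Tear down everything built by iser_alloc_rx_descriptors: the
 * registration resources, the DMA-mapped receive ring and the login
 * buffers.
 */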
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (device->reg_ops->free_reg_res)
		device->reg_ops->free_reg_res(ib_conn);

	rx_desc = iser_conn->rx_descs;
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	/* make sure we never redo any unmapping */
	iser_conn->rx_descs = NULL;

	iser_free_login_buf(iser_conn);
}
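
/*
 * Called while a login PDU is being sent. Once the login sequence
 * reaches the full feature phase, post the initial batch of receive
 * buffers; a discovery session keeps re-using the single login RX
 * buffer instead.
 */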
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer
	 * (for the last login response).
	 */
	WARN_ON(ib_conn->post_recv_buf_count != 1);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	}

	iser_info("Normal session, posting batch of RX %d buffers\n",
		  iser_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
		return -ENOMEM;

	return 0;
}
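
/*
 * Request a signaled send completion only once every
 * ISER_SIGNAL_CMD_COUNT commands. Consuming one completion per batch
 * rather than per send keeps CQ processing overhead low while still
 * bounding the number of outstanding unsignaled sends.
 */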
static inline bool iser_signal_comp(u8 sig_count)
{
	return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
}
/**
 * iser_send_command - send command PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	u8 sig_count = ++iser_conn->ib_conn.sig_count;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	tx_desc->cqe.done = iser_cmd_comp;
	iser_create_send_desc(iser_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->sg = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->sg = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		prot_buf->data_len = (data_buf->data_len >>
				      ilog2(sc->device->sector_size)) * 8;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc,
			     iser_signal_comp(sig_count));
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}
/**
 * iser_send_data_out - send data out PDU
 * @conn: link to matching iscsi connection
 * @task: SCSI command task
 * @hdr: pointer to the LLD's iSCSI message header
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc;
	struct iser_mem_reg *mem_reg;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (!tx_desc)
		return -ENOMEM;

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->cqe.done = iser_dataout_comp;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	err = iser_initialize_task_headers(task, tx_desc);
	if (err)
		goto send_data_out_error;

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr = mem_reg->sge.addr + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey = mem_reg->sge.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}
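
/*
 * iser_send_control - send a control (including login) PDU. A login
 * payload is staged through the pre-mapped login request buffer; for
 * the login task the receive buffers for the response are posted
 * before the PDU itself is sent.
 */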
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	mdesc->cqe.done = iser_ctrl_comp;
	iser_create_send_desc(iser_conn, mdesc);

	device = iser_conn->ib_conn.device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct iser_login_desc *desc = &iser_conn->login_desc;
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
					   task->data_count, DMA_TO_DEVICE);

		memcpy(desc->req, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
					      task->data_count, DMA_TO_DEVICE);

		tx_dsg->addr = desc->req_dma;
		tx_dsg->length = task->data_count;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}
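
/*
 * Login response completion handler: sync the login response buffer
 * for CPU access, hand the PDU to libiscsi, then return ownership of
 * the buffer to the device.
 */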
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_login_desc *desc = iser_login(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	char *data;
	int length;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "login_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				   DMA_FROM_DEVICE);

	hdr = desc->rsp + sizeof(struct iser_ctrl);
	data = desc->rsp + ISER_HEADERS_LEN;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				      DMA_FROM_DEVICE);

	ib_conn->post_recv_buf_count--;
}
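
/*
 * Validate a remote invalidation: the invalidated rkey must match the
 * MR (or signature MR) registered for this task. On a match the MR is
 * marked as no longer registered, since the target has already
 * invalidated it for us.
 */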
static inline int
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
	if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
		     (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
		iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
		return -EINVAL;
	}

	desc->rsc.mr_valid = 0;

	return 0;
}
static int
iser_check_remote_inv(struct iser_conn *iser_conn,
		      struct ib_wc *wc,
		      struct iscsi_hdr *hdr)
{
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		struct iscsi_task *task;
		u32 rkey = wc->ex.invalidate_rkey;

		iser_dbg("conn %p: remote invalidation for rkey %#x\n",
			 iser_conn, rkey);

		if (unlikely(!iser_conn->snd_w_inv)) {
			iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
				 iser_conn);
			return -EPROTO;
		}

		task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
		if (likely(task)) {
			struct iscsi_iser_task *iser_task = task->dd_data;
			struct iser_fr_desc *desc;

			if (iser_task->dir[ISER_DIR_IN]) {
				desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
				if (unlikely(iser_inv_desc(desc, rkey)))
					return -EINVAL;
			}

			if (iser_task->dir[ISER_DIR_OUT]) {
				desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
				if (unlikely(iser_inv_desc(desc, rkey)))
					return -EINVAL;
			}
		} else {
			iser_err("failed to get task for itt=%d\n", hdr->itt);
			return -EINVAL;
		}
	}

	return 0;
}
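
/*
 * Task response completion handler: validate any remote invalidation,
 * pass the PDU up to libiscsi and re-post receive buffers in
 * min_posted_rx sized batches.
 */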
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length;
	int outstanding, count, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				   DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	if (iser_check_remote_inv(iser_conn, wc, hdr)) {
		iscsi_conn_failure(iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);
		return;
	}

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				      DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the *
	 * task eliminates the need to worry about tasks which are completed *
	 * in parallel to the execution of iser_conn_term. So the code that  *
	 * waits for the posted rx bufs refcount to become zero handles      *
	 * everything                                                        */
	ib_conn->post_recv_buf_count--;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "command");
}
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct iscsi_task *task;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "control");
		return;
	}

	/* this arithmetic is legal by libiscsi dd_data allocation */
	task = (void *)desc - sizeof(struct iscsi_task);
	if (task->hdr->itt == RESERVED_ITT)
		iscsi_put_task(task);
}
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_device *device = ib_conn->device;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "dataout");

	ib_dma_unmap_single(device->ib_device, desc->dma_addr,
			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
	kmem_cache_free(ig.desc_cache, desc);
}
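
/* Reset the per-task RDMA state before the command is executed. */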
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].dma_nents = 0;
	iser_task->prot[ISER_DIR_OUT].dma_nents = 0;

	memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
	       sizeof(struct iser_mem_reg));
	memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
	       sizeof(struct iser_mem_reg));
}
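
/*
 * Undo the per-task RDMA state once the command has completed:
 * deregister the RDMA memory and unmap the data (and, when protection
 * is enabled, the protection) buffers for each direction that was used.
 */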
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	if (iser_task->dir[ISER_DIR_IN]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_IN],
					 DMA_FROM_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_OUT],
					 DMA_TO_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
	}
}