/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"
/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_IN].data_len, Protection size
 *  is stored in task->prot[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];
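
	/*
	 * Advertise the registered Data-IN buffer in the iSER header:
	 * the RSV flag plus read_stag/read_va let the target RDMA-Write
	 * the SCSI read payload straight into this buffer.
	 */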
	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(mem_reg->rkey);
	hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, mem_reg->rkey,
		 (unsigned long long)mem_reg->sge.addr);

	return 0;
}

/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_OUT].data_len, Protection size
 *  is stored at task->prot[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_ctrl *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_out,
					     ISER_DIR_OUT,
					     DMA_TO_DEVICE);
		if (err)
			return err;
	}

	err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT,
				buf_out->data_len == imm_sz);
	if (err != 0) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
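
	/*
	 * If the expected data transfer length exceeds the unsolicited
	 * part, advertise the remainder via WSV/write_stag/write_va
	 * (starting at offset unsol_sz) so the target can RDMA-Read it;
	 * immediate data is sent inline as a second SGE of the command.
	 */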
	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(mem_reg->rkey);
		hdr->write_va   = cpu_to_be64(mem_reg->sge.addr + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, mem_reg->rkey,
			 (unsigned long long)mem_reg->sge.addr, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr = mem_reg->sge.addr;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey = mem_reg->sge.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *iser_conn,
				  struct iser_tx_desc *tx_desc)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
	tx_desc->iser_header.flags = ISER_VER;
	tx_desc->num_sge = 1;
}

static void iser_free_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	if (!desc->req)
		return;

	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(desc->req);
	kfree(desc->rsp);

	/* make sure we never redo any unmapping */
	desc->req = NULL;
	desc->rsp = NULL;
}
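
/*
 * Allocate and DMA-map the per-connection login buffers: desc->req
 * (ISCSI_DEF_MAX_RECV_SEG_LEN) stages outgoing login/text PDU data,
 * desc->rsp (ISER_RX_LOGIN_SIZE) receives the login response.
 */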
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iser_login_desc *desc = &iser_conn->login_desc;

	desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
	if (!desc->req)
		return -ENOMEM;

	desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
					  ISCSI_DEF_MAX_RECV_SEG_LEN,
					  DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				 desc->req_dma))
		goto free_req;

	desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!desc->rsp)
		goto unmap_req;

	desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
					  ISER_RX_LOGIN_SIZE,
					  DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(device->ib_device,
				 desc->rsp_dma))
		goto free_rsp;

	return 0;

free_rsp:
	kfree(desc->rsp);
unmap_req:
	ib_dma_unmap_single(device->ib_device, desc->req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_TO_DEVICE);
free_req:
	kfree(desc->req);

	return -ENOMEM;
}

int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
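
	/*
	 * cmds_max is a power of two, so the mask form (cmds_max - 1)
	 * lets the rx descriptor ring index wrap without a modulo.
	 * min_posted_rx (a quarter of the ring) is used as the batch
	 * size when replenishing posted receive buffers.
	 */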
	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
					   iser_conn->scsi_sg_tablesize))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc(iser_conn->num_rx_descs *
				      sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;
		rx_desc->cqe.done = iser_task_rsp;
		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	device->reg_ops->free_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}

void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (device->reg_ops->free_reg_res)
		device->reg_ops->free_reg_res(ib_conn);

	rx_desc = iser_conn->rx_descs;
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	/* make sure we never redo any unmapping */
	iser_conn->rx_descs = NULL;

	iser_free_login_buf(iser_conn);
}

static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer
	 * (for the last login response).
	 */
	WARN_ON(ib_conn->post_recv_buf_count != 1);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	}

	iser_info("Normal session, posting batch of RX %d buffers\n",
		  iser_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
		return -ENOMEM;

	return 0;
}
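
/*
 * Send completions are requested only every ISER_SIGNAL_CMD_COUNT
 * posts; unsignaled tx work requests are reaped implicitly once a later
 * signaled completion arrives, keeping per-command CQ overhead low.
 */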
static inline bool iser_signal_comp(u8 sig_count)
{
	return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
}

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	u8 sig_count = ++iser_conn->ib_conn.sig_count;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	tx_desc->cqe.done = iser_cmd_comp;
	iser_create_send_desc(iser_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->sg = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->sg = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
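		/*
		 * T10-PI adds 8 bytes of protection information per
		 * logical block, so the protection buffer length is
		 * (data length / sector size) * 8.
		 */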
		prot_buf->data_len = (data_buf->data_len >>
				      ilog2(sc->device->sector_size)) * 8;
	}

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc,
			     iser_signal_comp(sig_count));
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}

/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = NULL;
	struct iser_mem_reg *mem_reg;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->cqe.done = iser_dataout_comp;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	err = iser_initialize_task_headers(task, tx_desc);
	if (err)
		goto send_data_out_error;
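
	/*
	 * Point the second SGE into the already DMA-mapped OUT buffer at
	 * buf_offset, so the Data-Out payload is sent without copying.
	 */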
	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr = mem_reg->sge.addr + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey = mem_reg->sge.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	mdesc->cqe.done = iser_ctrl_comp;
	iser_create_send_desc(iser_conn, mdesc);

	device = iser_conn->ib_conn.device;

	data_seg_len = ntoh24(task->hdr->dlength);
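
	/*
	 * Login/text PDU data is staged in the pre-mapped login request
	 * buffer and attached as a second SGE; only the login task is
	 * allowed to carry data here.
	 */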
	if (data_seg_len > 0) {
		struct iser_login_desc *desc = &iser_conn->login_desc;
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
					   task->data_count, DMA_TO_DEVICE);

		memcpy(desc->req, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
					      task->data_count, DMA_TO_DEVICE);

		tx_dsg->addr = desc->req_dma;
		tx_dsg->length = task->data_count;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}

void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_login_desc *desc = iser_login(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	char *data;
	int length;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "login_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				   DMA_FROM_DEVICE);

	hdr = desc->rsp + sizeof(struct iser_ctrl);
	data = desc->rsp + ISER_HEADERS_LEN;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->rsp_dma, ISER_RX_LOGIN_SIZE,
				      DMA_FROM_DEVICE);

	ib_conn->post_recv_buf_count--;
}
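
/*
 * When the target uses Send-with-Invalidate, the completion carries the
 * rkey it remotely invalidated; mark the matching registration (data or
 * protection MR) as invalid so no redundant local invalidation is issued.
 */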
static inline void
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
	if (likely(rkey == desc->rsc.mr->rkey))
		desc->rsc.mr_valid = 0;
	else if (likely(rkey == desc->pi_ctx->sig_mr->rkey))
		desc->pi_ctx->sig_mr_valid = 0;
}

static int
iser_check_remote_inv(struct iser_conn *iser_conn,
		      struct ib_wc *wc,
		      struct iscsi_hdr *hdr)
{
	if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
		struct iscsi_task *task;
		u32 rkey = wc->ex.invalidate_rkey;

		iser_dbg("conn %p: remote invalidation for rkey %#x\n",
			 iser_conn, rkey);

		if (unlikely(!iser_conn->snd_w_inv)) {
			iser_err("conn %p: unexpected remote invalidation, "
				 "terminating connection\n", iser_conn);
			return -EPROTO;
		}

		task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
		if (likely(task)) {
			struct iscsi_iser_task *iser_task = task->dd_data;
			struct iser_fr_desc *desc;

			if (iser_task->dir[ISER_DIR_IN]) {
				desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
				iser_inv_desc(desc, rkey);
			}

			if (iser_task->dir[ISER_DIR_OUT]) {
				desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
				iser_inv_desc(desc, rkey);
			}
		} else {
			iser_err("failed to get task for itt=%d\n", hdr->itt);
			return -EINVAL;
		}
	}

	return 0;
}

void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
	struct iscsi_hdr *hdr;
	int length;
	int outstanding, count, err;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "task_rsp");
		return;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
				   desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				   DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	length = wc->byte_len - ISER_HEADERS_LEN;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, length);

	if (iser_check_remote_inv(iser_conn, wc, hdr)) {
		iscsi_conn_failure(iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);
		return;
	}

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
				      DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the *
	 * task eliminates the need to worry on tasks which are completed in   *
	 * parallel to the execution of iser_conn_term. So the code that waits *
	 * for the posted rx bufs refcount to become zero handles everything   */
	ib_conn->post_recv_buf_count--;
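
	/*
	 * Replenish the receive ring in batches: once min_posted_rx
	 * buffers' worth of headroom is available, post that many new
	 * receives so the ring stays close to full.
	 */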
	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}

void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "command");
}

void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct iscsi_task *task;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		iser_err_comp(wc, "control");
		return;
	}

	/* this arithmetic is legal by libiscsi dd_data allocation */
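	/*
	 * libiscsi allocates dd_data (the iscsi_iser_task, whose first
	 * field is this tx descriptor) immediately after struct
	 * iscsi_task, so stepping back by sizeof(struct iscsi_task)
	 * recovers the owning task.
	 */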
	task = (void *)desc - sizeof(struct iscsi_task);
	if (task->hdr->itt == RESERVED_ITT)
		iscsi_put_task(task);
}

void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
	struct ib_conn *ib_conn = wc->qp->qp_context;
	struct iser_device *device = ib_conn->device;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		iser_err_comp(wc, "dataout");

	ib_dma_unmap_single(device->ib_device, desc->dma_addr,
			    ISER_HEADERS_LEN, DMA_TO_DEVICE);
	kmem_cache_free(ig.desc_cache, desc);
}

void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
	       sizeof(struct iser_mem_reg));
	memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
	       sizeof(struct iser_mem_reg));
}

void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	if (iser_task->dir[ISER_DIR_IN]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_IN],
					 DMA_FROM_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		iser_dma_unmap_task_data(iser_task,
					 &iser_task->data[ISER_DIR_OUT],
					 DMA_TO_DEVICE);
		if (prot_count)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
	}
}