/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Total data size is stored in
 * iser_task->data[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task,
                                 unsigned int edtl)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_device *device = iser_task->iser_conn->ib_conn->device;
        struct iser_regd_buf *regd_buf;
        int err;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
        struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

        err = iser_dma_map_task_data(iser_task,
                                     buf_in,
                                     ISER_DIR_IN,
                                     DMA_FROM_DEVICE);
        if (err)
                return err;

        if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
                iser_err("Total data length: %ld, less than EDTL: "
                         "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
                         iser_task->data[ISER_DIR_IN].data_len, edtl,
                         task->itt, iser_task->iser_conn);
                return -EINVAL;
        }

        err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
        if (err) {
                iser_err("Failed to set up Data-IN RDMA\n");
                return err;
        }
        regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];

        /* advertise the remote key and address of the registered buffer so
         * the target can RDMA-write the read data directly into it */
        hdr->flags    |= ISER_RSV;
        hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
        hdr->read_va   = cpu_to_be64(regd_buf->reg.va);

        iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
                 task->itt, regd_buf->reg.rkey,
                 (unsigned long long)regd_buf->reg.va);

        return 0;
}

/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Total data size is stored in
 * task->data[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
                       unsigned int imm_sz,
                       unsigned int unsol_sz,
                       unsigned int edtl)
{
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_device *device = iser_task->iser_conn->ib_conn->device;
        struct iser_regd_buf *regd_buf;
        int err;
        struct iser_hdr *hdr = &iser_task->desc.iser_header;
        struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
        struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

        err = iser_dma_map_task_data(iser_task,
                                     buf_out,
                                     ISER_DIR_OUT,
                                     DMA_TO_DEVICE);
        if (err)
                return err;

        if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Total data length: %ld, less than EDTL: %d, "
                         "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
                         iser_task->data[ISER_DIR_OUT].data_len,
                         edtl, task->itt, task->conn);
                return -EINVAL;
        }

        err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
        if (err != 0) {
                iser_err("Failed to register write cmd RDMA mem\n");
                return err;
        }

        regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];

        if (unsol_sz < edtl) {
                /* the solicited part is fetched by the target via RDMA-read;
                 * advertise the tags past the unsolicited data */
                hdr->flags     |= ISER_WSV;
                hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
                hdr->write_va   = cpu_to_be64(regd_buf->reg.va + unsol_sz);

                iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
                         "VA:%#llX + unsol:%d\n",
                         task->itt, regd_buf->reg.rkey,
                         (unsigned long long)regd_buf->reg.va, unsol_sz);
        }

        if (imm_sz > 0) {
                /* immediate data rides in the command PDU as a second SGE */
                iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
                         task->itt, imm_sz);
                tx_dsg->addr   = regd_buf->reg.va;
                tx_dsg->length = imm_sz;
                tx_dsg->lkey   = regd_buf->reg.lkey;
                iser_task->desc.num_sge = 2;
        }

        return 0;
}

/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *ib_conn,
                                  struct iser_tx_desc *tx_desc)
{
        struct iser_device *device = ib_conn->device;

        ib_dma_sync_single_for_cpu(device->ib_device,
                tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

        memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
        tx_desc->iser_header.flags = ISER_VER;

        tx_desc->num_sge = 1;

        if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
                tx_desc->tx_sg[0].lkey = device->mr->lkey;
                iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
        }
}

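/* Unmap and free the login buffer. The request and response halves are
 * unmapped separately since either DMA mapping may have failed on its own
 * during allocation; clearing the DMA addresses and the buffer pointer
 * makes a repeated call a harmless no-op. */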
static void iser_free_login_buf(struct iser_conn *ib_conn)
{
        if (!ib_conn->login_buf)
                return;

        if (ib_conn->login_req_dma)
                ib_dma_unmap_single(ib_conn->device->ib_device,
                                    ib_conn->login_req_dma,
                                    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

        if (ib_conn->login_resp_dma)
                ib_dma_unmap_single(ib_conn->device->ib_device,
                                    ib_conn->login_resp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

        kfree(ib_conn->login_buf);

        /* make sure we never redo any unmapping */
        ib_conn->login_req_dma = 0;
        ib_conn->login_resp_dma = 0;
        ib_conn->login_buf = NULL;
}

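/* Allocate one contiguous buffer holding the login request segment
 * followed by the login response segment, then DMA-map each half in its
 * own direction (request: DMA_TO_DEVICE, response: DMA_FROM_DEVICE). */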
static int iser_alloc_login_buf(struct iser_conn *ib_conn)
{
        struct iser_device *device;
        int req_err, resp_err;

        BUG_ON(ib_conn->device == NULL);

        device = ib_conn->device;

        ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!ib_conn->login_buf)
                goto out_err;

        ib_conn->login_req_buf  = ib_conn->login_buf;
        ib_conn->login_resp_buf = ib_conn->login_buf +
                                                ISCSI_DEF_MAX_RECV_SEG_LEN;

        ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
                                (void *)ib_conn->login_req_buf,
                                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

        ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
                                (void *)ib_conn->login_resp_buf,
                                ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

        req_err  = ib_dma_mapping_error(device->ib_device,
                                        ib_conn->login_req_dma);
        resp_err = ib_dma_mapping_error(device->ib_device,
                                        ib_conn->login_resp_dma);

        if (req_err || resp_err) {
                if (req_err)
                        ib_conn->login_req_dma = 0;
                if (resp_err)
                        ib_conn->login_resp_dma = 0;
                goto free_login_buf;
        }
        return 0;

free_login_buf:
        iser_free_login_buf(ib_conn);

out_err:
        iser_err("unable to alloc or map login buf\n");
        return -ENOMEM;
}

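/* Allocate and DMA-map one RX descriptor per session command slot, plus
 * the RDMA registration resources and the login buffer. min_posted_rx is
 * kept at a quarter of the queue depth so that receive buffers can be
 * reposted in batches (see iser_rcv_completion). */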
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session)
{
        int i, j;
        u64 dma_addr;
        struct iser_rx_desc *rx_desc;
        struct ib_sge *rx_sg;
        struct iser_device *device = ib_conn->device;

        ib_conn->qp_max_recv_dtos = session->cmds_max;
        ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
        ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;

        if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
                goto create_rdma_reg_res_failed;

        if (iser_alloc_login_buf(ib_conn))
                goto alloc_login_buf_fail;

        ib_conn->rx_descs = kmalloc(session->cmds_max *
                                sizeof(struct iser_rx_desc), GFP_KERNEL);
        if (!ib_conn->rx_descs)
                goto rx_desc_alloc_fail;

        rx_desc = ib_conn->rx_descs;

        for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++) {
                dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
                                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(device->ib_device, dma_addr))
                        goto rx_desc_dma_map_failed;

                rx_desc->dma_addr = dma_addr;

                rx_sg = &rx_desc->rx_sg;
                rx_sg->addr   = rx_desc->dma_addr;
                rx_sg->length = ISER_RX_PAYLOAD_SIZE;
                rx_sg->lkey   = device->mr->lkey;
        }

        ib_conn->rx_desc_head = 0;
        return 0;

rx_desc_dma_map_failed:
        rx_desc = ib_conn->rx_descs;
        for (j = 0; j < i; j++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(ib_conn->rx_descs);
        ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
        iser_free_login_buf(ib_conn);
alloc_login_buf_fail:
        device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
        iser_err("failed allocating rx descriptors / data buffers\n");
        return -ENOMEM;
}

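/* Tear down everything iser_alloc_rx_descriptors() set up; safe to call
 * even if the RX descriptor array was never allocated. */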
void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
        int i;
        struct iser_rx_desc *rx_desc;
        struct iser_device *device = ib_conn->device;

        if (!ib_conn->rx_descs)
                goto free_login_buf;

        if (device->iser_free_rdma_reg_res)
                device->iser_free_rdma_reg_res(ib_conn);

        rx_desc = ib_conn->rx_descs;
        for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
                ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
                                    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
        kfree(ib_conn->rx_descs);
        /* make sure we never redo any unmapping */
        ib_conn->rx_descs = NULL;

free_login_buf:
        iser_free_login_buf(ib_conn);
}

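/* Once the login exchange reaches full feature phase, post the initial
 * batch of receive buffers; discovery sessions keep re-using the single
 * login RX buffer instead. */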
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
        struct iscsi_session *session = conn->session;

        iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
        /* check if this is the last login - going to full feature phase */
        if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
                return 0;

        /*
         * Check that there is one posted recv buffer (for the last login
         * response) and no posted send buffers left - they must have been
         * consumed during previous login phases.
         */
        WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
        WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);

        if (session->discovery_sess) {
                iser_info("Discovery session, re-using login RX buffer\n");
                return 0;
        }

        iser_info("Normal session, posting batch of RX %d buffers\n",
                  iser_conn->ib_conn->min_posted_rx);

        /* Initial post receive buffers */
        if (iser_post_recvm(iser_conn->ib_conn,
                            iser_conn->ib_conn->min_posted_rx))
                return -ENOMEM;

        return 0;
}

/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
                      struct iscsi_task *task)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        unsigned long edtl;
        int err;
        struct iser_data_buf *data_buf;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
        struct scsi_cmnd *sc = task->sc;
        struct iser_tx_desc *tx_desc = &iser_task->desc;

        edtl = ntohl(hdr->data_length);

        /* build the tx desc regd header and add it to the tx desc dto */
        tx_desc->type = ISCSI_TX_SCSI_COMMAND;
        iser_create_send_desc(iser_conn->ib_conn, tx_desc);

        if (hdr->flags & ISCSI_FLAG_CMD_READ)
                data_buf = &iser_task->data[ISER_DIR_IN];
        else
                data_buf = &iser_task->data[ISER_DIR_OUT];

        if (scsi_sg_count(sc)) { /* using a scatter list */
                data_buf->buf  = scsi_sglist(sc);
                data_buf->size = scsi_sg_count(sc);
        }

        data_buf->data_len = scsi_bufflen(sc);

        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
                err = iser_prepare_read_cmd(task, edtl);
                if (err)
                        goto send_command_error;
        }
        if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
                err = iser_prepare_write_cmd(task,
                                             task->imm_count,
                                             task->imm_count +
                                             task->unsol_r2t.data_length,
                                             edtl);
                if (err)
                        goto send_command_error;
        }

        iser_task->status = ISER_TASK_STATUS_STARTED;

        err = iser_post_send(iser_conn->ib_conn, tx_desc);
        if (!err)
                return 0;

send_command_error:
        iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
        return err;
}

/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
                       struct iscsi_task *task,
                       struct iscsi_data *hdr)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *tx_desc = NULL;
        struct iser_regd_buf *regd_buf;
        unsigned long buf_offset;
        unsigned long data_seg_len;
        uint32_t itt;
        int err = 0;
        struct ib_sge *tx_dsg;

        itt = (__force uint32_t)hdr->itt;
        data_seg_len = ntoh24(hdr->dlength);
        buf_offset   = ntohl(hdr->offset);

        iser_dbg("%s itt %d dseg_len %d offset %d\n",
                 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

        tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
        if (tx_desc == NULL) {
                iser_err("Failed to alloc desc for post dataout\n");
                return -ENOMEM;
        }

        tx_desc->type = ISCSI_TX_DATAOUT;
        tx_desc->iser_header.flags = ISER_VER;
        memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

        /* build the tx desc */
        iser_initialize_task_headers(task, tx_desc);

        regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
        tx_dsg = &tx_desc->tx_sg[1];
        tx_dsg->addr   = regd_buf->reg.va + buf_offset;
        tx_dsg->length = data_seg_len;
        tx_dsg->lkey   = regd_buf->reg.lkey;
        tx_desc->num_sge = 2;

        if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
                iser_err("Offset:%ld & DSL:%ld in Data-Out "
                         "inconsistent with total len:%ld, itt:%d\n",
                         buf_offset, data_seg_len,
                         iser_task->data[ISER_DIR_OUT].data_len, itt);
                err = -EINVAL;
                goto send_data_out_error;
        }
        iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
                 itt, buf_offset, data_seg_len);

        err = iser_post_send(iser_conn->ib_conn, tx_desc);
        if (!err)
                return 0;

send_data_out_error:
        kmem_cache_free(ig.desc_cache, tx_desc);
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

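/**
 * iser_send_control - send control PDU
 *
 * Login payloads are copied into the pre-mapped login request buffer
 * and carried as a second SGE; data on any other control task is
 * treated as an error.
 */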
int iser_send_control(struct iscsi_conn *conn,
                      struct iscsi_task *task)
{
        struct iscsi_iser_conn *iser_conn = conn->dd_data;
        struct iscsi_iser_task *iser_task = task->dd_data;
        struct iser_tx_desc *mdesc = &iser_task->desc;
        unsigned long data_seg_len;
        int err = 0;
        struct iser_device *device;
        struct iser_conn *ib_conn = iser_conn->ib_conn;

        /* build the tx desc regd header and add it to the tx desc dto */
        mdesc->type = ISCSI_TX_CONTROL;
        iser_create_send_desc(iser_conn->ib_conn, mdesc);

        device = iser_conn->ib_conn->device;

        data_seg_len = ntoh24(task->hdr->dlength);

        if (data_seg_len > 0) {
                struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
                if (task != conn->login_task) {
                        iser_err("data present on non login task!!!\n");
                        goto send_control_error;
                }

                ib_dma_sync_single_for_cpu(device->ib_device,
                        ib_conn->login_req_dma, task->data_count,
                        DMA_TO_DEVICE);

                memcpy(iser_conn->ib_conn->login_req_buf, task->data,
                       task->data_count);

                ib_dma_sync_single_for_device(device->ib_device,
                        ib_conn->login_req_dma, task->data_count,
                        DMA_TO_DEVICE);

                tx_dsg->addr   = iser_conn->ib_conn->login_req_dma;
                tx_dsg->length = task->data_count;
                tx_dsg->lkey   = device->mr->lkey;
                mdesc->num_sge = 2;
        }

        if (task == conn->login_task) {
                iser_dbg("op %x dsl %lx, posting login rx buffer\n",
                         task->hdr->opcode, data_seg_len);
                err = iser_post_recvl(iser_conn->ib_conn);
                if (err)
                        goto send_control_error;
                err = iser_post_rx_bufs(conn, task->hdr);
                if (err)
                        goto send_control_error;
        }

        err = iser_post_send(iser_conn->ib_conn, mdesc);
        if (!err)
                return 0;

send_control_error:
        iser_err("conn %p failed err %d\n", conn, err);
        return err;
}

/**
 * iser_rcv_completion - recv DTO completion
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
                         unsigned long rx_xfer_len,
                         struct iser_conn *ib_conn)
{
        struct iscsi_iser_conn *conn = ib_conn->iser_conn;
        struct iscsi_hdr *hdr;
        u64 rx_dma;
        int rx_buflen, outstanding, count, err;

        /* differentiate between the login buffer and all other PDUs */
        if ((char *)rx_desc == ib_conn->login_resp_buf) {
                rx_dma = ib_conn->login_resp_dma;
                rx_buflen = ISER_RX_LOGIN_SIZE;
        } else {
                rx_dma = rx_desc->dma_addr;
                rx_buflen = ISER_RX_PAYLOAD_SIZE;
        }

        ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
                                   rx_buflen, DMA_FROM_DEVICE);

        hdr = &rx_desc->iscsi_header;

        iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
                 hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

        iscsi_iser_recv(conn->iscsi_conn, hdr,
                        rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);

        ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
                                      rx_buflen, DMA_FROM_DEVICE);

        /* decrementing conn->post_recv_buf_count only --after-- freeing the *
         * task eliminates the need to worry about tasks completed in        *
         * parallel to the execution of iser_conn_term. So the code that     *
         * waits for the posted rx bufs refcount to become zero handles      *
         * everything                                                        */
        conn->ib_conn->post_recv_buf_count--;

        if (rx_dma == ib_conn->login_resp_dma)
                return;

        outstanding = ib_conn->post_recv_buf_count;
        if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) {
                count = min(ib_conn->qp_max_recv_dtos - outstanding,
                            ib_conn->min_posted_rx);
                err = iser_post_recvm(ib_conn, count);
                if (err)
                        iser_err("posting %d rx bufs err %d\n", count, err);
        }
}

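/* TX completion: Data-Out descriptors were allocated per PDU and are
 * unmapped and returned to the cache here; a control descriptor with a
 * reserved ITT drops the task reference taken by libiscsi. */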
void iser_snd_completion(struct iser_tx_desc *tx_desc,
                         struct iser_conn *ib_conn)
{
        struct iscsi_task *task;
        struct iser_device *device = ib_conn->device;

        if (tx_desc->type == ISCSI_TX_DATAOUT) {
                ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
                                    ISER_HEADERS_LEN, DMA_TO_DEVICE);
                kmem_cache_free(ig.desc_cache, tx_desc);
                tx_desc = NULL; /* don't touch the freed descriptor below */
        }

        atomic_dec(&ib_conn->post_send_buf_count);

        if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
                /* this arithmetic is legal by libiscsi dd_data allocation */
                task = (void *) ((long)(void *)tx_desc -
                                  sizeof(struct iscsi_task));
                if (task->hdr->itt == RESERVED_ITT)
                        iscsi_put_task(task);
        }
}

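/* reset per-task RDMA state before the command is executed */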
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
        iser_task->status = ISER_TASK_STATUS_INIT;

        iser_task->dir[ISER_DIR_IN] = 0;
        iser_task->dir[ISER_DIR_OUT] = 0;

        iser_task->data[ISER_DIR_IN].data_len  = 0;
        iser_task->data[ISER_DIR_OUT].data_len = 0;

        memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
               sizeof(struct iser_regd_buf));
        memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
               sizeof(struct iser_regd_buf));
}

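/* undo the task's RDMA state once it completes: copy bounce buffers back
 * for unaligned reads, unregister RDMA memory and unmap the task data */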
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
        struct iser_device *device = iser_task->iser_conn->ib_conn->device;
        int is_rdma_aligned = 1;

        /* if we were reading, copy back to unaligned sglist,
         * anyway dma_unmap and free the copy
         */
        if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
                is_rdma_aligned = 0;
                iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
        }
        if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
                is_rdma_aligned = 0;
                iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
        }

        if (iser_task->dir[ISER_DIR_IN])
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);

        if (iser_task->dir[ISER_DIR_OUT])
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);

        /* if the data was unaligned, it was already unmapped and then copied */
        if (is_rdma_aligned)
                iser_dma_unmap_task_data(iser_task);
}