/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "iscsi_iser.h"
/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_IN].data_len, Protection size
 *  is stored in task->prot[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];

	err = iser_dma_map_task_data(iser_task,
				     buf_in,
				     ISER_DIR_IN,
				     DMA_FROM_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_in,
					     ISER_DIR_IN,
					     DMA_FROM_DEVICE);
		if (err)
			return err;
	}

	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
	if (err) {
		iser_err("Failed to set up Data-IN RDMA\n");
		return err;
	}
	mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];

	hdr->flags    |= ISER_RSV;
	hdr->read_stag = cpu_to_be32(mem_reg->rkey);
	hdr->read_va   = cpu_to_be64(mem_reg->sge.addr);

	iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
		 task->itt, mem_reg->rkey,
		 (unsigned long long)mem_reg->sge.addr);

	return 0;
}
/* Register user buffer memory and initialize passive rdma
 *  dto descriptor. Data size is stored in
 *  task->data[ISER_DIR_OUT].data_len, Protection size
 *  is stored at task->prot[ISER_DIR_OUT].data_len
 */
static int
iser_prepare_write_cmd(struct iscsi_task *task,
		       unsigned int imm_sz,
		       unsigned int unsol_sz,
		       unsigned int edtl)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	struct iser_mem_reg *mem_reg;
	int err;
	struct iser_hdr *hdr = &iser_task->desc.iser_header;
	struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
	struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];

	err = iser_dma_map_task_data(iser_task,
				     buf_out,
				     ISER_DIR_OUT,
				     DMA_TO_DEVICE);
	if (err)
		return err;

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];

		err = iser_dma_map_task_data(iser_task,
					     pbuf_out,
					     ISER_DIR_OUT,
					     DMA_TO_DEVICE);
		if (err)
			return err;
	}

	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
	if (err) {
		iser_err("Failed to register write cmd RDMA mem\n");
		return err;
	}

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];

	if (unsol_sz < edtl) {
		hdr->flags     |= ISER_WSV;
		hdr->write_stag = cpu_to_be32(mem_reg->rkey);
		hdr->write_va   = cpu_to_be64(mem_reg->sge.addr + unsol_sz);

		iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
			 "VA:%#llX + unsol:%d\n",
			 task->itt, mem_reg->rkey,
			 (unsigned long long)mem_reg->sge.addr, unsol_sz);
	}

	if (imm_sz > 0) {
		iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
			 task->itt, imm_sz);
		tx_dsg->addr = mem_reg->sge.addr;
		tx_dsg->length = imm_sz;
		tx_dsg->lkey = mem_reg->sge.lkey;
		iser_task->desc.num_sge = 2;
	}

	return 0;
}
/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *iser_conn,
				  struct iser_tx_desc *tx_desc)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	ib_dma_sync_single_for_cpu(device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;

	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
		tx_desc->tx_sg[0].lkey = device->mr->lkey;
		iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
	}
}
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;

	if (!iser_conn->login_buf)
		return;

	if (iser_conn->login_req_dma)
		ib_dma_unmap_single(device->ib_device,
				    iser_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);

	if (iser_conn->login_resp_dma)
		ib_dma_unmap_single(device->ib_device,
				    iser_conn->login_resp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	kfree(iser_conn->login_buf);

	/* make sure we never redo any unmapping */
	iser_conn->login_req_dma = 0;
	iser_conn->login_resp_dma = 0;
	iser_conn->login_buf = NULL;
}
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	int req_err, resp_err;

	BUG_ON(device == NULL);

	iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
				       ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!iser_conn->login_buf)
		goto out_err;

	iser_conn->login_req_buf  = iser_conn->login_buf;
	iser_conn->login_resp_buf = iser_conn->login_buf +
						ISCSI_DEF_MAX_RECV_SEG_LEN;

	iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
						     iser_conn->login_req_buf,
						     ISCSI_DEF_MAX_RECV_SEG_LEN,
						     DMA_TO_DEVICE);

	iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
						      iser_conn->login_resp_buf,
						      ISER_RX_LOGIN_SIZE,
						      DMA_FROM_DEVICE);

	req_err  = ib_dma_mapping_error(device->ib_device,
					iser_conn->login_req_dma);
	resp_err = ib_dma_mapping_error(device->ib_device,
					iser_conn->login_resp_dma);

	if (req_err || resp_err) {
		if (req_err)
			iser_conn->login_req_dma = 0;
		if (resp_err)
			iser_conn->login_resp_dma = 0;
		goto free_login_buf;
	}
	return 0;

free_login_buf:
	iser_free_login_buf(iser_conn);
out_err:
	iser_err("unable to alloc or map login buf\n");
	return -ENOMEM;
}
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session)
{
	int i, j;
	u64 dma_addr;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_conn->qp_max_recv_dtos = session->cmds_max;
	iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
	iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;

	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
		goto create_rdma_reg_res_failed;

	if (iser_alloc_login_buf(iser_conn))
		goto alloc_login_buf_fail;

	iser_conn->num_rx_descs = session->cmds_max;
	iser_conn->rx_descs = kmalloc(iser_conn->num_rx_descs *
				      sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!iser_conn->rx_descs)
		goto rx_desc_alloc_fail;

	rx_desc = iser_conn->rx_descs;

	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
					     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(device->ib_device, dma_addr))
			goto rx_desc_dma_map_failed;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr   = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey   = device->mr->lkey;
	}

	iser_conn->rx_desc_head = 0;
	return 0;

rx_desc_dma_map_failed:
	rx_desc = iser_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
	iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
	device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed:
	iser_err("failed allocating rx descriptors / data buffers\n");
	return -ENOMEM;
}
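/*
 * Sizing note for iser_alloc_rx_descriptors() above, with illustrative
 * numbers: because cmds_max is a power of two (2^N), a ring index can
 * wrap with a cheap AND instead of a modulo, e.g. cmds_max = 128 gives
 * qp_max_recv_dtos_mask = 0x7f, so head = (head + 1) & 0x7f is
 * equivalent to (head + 1) % 128.  The min_posted_rx watermark is a
 * quarter of the ring (128 >> 2 = 32), which is also the batch size
 * used when replenishing receive buffers.
 */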
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
	int i;
	struct iser_rx_desc *rx_desc;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (device->iser_free_rdma_reg_res)
		device->iser_free_rdma_reg_res(ib_conn);

	rx_desc = iser_conn->rx_descs;
	for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(iser_conn->rx_descs);
	/* make sure we never redo any unmapping */
	iser_conn->rx_descs = NULL;

	iser_free_login_buf(iser_conn);
}
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iscsi_session *session = conn->session;

	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
	/* check if this is the last login - going to full feature phase */
	if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
		return 0;

	/*
	 * Check that there is one posted recv buffer
	 * (for the last login response).
	 */
	WARN_ON(ib_conn->post_recv_buf_count != 1);

	if (session->discovery_sess) {
		iser_info("Discovery session, re-using login RX buffer\n");
		return 0;
	}

	iser_info("Normal session, posting batch of RX %d buffers\n",
		  iser_conn->min_posted_rx);

	/* Initial post receive buffers */
	if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
		return -ENOMEM;

	return 0;
}
static inline bool iser_signal_comp(u8 sig_count)
{
	return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
}
/**
 * iser_send_command - send command PDU
 */
int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	unsigned long edtl;
	int err;
	struct iser_data_buf *data_buf, *prot_buf;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
	struct scsi_cmnd *sc = task->sc;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	u8 sig_count = ++iser_conn->ib_conn.sig_count;

	edtl = ntohl(hdr->data_length);

	/* build the tx desc regd header and add it to the tx desc dto */
	tx_desc->type = ISCSI_TX_SCSI_COMMAND;
	iser_create_send_desc(iser_conn, tx_desc);

	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		data_buf = &iser_task->data[ISER_DIR_IN];
		prot_buf = &iser_task->prot[ISER_DIR_IN];
	} else {
		data_buf = &iser_task->data[ISER_DIR_OUT];
		prot_buf = &iser_task->prot[ISER_DIR_OUT];
	}

	if (scsi_sg_count(sc)) { /* using a scatter list */
		data_buf->sg = scsi_sglist(sc);
		data_buf->size = scsi_sg_count(sc);
	}
	data_buf->data_len = scsi_bufflen(sc);

	if (scsi_prot_sg_count(sc)) {
		prot_buf->sg = scsi_prot_sglist(sc);
		prot_buf->size = scsi_prot_sg_count(sc);
		prot_buf->data_len = (data_buf->data_len >>
				      ilog2(sc->device->sector_size)) * 8;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
		err = iser_prepare_read_cmd(task);
		if (err)
			goto send_command_error;
	}
	if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
		err = iser_prepare_write_cmd(task,
					     task->imm_count,
					     task->imm_count +
					     task->unsol_r2t.data_length,
					     edtl);
		if (err)
			goto send_command_error;
	}

	iser_task->status = ISER_TASK_STATUS_STARTED;

	err = iser_post_send(&iser_conn->ib_conn, tx_desc,
			     iser_signal_comp(sig_count));
	if (!err)
		return 0;

send_command_error:
	iser_err("conn %p failed task->itt %d err %d\n", conn, task->itt, err);
	return err;
}
/**
 * iser_send_data_out - send data out PDU
 */
int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = NULL;
	struct iser_mem_reg *mem_reg;
	unsigned long buf_offset;
	unsigned long data_seg_len;
	uint32_t itt;
	int err;
	struct ib_sge *tx_dsg;

	itt = (__force uint32_t)hdr->itt;
	data_seg_len = ntoh24(hdr->dlength);
	buf_offset = ntohl(hdr->offset);

	iser_dbg("%s itt %d dseg_len %d offset %d\n",
		 __func__, (int)itt, (int)data_seg_len, (int)buf_offset);

	tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
	if (tx_desc == NULL) {
		iser_err("Failed to alloc desc for post dataout\n");
		return -ENOMEM;
	}

	tx_desc->type = ISCSI_TX_DATAOUT;
	tx_desc->iser_header.flags = ISER_VER;
	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));

	/* build the tx desc */
	iser_initialize_task_headers(task, tx_desc);

	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
	tx_dsg = &tx_desc->tx_sg[1];
	tx_dsg->addr = mem_reg->sge.addr + buf_offset;
	tx_dsg->length = data_seg_len;
	tx_dsg->lkey = mem_reg->sge.lkey;
	tx_desc->num_sge = 2;

	if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
		iser_err("Offset:%ld & DSL:%ld in Data-Out "
			 "inconsistent with total len:%ld, itt:%d\n",
			 buf_offset, data_seg_len,
			 iser_task->data[ISER_DIR_OUT].data_len, itt);
		err = -EINVAL;
		goto send_data_out_error;
	}
	iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
		 itt, buf_offset, data_seg_len);

	err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
	if (!err)
		return 0;

send_data_out_error:
	kmem_cache_free(ig.desc_cache, tx_desc);
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}
int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task)
{
	struct iser_conn *iser_conn = conn->dd_data;
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *mdesc = &iser_task->desc;
	unsigned long data_seg_len;
	int err = 0;
	struct iser_device *device;

	/* build the tx desc regd header and add it to the tx desc dto */
	mdesc->type = ISCSI_TX_CONTROL;
	iser_create_send_desc(iser_conn, mdesc);

	device = iser_conn->ib_conn.device;

	data_seg_len = ntoh24(task->hdr->dlength);

	if (data_seg_len > 0) {
		struct ib_sge *tx_dsg = &mdesc->tx_sg[1];

		if (task != conn->login_task) {
			iser_err("data present on non login task!!!\n");
			goto send_control_error;
		}

		ib_dma_sync_single_for_cpu(device->ib_device,
					   iser_conn->login_req_dma, task->data_count,
					   DMA_TO_DEVICE);

		memcpy(iser_conn->login_req_buf, task->data, task->data_count);

		ib_dma_sync_single_for_device(device->ib_device,
					      iser_conn->login_req_dma, task->data_count,
					      DMA_TO_DEVICE);

		tx_dsg->addr = iser_conn->login_req_dma;
		tx_dsg->length = task->data_count;
		tx_dsg->lkey = device->mr->lkey;
		mdesc->num_sge = 2;
	}

	if (task == conn->login_task) {
		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
			 task->hdr->opcode, data_seg_len);
		err = iser_post_recvl(iser_conn);
		if (err)
			goto send_control_error;
		err = iser_post_rx_bufs(conn, task->hdr);
		if (err)
			goto send_control_error;
	}

	err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
	if (!err)
		return 0;

send_control_error:
	iser_err("conn %p failed err %d\n", conn, err);
	return err;
}
/**
 * iser_rcv_dto_completion - recv DTO completion
 */
void iser_rcv_completion(struct iser_rx_desc *rx_desc,
			 unsigned long rx_xfer_len,
			 struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding, count, err;

	/* differentiate between login to all other PDUs */
	if ((char *)rx_desc == iser_conn->login_resp_buf) {
		rx_dma = iser_conn->login_resp_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
	} else {
		rx_dma = rx_desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
	}

	ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
				   rx_buflen, DMA_FROM_DEVICE);

	hdr = &rx_desc->iscsi_header;

	iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
		 hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));

	iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
			rx_xfer_len - ISER_HEADERS_LEN);

	ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
				      rx_buflen, DMA_FROM_DEVICE);

	/* decrementing conn->post_recv_buf_count only --after-- freeing the   *
	 * task eliminates the need to worry on tasks which are completed in   *
	 * parallel to the execution of iser_conn_term. So the code that waits *
	 * for the posted rx bufs refcount to become zero handles everything   */
	ib_conn->post_recv_buf_count--;

	if (rx_dma == iser_conn->login_resp_dma)
		return;

	outstanding = ib_conn->post_recv_buf_count;
	if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
		count = min(iser_conn->qp_max_recv_dtos - outstanding,
			    iser_conn->min_posted_rx);
		err = iser_post_recvm(iser_conn, count);
		if (err)
			iser_err("posting %d rx bufs err %d\n", count, err);
	}
}
void iser_snd_completion(struct iser_tx_desc *tx_desc,
			 struct ib_conn *ib_conn)
{
	struct iscsi_task *task;
	struct iser_device *device = ib_conn->device;

	if (tx_desc->type == ISCSI_TX_DATAOUT) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		kmem_cache_free(ig.desc_cache, tx_desc);
		tx_desc = NULL;
	}

	if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
		/* this arithmetic is legal by libiscsi dd_data allocation */
		task = (void *) ((long)(void *)tx_desc -
				 sizeof(struct iscsi_task));
		if (task->hdr->itt == RESERVED_ITT)
			iscsi_put_task(task);
	}
}
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
	iser_task->status = ISER_TASK_STATUS_INIT;

	iser_task->dir[ISER_DIR_IN] = 0;
	iser_task->dir[ISER_DIR_OUT] = 0;

	iser_task->data[ISER_DIR_IN].data_len = 0;
	iser_task->data[ISER_DIR_OUT].data_len = 0;

	iser_task->prot[ISER_DIR_IN].data_len = 0;
	iser_task->prot[ISER_DIR_OUT].data_len = 0;

	memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
	       sizeof(struct iser_mem_reg));
	memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
	       sizeof(struct iser_mem_reg));
}
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	int is_rdma_data_aligned = 1;
	int is_rdma_prot_aligned = 1;
	int prot_count = scsi_prot_sg_count(iser_task->sc);

	/* if we were reading, copy back to unaligned sglist,
	 * anyway dma_unmap and free the copy
	 */
	if (iser_task->data[ISER_DIR_IN].orig_sg) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->data[ISER_DIR_OUT].orig_sg) {
		is_rdma_data_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->data[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->prot[ISER_DIR_IN].orig_sg) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_IN],
						ISER_DIR_IN);
	}

	if (iser_task->prot[ISER_DIR_OUT].orig_sg) {
		is_rdma_prot_aligned = 0;
		iser_finalize_rdma_unaligned_sg(iser_task,
						&iser_task->prot[ISER_DIR_OUT],
						ISER_DIR_OUT);
	}

	if (iser_task->dir[ISER_DIR_IN]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_IN],
						 DMA_FROM_DEVICE);
	}

	if (iser_task->dir[ISER_DIR_OUT]) {
		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
		if (is_rdma_data_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->data[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
		if (prot_count && is_rdma_prot_aligned)
			iser_dma_unmap_task_data(iser_task,
						 &iser_task->prot[ISER_DIR_OUT],
						 DMA_TO_DEVICE);
	}
}