/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

static void
iser_free_bounce_sg(struct iser_data_buf *data)
{
	struct scatterlist *sg;
	int count;

	/* release the bounce pages and the bounce sg table itself */
	for_each_sg(data->sg, sg, data->size, count)
		__free_page(sg_page(sg));

	kfree(data->sg);

	/* restore the caller's original sg list */
	data->sg = data->orig_sg;
	data->size = data->orig_size;
	data->orig_sg = NULL;
	data->orig_size = 0;
}
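
/*
 * Allocate a page-aligned bounce scatterlist big enough to hold
 * data->data_len bytes. On success the caller's sg/size are stashed in
 * orig_sg/orig_size so iser_free_bounce_sg() can restore them later.
 */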
static int
iser_alloc_bounce_sg(struct iser_data_buf *data)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned long length = data->data_len;
	int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);

	sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		goto err;

	sg_init_table(sg, nents);
	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto err;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}

	data->orig_sg = data->sg;
	data->orig_size = data->size;
	data->sg = sg;
	data->size = nents;

	return 0;

err:
	for (; i > 0; i--)
		__free_page(sg_page(&sg[i - 1]));
	kfree(sg);

	return -ENOMEM;
}
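
/*
 * Copy between the original scatterlist and the bounce scatterlist.
 * The two lists may use different element sizes, so each original entry
 * is walked against as many bounce pages as needed; to_buffer selects
 * the direction (true: original -> bounce, used for writes).
 */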
static void
iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
{
	struct scatterlist *osg, *bsg = data->sg;
	void *oaddr, *baddr;
	unsigned int left = data->data_len;
	unsigned int bsg_off = 0;
	int i;

	for_each_sg(data->orig_sg, osg, data->orig_size, i) {
		unsigned int copy_len, osg_off = 0;

		oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
		copy_len = min(left, osg->length);
		while (copy_len) {
			unsigned int len = min(copy_len, bsg->length - bsg_off);

			baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
			if (to_buffer)
				memcpy(baddr + bsg_off, oaddr + osg_off, len);
			else
				memcpy(oaddr + osg_off, baddr + bsg_off, len);

			kunmap_atomic(baddr - bsg->offset);
			osg_off += len;
			bsg_off += len;
			left -= len;
			copy_len -= len;

			if (bsg_off >= bsg->length) {
				bsg = sg_next(bsg);
				bsg_off = 0;
			}
		}
		kunmap_atomic(oaddr - osg->offset);
	}
}

static void
iser_copy_from_bounce(struct iser_data_buf *data)
{
	iser_copy_bounce(data, false);
}

static void
iser_copy_to_bounce(struct iser_data_buf *data)
{
	iser_copy_bounce(data, true);
}
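
/*
 * Fast registration descriptors are pre-allocated per connection and kept
 * on ib_conn->fastreg.pool; the get/put helpers below only move them on
 * and off that list under ib_conn->lock.
 */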
struct fast_reg_descriptor *
iser_reg_desc_get(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc;
	unsigned long flags;

	spin_lock_irqsave(&ib_conn->lock, flags);
	desc = list_first_entry(&ib_conn->fastreg.pool,
				struct fast_reg_descriptor, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&ib_conn->lock, flags);

	return desc;
}

void
iser_reg_desc_put(struct ib_conn *ib_conn,
		  struct fast_reg_descriptor *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&ib_conn->lock, flags);
	list_add(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_irqrestore(&ib_conn->lock, flags);
}

/**
 * iser_start_rdma_unaligned_sg - allocate a bounce buffer for a misaligned
 * SG list, copy the payload into it for writes, and DMA-map the bounce SG.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					struct iser_data_buf *data,
					enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
	int rc;

	rc = iser_alloc_bounce_sg(data);
	if (rc) {
		iser_err("Failed to allocate bounce for data len %lu\n",
			 data->data_len);
		return rc;
	}

	if (cmd_dir == ISER_DIR_OUT)
		iser_copy_to_bounce(data);

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
					(cmd_dir == ISER_DIR_OUT) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!data->dma_nents) {
		iser_err("Got dma_nents %d, something went wrong...\n",
			 data->dma_nents);
		rc = -ENOMEM;
		goto err;
	}

	return 0;
err:
	iser_free_bounce_sg(data);
	return rc;
}

/**
 * iser_finalize_rdma_unaligned_sg - unmap the bounce buffer, copy the payload
 * back to the original SG list for reads, and release the bounce pages.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *data,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;

	ib_dma_unmap_sg(dev, data->sg, data->size,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN)
		iser_copy_from_bounce(data);

	iser_free_bounce_sg(data);
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
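
/*
 * Illustrative example: with MASK_4K == ~(SIZE_4K - 1), an address such as
 * 0x10234 is not 4K aligned (its low bits 0x234 survive the & ~MASK_4K),
 * while 0x10000 is.
 */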

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where a few fragments of the same page appear in the SG as consecutive
 * elements, and it handles a single-entry SG.
 */
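/*
 * Illustrative example: three DMA-contiguous entries covering
 * [0x10000, 0x13000) compact into pages 0x10000, 0x11000, 0x12000,
 * with *offset == 0 and *data_size == 0x3000.
 */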
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = data->sg;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page  = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
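/*
 * Illustrative example: the walk counts the leading prefix in which each
 * boundary between consecutive entries is either physically contiguous or
 * falls on a 4K boundary; the first entry violating this stops the walk.
 */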
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				     struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = data->sg;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i + 1;

	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->mr->lkey;
	reg->rkey = device->mr->rkey;
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}
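
/*
 * Called when the SG list is not RDMA-aligned (or FMR is unsupported):
 * tear down the original DMA mapping, copy the data into a page-aligned
 * bounce buffer and DMA-map that buffer instead.
 */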
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *mem,
			      enum iser_data_dir cmd_dir,
			      int aligned_len)
{
	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;

	iscsi_conn->fmr_unalign_cnt++;
	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
		  aligned_len, mem->size);

	if (iser_debug_level > 0)
		iser_data_buf_dump(mem, device->ib_device);

	/* unmap the command data before accessing it */
	iser_dma_unmap_task_data(iser_task, mem,
				 (cmd_dir == ISER_DIR_OUT) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/* allocate copy buf, if we are writing, copy the */
	/* unaligned scatterlist, dma map the copy        */
	if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
		return -ENOMEM;

	return 0;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg *mem_reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_pool_fmr *fmr;
	int ret, plen;

	plen = iser_sg_to_page_vec(mem, device->ib_device,
				   page_vec->pages,
				   &page_vec->offset,
				   &page_vec->data_size);
	page_vec->length = plen;
	if (plen * SIZE_4K < page_vec->data_size) {
		iser_err("page vec too short to hold this SG\n");
		iser_data_buf_dump(mem, device->ib_device);
		iser_dump_page_vec(page_vec);
		return -EINVAL;
	}

	fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				   page_vec->pages,
				   page_vec->length,
				   page_vec->pages[0]);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
		return ret;
	}

	mem_reg->sge.lkey = fmr->fmr->lkey;
	mem_reg->rkey = fmr->fmr->rkey;
	mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset;
	mem_reg->sge.length = page_vec->data_size;
	mem_reg->mem_h = fmr;

	return 0;
}

/**
 * Unregister memory previously registered using FMR.
 * Does nothing if the memory was not FMR-registered.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
			  reg->mem_h);
	reg->mem_h = NULL;
}

/**
 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
 * using FMR (if possible), obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
			  enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg;
	int aligned_len;
	int err;
	int i;

	mem_reg = &iser_task->rdma_reg[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem,
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		return iser_reg_dma(device, mem, mem_reg);
	} else { /* use FMR for multiple dma entries */
		err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
					mem_reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->fmr.page_vec->data_size,
				 ib_conn->fmr.page_vec->length,
				 ib_conn->fmr.page_vec->offset);
			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}
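
/*
 * T10-PI (DIF) support: the helpers below translate the SCSI protection
 * operation and flags carried by the scsi_cmnd into the ib_sig_attrs
 * memory/wire domains consumed by signature MR registration.
 */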
static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}

static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= ISER_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= ISER_CHECK_GUARD;
}
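
/*
 * Build a LOCAL_INV WR for a previously used MR and bump its key with
 * ib_inc_rkey()/ib_update_fast_reg_key() so the next fast registration
 * uses a fresh rkey.
 */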
static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}

static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct fast_reg_descriptor *desc,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_pi_context *pi_ctx = desc->pi_ctx;
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);

	if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
		iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &data_reg->sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		sig_wr.wr.sig_handover.prot = &prot_reg->sge;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
					      IB_ACCESS_REMOTE_READ |
					      IB_ACCESS_REMOTE_WRITE;

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("reg_sig_mr failed, ret:%d\n", ret);
		goto err;
	}
	desc->reg_indicators &= ~ISER_SIG_KEY_VALID;

	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
	sig_reg->rkey = pi_ctx->sig_mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}
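
/*
 * Fast registration path: translate the SG list into the descriptor's page
 * list, optionally chain a LOCAL_INV for a stale key, and post a
 * FAST_REG_MR WR that yields an lkey/rkey covering the buffer.
 */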
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct fast_reg_descriptor *desc,
			    enum iser_reg_indicator ind,
			    struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, offset, size, plen;

	/* if there is a single dma entry, the dma mr suffices */
	if (mem->dma_nents == 1)
		return iser_reg_dma(device, mem, reg);

	if (ind == ISER_DATA_KEY_VALID) {
		mr = desc->data_mr;
		frpl = desc->data_frpl;
	} else {
		mr = desc->pi_ctx->prot_mr;
		frpl = desc->pi_ctx->prot_frpl;
	}

	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
				   &offset, &size);
	if (plen * SIZE_4K < size) {
		iser_err("fast reg page_list too short to hold this SG\n");
		return -EINVAL;
	}

	if (!(desc->reg_indicators & ind)) {
		iser_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = frpl;
	fastreg_wr.wr.fast_reg.page_list_len = plen;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = size;
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	if (!wr)
		wr = &fastreg_wr;
	else
		wr->next = &fastreg_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	desc->reg_indicators &= ~ind;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = frpl->page_list[0] + offset;
	reg->sge.length = size;

	return ret;
}

/**
 * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
 * using Fast Registration WR (if possible), obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
	struct fast_reg_descriptor *desc = NULL;
	int err, aligned_len;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem,
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	if (mem->dma_nents != 1 ||
	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		desc = iser_reg_desc_get(ib_conn);
		mem_reg->mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_task, mem, desc,
			       ISER_DATA_KEY_VALID, mem_reg);
	if (err)
		goto err_reg;

	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg prot_reg;

		memset(&prot_reg, 0, sizeof(prot_reg));
		if (scsi_prot_sg_count(iser_task->sc)) {
			mem = &iser_task->prot[cmd_dir];
			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
			if (aligned_len != mem->dma_nents) {
				err = fall_to_bounce_buf(iser_task, mem,
							 cmd_dir, aligned_len);
				if (err) {
					iser_err("failed to allocate bounce buffer\n");
					return err;
				}
			}

			err = iser_fast_reg_mr(iser_task, mem, desc,
					       ISER_PROT_KEY_VALID, &prot_reg);
			if (err)
				goto err_reg;
		}

		err = iser_reg_sig_mr(iser_task, desc, mem_reg,
				      &prot_reg, mem_reg);
		if (err) {
			iser_err("Failed to register signature mr\n");
			goto err_reg;
		}
		desc->reg_indicators |= ISER_FASTREG_PROTECTED;
	}

	return 0;
err_reg:
	if (desc)
		iser_reg_desc_put(ib_conn, desc);

	return err;
}