/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");

/*
 * Check if the device might use memory registration.  This is currently only
 * true for iWarp devices.  In the future we can hopefully fine tune this based
 * on HCA driver input.
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * We currently always use memory registrations for iWarp RDMA READs, and
 * have a debug option to force usage of MRs.
 *
 * XXX: In the future we can hopefully fine tune this based on HCA driver
 * input.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
{
	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
}

/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < 0 || ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct rdma_rw_reg_ctx *prev = NULL;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		offset = 0;
		prev = reg;
	}

	if (prev)
		prev->wr.wr.next = NULL;

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}

static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
		      qp->max_read_sge;
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.num_sge = nr_sge;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			sge->addr = ib_sg_dma_address(dev, sg) + offset;
			sge->length = ib_sg_dma_len(dev, sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
			&ctx->map.wrs[i + 1].wr : NULL;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
	ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}

/**
 * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the work queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = ib_sg_dma_len(dev, sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
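
/*
 * Illustrative only: a minimal sketch of how a ULP might drive the rdma_rw_ctx
 * API for a single RDMA READ, assuming an already connected QP and a populated
 * scatterlist (rdma_rw_ctx_init() performs the DMA mapping itself).  All
 * "my_*" identifiers are hypothetical and not part of this file; error
 * handling is reduced to the bare minimum.
 *
 *	ret = rdma_rw_ctx_init(&my_ctx, my_qp, my_port, my_sg, my_sg_cnt, 0,
 *			       my_remote_addr, my_rkey, DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = rdma_rw_ctx_post(&my_ctx, my_qp, my_port, &my_cqe, NULL);
 *	if (ret)
 *		goto out_destroy;
 *
 *	... wait for the completion handler attached to my_cqe ...
 *
 * out_destroy:
 *	rdma_rw_ctx_destroy(&my_ctx, my_qp, my_port, my_sg, my_sg_cnt,
 *			    DMA_FROM_DEVICE);
 */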

/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:	signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the work queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
	struct ib_rdma_wr *rdma_wr;
	struct ib_send_wr *prev_wr = NULL;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large\n");
		return -EINVAL;
	}

	ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
	if (!ret) {
		ret = -ENOMEM;
		goto out_unmap_sg;
	}
	prot_sg_cnt = ret;

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
	if (!ctx->sig) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
	if (ret < 0)
		goto out_free_ctx;
	count += ret;
	prev_wr = &ctx->sig->data.reg_wr.wr;

	ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
				  prot_sg, prot_sg_cnt, 0);
	if (ret < 0)
		goto out_destroy_data_mr;
	count += ret;

	if (ctx->sig->prot.inv_wr.next)
		prev_wr->next = &ctx->sig->prot.inv_wr;
	else
		prev_wr->next = &ctx->sig->prot.reg_wr.wr;
	prev_wr = &ctx->sig->prot.reg_wr.wr;

	ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->sig->sig_mr) {
		ret = -EAGAIN;
		goto out_destroy_prot_mr;
	}

	if (ctx->sig->sig_mr->need_inval) {
		memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));

		ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
		ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;

		prev_wr->next = &ctx->sig->sig_inv_wr;
		prev_wr = &ctx->sig->sig_inv_wr;
	}

	ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	ctx->sig->sig_wr.wr.wr_cqe = NULL;
	ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
	ctx->sig->sig_wr.wr.num_sge = 1;
	ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	ctx->sig->sig_wr.sig_attrs = sig_attrs;
	ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
	if (prot_sg_cnt)
		ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
	prev_wr->next = &ctx->sig->sig_wr.wr;
	prev_wr = &ctx->sig->sig_wr.wr;
	count++;

	ctx->sig->sig_sge.addr = 0;
	ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
	if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
		ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;

	rdma_wr = &ctx->sig->data.wr;
	rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	prev_wr->next = &rdma_wr->wr;
	prev_wr = &rdma_wr->wr;
	count++;

	return count;

out_destroy_prot_mr:
	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
out_destroy_data_mr:
	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
out_free_ctx:
	kfree(ctx->sig);
out_unmap_prot_sg:
	ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
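
/*
 * Illustrative only: a rough sketch of the signature (T10-PI) variant,
 * assuming the QP was created with IB_QP_CREATE_SIGNATURE_EN and the ULP has
 * filled in a struct ib_sig_attrs describing the wire and memory domains.
 * All "my_*" identifiers are hypothetical and not part of this file.
 *
 *	ret = rdma_rw_ctx_signature_init(&my_ctx, my_qp, my_port,
 *			my_data_sg, my_data_sg_cnt,
 *			my_prot_sg, my_prot_sg_cnt,
 *			&my_sig_attrs, my_remote_addr, my_rkey,
 *			DMA_FROM_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = rdma_rw_ctx_post(&my_ctx, my_qp, my_port, &my_cqe, NULL);
 *	...
 *	rdma_rw_ctx_destroy_signature(&my_ctx, my_qp, my_port,
 *			my_data_sg, my_data_sg_cnt,
 *			my_prot_sg, my_prot_sg_cnt, DMA_FROM_DEVICE);
 */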

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs.  If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for a RDMA READ or WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed.  If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
		rdma_rw_update_lkey(&ctx->sig->data, true);
		if (ctx->sig->prot.mr)
			rdma_rw_update_lkey(&ctx->sig->prot, true);

		ctx->sig->sig_mr->need_inval = true;
		ib_update_fast_reg_key(ctx->sig->sig_mr,
				       ib_inc_rkey(ctx->sig->sig_mr->lkey));
		ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;

		if (ctx->sig->data.inv_wr.next)
			first_wr = &ctx->sig->data.inv_wr;
		else
			first_wr = &ctx->sig->data.reg_wr.wr;
		last_wr = &ctx->sig->data.wr.wr;
		break;
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
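
/*
 * Illustrative only: rdma_rw_ctx_wrs() is useful when the caller wants to
 * append its own WR (for example a SEND carrying a response) behind the
 * RDMA READ/WRITE chain and post everything with one ib_post_send() call.
 * A hedged sketch with hypothetical "my_*" identifiers:
 *
 *	my_send_wr.wr_cqe = &my_cqe;
 *	my_send_wr.send_flags = IB_SEND_SIGNALED;
 *
 *	first_wr = rdma_rw_ctx_wrs(&my_ctx, my_qp, my_port, NULL, &my_send_wr);
 *	ret = ib_post_send(my_qp, first_wr, NULL);
 */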

/**
 * rdma_rw_ctx_post - post a RDMA READ or RDMA WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed.  If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted.  If @chain_wr
 * is not set @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u8 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
	ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);

	if (ctx->sig->prot.mr) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
		ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
	}

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
	kfree(ctx->sig);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device:	device handling the connection
 * @port_num:	port num to which the connection is bound
 * @maxpages:	maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move @maxpages
 * pages.  The returned value is used during transport creation to
 * compute max_rdma_ctxs and the size of the transport's Send and
 * Send Completion Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
			       unsigned int maxpages)
{
	unsigned int mr_pages;

	if (rdma_rw_can_use_mr(device, port_num))
		mr_pages = rdma_rw_fr_page_list_len(device);
	else
		mr_pages = device->attrs.max_sge_rd;
	return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);
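
/*
 * Worked example (assumed numbers, for illustration only): on an iWarp device
 * whose attrs.max_fast_reg_page_list_len is at least 256,
 * rdma_rw_fr_page_list_len() caps each MR at 256 pages, so a 4 MB payload of
 * 4 KB pages (1024 pages) needs DIV_ROUND_UP(1024, 256) = 4 MRs per
 * rdma_rw_ctx.  On a device that does not need MRs, attrs.max_sge_rd is the
 * divisor instead.
 */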

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional MRs for the registrations and the
	 * invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		factor += 6;	/* (inv + reg) * (data + prot + sig) */
	else if (rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get..
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
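
/*
 * Worked example (assumed numbers): with attr->cap.max_rdma_ctxs = 128 on an
 * iWarp port without signature offload, the factor above is 1 + 2 (one
 * READ/WRITE WR plus inv + reg), so max_send_wr grows by 3 * 128 = 384
 * entries before being clamped to dev->attrs.max_qp_wr.
 */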

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs * 2;
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				rdma_rw_fr_page_list_len(dev));
		if (ret) {
			pr_err("%s: failed to allocate %d MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_SIGNATURE, 2);
		if (ret) {
			pr_err("%s: failed to allocate %d SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}