/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "rxe.h"
#include "rxe_loc.h"

/*
 * lfsr (linear feedback shift register) with period 255
 */
static u8 rxe_get_key(void)
{
	static u32 key = 1;

	key = key << 1;

	key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
		^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));

	key &= 0xff;

	return key;
}
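/*
 * Illustrative sketch (not part of the driver, compiled out): running the
 * same feedback step standalone counts how many updates it takes for the
 * low byte to return to its starting value, which should confirm the
 * "period 255" claim in the comment above. The helper name is hypothetical.
 */
#if 0
static unsigned int rxe_lfsr_period(void)
{
	u32 key = 1;
	unsigned int n = 0;

	do {
		key = key << 1;
		key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
			^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));
		key &= 0xff;
		n++;
	} while (key != 1);

	return n;	/* expected to be 255 */
}
#endif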
int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
{
	switch (mem->type) {
	case RXE_MEM_TYPE_DMA:
		return 0;

	case RXE_MEM_TYPE_MR:
	case RXE_MEM_TYPE_FMR:
		if (iova < mem->iova ||
		    length > mem->length ||
		    iova > mem->iova + mem->length - length)
			return -EFAULT;
		return 0;

	default:
		return -EFAULT;
	}
}
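/*
 * Worked example (illustrative, values assumed): for an MR registered with
 * mem->iova = 0x1000 and mem->length = 0x2000 (covering [0x1000, 0x3000)),
 * a request with iova = 0x2e00 and length = 0x200 ends exactly at 0x3000
 * and passes, while iova = 0x2f00 with the same length fails the third
 * test (0x2f00 > 0x1000 + 0x2000 - 0x200 = 0x2e00) because it would run
 * past the end of the region.
 */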
#define IB_ACCESS_REMOTE	(IB_ACCESS_REMOTE_READ		\
				| IB_ACCESS_REMOTE_WRITE	\
				| IB_ACCESS_REMOTE_ATOMIC)
static void rxe_mem_init(int access, struct rxe_mem *mem)
{
	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;

	if (mem->pelem.pool->type == RXE_TYPE_MR) {
		mem->ibmr.lkey		= lkey;
		mem->ibmr.rkey		= rkey;
	}

	mem->lkey		= lkey;
	mem->rkey		= rkey;
	mem->state		= RXE_MEM_STATE_INVALID;
	mem->type		= RXE_MEM_TYPE_NONE;
	mem->map_shift		= ilog2(RXE_BUF_PER_MAP);
}
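/*
 * Key layout note (derived from the code above and from lookup_mem()
 * below): the upper bits of an lkey/rkey carry the MR's pool index and
 * the low 8 bits carry the lfsr-generated key byte, so lookup_mem() can
 * recover the pool index with "key >> 8" and then compare the full key
 * against mem->lkey or mem->rkey.
 */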
void rxe_mem_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
	int i;

	if (mem->umem)
		ib_umem_release(mem->umem);

	if (mem->map) {
		for (i = 0; i < mem->num_map; i++)
			kfree(mem->map[i]);

		kfree(mem->map);
	}
}
static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
{
	int i;
	int num_map;
	struct rxe_map **map = mem->map;

	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;

	mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
	if (!mem->map)
		goto err1;

	for (i = 0; i < num_map; i++) {
		mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
		if (!mem->map[i])
			goto err2;
	}

	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));

	mem->map_shift	= ilog2(RXE_BUF_PER_MAP);
	mem->map_mask	= RXE_BUF_PER_MAP - 1;

	mem->num_buf	= num_buf;
	mem->num_map	= num_map;
	mem->max_buf	= num_map * RXE_BUF_PER_MAP;

	return 0;

err2:
	for (i--; i >= 0; i--)
		kfree(mem->map[i]);

	kfree(mem->map);
err1:
	return -ENOMEM;
}
int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
		     int access, struct rxe_mem *mem)
{
	rxe_mem_init(access, mem);

	mem->pd			= pd;
	mem->access		= access;
	mem->state		= RXE_MEM_STATE_VALID;
	mem->type		= RXE_MEM_TYPE_DMA;

	return 0;
}
int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mem)
{
	int			entry;
	struct rxe_map		**map;
	struct rxe_phys_buf	*buf = NULL;
	struct ib_umem		*umem;
	struct scatterlist	*sg;
	int			num_buf;
	void			*vaddr;
	int			err;

	umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
	if (IS_ERR(umem)) {
		pr_warn("err %d from rxe_umem_get\n",
			(int)PTR_ERR(umem));
		err = -EINVAL;
		goto err1;
	}

	mem->umem = umem;
	num_buf = umem->nmap;

	rxe_mem_init(access, mem);

	err = rxe_mem_alloc(rxe, mem, num_buf);
	if (err) {
		pr_warn("err %d from rxe_mem_alloc\n", err);
		ib_umem_release(umem);
		goto err1;
	}

	mem->page_shift		= umem->page_shift;
	mem->page_mask		= BIT(umem->page_shift) - 1;

	num_buf			= 0;
	map			= mem->map;
	if (length > 0) {
		buf = map[0]->buf;

		/* walk the pinned pages and record their kernel addresses */
		for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
			vaddr = page_address(sg_page(sg));
			if (!vaddr) {
				pr_warn("null vaddr\n");
				err = -ENOMEM;
				goto err1;
			}

			buf->addr = (uintptr_t)vaddr;
			buf->size = BIT(umem->page_shift);
			num_buf++;
			buf++;

			if (num_buf >= RXE_BUF_PER_MAP) {
				map++;
				buf = map[0]->buf;
				num_buf = 0;
			}
		}
	}

	mem->pd			= pd;
	mem->umem		= umem;
	mem->access		= access;
	mem->length		= length;
	mem->iova		= iova;
	mem->va			= start;
	mem->offset		= ib_umem_offset(umem);
	mem->state		= RXE_MEM_STATE_VALID;
	mem->type		= RXE_MEM_TYPE_MR;

	return 0;

err1:
	return err;
}
int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem)
{
	int err;

	rxe_mem_init(0, mem);

	/* In fastreg, we also set the rkey */
	mem->ibmr.rkey = mem->ibmr.lkey;

	err = rxe_mem_alloc(rxe, mem, max_pages);
	if (err)
		goto err1;

	mem->pd			= pd;
	mem->max_buf		= max_pages;
	mem->state		= RXE_MEM_STATE_FREE;
	mem->type		= RXE_MEM_TYPE_MR;

	return 0;

err1:
	return err;
}
static void lookup_iova(
	struct rxe_mem	*mem,
	u64			iova,
	int			*m_out,
	int			*n_out,
	size_t			*offset_out)
{
	size_t			offset = iova - mem->iova + mem->offset;
	int			map_index;
	int			buf_index;
	u64			length;

	if (likely(mem->page_shift)) {
		/* fixed page size: pure shift/mask arithmetic */
		*offset_out = offset & mem->page_mask;
		offset >>= mem->page_shift;
		*n_out = offset & mem->map_mask;
		*m_out = offset >> mem->map_shift;
	} else {
		/* variable buffer sizes: walk the buffers sequentially */
		map_index = 0;
		buf_index = 0;

		length = mem->map[map_index]->buf[buf_index].size;

		while (offset >= length) {
			offset -= length;
			buf_index++;

			if (buf_index == RXE_BUF_PER_MAP) {
				map_index++;
				buf_index = 0;
			}
			length = mem->map[map_index]->buf[buf_index].size;
		}

		*m_out = map_index;
		*n_out = buf_index;
		*offset_out = offset;
	}
}
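/*
 * Decomposition example (illustrative, values assumed): with 4 KiB pages
 * (page_shift = 12, page_mask = 0xfff), RXE_BUF_PER_MAP = 256
 * (map_shift = 8, map_mask = 0xff), mem->iova = 0x10000 and
 * mem->offset = 0, an iova of 0x153a24 gives offset = 0x143a24, so
 * *offset_out = 0xa24, the page number is 0x143 (323), *n_out is
 * 0x143 & 0xff = 0x43 (buffer 67) and *m_out is 0x143 >> 8 = 1
 * (the second map block).
 */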
void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
{
	size_t offset;
	int m, n;
	void *addr;

	if (mem->state != RXE_MEM_STATE_VALID) {
		pr_warn("mem not in valid state\n");
		addr = NULL;
		goto out;
	}

	if (!mem->map) {
		addr = (void *)(uintptr_t)iova;
		goto out;
	}

	if (mem_check_range(mem, iova, length)) {
		pr_warn("range violation\n");
		addr = NULL;
		goto out;
	}

	lookup_iova(mem, iova, &m, &n, &offset);

	if (offset + length > mem->map[m]->buf[n].size) {
		pr_warn("crosses page boundary\n");
		addr = NULL;
		goto out;
	}

	addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;

out:
	return addr;
}
/* copy data from a range (vaddr, vaddr+length-1) to or from
 * a mem object starting at iova. Compute the incremental value of
 * crc32 if crcp is not NULL. The caller must hold a reference to mem.
 */
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
		 enum copy_direction dir, u32 *crcp)
{
	int			err;
	int			bytes;
	u8			*va;
	struct rxe_map		**map;
	struct rxe_phys_buf	*buf;
	int			m;
	int			i;
	size_t			offset;
	u32			crc = crcp ? (*crcp) : 0;

	if (length == 0)
		return 0;

	if (mem->type == RXE_MEM_TYPE_DMA) {
		u8 *src, *dest;

		src  = (dir == to_mem_obj) ?
			addr : ((void *)(uintptr_t)iova);

		dest = (dir == to_mem_obj) ?
			((void *)(uintptr_t)iova) : addr;

		memcpy(dest, src, length);

		if (crcp)
			*crcp = rxe_crc32(to_rdev(mem->pd->ibpd.device),
					  *crcp, dest, length);

		return 0;
	}

	WARN_ON_ONCE(!mem->map);

	err = mem_check_range(mem, iova, length);
	if (err) {
		err = -EFAULT;
		goto err1;
	}

	lookup_iova(mem, iova, &m, &i, &offset);

	map	= mem->map + m;
	buf	= map[0]->buf + i;

	while (length > 0) {
		u8 *src, *dest;

		va	= (u8 *)(uintptr_t)buf->addr + offset;
		src  = (dir == to_mem_obj) ? addr : va;
		dest = (dir == to_mem_obj) ? va : addr;

		bytes	= buf->size - offset;

		if (bytes > length)
			bytes = length;

		memcpy(dest, src, bytes);

		if (crcp)
			crc = rxe_crc32(to_rdev(mem->pd->ibpd.device),
					crc, dest, bytes);

		length	-= bytes;
		addr	+= bytes;

		offset	= 0;
		buf++;
		i++;

		if (i == RXE_BUF_PER_MAP) {
			i = 0;
			map++;
			buf = map[0]->buf;
		}
	}

	if (crcp)
		*crcp = crc;

	return 0;

err1:
	return err;
}
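/*
 * Usage sketch (illustrative only, compiled out): a hypothetical caller
 * copying a received payload into a registered MR while folding the bytes
 * into a running CRC, in the style of the responder path. "pkt_payload",
 * "payload_len" and the helper name are assumed names, not driver API.
 */
#if 0
static int example_write_payload(struct rxe_mem *mem, u64 iova,
				 void *pkt_payload, int payload_len,
				 u32 *icrc)
{
	/* dir == to_mem_obj: copy from pkt_payload into the MR at iova */
	return rxe_mem_copy(mem, iova, pkt_payload, payload_len,
			    to_mem_obj, icrc);
}
#endif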
/* copy data in or out of a wqe, i.e. sg list
 * under the control of a dma descriptor
 */
int copy_data(
	struct rxe_pd		*pd,
	int			access,
	struct rxe_dma_info	*dma,
	void			*addr,
	int			length,
	enum copy_direction	dir,
	u32			*crcp)
{
	int			bytes;
	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
	int			offset	= dma->sge_offset;
	int			resid	= dma->resid;
	struct rxe_mem		*mem	= NULL;
	u64			iova;
	int			err;

	if (length == 0)
		return 0;

	if (length > resid) {
		err = -EINVAL;
		goto err2;
	}

	if (sge->length && (offset < sge->length)) {
		mem = lookup_mem(pd, access, sge->lkey, lookup_local);
		if (!mem) {
			err = -EINVAL;
			goto err1;
		}
	}

	while (length > 0) {
		bytes = length;

		if (offset >= sge->length) {
			/* current sge exhausted: move on to the next one */
			if (mem) {
				rxe_drop_ref(mem);
				mem = NULL;
			}
			sge++;
			dma->cur_sge++;
			offset = 0;

			if (dma->cur_sge >= dma->num_sge) {
				err = -ENOSPC;
				goto err2;
			}

			if (sge->length) {
				mem = lookup_mem(pd, access, sge->lkey,
						 lookup_local);
				if (!mem) {
					err = -EINVAL;
					goto err1;
				}
			} else {
				continue;
			}
		}

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		if (bytes > 0) {
			iova = sge->addr + offset;

			err = rxe_mem_copy(mem, iova, addr, bytes,
					   dir, crcp);
			if (err)
				goto err2;

			offset	+= bytes;
			resid	-= bytes;
			length	-= bytes;
			addr	+= bytes;
		}
	}

	dma->sge_offset = offset;
	dma->resid	= resid;

	if (mem)
		rxe_drop_ref(mem);

	return 0;

err2:
	if (mem)
		rxe_drop_ref(mem);
err1:
	return err;
}
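/*
 * State-tracking example (illustrative, values assumed): with two SGEs of
 * 1000 and 3000 bytes and dma->resid = 4000, a 1500-byte copy_data() call
 * consumes the remaining 1000 bytes of SGE 0, advances cur_sge to 1,
 * copies 500 bytes from SGE 1, and leaves sge_offset = 500 and
 * resid = 2500 so the next call resumes where this one stopped.
 */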
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
	int			offset	= dma->sge_offset;
	int			resid	= dma->resid;

	while (length) {
		unsigned int bytes;

		if (offset >= sge->length) {
			sge++;
			dma->cur_sge++;
			offset = 0;
			if (dma->cur_sge >= dma->num_sge)
				return -ENOSPC;
		}

		bytes = length;

		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		offset	+= bytes;
		resid	-= bytes;
		length	-= bytes;
	}

	dma->sge_offset = offset;
	dma->resid	= resid;

	return 0;
}
/* (1) find the mem (mr or mw) corresponding to lkey/rkey
 *     depending on lookup_type
 * (2) verify that the (qp) pd matches the mem pd
 * (3) verify that the mem can support the requested access
 * (4) verify that mem state is valid
 */
struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type)
{
	struct rxe_mem *mem;
	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
	int index = key >> 8;

	if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
		mem = rxe_pool_get_index(&rxe->mr_pool, index);
		if (!mem)
			goto err1;
	} else {
		goto err1;
	}

	if ((type == lookup_local && mem->lkey != key) ||
	    (type == lookup_remote && mem->rkey != key))
		goto err2;

	if (mem->pd != pd)
		goto err2;

	if (access && !(access & mem->access))
		goto err2;

	if (mem->state != RXE_MEM_STATE_VALID)
		goto err2;

	return mem;

err2:
	rxe_drop_ref(mem);
err1:
	return NULL;
}
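/*
 * Usage sketch (illustrative only, compiled out): a hypothetical remote
 * access check in the style of the responder's rkey validation. On
 * success the caller owns a reference to the mem and must drop it with
 * rxe_drop_ref() when done. The helper name is assumed, not driver API.
 */
#if 0
static struct rxe_mem *example_check_rkey(struct rxe_pd *pd, u32 rkey)
{
	struct rxe_mem *mem;

	mem = lookup_mem(pd, IB_ACCESS_REMOTE_WRITE, rkey, lookup_remote);
	if (!mem)
		return NULL;	/* bad key, wrong pd, access or state */

	return mem;		/* caller must rxe_drop_ref(mem) */
}
#endif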
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova)
{
	int i;
	int num_buf;
	int err;
	struct rxe_map **map;
	struct rxe_phys_buf *buf;
	int page_size;

	if (num_pages > mem->max_buf) {
		err = -EINVAL;
		goto err1;
	}

	num_buf		= 0;
	page_size	= 1 << mem->page_shift;
	map		= mem->map;
	buf		= map[0]->buf;

	for (i = 0; i < num_pages; i++) {
		buf->addr = *page++;
		buf->size = page_size;
		buf++;
		num_buf++;

		if (num_buf == RXE_BUF_PER_MAP) {
			map++;
			buf = map[0]->buf;
			num_buf = 0;
		}
	}

	mem->iova	= iova;
	mem->va		= iova;
	mem->length	= num_pages << mem->page_shift;
	mem->state	= RXE_MEM_STATE_VALID;

	return 0;

err1:
	return err;
}