/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *     - Redistributions of source code must retain the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer.
 *
 *     - Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rxe.h"
#include "rxe_loc.h"
/*
 * lfsr (linear feedback shift register) with period 255
 */
static u8 rxe_get_key(void)
{
        static u32 key = 1;

        key = key << 1;

        key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
                ^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));

        key &= 0xff;

        return key;
}
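/*
 * The byte produced above is the "variant" part of every lkey/rkey:
 * rxe_mem_init() below packs it into the low 8 bits of the key, so a
 * key handed out for a reused pool index is unlikely to match a stale
 * key still held by a remote peer.
 */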
int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
{
        switch (mem->type) {
        case RXE_MEM_TYPE_DMA:
                return 0;

        case RXE_MEM_TYPE_MR:
        case RXE_MEM_TYPE_FMR:
                return ((iova < mem->iova) ||
                        ((iova + length) > (mem->iova + mem->length))) ?
                        -EFAULT : 0;

        default:
                return -EFAULT;
        }
}
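/*
 * DMA-type regions cover the whole address space (their "iova" is used
 * directly as a kernel virtual address), so any range passes; MR/FMR
 * ranges must fall entirely inside [mem->iova, mem->iova + mem->length).
 */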
#define IB_ACCESS_REMOTE        (IB_ACCESS_REMOTE_READ          \
                                | IB_ACCESS_REMOTE_WRITE        \
                                | IB_ACCESS_REMOTE_ATOMIC)
static void rxe_mem_init(int access, struct rxe_mem *mem)
{
        u32 lkey = mem->pelem.index << 8 | rxe_get_key();
        u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;

        if (mem->pelem.pool->type == RXE_TYPE_MR) {
                mem->ibmr.lkey = lkey;
                mem->ibmr.rkey = rkey;
        }

        mem->lkey = lkey;
        mem->rkey = rkey;
        mem->state = RXE_MEM_STATE_INVALID;
        mem->type = RXE_MEM_TYPE_NONE;
        mem->map_shift = ilog2(RXE_BUF_PER_MAP);
}
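/*
 * Key layout (a sketch; assumes the pool index fits in the upper 24
 * bits of the 32-bit key):
 *
 *   31              8 7            0
 *  +-----------------+--------------+
 *  |   pelem.index   | 8-bit "key"  |
 *  +-----------------+--------------+
 *
 * lookup_mem() below reverses this by taking key >> 8 as the pool index.
 */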
void rxe_mem_cleanup(void *arg)
{
        struct rxe_mem *mem = arg;
        int i;

        if (mem->umem)
                ib_umem_release(mem->umem);

        if (mem->map) {
                for (i = 0; i < mem->num_map; i++)
                        kfree(mem->map[i]);

                kfree(mem->map);
        }
}
static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
{
        int i;
        int num_map;
        struct rxe_map **map = mem->map;

        num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;

        mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
        if (!mem->map)
                goto err1;

        for (i = 0; i < num_map; i++) {
                mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
                if (!mem->map[i])
                        goto err2;
        }

        WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP));

        mem->map_shift = ilog2(RXE_BUF_PER_MAP);
        mem->map_mask = RXE_BUF_PER_MAP - 1;

        mem->num_buf = num_buf;
        mem->num_map = num_map;
        mem->max_buf = num_map * RXE_BUF_PER_MAP;

        return 0;

err2:
        for (i--; i >= 0; i--)
                kfree(mem->map[i]);

        kfree(mem->map);
err1:
        return -ENOMEM;
}
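/*
 * The buffer list is a two-level table: mem->map[] is an array of
 * pointers to struct rxe_map, each of which holds RXE_BUF_PER_MAP
 * struct rxe_phys_buf entries. Since RXE_BUF_PER_MAP is a power of
 * two, buffer n lives at map[n >> map_shift]->buf[n & map_mask].
 */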
int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
                     int access, struct rxe_mem *mem)
{
        rxe_mem_init(access, mem);

        mem->pd = pd;
        mem->access = access;
        mem->state = RXE_MEM_STATE_VALID;
        mem->type = RXE_MEM_TYPE_DMA;

        return 0;
}
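/*
 * A DMA-type mem has no map table; iova_to_vaddr() and rxe_mem_copy()
 * below treat the iova of such a region as a kernel virtual address
 * and use it directly.
 */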
int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
                      u64 length, u64 iova, int access, struct ib_udata *udata,
                      struct rxe_mem *mem)
{
        int entry;
        struct rxe_map **map;
        struct rxe_phys_buf *buf = NULL;
        struct ib_umem *umem;
        struct scatterlist *sg;
        int num_buf;
        void *vaddr;
        int err;

        umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
        if (IS_ERR(umem)) {
                pr_warn("err %d from rxe_umem_get\n",
                        (int)PTR_ERR(umem));
                err = -EINVAL;
                goto err1;
        }

        mem->umem = umem;
        num_buf = umem->nmap;

        rxe_mem_init(access, mem);

        err = rxe_mem_alloc(rxe, mem, num_buf);
        if (err) {
                pr_warn("err %d from rxe_mem_alloc\n", err);
                ib_umem_release(umem);
                goto err1;
        }

        WARN_ON(!is_power_of_2(umem->page_size));

        mem->page_shift = ilog2(umem->page_size);
        mem->page_mask = umem->page_size - 1;

        num_buf = 0;
        map = mem->map;
        if (length > 0) {
                buf = map[0]->buf;

                for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                        vaddr = page_address(sg_page(sg));
                        if (!vaddr) {
                                pr_warn("null vaddr\n");
                                err = -ENOMEM;
                                goto err1;
                        }

                        buf->addr = (uintptr_t)vaddr;
                        buf->size = umem->page_size;
                        num_buf++;
                        buf++;

                        if (num_buf >= RXE_BUF_PER_MAP) {
                                map++;
                                buf = map[0]->buf;
                                num_buf = 0;
                        }
                }
        }

        mem->pd = pd;
        mem->umem = umem;
        mem->access = access;
        mem->length = length;
        mem->iova = iova;
        mem->va = start;
        mem->offset = ib_umem_offset(umem);
        mem->state = RXE_MEM_STATE_VALID;
        mem->type = RXE_MEM_TYPE_MR;

        return 0;

err1:
        return err;
}
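/*
 * Unlike a hardware RNIC, rxe records the kernel virtual address
 * (page_address()) of each pinned user page in the phys_buf table, so
 * rxe_mem_copy() below can memcpy() payload data to and from user
 * memory directly.
 */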
int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
                      int max_pages, struct rxe_mem *mem)
{
        int err;

        rxe_mem_init(0, mem);

        /* In fastreg, we also set the rkey */
        mem->ibmr.rkey = mem->ibmr.lkey;

        err = rxe_mem_alloc(rxe, mem, max_pages);
        if (err)
                goto err1;

        mem->pd = pd;
        mem->max_buf = max_pages;
        mem->state = RXE_MEM_STATE_FREE;
        mem->type = RXE_MEM_TYPE_MR;

        return 0;

err1:
        return err;
}
static void lookup_iova(
        struct rxe_mem *mem,
        u64 iova,
        int *m_out,
        int *n_out,
        size_t *offset_out)
{
        size_t offset = iova - mem->iova + mem->offset;
        int map_index;
        int buf_index;
        u64 length;

        if (likely(mem->page_shift)) {
                *offset_out = offset & mem->page_mask;
                offset >>= mem->page_shift;
                *n_out = offset & mem->map_mask;
                *m_out = offset >> mem->map_shift;
        } else {
                map_index = 0;
                buf_index = 0;

                length = mem->map[map_index]->buf[buf_index].size;

                while (offset >= length) {
                        offset -= length;
                        buf_index++;

                        if (buf_index == RXE_BUF_PER_MAP) {
                                map_index++;
                                buf_index = 0;
                        }
                        length = mem->map[map_index]->buf[buf_index].size;
                }

                *m_out = map_index;
                *n_out = buf_index;
                *offset_out = offset;
        }
}
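/*
 * Fast-path example (values are illustrative): with 4 KiB pages
 * (page_shift = 12, page_mask = 0xfff) and, say, RXE_BUF_PER_MAP = 256
 * (map_shift = 8, map_mask = 0xff), a region offset of 0x1234567
 * splits into a byte offset of 0x567 within the page, buffer
 * n = 0x34 within its map, and map m = 0x12.
 */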
void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
{
        size_t offset;
        int m, n;
        void *addr;

        if (mem->state != RXE_MEM_STATE_VALID) {
                pr_warn("mem not in valid state\n");
                addr = NULL;
                goto out;
        }

        if (!mem->map) {
                addr = (void *)(uintptr_t)iova;
                goto out;
        }

        if (mem_check_range(mem, iova, length)) {
                pr_warn("range violation\n");
                addr = NULL;
                goto out;
        }

        lookup_iova(mem, iova, &m, &n, &offset);

        if (offset + length > mem->map[m]->buf[n].size) {
                pr_warn("crosses page boundary\n");
                addr = NULL;
                goto out;
        }

        addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;

out:
        return addr;
}
/* copy data from a range (vaddr, vaddr+length-1) to or from
 * a mem object starting at iova. Compute incremental value of
 * crc32 if crcp is not zero. caller must hold a reference to mem
 */
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
                 enum copy_direction dir, u32 *crcp)
{
        int err;
        int bytes;
        u8 *va;
        struct rxe_map **map;
        struct rxe_phys_buf *buf;
        int m;
        int i;
        size_t offset;
        u32 crc = crcp ? (*crcp) : 0;

        if (mem->type == RXE_MEM_TYPE_DMA) {
                u8 *src, *dest;

                src = (dir == to_mem_obj) ?
                        addr : ((void *)(uintptr_t)iova);

                dest = (dir == to_mem_obj) ?
                        ((void *)(uintptr_t)iova) : addr;

                if (crcp)
                        *crcp = crc32_le(*crcp, src, length);

                memcpy(dest, src, length);

                return 0;
        }

        WARN_ON(!mem->map);

        err = mem_check_range(mem, iova, length);
        if (err) {
                err = -EFAULT;
                goto err1;
        }

        lookup_iova(mem, iova, &m, &i, &offset);

        map = mem->map + m;
        buf = map[0]->buf + i;

        while (length > 0) {
                u8 *src, *dest;

                va = (u8 *)(uintptr_t)buf->addr + offset;
                src = (dir == to_mem_obj) ? addr : va;
                dest = (dir == to_mem_obj) ? va : addr;

                bytes = buf->size - offset;

                if (bytes > length)
                        bytes = length;

                if (crcp)
                        crc = crc32_le(crc, src, bytes);

                memcpy(dest, src, bytes);

                length -= bytes;
                addr += bytes;

                offset = 0;
                buf++;
                i++;

                if (i == RXE_BUF_PER_MAP) {
                        i = 0;
                        map++;
                        buf = map[0]->buf;
                }
        }

        if (crcp)
                *crcp = crc;

        return 0;

err1:
        return err;
}
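/*
 * For a DMA-type mem the copy collapses to a single memcpy() because
 * the "iova" is already a kernel virtual address; for MR/FMR types the
 * loop above walks the phys_buf table one buffer at a time, folding
 * each chunk into the running crc32 when crcp is supplied.
 */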
/* copy data in or out of a wqe, i.e. sg list
 * under the control of a dma descriptor
 */
int copy_data(
        struct rxe_dev *rxe,
        struct rxe_pd *pd,
        int access,
        struct rxe_dma_info *dma,
        void *addr,
        int length,
        enum copy_direction dir,
        u32 *crcp)
{
        int bytes;
        struct rxe_sge *sge = &dma->sge[dma->cur_sge];
        int offset = dma->sge_offset;
        int resid = dma->resid;
        struct rxe_mem *mem = NULL;
        u64 iova;
        int err;

        if (length == 0)
                return 0;

        if (length > resid) {
                err = -EINVAL;
                goto err2;
        }

        if (sge->length && (offset < sge->length)) {
                mem = lookup_mem(pd, access, sge->lkey, lookup_local);
                if (!mem) {
                        err = -EINVAL;
                        goto err1;
                }
        }

        while (length > 0) {
                bytes = length;

                if (offset >= sge->length) {
                        if (mem) {
                                rxe_drop_ref(mem);
                                mem = NULL;
                        }
                        sge++;
                        dma->cur_sge++;
                        offset = 0;

                        if (dma->cur_sge >= dma->num_sge) {
                                err = -ENOSPC;
                                goto err2;
                        }

                        if (sge->length) {
                                mem = lookup_mem(pd, access, sge->lkey,
                                                 lookup_local);
                                if (!mem) {
                                        err = -EINVAL;
                                        goto err1;
                                }
                        } else {
                                continue;
                        }
                }

                if (bytes > sge->length - offset)
                        bytes = sge->length - offset;

                if (bytes > 0) {
                        iova = sge->addr + offset;

                        err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
                        if (err)
                                goto err2;

                        offset += bytes;
                        resid -= bytes;
                        length -= bytes;
                        addr += bytes;
                }
        }

        dma->sge_offset = offset;
        dma->resid = resid;

        if (mem)
                rxe_drop_ref(mem);

        return 0;

err2:
        if (mem)
                rxe_drop_ref(mem);
err1:
        return err;
}
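/*
 * dma->cur_sge, dma->sge_offset and dma->resid persist in the dma
 * descriptor between calls, so successive calls to copy_data() resume
 * where the previous one stopped; this is how a message that spans
 * several packets is gathered from or scattered to the wqe's sg list
 * one payload at a time.
 */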
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
        struct rxe_sge *sge = &dma->sge[dma->cur_sge];
        int offset = dma->sge_offset;
        int resid = dma->resid;

        while (length) {
                unsigned int bytes;

                if (offset >= sge->length) {
                        sge++;
                        dma->cur_sge++;
                        offset = 0;
                        if (dma->cur_sge >= dma->num_sge)
                                return -ENOSPC;
                }

                bytes = length;

                if (bytes > sge->length - offset)
                        bytes = sge->length - offset;

                offset += bytes;
                resid -= bytes;
                length -= bytes;
        }

        dma->sge_offset = offset;
        dma->resid = resid;

        return 0;
}
/* (1) find the mem (mr or mw) corresponding to lkey/rkey
 *     depending on lookup_type
 * (2) verify that the (qp) pd matches the mem pd
 * (3) verify that the mem can support the requested access
 * (4) verify that mem state is valid
 */
struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
                           enum lookup_type type)
{
        struct rxe_mem *mem = NULL;
        struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
        int index = key >> 8;

        if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
                mem = rxe_pool_get_index(&rxe->mr_pool, index);
                if (!mem)
                        goto err1;
        } else {
                goto err1;
        }

        if ((type == lookup_local && mem->lkey != key) ||
            (type == lookup_remote && mem->rkey != key))
                goto err2;

        if (mem->pd != pd)
                goto err2;

        if (access && !(access & mem->access))
                goto err2;

        if (mem->state != RXE_MEM_STATE_VALID)
                goto err2;

        return mem;

err2:
        rxe_drop_ref(mem);
err1:
        return NULL;
}
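/*
 * On success the mem is returned with the reference obtained through
 * rxe_pool_get_index() still held; callers such as copy_data() drop it
 * with rxe_drop_ref() once they are done with the mem.
 */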
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
                      u64 *page, int num_pages, u64 iova)
{
        int i;
        int num_buf;
        int err;
        struct rxe_map **map;
        struct rxe_phys_buf *buf;
        int page_size;

        if (num_pages > mem->max_buf) {
                err = -EINVAL;
                goto err1;
        }

        num_buf = 0;
        page_size = 1 << mem->page_shift;
        map = mem->map;
        buf = map[0]->buf;

        for (i = 0; i < num_pages; i++) {
                buf->addr = *page++;
                buf->size = page_size;
                buf++;
                num_buf++;

                if (num_buf == RXE_BUF_PER_MAP) {
                        map++;
                        buf = map[0]->buf;
                        num_buf = 0;
                }
        }

        mem->iova = iova;
        mem->va = iova;
        mem->length = num_pages << mem->page_shift;
        mem->state = RXE_MEM_STATE_VALID;

        return 0;

err1:
        return err;
}
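/*
 * rxe_mem_map_pages() pairs with rxe_mem_init_fast() above: init_fast
 * allocates room for max_buf buffer entries and leaves the MR in the
 * FREE state, and this function later fills the phys_buf table from
 * the caller's page array and flips the MR to VALID.
 */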