/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
9 struct siw_umem
*siw_umem_get(u64 start
, u64 len
, bool writable
);
10 void siw_umem_release(struct siw_umem
*umem
, bool dirty
);
11 struct siw_pbl
*siw_pbl_alloc(u32 num_buf
);
12 dma_addr_t
siw_pbl_get_buffer(struct siw_pbl
*pbl
, u64 off
, int *len
, int *idx
);
13 struct siw_mem
*siw_mem_id2obj(struct siw_device
*sdev
, int stag_index
);
14 int siw_mem_add(struct siw_device
*sdev
, struct siw_mem
*m
);
15 int siw_invalidate_stag(struct ib_pd
*pd
, u32 stag
);
16 int siw_check_mem(struct ib_pd
*pd
, struct siw_mem
*mem
, u64 addr
,
17 enum ib_access_flags perms
, int len
);
18 int siw_check_sge(struct ib_pd
*pd
, struct siw_sge
*sge
,
19 struct siw_mem
*mem
[], enum ib_access_flags perms
,
21 void siw_wqe_put_mem(struct siw_wqe
*wqe
, enum siw_opcode op
);
22 int siw_mr_add_mem(struct siw_mr
*mr
, struct ib_pd
*pd
, void *mem_obj
,
23 u64 start
, u64 len
, int rights
);
24 void siw_mr_drop_mem(struct siw_mr
*mr
);
25 void siw_free_mem(struct kref
*ref
);
27 static inline void siw_mem_put(struct siw_mem
*mem
)
29 kref_put(&mem
->ref
, siw_free_mem
);
32 static inline struct siw_mr
*siw_mem2mr(struct siw_mem
*m
)
34 return container_of(m
, struct siw_mr
, mem
);
37 static inline void siw_unref_mem_sgl(struct siw_mem
**mem
, unsigned int num_sge
)
/*
 * Page chunking of a pinned umem: pages are kept in fixed-size chunks
 * of 2^CHUNK_SHIFT page pointers.
 * (Stray line-number prefixes removed — extraction artifacts that
 * would break compilation.)
 */
#define CHUNK_SHIFT 9 /* sets number of pages per chunk */
#define PAGES_PER_CHUNK (_AC(1, UL) << CHUNK_SHIFT)
#define CHUNK_MASK (~(PAGES_PER_CHUNK - 1))
#define PAGE_CHUNK_SIZE (PAGES_PER_CHUNK * sizeof(struct page *))
/*
 * siw_get_upage()
 *
 * Get page pointer for address on given umem.
 *
 * @umem: two dimensional list of page pointers
 * @addr: user virtual address
 */
63 static inline struct page
*siw_get_upage(struct siw_umem
*umem
, u64 addr
)
65 unsigned int page_idx
= (addr
- umem
->fp_addr
) >> PAGE_SHIFT
,
66 chunk_idx
= page_idx
>> CHUNK_SHIFT
,
67 page_in_chunk
= page_idx
& ~CHUNK_MASK
;
69 if (likely(page_idx
< umem
->num_pages
))
70 return umem
->page_chunk
[chunk_idx
].plist
[page_in_chunk
];