/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>
struct ib_umem_odp {
	struct ib_umem umem;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page		**page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t		*dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex		umem_mutex;
	void			*private; /* for the HW driver to use. */

	int notifiers_seq;
	int notifiers_count;
	int npages;

	/* Tree tracking */
	struct interval_tree_node interval_tree;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

	struct completion	notifier_completion;
	int			dying;
	unsigned int		page_shift;
	struct work_struct	work;
};
static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}
/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->interval_tree.start;
}
/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->interval_tree.last + 1;
}
static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
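
/*
 * Illustrative sketch, not part of the kernel API: a driver can combine the
 * helpers above to turn a faulting user virtual address into an index into
 * page_list/dma_list. The function name is hypothetical.
 */
static inline size_t ib_umem_odp_example_page_index(struct ib_umem_odp *umem_odp,
						    unsigned long addr)
{
	/* Offset from the first page of the umem, scaled to page units. */
	return (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
}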
/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
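
/*
 * Illustrative sketch, not part of the kernel API: decoding one dma_list
 * entry, assuming it was populated by ib_umem_odp_map_dma_pages(). The
 * function names are hypothetical.
 */
static inline dma_addr_t ib_umem_odp_example_dma_addr(dma_addr_t entry)
{
	/* Clear the two permission bits to recover the device address. */
	return entry & ODP_DMA_ADDR_MASK;
}

static inline bool ib_umem_odp_example_can_write(dma_addr_t entry)
{
	/* The low bits carry the access rights granted when the page was mapped. */
	return entry & ODP_WRITE_ALLOWED_BIT;
}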
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_ucontext_per_mm {
	struct mmu_notifier mn;

	struct rb_root_cached umem_tree;
	/* Protects umem_tree */
	struct rw_semaphore umem_rwsem;
};
struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
				    size_t size, int access);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
					       int access);
struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem,
					    unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);
typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
			      void *cookie);
/*
 * Call the callback on each ib_umem in the range. Returns the logical or of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 end,
				  umem_call_back cb,
				  bool blockable, void *cookie);
/*
 * Find first region intersecting with address range.
 * Return NULL if not found
 */
static inline struct ib_umem_odp *
rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length)
{
	struct interval_tree_node *node;

	node = interval_tree_iter_first(root, addr, addr + length - 1);
	if (!node)
		return NULL;
	return container_of(node, struct ib_umem_odp, interval_tree);
}
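
/*
 * Illustrative sketch, not part of the kernel API: the lookup above walks
 * umem_tree without taking any lock itself, so callers hold umem_rwsem
 * around it. All names except rbt_ib_umem_lookup() are hypothetical.
 */
static inline struct ib_umem_odp *
example_locked_lookup(struct ib_ucontext_per_mm *per_mm, u64 addr, u64 length)
{
	struct ib_umem_odp *umem_odp;

	down_read(&per_mm->umem_rwsem);
	umem_odp = rbt_ib_umem_lookup(&per_mm->umem_tree, addr, length);
	up_read(&per_mm->umem_rwsem);
	/* Only stable while the caller can otherwise pin or re-check it. */
	return umem_odp;
}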
static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * mmu_notifier_retry. Should be called with
	 * the relevant locks taken (umem_odp->umem_mutex
	 * and the ucontext umem_mutex semaphore locked for read).
	 */

	if (unlikely(umem_odp->notifiers_count))
		return 1;
	if (umem_odp->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
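
/*
 * Illustrative sketch, not part of the kernel API: how a driver page-fault
 * path might use the retry check above, following the KVM-style pattern it
 * refers to. Everything except the ib_umem_odp_* calls is hypothetical; the
 * device-programming step is left as a comment.
 */
static inline int example_fault_and_map(struct ib_umem_odp *umem_odp,
					u64 offset, u64 bcnt, u64 access_mask)
{
	unsigned long current_seq;
	int npages, ret = -EAGAIN;

	/* Sample the notifier sequence before faulting the pages in. */
	current_seq = READ_ONCE(umem_odp->notifiers_seq);
	/* Order the sequence read before the page table reads below. */
	smp_rmb();

	npages = ib_umem_odp_map_dma_pages(umem_odp, offset, bcnt,
					   access_mask, current_seq);
	if (npages < 0)
		return npages;

	mutex_lock(&umem_odp->umem_mutex);
	if (!ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
		/* No invalidation raced us: dma_list entries may be handed
		 * to the device here. */
		ret = 0;
	}
	mutex_unlock(&umem_odp->umem_mutex);

	/* On -EAGAIN an invalidation won the race; the caller retries. */
	return ret;
}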
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata,
						  unsigned long addr,
						  size_t size, int access)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */