/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 */
#ifndef __IO_PAGETABLE_H
#define __IO_PAGETABLE_H

#include <linux/interval_tree.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

#include "iommufd_private.h"

struct iommu_domain;

/*
 * Each io_pagetable is composed of intervals of areas which cover regions of
 * the iova that are backed by something. iova not covered by areas is not
 * populated in the page table. Each area is fully populated with pages.
 *
 * iovas are in byte units, but must be iopt->iova_alignment aligned.
 *
 * pages can be NULL, this means some other thread is still working on setting
 * up or tearing down the area. When observed under the write side of the
 * domain_rwsem a NULL pages must mean the area is still being setup and no
 * domains are filled.
 *
 * storage_domain points at an arbitrary iommu_domain that is holding the PFNs
 * for this area. It is locked by the pages->mutex. This simplifies the locking
 * as the pages code can rely on the storage_domain without having to get the
 * iopt->domains_rwsem.
 *
 * The io_pagetable::iova_rwsem protects node
 * The iopt_pages::mutex protects pages_node
 * iopt and iommu_prot are immutable
 * The pages::mutex protects num_accesses
 */
struct iopt_area {
	struct interval_tree_node node;
	struct interval_tree_node pages_node;
	struct io_pagetable *iopt;
	struct iopt_pages *pages;
	struct iommu_domain *storage_domain;
	/* How many bytes into the first page the area starts */
	unsigned int page_offset;
	/* IOMMU_READ, IOMMU_WRITE, etc */
	int iommu_prot;
	bool prevent_access : 1;
	unsigned int num_accesses;
};

struct iopt_allowed {
	struct interval_tree_node node;
};

struct iopt_reserved {
	struct interval_tree_node node;
};

int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages);
void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages);

int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain);
void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
			     struct iommu_domain *domain);
void iopt_area_unmap_domain(struct iopt_area *area,
			    struct iommu_domain *domain);

static inline unsigned long iopt_area_index(struct iopt_area *area)
{
	return area->pages_node.start;
}

static inline unsigned long iopt_area_last_index(struct iopt_area *area)
{
	return area->pages_node.last;
}

static inline unsigned long iopt_area_iova(struct iopt_area *area)
{
	return area->node.start;
}

static inline unsigned long iopt_area_last_iova(struct iopt_area *area)
{
	return area->node.last;
}

static inline size_t iopt_area_length(struct iopt_area *area)
{
	return (area->node.last - area->node.start) + 1;
}

/*
 * Number of bytes from the start of the iopt_pages that the iova begins.
 * iopt_area_start_byte() / PAGE_SIZE encodes the starting page index
 * iopt_area_start_byte() % PAGE_SIZE encodes the offset within that page
 */
static inline unsigned long iopt_area_start_byte(struct iopt_area *area,
						 unsigned long iova)
{
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
		WARN_ON(iova < iopt_area_iova(area) ||
			iova > iopt_area_last_iova(area));
	return (iova - iopt_area_iova(area)) + area->page_offset +
	       iopt_area_index(area) * PAGE_SIZE;
}

static inline unsigned long iopt_area_iova_to_index(struct iopt_area *area,
						    unsigned long iova)
{
	return iopt_area_start_byte(area, iova) / PAGE_SIZE;
}
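
/*
 * Example (illustrative sketch, not part of the original header): split an
 * iova inside an area into the iopt_pages page index and the byte offset
 * within that page, using the / and % relationships documented above. The
 * helper name is hypothetical.
 */
static inline void example_iova_to_page_offset(struct iopt_area *area,
					       unsigned long iova,
					       unsigned long *index,
					       unsigned long *offset)
{
	unsigned long start_byte = iopt_area_start_byte(area, iova);

	*index = start_byte / PAGE_SIZE;  /* same as iopt_area_iova_to_index() */
	*offset = start_byte % PAGE_SIZE; /* byte offset within that page */
}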

#define __make_iopt_iter(name)                                                 \
	static inline struct iopt_##name *iopt_##name##_iter_first(            \
		struct io_pagetable *iopt, unsigned long start,                \
		unsigned long last)                                            \
	{                                                                      \
		struct interval_tree_node *node;                               \
		lockdep_assert_held(&iopt->iova_rwsem);                        \
		node = interval_tree_iter_first(&iopt->name##_itree, start,    \
						last);                         \
		if (!node)                                                     \
			return NULL;                                           \
		return container_of(node, struct iopt_##name, node);           \
	}                                                                      \
	static inline struct iopt_##name *iopt_##name##_iter_next(             \
		struct iopt_##name *last_node, unsigned long start,            \
		unsigned long last)                                            \
	{                                                                      \
		struct interval_tree_node *node;                               \
		node = interval_tree_iter_next(&last_node->node, start, last); \
		if (!node)                                                     \
			return NULL;                                           \
		return container_of(node, struct iopt_##name, node);           \
	}

__make_iopt_iter(area)
__make_iopt_iter(allowed)
__make_iopt_iter(reserved)
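
/*
 * Example (illustrative sketch, not part of the original header): the macro
 * above generates iopt_area_iter_first/next(), iopt_allowed_iter_first/next()
 * and iopt_reserved_iter_first/next(). A hypothetical helper using them to
 * test whether any reserved range intersects [iova, last_iova]; the caller
 * must hold iopt->iova_rwsem, which the generated iterators assert.
 */
static inline bool example_intersects_reserved(struct io_pagetable *iopt,
					       unsigned long iova,
					       unsigned long last_iova)
{
	return iopt_reserved_iter_first(iopt, iova, last_iova) != NULL;
}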

struct iopt_area_contig_iter {
	unsigned long cur_iova;
	unsigned long last_iova;
	struct iopt_area *area;
};

struct iopt_area *iopt_area_contig_init(struct iopt_area_contig_iter *iter,
					struct io_pagetable *iopt,
					unsigned long iova,
					unsigned long last_iova);
struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter);

static inline bool iopt_area_contig_done(struct iopt_area_contig_iter *iter)
{
	return iter->area && iter->last_iova <= iopt_area_last_iova(iter->area);
}

/*
 * Iterate over a contiguous list of areas that span the iova,last_iova range.
 * The caller must check iopt_area_contig_done() after the loop to see if
 * contiguous areas existed.
 */
#define iopt_for_each_contig_area(iter, area, iopt, iova, last_iova)          \
	for (area = iopt_area_contig_init(iter, iopt, iova, last_iova); area; \
	     area = iopt_area_contig_next(iter))
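
/*
 * Example (illustrative sketch, not part of the original header): count how
 * many areas are needed to contiguously cover [iova, last_iova], returning 0
 * when the range is not fully backed by areas. The helper name is
 * hypothetical; the caller is assumed to hold iopt->iova_rwsem.
 */
static inline unsigned int example_areas_spanning(struct io_pagetable *iopt,
						  unsigned long iova,
						  unsigned long last_iova)
{
	struct iopt_area_contig_iter iter;
	struct iopt_area *area;
	unsigned int num_areas = 0;

	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
		num_areas++;
	/* A hole in the range leaves the iterator unfinished */
	if (!iopt_area_contig_done(&iter))
		return 0;
	return num_areas;
}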

enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
};

/*
 * This holds a pinned page list for multiple areas of IO address space. The
 * pages always originate from a linear chunk of userspace VA. Multiple
 * io_pagetable's, through their iopt_area's, can share a single iopt_pages
 * which avoids multi-pinning and double accounting of page consumption.
 *
 * indexes in this structure are measured in PAGE_SIZE units, are 0 based from
 * the start of the uptr and extend to npages. pages are pinned dynamically
 * according to the intervals in the access_itree and domains_itree, npinned
 * records the current number of pages pinned.
 */
struct iopt_pages {
	struct kref kref;
	struct mutex mutex;
	size_t npages;
	size_t npinned;
	struct task_struct *source_task;
	struct mm_struct *source_mm;
	struct user_struct *source_user;
	void __user *uptr;
	bool writable:1;

	struct xarray pinned_pfns;
	/* Of iopt_pages_access::node */
	struct rb_root_cached access_itree;
	/* Of iopt_area::pages_node */
	struct rb_root_cached domains_itree;
};

struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
				    bool writable);
void iopt_release_pages(struct kref *kref);
static inline void iopt_put_pages(struct iopt_pages *pages)
{
	kref_put(&pages->kref, iopt_release_pages);
}
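
/*
 * Example (illustrative sketch, not part of the original header): holding a
 * temporary reference on an iopt_pages. kref_get() pairs with
 * iopt_put_pages(), which invokes iopt_release_pages() once the last
 * reference is dropped. The helper name is hypothetical.
 */
static inline void example_with_pages_ref(struct iopt_pages *pages)
{
	kref_get(&pages->kref);
	/* ... operate on pages under the appropriate locks ... */
	iopt_put_pages(pages);
}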

void iopt_pages_fill_from_xarray(struct iopt_pages *pages, unsigned long start,
				 unsigned long last, struct page **out_pages);
int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start,
			   unsigned long last, struct page **out_pages);
void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start,
			      unsigned long last);

int iopt_area_add_access(struct iopt_area *area, unsigned long start,
			 unsigned long last, struct page **out_pages,
			 unsigned int flags);
void iopt_area_remove_access(struct iopt_area *area, unsigned long start,
			     unsigned long last);
int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
			 void *data, unsigned long length, unsigned int flags);
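
/*
 * Example (illustrative sketch, not part of the original header): copying a
 * caller-supplied buffer into the pinned pages at a byte offset computed from
 * an iova inside the area, tying iopt_area_start_byte() to
 * iopt_pages_rw_access(). The helper name is hypothetical; the flags argument
 * is passed through unchanged rather than assuming specific flag names, and
 * the caller must ensure area->pages is valid and stays alive.
 */
static inline int example_write_to_area(struct iopt_area *area,
					unsigned long iova, void *data,
					unsigned long length,
					unsigned int flags)
{
	return iopt_pages_rw_access(area->pages,
				    iopt_area_start_byte(area, iova), data,
				    length, flags);
}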

/*
 * Each interval represents an active iopt_access_pages(); it acts as an
 * interval lock that keeps the PFNs pinned and stored in the xarray.
 */
struct iopt_pages_access {
	struct interval_tree_node node;
};

#endif