// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm);

/* Allocate a PASID for the mm within range (inclusive) */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * initialization to iommu_mm fields. If it does, readers may see a
	 * valid iommu_mm with uninitialized values.
	 */
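	/*
	 * This release is assumed to pair with readers that test mm->iommu_mm
	 * without holding iommu_sva_lock (e.g. mm_valid_pasid() and
	 * mm_get_enqcmd_pasid() in <linux/iommu.h>).
	 */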
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_attach_handle *attach_handle;
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	if (!group)
		return ERR_PTR(-ENODEV);

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->pasid if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	/* A bond already exists, just take a reference. */
	attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA);
	if (!IS_ERR(attach_handle)) {
		handle = container_of(attach_handle, struct iommu_sva, handle);
		if (attach_handle->domain->mm != mm) {
			ret = -EBUSY;
			goto out_unlock;
		}
		refcount_inc(&handle->users);
		mutex_unlock(&iommu_sva_lock);
		return handle;
	}

	if (PTR_ERR(attach_handle) != -ENOENT) {
		ret = PTR_ERR(attach_handle);
		goto out_unlock;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
						&handle->handle);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
					&handle->handle);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	refcount_set(&handle->users, 1);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
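
/*
 * Illustrative driver-side usage sketch (assumes the device supports SVA;
 * error handling and device-specific programming are omitted):
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
 *		return -ENODEV;
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program the PASID into the device and issue DMA on process
 *	    virtual addresses, then tear down ...
 *	iommu_sva_unbind_device(handle);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 */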

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
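
/*
 * Note on domain lifetime: SVA domains are shared by all bonds to the same
 * mm. iommu_sva_bind_device() either reuses a domain already on
 * iommu_mm->sva_domains (and increments domain->users) or allocates a new
 * one, and iommu_sva_unbind_device() above frees the domain once its last
 * user is gone.
 */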

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}

/*
 * I/O page fault handler for SVA
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}

static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_fault *iopf;
	struct iopf_group *group;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault,
					     group->attach_handle->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}
static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}

static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (ops->domain_alloc_sva) {
		domain = ops->domain_alloc_sva(dev, mm);
		if (IS_ERR(domain))
			return domain;
	} else {
		domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
		if (!domain)
			return ERR_PTR(-ENOMEM);
	}

	domain->type = IOMMU_DOMAIN_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}