// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/intel-iommu.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <asm/page.h>

#include "intel-pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
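/*
 * SVM shares the process page tables with the IOMMU's first-level
 * translation, so the IOMMU must be able to walk whatever the CPU can
 * create: refuse SVM on units that cannot handle 1GiB first-level pages
 * or 5-level paging when the CPU uses them.
 */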
int intel_svm_init(struct intel_iommu *iommu)
{
	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
	    !cap_fl1gp_support(iommu->cap))
		return -EINVAL;

	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    !cap_5lp_support(iommu->cap))
		return -EINVAL;

	return 0;
}
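/*
 * The page request queue (PRQ) is a ring of 32-byte descriptors through
 * which the IOMMU reports recoverable faults on SVM PASIDs. Its base
 * address and its size (a power-of-two number of 4KiB pages, here
 * PRQ_ORDER) are programmed together into DMAR_PQA_REG, and the head
 * and tail registers are zeroed so the ring starts out empty.
 */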
#define PRQ_ORDER 0

int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
	}
	iommu->pr_irq = irq;
	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		dmar_free_hwirq(irq);
		iommu->pr_irq = 0;
		goto err;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	return 0;
}
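/*
 * Tear-down mirror of intel_svm_enable_prq(): quiesce the queue
 * registers before releasing the interrupt and the queue pages.
 */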
int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
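/*
 * Invalidate the IOTLB (and, if the device has an ATS device-TLB, that
 * too) for a range of one PASID's address space on one device.
 * pages == -1 means "flush everything for this PASID".
 */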
static void intel_flush_svm_range_dev(struct intel_svm *svm, struct intel_svm_dev *sdev,
				      unsigned long address, unsigned long pages, int ih)
{
	struct qi_desc desc;

	/*
	 * Do PASID-granular IOTLB invalidation if the page-selective
	 * capability is not available.
	 */
	if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
			QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
		desc.qw1 = 0;
	} else {
		int mask = ilog2(__roundup_pow_of_two(pages));

		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
			QI_EIOTLB_DID(sdev->did) |
			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
			QI_EIOTLB_TYPE;
		desc.qw1 = QI_EIOTLB_ADDR(address) |
			QI_EIOTLB_IH(ih) |
			QI_EIOTLB_AM(mask);
	}
	desc.qw2 = 0;
	desc.qw3 = 0;
	qi_submit_sync(&desc, svm->iommu);

	if (sdev->dev_iotlb) {
		desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
			QI_DEV_EIOTLB_SID(sdev->sid) |
			QI_DEV_EIOTLB_QDEP(sdev->qdep) |
			QI_DEIOTLB_TYPE;
		if (pages == -1) {
			desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
				QI_DEV_EIOTLB_SIZE;
		} else if (pages > 1) {
			/* The least significant zero bit indicates the size. So,
			 * for example, an "address" value of 0x12345f000 will
			 * flush from 0x123440000 to 0x12347ffff (256KiB). */
			unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
			unsigned long mask = __rounddown_pow_of_two(address ^ last);

			desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
					(mask - 1)) | QI_DEV_EIOTLB_SIZE;
		} else {
			desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
		}
		desc.qw2 = 0;
		desc.qw3 = 0;
		qi_submit_sync(&desc, svm->iommu);
	}
}
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih)
{
	struct intel_svm_dev *sdev;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
	rcu_read_unlock();
}
/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
				   struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list) {
		intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
	}
	rcu_read_unlock();
}
static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.invalidate_range = intel_invalidate_range,
};
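/*
 * pasid_mutex serialises bind/unbind and protects global_svm_list, the
 * list of all live struct intel_svm instances; each svm's device list
 * is additionally RCU-protected so the fault and flush paths can walk
 * it without taking the mutex.
 */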
static DEFINE_MUTEX(pasid_mutex);
static LIST_HEAD(global_svm_list);
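/*
 * Bind the current process (or init_mm, for SVM_FLAG_SUPERVISOR_MODE)
 * to a PASID usable by @dev: reuse an existing PASID for the same mm
 * where the flags allow it, otherwise allocate a new one, register an
 * MMU notifier, and program the device's first-level PASID table entry.
 */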
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
	struct device_domain_info *info;
	struct intel_svm_dev *sdev;
	struct intel_svm *svm = NULL;
	struct mm_struct *mm = NULL;
	int pasid_max;
	int ret;

	if (!iommu || dmar_disabled)
		return -EINVAL;

	if (dev_is_pci(dev)) {
		pasid_max = pci_max_pasids(to_pci_dev(dev));
		if (pasid_max < 0)
			return -EINVAL;
	} else
		pasid_max = 1 << 20;

	if (flags & SVM_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap))
			return -EINVAL;
	} else if (pasid) {
		mm = get_task_mm(current);
		BUG_ON(!mm);
	}

	mutex_lock(&pasid_mutex);
	if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
		struct intel_svm *t;

		list_for_each_entry(t, &global_svm_list, list) {
			if (t->mm != mm || (t->flags & SVM_FLAG_PRIVATE_PASID))
				continue;

			svm = t;
			if (svm->pasid >= pasid_max) {
				dev_warn(dev,
					 "Limited PASID width. Cannot use existing PASID %d\n",
					 svm->pasid);
				ret = -ENOSPC;
				goto out;
			}

			list_for_each_entry(sdev, &svm->devs, list) {
				if (dev == sdev->dev) {
					if (sdev->ops != ops) {
						ret = -EBUSY;
						goto out;
					}
					sdev->users++;
					goto success;
				}
			}

			break;
		}
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto out;
	}
	sdev->dev = dev;

	ret = intel_iommu_enable_pasid(iommu, dev);
	if (ret || !pasid) {
		/* If they don't actually want to assign a PASID, this is
		 * just an enabling check/preparation. */
		kfree(sdev);
		goto out;
	}

	info = dev->archdata.iommu;
	if (!info || !info->pasid_supported) {
		kfree(sdev);
		goto out;
	}

	sdev->did = FLPT_DEFAULT_DID;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}

	/* Finish the setup now we know we're keeping it */
	sdev->users = 1;
	sdev->ops = ops;
	init_rcu_head(&sdev->rcu);

	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm) {
			ret = -ENOMEM;
			kfree(sdev);
			goto out;
		}
		svm->iommu = iommu;

		if (pasid_max > intel_pasid_max_id)
			pasid_max = intel_pasid_max_id;

		/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
		ret = intel_pasid_alloc_id(svm,
					   !!cap_caching_mode(iommu->cap),
					   pasid_max - 1, GFP_KERNEL);
		if (ret < 0) {
			kfree(svm);
			kfree(sdev);
			goto out;
		}
		svm->pasid = ret;
		svm->notifier.ops = &intel_mmuops;
		svm->mm = mm;
		svm->flags = flags;
		INIT_LIST_HEAD_RCU(&svm->devs);
		INIT_LIST_HEAD(&svm->list);
		ret = -ENOMEM;
		if (mm) {
			ret = mmu_notifier_register(&svm->notifier, mm);
			if (ret) {
				intel_pasid_free_id(svm->pasid);
				kfree(svm);
				kfree(sdev);
				goto out;
			}
		}

		spin_lock(&iommu->lock);
		ret = intel_pasid_setup_first_level(iommu, dev,
				mm ? mm->pgd : init_mm.pgd,
				svm->pasid, FLPT_DEFAULT_DID,
				mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
		spin_unlock(&iommu->lock);
		if (ret) {
			if (mm)
				mmu_notifier_unregister(&svm->notifier, mm);
			intel_pasid_free_id(svm->pasid);
			kfree(svm);
			kfree(sdev);
			goto out;
		}

		list_add_tail(&svm->list, &global_svm_list);
	} else {
		/*
		 * Binding a new device with existing PASID, need to setup
		 * the PASID entry.
		 */
		spin_lock(&iommu->lock);
		ret = intel_pasid_setup_first_level(iommu, dev,
				mm ? mm->pgd : init_mm.pgd,
				svm->pasid, FLPT_DEFAULT_DID,
				mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
		spin_unlock(&iommu->lock);
		if (ret) {
			kfree(sdev);
			goto out;
		}
	}
	list_add_rcu(&sdev->list, &svm->devs);

 success:
	*pasid = svm->pasid;
	ret = 0;
 out:
	mutex_unlock(&pasid_mutex);
	if (mm)
		mmput(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_bind_mm);
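/*
 * Drop @dev's reference to @pasid: tear down and flush the PASID table
 * entry for this device, and once the last device is gone free the
 * PASID itself and unregister the MMU notifier.
 */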
int intel_svm_unbind_mm(struct device *dev, int pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu)
		goto out;

	svm = intel_pasid_lookup_id(pasid);
	if (!svm)
		goto out;

	list_for_each_entry(sdev, &svm->devs, list) {
		if (dev == sdev->dev) {
			ret = 0;
			sdev->users--;
			if (!sdev->users) {
				list_del_rcu(&sdev->list);
				/* Flush the PASID cache and IOTLB for this device.
				 * Note that we do depend on the hardware *not* using
				 * the PASID any more. Just as we depend on other
				 * devices never using PASIDs that they have no right
				 * to use. We have a *shared* PASID table, because it's
				 * large and has to be physically contiguous. So it's
				 * hard to be as defensive as we might like. */
				intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
				intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
				kfree_rcu(sdev, rcu);

				if (list_empty(&svm->devs)) {
					intel_pasid_free_id(svm->pasid);
					if (svm->mm)
						mmu_notifier_unregister(&svm->notifier, svm->mm);

					list_del(&svm->list);

					/* We mandate that no page faults may be outstanding
					 * for the PASID when intel_svm_unbind_mm() is called.
					 * If that is not obeyed, subtle errors will happen.
					 * Let's make them less subtle... */
					memset(svm, 0x6b, sizeof(*svm));
					kfree(svm);
				}
			}
			break;
		}
	}
 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);
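/*
 * Returns 1 if @pasid is bound and its mm still has users (supervisor
 * PASIDs on init_mm always count as live), 0 once the mm is defunct,
 * or -EINVAL if the PASID is unknown.
 */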
int intel_svm_is_pasid_valid(struct device *dev, int pasid)
{
	struct intel_iommu *iommu;
	struct intel_svm *svm;
	int ret = -EINVAL;

	mutex_lock(&pasid_mutex);
	iommu = intel_svm_device_to_iommu(dev);
	if (!iommu)
		goto out;

	svm = intel_pasid_lookup_id(pasid);
	if (!svm)
		goto out;

	/* init_mm is used in this case */
	if (!svm->mm)
		ret = 1;
	else if (atomic_read(&svm->mm->mm_users) > 0)
		ret = 1;
	else
		ret = 0;

 out:
	mutex_unlock(&pasid_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);
/* Page request queue descriptor */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;
			u64 pasid_present:1;
			u64 priv_data_present:1;
			u64 rsvd:6;
			u64 rid:16;
			u64 pasid:20;
			u64 exe_req:1;
			u64 pm_req:1;
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;
			u64 wr_req:1;
			u64 lpig:1;
			u64 prg_index:9;
			u64 addr:52;
		};
		u64 qw_1;
	};
	u64 priv_data[2];
};
#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)
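/*
 * Each page request descriptor above is 32 bytes, and the ring is
 * (0x1000 << PRQ_ORDER) bytes, so masking a byte offset with
 * PRQ_RING_MASK wraps it to a descriptor boundary within the queue;
 * prq_event_thread() steps the head by sizeof(struct page_req_dsc)
 * under this mask.
 */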
static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;

	if (req->rd_req)
		requested |= VM_READ;

	if (req->wr_req)
		requested |= VM_WRITE;

	return (requested & ~vma->vm_flags) != 0;
}
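/*
 * A canonical virtual address has all bits above __VIRTUAL_MASK_SHIFT
 * equal to bit __VIRTUAL_MASK_SHIFT; shifting the value up so that bit
 * becomes the sign bit and arithmetic-shifting back down reproduces the
 * original only for canonical addresses.
 */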
static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long) addr;

	return (((saddr << shift) >> shift) == saddr);
}
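/*
 * Threaded handler for the page request interrupt: drain every pending
 * descriptor between head and tail, resolve each fault against the
 * bound mm with handle_mm_fault(), invoke any registered fault
 * callback, and send a page group response where the spec requires one.
 */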
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct intel_svm *svm = NULL;
	int head, tail, handled = 0;

	/* Clear PPR bit before reading head/tail registers, to
	 * ensure that we get a new interrupt if needed. */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct intel_svm_dev *sdev;
		struct vm_area_struct *vma;
		struct page_req_dsc *req;
		struct qi_desc resp;
		int result;
		vm_fault_t ret;
		u64 address;

		handled = 1;

		req = &iommu->prq[head / sizeof(*req)];

		result = QI_RESP_FAILURE;
		address = (u64)req->addr << VTD_PAGE_SHIFT;
		if (!req->pasid_present) {
			pr_err("%s: Page request without PASID: %08llx %08llx\n",
			       iommu->name, ((unsigned long long *)req)[0],
			       ((unsigned long long *)req)[1]);
			goto no_pasid;
		}

		if (!svm || svm->pasid != req->pasid) {
			rcu_read_lock();
			svm = intel_pasid_lookup_id(req->pasid);
			/* It *can't* go away, because the driver is not permitted
			 * to unbind the mm while any page faults are outstanding.
			 * So we only need RCU to protect the internal idr code. */
			rcu_read_unlock();

			if (!svm) {
				pr_err("%s: Page request for invalid PASID %d: %08llx %08llx\n",
				       iommu->name, req->pasid, ((unsigned long long *)req)[0],
				       ((unsigned long long *)req)[1]);
				goto no_pasid;
			}
		}

		result = QI_RESP_INVALID;
		/* Since we're using init_mm.pgd directly, we should never take
		 * any faults on kernel addresses. */
		if (!svm->mm)
			goto bad_req;

		/* If the mm is already defunct, don't handle faults. */
		if (!mmget_not_zero(svm->mm))
			goto bad_req;

		/* If address is not canonical, return invalid response */
		if (!is_canonical_address(address))
			goto bad_req;

		down_read(&svm->mm->mmap_sem);
		vma = find_extend_vma(svm->mm, address);
		if (!vma || address < vma->vm_start)
			goto invalid;

		if (access_error(vma, req))
			goto invalid;

		ret = handle_mm_fault(vma, address,
				      req->wr_req ? FAULT_FLAG_WRITE : 0);
		if (ret & VM_FAULT_ERROR)
			goto invalid;

		result = QI_RESP_SUCCESS;
	invalid:
		up_read(&svm->mm->mmap_sem);
		mmput(svm->mm);
	bad_req:
		/* Accounting for major/minor faults? */
		rcu_read_lock();
		list_for_each_entry_rcu(sdev, &svm->devs, list) {
			if (sdev->sid == req->rid)
				break;
		}
		/* Other devices can go away, but the drivers are not permitted
		 * to unbind while any page faults might be in flight. So it's
		 * OK to drop the 'lock' here now we have it. */
		rcu_read_unlock();

		if (WARN_ON(&sdev->list == &svm->devs))
			sdev = NULL;

		if (sdev && sdev->ops && sdev->ops->fault_cb) {
			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
				(req->exe_req << 1) | (req->pm_req);
			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
					    req->priv_data, rwxp, result);
		}
		/* We get here in the error case where the PASID lookup failed,
		   and these can be NULL. Do not use them below this point! */
		sdev = NULL;
		svm = NULL;
	no_pasid:
		if (req->lpig || req->priv_data_present) {
			/*
			 * Per VT-d spec. v3.0 ch7.7, system software must
			 * respond with page group response if private data
			 * is present (PDP) or last page in group (LPIG) bit
			 * is set. This is an additional VT-d feature beyond
			 * PCI ATS spec.
			 */
			resp.qw0 = QI_PGRP_PASID(req->pasid) |
				QI_PGRP_DID(req->rid) |
				QI_PGRP_PASID_P(req->pasid_present) |
				QI_PGRP_PDP(req->priv_data_present) |
				QI_PGRP_RESP_CODE(result) |
				QI_PGRP_RESP_TYPE;
			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
				QI_PGRP_LPIG(req->lpig);
			resp.qw2 = 0;
			resp.qw3 = 0;

			if (req->priv_data_present)
				memcpy(&resp.qw2, req->priv_data,
				       sizeof(req->priv_data));

			qi_submit_sync(&resp, iommu);
		}
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	return IRQ_RETVAL(handled);
}