// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt
#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "pasid.h"
/*
 * Intel IOMMU system wide PASID name space:
 */
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;
	int ret = 0;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}
void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}
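
/*
 * Illustrative sketch (not part of this file): the virtual command
 * interface above is only useful when the platform exposes DMAR_VCMD_REG,
 * typically to a guest that must ask the host for PASID values. A caller
 * would pair the two helpers roughly like this; the surrounding allocator
 * plumbing is assumed and not shown:
 *
 *	u32 pasid;
 *
 *	if (!vcmd_alloc_pasid(iommu, &pasid)) {
 *		// ... use @pasid for a guest SVA bind ...
 *		vcmd_free_pasid(iommu, pasid);
 *	}
 */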
/*
 * Per device pasid table management:
 */
static inline void
device_attach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = pasid_table;
	list_add(&info->table, &pasid_table->dev);
}

static inline void
device_detach_pasid_table(struct device_domain_info *info,
			  struct pasid_table *pasid_table)
{
	info->pasid_table = NULL;
	list_del(&info->table);
}
struct pasid_table_opaque {
	struct pasid_table	**pasid_table;
	int			segment;
	int			bus;
	int			devfn;
};
static int search_pasid_table(struct device_domain_info *info, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	if (info->iommu->segment == data->segment &&
	    info->bus == data->bus &&
	    info->devfn == data->devfn &&
	    info->pasid_table) {
		*data->pasid_table = info->pasid_table;
		return 1;
	}

	return 0;
}
static int get_alias_pasid_table(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct pasid_table_opaque *data = opaque;

	data->segment = pci_domain_nr(pdev->bus);
	data->bus = PCI_BUS_NUM(alias);
	data->devfn = alias & 0xff;

	return for_each_device_domain(&search_pasid_table, data);
}
/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_table_opaque data;
	struct page *pages;
	u32 max_pasid = 0;
	int ret, order;
	int size;

	might_sleep();
	info = get_domain_info(dev);
	if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
		return -EINVAL;

	/* DMA alias device already has a pasid table, use it: */
	data.pasid_table = &pasid_table;
	ret = pci_for_each_dma_alias(to_pci_dev(dev),
				     &get_alias_pasid_table, &data);
	if (ret)
		goto attach_out;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;
	INIT_LIST_HEAD(&pasid_table->dev);

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);

attach_out:
	device_attach_pasid_table(info, pasid_table);

	return 0;
}
void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = get_domain_info(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	device_detach_pasid_table(info, pasid_table);

	if (!list_empty(&pasid_table->dev))
		return;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}
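
/*
 * Usage sketch (illustrative only, assuming the usual device attach and
 * release paths): a PASID table is allocated once per device, or shared
 * with its DMA alias, looked up while the device is attached, and freed
 * when the last device referencing it goes away:
 *
 *	ret = intel_pasid_alloc_table(dev);
 *	if (ret)
 *		return ret;
 *	...
 *	max = intel_pasid_get_dev_max_id(dev);
 *	...
 *	intel_pasid_free_table(dev);
 */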
struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = get_domain_info(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}
int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = get_domain_info(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}
struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = get_domain_info(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

	spin_lock(&pasid_lock);
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node);
		if (!entries) {
			spin_unlock(&pasid_lock);
			return NULL;
		}

		WRITE_ONCE(dir[dir_index].val,
			   (u64)virt_to_phys(entries) | PASID_PTE_PRESENT);
	}
	spin_unlock(&pasid_lock);

	return &entries[index];
}
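
/*
 * Worked example (illustrative, assuming PASID_PDE_SHIFT is 6 and
 * PASID_PTE_MASK is 0x3f as defined in pasid.h): PASID 0x1234 is split
 * into a directory slot and a table slot by the shifts above:
 *
 *	dir_index = 0x1234 >> 6   = 0x48  (directory entry 72)
 *	index     = 0x1234 & 0x3f = 0x34  (entry 52 in that table)
 *
 * so the second-level PASID table page is allocated lazily the first
 * time any PASID in that 64-entry window is requested.
 */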
/*
 * Interfaces for PASID table entry manipulation:
 */
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}
static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}
static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}
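
/*
 * Note on the helpers below (a sketch of the layout implied by the val[]
 * indexing): a scalable mode PASID entry is 512 bits stored as eight
 * 64-bit words, so architectural bit N lives in val[N / 64] at bit
 * position N % 64. For example, the DID field (bits 64~79) is written
 * through val[1] as:
 *
 *	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), did);
 */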
/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}
/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}
/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}
/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}
/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}
/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}
/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}
/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}
/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}
/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}
/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
/*
 * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_eafe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}
static void
iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}
static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = get_domain_info(dev);
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * When PASID 0 is used, it indicates RID2PASID(DMA request w/o PASID),
	 * devTLB flush w/o PASID should be used. For non-zero PASID under
	 * SVA usage, device could do DMA with multiple PASIDs. It is more
	 * efficient to flush devTLB specific to the PASID.
	 */
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did;

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return;

	did = pasid_get_domain_id(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	iotlb_invalidation_with_pasid(iommu, did, pasid);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		iotlb_invalidation_with_pasid(iommu, did, pasid);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}
/*
 * Set up the scalable mode pasid table entry for first only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
	}

	if (flags & PASID_FLAG_FL5LP) {
		if (cap_5lp_support(iommu->cap)) {
			pasid_set_flpm(pte, 1);
		} else {
			pr_err("No 5-level paging support for first-level\n");
			pasid_clear_entry(pte);
			return -EINVAL;
		}
	}

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Setup Present and PASID Granular Transfer Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
/*
 * Skip top levels of page tables for iommu which has less agaw
 * than default. Unnecessary for PT mode.
 */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}
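
/*
 * Usage sketch (illustrative, mirroring the callers below): start from the
 * domain's top-level page directory and let the helper strip the levels
 * this IOMMU cannot walk, then program whatever is left:
 *
 *	struct dma_pte *pgd = domain->pgd;
 *	int agaw = iommu_skip_agaw(domain, iommu, &pgd);
 *
 *	if (agaw < 0)
 *		return agaw;		// a skipped level was not present
 *	pasid_set_slptr(pte, virt_to_phys(pgd));
 *	pasid_set_address_width(pte, agaw);
 */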
/*
 * Set up the scalable mode pasid entry for second only translation type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain->iommu_did[iommu->seq_id];

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * Since it is a second level only translation setup, we should
	 * set SRE bit as well (addresses are expected to be GPAs).
	 */
	pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		dev_err(dev, "Failed to get pasid entry of PASID %d\n", pasid);
		return -ENODEV;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * We should set SRE bit as well since the addresses are expected
	 * to be GPAs.
	 */
	pasid_set_sre(pte);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
static int
intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
			    struct iommu_gpasid_bind_data_vtd *pasid_data)
{
	/*
	 * Not all guest PASID table entry fields are passed down during bind,
	 * here we only set up the ones that are dependent on guest settings.
	 * Execution related bits such as NXE, SMEP are not supported.
	 * Other fields, such as snoop related, are set based on host needs
	 * regardless of guest settings.
	 */
	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
		if (!ecap_srs(iommu->ecap)) {
			pr_err_ratelimited("No supervisor request support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_sre(pte);
	}

	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
		if (!ecap_eafs(iommu->ecap)) {
			pr_err_ratelimited("No extended access flag support on %s\n",
					   iommu->name);
			return -EINVAL;
		}
		pasid_set_eafe(pte);
	}

	/*
	 * Memory type is only applicable to devices inside processor coherent
	 * domain. Will add MTS support once coherent devices are available.
	 */
	if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
		pr_warn_ratelimited("No memory type support %s\n",
				    iommu->name);
		return -EINVAL;
	}

	return 0;
}
/**
 * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
 * This could be used for guest shared virtual address. In this case, the
 * first level page tables are used for GVA-GPA translation in the guest,
 * second level page tables are used for GPA-HPA translation.
 *
 * @iommu:      IOMMU which the device belongs to
 * @dev:        Device to be set up for translation
 * @gpgd:       FLPTPTR: First Level Page translation pointer in GPA
 * @pasid:      PASID to be programmed in the device PASID table
 * @pasid_data: Additional PASID info from the guest bind request
 * @domain:     Domain info for setting up second level page tables
 * @addr_width: Address width of the first level (guest)
 */
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     pgd_t *gpgd, u32 pasid,
			     struct iommu_gpasid_bind_data_vtd *pasid_data,
			     struct dmar_domain *domain, int addr_width)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	int ret = 0;
	u64 pgd_val;
	int agaw;
	u16 did;

	if (!ecap_nest(iommu->ecap)) {
		pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
				   iommu->name);
		return -EINVAL;
	}

	if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
		pr_err_ratelimited("Domain is not in nesting mode, %x\n",
				   domain->flags);
		return -EINVAL;
	}

	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte))
		return -EINVAL;

	/*
	 * Caller must ensure PASID entry is not in use, i.e. not bind the
	 * same PASID to the same device twice.
	 */
	if (pasid_pte_is_present(pte))
		return -EBUSY;

	pasid_clear_entry(pte);

	/* Sanity checking performed by caller to make sure address
	 * width matching in two dimensions:
	 * 1. CPU vs. IOMMU
	 * 2. Guest vs. Host.
	 */
	switch (addr_width) {
#ifdef CONFIG_X86
	case ADDR_WIDTH_5LEVEL:
		if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
		    !cap_5lp_support(iommu->cap)) {
			dev_err_ratelimited(dev,
					    "5-level paging not supported\n");
			return -EINVAL;
		}

		pasid_set_flpm(pte, 1);
		break;
#endif
	case ADDR_WIDTH_4LEVEL:
		pasid_set_flpm(pte, 0);
		break;
	default:
		dev_err_ratelimited(dev, "Invalid guest address width %d\n",
				    addr_width);
		return -EINVAL;
	}

	/* First level PGD is in GPA, must be supported by the second level */
	if ((uintptr_t)gpgd > domain->max_addr) {
		dev_err_ratelimited(dev,
				    "Guest PGD %lx not supported, max %llx\n",
				    (uintptr_t)gpgd, domain->max_addr);
		return -EINVAL;
	}
	pasid_set_flptr(pte, (uintptr_t)gpgd);

	ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
	if (ret)
		return ret;

	/* Setup the second level based on the given domain */
	pgd = domain->pgd;

	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err_ratelimited(dev, "Invalid domain page table\n");
		return -EINVAL;
	}
	pgd_val = virt_to_phys(pgd);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_fault_enable(pte);

	did = domain->iommu_did[iommu->seq_id];
	pasid_set_domain_id(pte, did);

	pasid_set_address_width(pte, agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
	pasid_set_present(pte);
	pasid_flush_caches(iommu, pte, pasid, did);

	return ret;
}
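
/*
 * Call-flow sketch (illustrative; the actual bind plumbing lives in the
 * SVA code, not in this file): a guest bind request hands in the guest's
 * first level PGD as a GPA plus the extra VT-d bind data, while the host
 * nesting-capable domain supplies the second level, roughly:
 *
 *	ret = intel_pasid_setup_nested(iommu, dev, guest_pgd, pasid,
 *				       &vtd_bind_data, dmar_domain,
 *				       guest_addr_width);
 *
 * guest_pgd, vtd_bind_data and guest_addr_width stand in for whatever the
 * bind request carried; they are placeholders, not symbols defined here.
 */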