// SPDX-License-Identifier: GPL-2.0
/*
 * nested.c - nested mode translation support
 *
 * Copyright (C) 2023 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 *         Jacob Pan <jacob.jun.pan@linux.intel.com>
 *         Yi Liu <yi.l.liu@intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>

#include "iommu.h"
#include "pasid.h"
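/*
 * Attach a device (its RID, i.e. IOMMU_NO_PASID) to a nested stage-1
 * domain: verify that the parent stage-2 domain is compatible with the
 * device's IOMMU, assign a cache tag and install the nested PASID entry.
 */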
static int intel_nested_attach_dev(struct iommu_domain *domain,
				   struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu = info->iommu;
	unsigned long flags;
	int ret = 0;

	if (info->domain)
		device_block_translation(dev);

	if (iommu->agaw < dmar_domain->s2_domain->agaw) {
		dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
		return -ENODEV;
	}

	/*
	 * Stage-1 domain cannot work alone, it is nested on a s2_domain.
	 * The s2_domain will be used in nested translation, hence needs
	 * to ensure the s2_domain is compatible with this IOMMU.
	 */
	ret = paging_domain_compatible(&dmar_domain->s2_domain->domain, dev);
	if (ret) {
		dev_err_ratelimited(dev, "s2 domain is not compatible\n");
		return ret;
	}

	ret = domain_attach_iommu(dmar_domain, iommu);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to attach domain to iommu\n");
		return ret;
	}

	ret = cache_tag_assign_domain(dmar_domain, dev, IOMMU_NO_PASID);
	if (ret)
		goto detach_iommu;

	ret = intel_pasid_setup_nested(iommu, dev,
				       IOMMU_NO_PASID, dmar_domain);
	if (ret)
		goto unassign_tag;

	info->domain = dmar_domain;
	spin_lock_irqsave(&dmar_domain->lock, flags);
	list_add(&info->link, &dmar_domain->devices);
	spin_unlock_irqrestore(&dmar_domain->lock, flags);

	return 0;

unassign_tag:
	cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
detach_iommu:
	domain_detach_iommu(dmar_domain, iommu);

	return ret;
}
static void intel_nested_domain_free(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dmar_domain *s2_domain = dmar_domain->s2_domain;

	spin_lock(&s2_domain->s1_lock);
	list_del(&dmar_domain->s2_link);
	spin_unlock(&s2_domain->s1_lock);
	kfree(dmar_domain->qi_batch);
	kfree(dmar_domain);
}
static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
					      struct iommu_user_data_array *array)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct iommu_hwpt_vtd_s1_invalidate inv_entry;
	u32 index, processed = 0;
	int ret = 0;

	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) {
		ret = -EINVAL;
		goto out;
	}

	for (index = 0; index < array->entry_num; index++) {
		ret = iommu_copy_struct_from_user_array(&inv_entry, array,
							IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
							index, __reserved);
		if (ret)
			break;

		if ((inv_entry.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
		    inv_entry.__reserved) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (!IS_ALIGNED(inv_entry.addr, VTD_PAGE_SIZE) ||
		    ((inv_entry.npages == U64_MAX) && inv_entry.addr)) {
			ret = -EINVAL;
			break;
		}

		cache_tag_flush_range(dmar_domain, inv_entry.addr,
				      inv_entry.addr + nrpages_to_size(inv_entry.npages) - 1,
				      inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
		processed++;
	}

out:
	array->entry_num = processed;
	return ret;
}
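/*
 * Install or replace the nested PASID table entry, depending on whether
 * an old domain is being replaced on this PASID.
 */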
static int domain_setup_nested(struct intel_iommu *iommu,
			       struct dmar_domain *domain,
			       struct device *dev, ioasid_t pasid,
			       struct iommu_domain *old)
{
	if (!old)
		return intel_pasid_setup_nested(iommu, dev, pasid, domain);
	return intel_pasid_replace_nested(iommu, dev, pasid,
					  iommu_domain_did(old, iommu),
					  domain);
}
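/*
 * Attach a nested domain to a specific PASID of the device. As in the
 * RID attach path, the parent stage-2 domain must be compatible with
 * the device's IOMMU.
 */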
static int intel_nested_set_dev_pasid(struct iommu_domain *domain,
				      struct device *dev, ioasid_t pasid,
				      struct iommu_domain *old)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu = info->iommu;
	struct dev_pasid_info *dev_pasid;
	int ret;

	if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
		return -EOPNOTSUPP;

	if (context_copied(iommu, info->bus, info->devfn))
		return -EBUSY;

	ret = paging_domain_compatible(&dmar_domain->s2_domain->domain, dev);
	if (ret)
		return ret;

	dev_pasid = domain_add_dev_pasid(domain, dev, pasid);
	if (IS_ERR(dev_pasid))
		return PTR_ERR(dev_pasid);

	ret = domain_setup_nested(iommu, dmar_domain, dev, pasid, old);
	if (ret)
		goto out_remove_dev_pasid;

	domain_remove_dev_pasid(old, dev, pasid);

	return 0;

out_remove_dev_pasid:
	domain_remove_dev_pasid(domain, dev, pasid);
	return ret;
}
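/* Domain ops shared by all nested (stage-1) domains. */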
static const struct iommu_domain_ops intel_nested_domain_ops = {
	.attach_dev		= intel_nested_attach_dev,
	.set_dev_pasid		= intel_nested_set_dev_pasid,
	.free			= intel_nested_domain_free,
	.cache_invalidate_user	= intel_nested_cache_invalidate_user,
};
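/*
 * Allocate a nested domain on top of a nested-parent (stage-2) paging
 * domain, using the guest stage-1 configuration provided by user space.
 */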
struct iommu_domain *
intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
				u32 flags,
				const struct iommu_user_data *user_data)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *s2_domain = to_dmar_domain(parent);
	struct intel_iommu *iommu = info->iommu;
	struct iommu_hwpt_vtd_s1 vtd;
	struct dmar_domain *domain;
	int ret;

	if (!nested_supported(iommu) || flags)
		return ERR_PTR(-EOPNOTSUPP);

	/* Must be nested domain */
	if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->ops != intel_iommu_ops.default_domain_ops ||
	    !s2_domain->nested_parent)
		return ERR_PTR(-EINVAL);

	ret = iommu_copy_struct_from_user(&vtd, user_data,
					  IOMMU_HWPT_DATA_VTD_S1, __reserved);
	if (ret)
		return ERR_PTR(ret);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL_ACCOUNT);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->use_first_level = true;
	domain->s2_domain = s2_domain;
	domain->s1_cfg = vtd;
	domain->domain.ops = &intel_nested_domain_ops;
	domain->domain.type = IOMMU_DOMAIN_NESTED;
	INIT_LIST_HEAD(&domain->devices);
	INIT_LIST_HEAD(&domain->dev_pasids);
	INIT_LIST_HEAD(&domain->cache_tags);
	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->cache_lock);
	xa_init(&domain->iommu_array);

	spin_lock(&s2_domain->s1_lock);
	list_add(&domain->s2_link, &s2_domain->s1_domains);
	spin_unlock(&s2_domain->s1_lock);

	return &domain->domain;
}