/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
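/*
 * Example of the resulting register layout (illustrative, derived from the
 * macros above and below rather than stated anywhere in this file): with a
 * 4KB register page (pgshift == 12), GR0 sits at the start of the mapped
 * region, GR1 follows at base + 0x1000, and context bank n lives at
 * ARM_SMMU_CB_BASE(smmu) + (n << 12), i.e. in the upper half of the region.
 */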
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff
/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
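/*
 * Illustrative use of the accessors above (a sketch, not code taken from the
 * original driver): walk every stream mapping entry owned by a master and
 * skip any slot that has not been allocated yet.
 *
 *	int i, idx;
 *
 *	for_each_cfg_sme(fwspec, i, idx) {
 *		if (idx == INVALID_SMENDX)
 *			continue;
 *		arm_smmu_write_sme(fwspec_smmu(fwspec), idx);
 *	}
 */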
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
};
enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
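/*
 * Worked example (illustrative): with cavium_id_base == 0, context bank 5
 * uses ASID 5 and VMID 6. On Cavium CN88xx each SMMU is assigned a distinct
 * cavium_id_base in arm_smmu_device_cfg_probe(), so that ASID and VMID
 * allocation stays unique across all SMMUs in the system.
 */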
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;
static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}
static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;
static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
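/*
 * Usage sketch (mirrors arm_smmu_init_domain_context() below, shown here
 * purely for illustration): context banks are handed out from context_map,
 * with stage-1 domains starting above the stage-2-only banks.
 *
 *	int cbndx = __arm_smmu_alloc_bitmap(smmu->context_map,
 *					    smmu->num_s2_context_banks,
 *					    smmu->num_context_banks);
 *	...
 *	__arm_smmu_free_bitmap(smmu->context_map, cbndx);
 */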
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}
static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
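/*
 * These callbacks are invoked by the io-pgtable code: tlb_add_flush() queues
 * invalidations as page table entries are torn down, and tlb_sync() (or
 * tlb_flush_all() for whole-context invalidation) makes them visible by
 * waiting on the global sync above. A rough sketch of the unmap flow,
 * assuming the generic io-pgtable behaviour:
 *
 *	ops->unmap()
 *	  -> arm_smmu_tlb_inv_range_nosync()	(per-IOVA invalidate writes)
 *	  -> arm_smmu_tlb_sync()		(wait for completion)
 */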
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            N
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}
static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}
static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
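/*
 * Numeric example (illustrative): with an existing entry id=0x400, mask=0xff
 * (matching stream IDs 0x400-0x4ff), a new request id=0x410, mask=0x0f is
 * entirely covered ((0x0f & 0xff) == 0x0f, and the IDs agree outside 0xff),
 * so the existing index is reused. A request id=0x4f0, mask=0x1f0 only
 * partially overlaps the existing range, so it is rejected rather than
 * risking a conflicting match.
 */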
static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}
static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}
static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_free;

	return 0;

out_free:
	if (fwspec)
		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}
*dev
)
1458 struct iommu_fwspec
*fwspec
= dev
->iommu_fwspec
;
1460 if (!fwspec
|| fwspec
->ops
!= &arm_smmu_ops
)
1463 arm_smmu_master_free_smes(fwspec
);
1464 iommu_group_remove_device(dev
);
1465 kfree(fwspec
->iommu_priv
);
1466 iommu_fwspec_free(dev
);
1469 static struct iommu_group
*arm_smmu_device_group(struct device
*dev
)
1471 struct iommu_fwspec
*fwspec
= dev
->iommu_fwspec
;
1472 struct arm_smmu_device
*smmu
= fwspec_smmu(fwspec
);
1473 struct iommu_group
*group
= NULL
;
1476 for_each_cfg_sme(fwspec
, i
, idx
) {
1477 if (group
&& smmu
->s2crs
[idx
].group
&&
1478 group
!= smmu
->s2crs
[idx
].group
)
1479 return ERR_PTR(-EINVAL
);
1481 group
= smmu
->s2crs
[idx
].group
;
1485 return iommu_group_ref_get(group
);
1487 if (dev_is_pci(dev
))
1488 group
= pci_device_group(dev
);
1490 group
= generic_device_group(dev
);
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}
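/*
 * Example of the generic binding this translates (hypothetical device tree
 * fragment, not taken from this file):
 *
 *	master@... {
 *		iommus = <&smmu 0x400 0xff>;
 *	};
 *
 * args[0] carries the Stream ID and the optional args[1] an SMR mask; they
 * are packed into a single fwspec ID as (mask << SMR_MASK_SHIFT) | sid.
 */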
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
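/*
 * The -1UL pgsize_bitmap placeholder above is narrowed to the page sizes the
 * hardware actually reports once arm_smmu_device_cfg_probe() has read the ID
 * registers, as the inline comment notes; see the probe path below.
 */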
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	if (smmu->model == ARM_MMU500) {
		/*
		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
		 * Allow unmatched Stream IDs to allocate bypass
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x",
			   size, smmu->smr_mask_mask);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }
ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	parse_driver_options(smmu);

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};
static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
#ifdef CONFIG_ACPI
static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
#endif
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");