// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL			GENMASK(28, 27)
#define IDR0_ST_LVL_2LVL		1
#define IDR0_STALL_MODEL		GENMASK(25, 24)
#define IDR0_STALL_MODEL_STALL		0
#define IDR0_STALL_MODEL_FORCE		2
#define IDR0_TTENDIAN			GENMASK(22, 21)
#define IDR0_TTENDIAN_MIXED		0
#define IDR0_TTENDIAN_LE		2
#define IDR0_TTENDIAN_BE		3
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF			GENMASK(3, 2)
#define IDR0_TTF_AARCH64		2
#define IDR0_TTF_AARCH32_64		3
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQS			GENMASK(25, 21)
#define IDR1_EVTQS			GENMASK(20, 16)
#define IDR1_PRIQS			GENMASK(15, 11)
#define IDR1_SSIDSIZE			GENMASK(10, 6)
#define IDR1_SIDSIZE			GENMASK(5, 0)

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX			GENMASK(31, 16)
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS			GENMASK(2, 0)
#define IDR5_OAS_32_BIT			0
#define IDR5_OAS_36_BIT			1
#define IDR5_OAS_40_BIT			2
#define IDR5_OAS_42_BIT			3
#define IDR5_OAS_44_BIT			4
#define IDR5_OAS_48_BIT			5
#define IDR5_OAS_52_BIT			6
#define IDR5_VAX			GENMASK(11, 10)
#define IDR5_VAX_52_BIT			1
#define ARM_SMMU_CR0			0x20
#define CR0_ATSCHK			(1 << 4)
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_TABLE_SH			GENMASK(11, 10)
#define CR1_TABLE_OC			GENMASK(9, 8)
#define CR1_TABLE_IC			GENMASK(7, 6)
#define CR1_QUEUE_SH			GENMASK(5, 4)
#define CR1_QUEUE_OC			GENMASK(3, 2)
#define CR1_QUEUE_IC			GENMASK(1, 0)
/* CR1 cacheability fields don't quite follow the usual TCR-style encoding */
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_UPDATE			(1 << 31)
#define GBPA_ABORT			(1 << 20)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74
#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_MASK		GENMASK_ULL(51, 6)

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_FMT		GENMASK(17, 16)
#define STRTAB_BASE_CFG_FMT_LINEAR	0
#define STRTAB_BASE_CFG_FMT_2LVL	1
#define STRTAB_BASE_CFG_SPLIT		GENMASK(10, 6)
#define STRTAB_BASE_CFG_LOG2SIZE	GENMASK(5, 0)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_MASK		GENMASK_ULL(51, 2)
#define MSI_CFG2_SH			GENMASK(5, 4)
#define MSI_CFG2_MEMATTR		GENMASK(3, 0)

/* Common memory attribute values */
#define ARM_SMMU_SH_NSH			0
#define ARM_SMMU_SH_OSH			2
#define ARM_SMMU_SH_ISH			3
#define ARM_SMMU_MEMATTR_DEVICE_nGnRE	0x1
#define ARM_SMMU_MEMATTR_OIWB		0xf
#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)
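/*
 * Illustrative note (not in the original source): the software prod/cons
 * values used with the macros above pack three things into one word.
 * Bits [max_n_shift-1:0] hold the ring index, bit [max_n_shift] is a wrap
 * flag that toggles on every pass over the ring, and bit 31 is the overflow
 * flag reported by the hardware. For example, with max_n_shift == 8 a value
 * of 0x102 means index 2 on the second lap around a 256-entry queue.
 */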
#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_MASK		GENMASK_ULL(51, 5)
#define Q_BASE_LOG2SIZE			GENMASK(4, 0)

/* Ensure DMA allocations are naturally aligned */
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT			(PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
#define Q_MAX_SZ_SHIFT			(PAGE_SHIFT + MAX_ORDER - 1)
#endif
/*
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8
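/*
 * Illustrative note (not in the original source): with STRTAB_SPLIT == 8,
 * a StreamID is decoded as SID[sid_bits-1:8] indexing the level-1 table and
 * SID[7:0] indexing a lazily allocated level-2 table of 256 STEs, so each
 * level-2 table covers one PCI bus worth of RequesterIDs.
 */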
#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN		GENMASK_ULL(4, 0)
#define STRTAB_L1_DESC_L2PTR_MASK	GENMASK_ULL(51, 6)

#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG		GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT		0
#define STRTAB_STE_0_CFG_BYPASS		4
#define STRTAB_STE_0_CFG_S1_TRANS	5
#define STRTAB_STE_0_CFG_S2_TRANS	6

#define STRTAB_STE_0_S1FMT		GENMASK_ULL(5, 4)
#define STRTAB_STE_0_S1FMT_LINEAR	0
#define STRTAB_STE_0_S1CTXPTR_MASK	GENMASK_ULL(51, 6)
#define STRTAB_STE_0_S1CDMAX		GENMASK_ULL(63, 59)

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1CIR		GENMASK_ULL(3, 2)
#define STRTAB_STE_1_S1COR		GENMASK_ULL(5, 4)
#define STRTAB_STE_1_S1CSH		GENMASK_ULL(7, 6)

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS		GENMASK_ULL(29, 28)
#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL

#define STRTAB_STE_1_STRW		GENMASK_ULL(31, 30)
#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL

#define STRTAB_STE_1_SHCFG		GENMASK_ULL(45, 44)
#define STRTAB_STE_1_SHCFG_INCOMING	1UL

#define STRTAB_STE_2_S2VMID		GENMASK_ULL(15, 0)
#define STRTAB_STE_2_VTCR		GENMASK_ULL(50, 32)
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_MASK		GENMASK_ULL(51, 4)
/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ		GENMASK_ULL(5, 0)
#define ARM64_TCR_T0SZ			GENMASK_ULL(5, 0)
#define CTXDESC_CD_0_TCR_TG0		GENMASK_ULL(7, 6)
#define ARM64_TCR_TG0			GENMASK_ULL(15, 14)
#define CTXDESC_CD_0_TCR_IRGN0		GENMASK_ULL(9, 8)
#define ARM64_TCR_IRGN0			GENMASK_ULL(9, 8)
#define CTXDESC_CD_0_TCR_ORGN0		GENMASK_ULL(11, 10)
#define ARM64_TCR_ORGN0			GENMASK_ULL(11, 10)
#define CTXDESC_CD_0_TCR_SH0		GENMASK_ULL(13, 12)
#define ARM64_TCR_SH0			GENMASK_ULL(13, 12)
#define CTXDESC_CD_0_TCR_EPD0		(1ULL << 14)
#define ARM64_TCR_EPD0			(1ULL << 7)
#define CTXDESC_CD_0_TCR_EPD1		(1ULL << 30)
#define ARM64_TCR_EPD1			(1ULL << 23)

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS		GENMASK_ULL(34, 32)
#define ARM64_TCR_IPS			GENMASK_ULL(34, 32)
#define CTXDESC_CD_0_TCR_TBI0		(1ULL << 38)
#define ARM64_TCR_TBI0			(1ULL << 37)

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_S			(1UL << 44)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET		(1UL << 47)
#define CTXDESC_CD_0_ASID		GENMASK_ULL(63, 48)

#define CTXDESC_CD_1_TTB0_MASK		GENMASK_ULL(51, 4)

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)	FIELD_PREP(CTXDESC_CD_0_TCR_##fld, \
					FIELD_GET(ARM64_TCR_##fld, tcr))
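/*
 * Illustrative example (not in the original source): for the TG0 field,
 * ARM_SMMU_TCR2CD(tcr, TG0) expands to
 * FIELD_PREP(CTXDESC_CD_0_TCR_TG0, FIELD_GET(ARM64_TCR_TG0, tcr)),
 * i.e. it extracts TCR_EL1 bits [15:14] and re-packs the same value into
 * CD word 0 bits [7:6], since the CD keeps the CPU field values at
 * different bit offsets.
 */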
#define CMDQ_ENT_SZ_SHIFT		4
#define CMDQ_ENT_DWORDS			((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
#define CMDQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)

#define CMDQ_CONS_ERR			GENMASK(30, 24)
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2
#define CMDQ_ERR_CERROR_ATC_INV_IDX	3

#define CMDQ_0_OP			GENMASK_ULL(7, 0)
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID		GENMASK_ULL(63, 32)
#define CMDQ_PREFETCH_1_SIZE		GENMASK_ULL(4, 0)
#define CMDQ_PREFETCH_1_ADDR_MASK	GENMASK_ULL(63, 12)

#define CMDQ_CFGI_0_SID			GENMASK_ULL(63, 32)
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE		GENMASK_ULL(4, 0)

#define CMDQ_TLBI_0_VMID		GENMASK_ULL(47, 32)
#define CMDQ_TLBI_0_ASID		GENMASK_ULL(63, 48)
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		GENMASK_ULL(63, 12)
#define CMDQ_TLBI_1_IPA_MASK		GENMASK_ULL(51, 12)

#define CMDQ_ATC_0_SSID			GENMASK_ULL(31, 12)
#define CMDQ_ATC_0_SID			GENMASK_ULL(63, 32)
#define CMDQ_ATC_0_GLOBAL		(1UL << 9)
#define CMDQ_ATC_1_SIZE			GENMASK_ULL(5, 0)
#define CMDQ_ATC_1_ADDR_MASK		GENMASK_ULL(63, 12)

#define CMDQ_PRI_0_SSID			GENMASK_ULL(31, 12)
#define CMDQ_PRI_0_SID			GENMASK_ULL(63, 32)
#define CMDQ_PRI_1_GRPID		GENMASK_ULL(8, 0)
#define CMDQ_PRI_1_RESP			GENMASK_ULL(13, 12)

#define CMDQ_SYNC_0_CS			GENMASK_ULL(13, 12)
#define CMDQ_SYNC_0_CS_NONE		0
#define CMDQ_SYNC_0_CS_IRQ		1
#define CMDQ_SYNC_0_CS_SEV		2
#define CMDQ_SYNC_0_MSH			GENMASK_ULL(23, 22)
#define CMDQ_SYNC_0_MSIATTR		GENMASK_ULL(27, 24)
#define CMDQ_SYNC_0_MSIDATA		GENMASK_ULL(63, 32)
#define CMDQ_SYNC_1_MSIADDR_MASK	GENMASK_ULL(51, 2)

#define EVTQ_ENT_SZ_SHIFT		5
#define EVTQ_ENT_DWORDS			((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
#define EVTQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)

#define EVTQ_0_ID			GENMASK_ULL(7, 0)

#define PRIQ_ENT_SZ_SHIFT		4
#define PRIQ_ENT_DWORDS			((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
#define PRIQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)

#define PRIQ_0_SID			GENMASK_ULL(31, 0)
#define PRIQ_0_SSID			GENMASK_ULL(51, 32)
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX			GENMASK_ULL(8, 0)
#define PRIQ_1_ADDR_MASK		GENMASK_ULL(63, 12)
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100
#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US	1000000 /* 1s! */
#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT	10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000
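/*
 * Descriptive note (not in the original source): MSI_IOVA_BASE/LENGTH define
 * the software-managed MSI window that arm_smmu_get_resv_regions() reports
 * to the IOMMU core as an IOMMU_RESV_SW_MSI region, so that MSI doorbell
 * writes from devices behind the SMMU can be mapped into this IOVA range.
 */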
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param_named here.
 */
static bool disable_bypass = 1;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};
418 struct arm_smmu_cmdq_ent
{
421 bool substream_valid
;
423 /* Command-specific fields */
425 #define CMDQ_OP_PREFETCH_CFG 0x1
432 #define CMDQ_OP_CFGI_STE 0x3
433 #define CMDQ_OP_CFGI_ALL 0x4
442 #define CMDQ_OP_TLBI_NH_ASID 0x11
443 #define CMDQ_OP_TLBI_NH_VA 0x12
444 #define CMDQ_OP_TLBI_EL2_ALL 0x20
445 #define CMDQ_OP_TLBI_S12_VMALL 0x28
446 #define CMDQ_OP_TLBI_S2_IPA 0x2a
447 #define CMDQ_OP_TLBI_NSNH_ALL 0x30
455 #define CMDQ_OP_ATC_INV 0x40
456 #define ATC_INV_SIZE_ALL 52
465 #define CMDQ_OP_PRI_RESP 0x41
473 #define CMDQ_OP_CMD_SYNC 0x46
481 struct arm_smmu_queue
{
482 int irq
; /* Wired interrupt */
493 u32 __iomem
*prod_reg
;
494 u32 __iomem
*cons_reg
;
497 struct arm_smmu_cmdq
{
498 struct arm_smmu_queue q
;
502 struct arm_smmu_evtq
{
503 struct arm_smmu_queue q
;
507 struct arm_smmu_priq
{
508 struct arm_smmu_queue q
;
511 /* High-level stream table and context descriptor structures */
512 struct arm_smmu_strtab_l1_desc
{
516 dma_addr_t l2ptr_dma
;
519 struct arm_smmu_s1_cfg
{
521 dma_addr_t cdptr_dma
;
523 struct arm_smmu_ctx_desc
{
531 struct arm_smmu_s2_cfg
{
537 struct arm_smmu_strtab_cfg
{
539 dma_addr_t strtab_dma
;
540 struct arm_smmu_strtab_l1_desc
*l1_desc
;
541 unsigned int num_l1_ents
;
547 /* An SMMUv3 instance */
548 struct arm_smmu_device
{
552 #define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
553 #define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
554 #define ARM_SMMU_FEAT_TT_LE (1 << 2)
555 #define ARM_SMMU_FEAT_TT_BE (1 << 3)
556 #define ARM_SMMU_FEAT_PRI (1 << 4)
557 #define ARM_SMMU_FEAT_ATS (1 << 5)
558 #define ARM_SMMU_FEAT_SEV (1 << 6)
559 #define ARM_SMMU_FEAT_MSI (1 << 7)
560 #define ARM_SMMU_FEAT_COHERENCY (1 << 8)
561 #define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
562 #define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
563 #define ARM_SMMU_FEAT_STALLS (1 << 11)
564 #define ARM_SMMU_FEAT_HYP (1 << 12)
565 #define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
566 #define ARM_SMMU_FEAT_VAX (1 << 14)
569 #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
570 #define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
573 struct arm_smmu_cmdq cmdq
;
574 struct arm_smmu_evtq evtq
;
575 struct arm_smmu_priq priq
;
582 unsigned long ias
; /* IPA */
583 unsigned long oas
; /* PA */
584 unsigned long pgsize_bitmap
;
586 #define ARM_SMMU_MAX_ASIDS (1 << 16)
587 unsigned int asid_bits
;
588 DECLARE_BITMAP(asid_map
, ARM_SMMU_MAX_ASIDS
);
590 #define ARM_SMMU_MAX_VMIDS (1 << 16)
591 unsigned int vmid_bits
;
592 DECLARE_BITMAP(vmid_map
, ARM_SMMU_MAX_VMIDS
);
594 unsigned int ssid_bits
;
595 unsigned int sid_bits
;
597 struct arm_smmu_strtab_cfg strtab_cfg
;
599 /* Hi16xx adds an extra 32 bits of goodness to its MSI payload */
605 /* IOMMU core code handle */
606 struct iommu_device iommu
;
609 /* SMMU private data for each master */
610 struct arm_smmu_master
{
611 struct arm_smmu_device
*smmu
;
613 struct arm_smmu_domain
*domain
;
614 struct list_head domain_head
;
616 unsigned int num_sids
;
/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
	ARM_SMMU_DOMAIN_BYPASS,
};
628 struct arm_smmu_domain
{
629 struct arm_smmu_device
*smmu
;
630 struct mutex init_mutex
; /* Protects smmu pointer */
632 struct io_pgtable_ops
*pgtbl_ops
;
635 enum arm_smmu_domain_stage stage
;
637 struct arm_smmu_s1_cfg s1_cfg
;
638 struct arm_smmu_s2_cfg s2_cfg
;
641 struct iommu_domain domain
;
643 struct list_head devices
;
644 spinlock_t devices_lock
;
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
	{ 0, NULL},
};
static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
						 struct arm_smmu_device *smmu)
{
	if ((offset > SZ_64K) &&
	    (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY))
		offset -= SZ_64K;

	return smmu->base + offset;
}

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}
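/*
 * Descriptive note (not in the original source): prod and cons hold the same
 * ring index in both cases; the wrap bit disambiguates them. Differing wrap
 * bits mean the producer has lapped the consumer (queue full), while
 * matching wrap bits mean the consumer has caught up (queue empty).
 */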
static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);

	/*
	 * Ensure that all CPU accesses (reads and writes) to the queue
	 * are complete before we update the cons pointer.
	 */
	mb();
	writel_relaxed(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}
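/*
 * Descriptive note (not in the original source): queue_inc_prod() uses
 * writel() rather than writel_relaxed() so that the queue entry written by
 * the CPU is observable by the SMMU before the updated PROD pointer, which
 * is what tells the hardware that a new command is available.
 */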
740 * Wait for the SMMU to consume items. If sync is true, wait until the queue
741 * is empty. Otherwise, wait until there is at least one free slot.
743 static int queue_poll_cons(struct arm_smmu_queue
*q
, bool sync
, bool wfe
)
746 unsigned int delay
= 1, spin_cnt
= 0;
748 /* Wait longer if it's a CMD_SYNC */
749 timeout
= ktime_add_us(ktime_get(), sync
?
750 ARM_SMMU_CMDQ_SYNC_TIMEOUT_US
:
751 ARM_SMMU_POLL_TIMEOUT_US
);
753 while (queue_sync_cons(q
), (sync
? !queue_empty(q
) : queue_full(q
))) {
754 if (ktime_compare(ktime_get(), timeout
) > 0)
759 } else if (++spin_cnt
< ARM_SMMU_CMDQ_SYNC_SPIN_COUNT
) {
static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}
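/*
 * Descriptive note (not in the original source): queue entries are kept
 * little-endian in memory, the format the SMMU consumes and produces;
 * queue_write()/queue_read() convert to and from CPU endianness so the rest
 * of the driver can treat commands and events as native u64 arrays.
 */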
808 /* High-level queue accessors */
809 static int arm_smmu_cmdq_build_cmd(u64
*cmd
, struct arm_smmu_cmdq_ent
*ent
)
811 memset(cmd
, 0, 1 << CMDQ_ENT_SZ_SHIFT
);
812 cmd
[0] |= FIELD_PREP(CMDQ_0_OP
, ent
->opcode
);
814 switch (ent
->opcode
) {
815 case CMDQ_OP_TLBI_EL2_ALL
:
816 case CMDQ_OP_TLBI_NSNH_ALL
:
818 case CMDQ_OP_PREFETCH_CFG
:
819 cmd
[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID
, ent
->prefetch
.sid
);
820 cmd
[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE
, ent
->prefetch
.size
);
821 cmd
[1] |= ent
->prefetch
.addr
& CMDQ_PREFETCH_1_ADDR_MASK
;
823 case CMDQ_OP_CFGI_STE
:
824 cmd
[0] |= FIELD_PREP(CMDQ_CFGI_0_SID
, ent
->cfgi
.sid
);
825 cmd
[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF
, ent
->cfgi
.leaf
);
827 case CMDQ_OP_CFGI_ALL
:
828 /* Cover the entire SID range */
829 cmd
[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE
, 31);
831 case CMDQ_OP_TLBI_NH_VA
:
832 cmd
[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID
, ent
->tlbi
.asid
);
833 cmd
[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF
, ent
->tlbi
.leaf
);
834 cmd
[1] |= ent
->tlbi
.addr
& CMDQ_TLBI_1_VA_MASK
;
836 case CMDQ_OP_TLBI_S2_IPA
:
837 cmd
[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID
, ent
->tlbi
.vmid
);
838 cmd
[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF
, ent
->tlbi
.leaf
);
839 cmd
[1] |= ent
->tlbi
.addr
& CMDQ_TLBI_1_IPA_MASK
;
841 case CMDQ_OP_TLBI_NH_ASID
:
842 cmd
[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID
, ent
->tlbi
.asid
);
844 case CMDQ_OP_TLBI_S12_VMALL
:
845 cmd
[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID
, ent
->tlbi
.vmid
);
847 case CMDQ_OP_ATC_INV
:
848 cmd
[0] |= FIELD_PREP(CMDQ_0_SSV
, ent
->substream_valid
);
849 cmd
[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL
, ent
->atc
.global
);
850 cmd
[0] |= FIELD_PREP(CMDQ_ATC_0_SSID
, ent
->atc
.ssid
);
851 cmd
[0] |= FIELD_PREP(CMDQ_ATC_0_SID
, ent
->atc
.sid
);
852 cmd
[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE
, ent
->atc
.size
);
853 cmd
[1] |= ent
->atc
.addr
& CMDQ_ATC_1_ADDR_MASK
;
855 case CMDQ_OP_PRI_RESP
:
856 cmd
[0] |= FIELD_PREP(CMDQ_0_SSV
, ent
->substream_valid
);
857 cmd
[0] |= FIELD_PREP(CMDQ_PRI_0_SSID
, ent
->pri
.ssid
);
858 cmd
[0] |= FIELD_PREP(CMDQ_PRI_0_SID
, ent
->pri
.sid
);
859 cmd
[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID
, ent
->pri
.grpid
);
860 switch (ent
->pri
.resp
) {
868 cmd
[1] |= FIELD_PREP(CMDQ_PRI_1_RESP
, ent
->pri
.resp
);
870 case CMDQ_OP_CMD_SYNC
:
871 if (ent
->sync
.msiaddr
)
872 cmd
[0] |= FIELD_PREP(CMDQ_SYNC_0_CS
, CMDQ_SYNC_0_CS_IRQ
);
874 cmd
[0] |= FIELD_PREP(CMDQ_SYNC_0_CS
, CMDQ_SYNC_0_CS_SEV
);
875 cmd
[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH
, ARM_SMMU_SH_ISH
);
876 cmd
[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR
, ARM_SMMU_MEMATTR_OIWB
);
878 * Commands are written little-endian, but we want the SMMU to
879 * receive MSIData, and thus write it back to memory, in CPU
880 * byte order, so big-endian needs an extra byteswap here.
882 cmd
[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA
,
883 cpu_to_le32(ent
->sync
.msidata
));
884 cmd
[1] |= ent
->sync
.msiaddr
& CMDQ_SYNC_1_MSIADDR_MASK
;
893 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device
*smmu
)
895 static const char *cerror_str
[] = {
896 [CMDQ_ERR_CERROR_NONE_IDX
] = "No error",
897 [CMDQ_ERR_CERROR_ILL_IDX
] = "Illegal command",
898 [CMDQ_ERR_CERROR_ABT_IDX
] = "Abort on command fetch",
899 [CMDQ_ERR_CERROR_ATC_INV_IDX
] = "ATC invalidate timeout",
903 u64 cmd
[CMDQ_ENT_DWORDS
];
904 struct arm_smmu_queue
*q
= &smmu
->cmdq
.q
;
905 u32 cons
= readl_relaxed(q
->cons_reg
);
906 u32 idx
= FIELD_GET(CMDQ_CONS_ERR
, cons
);
907 struct arm_smmu_cmdq_ent cmd_sync
= {
908 .opcode
= CMDQ_OP_CMD_SYNC
,
911 dev_err(smmu
->dev
, "CMDQ error (cons 0x%08x): %s\n", cons
,
912 idx
< ARRAY_SIZE(cerror_str
) ? cerror_str
[idx
] : "Unknown");
915 case CMDQ_ERR_CERROR_ABT_IDX
:
916 dev_err(smmu
->dev
, "retrying command fetch\n");
917 case CMDQ_ERR_CERROR_NONE_IDX
:
919 case CMDQ_ERR_CERROR_ATC_INV_IDX
:
921 * ATC Invalidation Completion timeout. CONS is still pointing
922 * at the CMD_SYNC. Attempt to complete other pending commands
923 * by repeating the CMD_SYNC, though we might well end up back
924 * here since the ATC invalidation may still be pending.
927 case CMDQ_ERR_CERROR_ILL_IDX
:
934 * We may have concurrent producers, so we need to be careful
935 * not to touch any of the shadow cmdq state.
937 queue_read(cmd
, Q_ENT(q
, cons
), q
->ent_dwords
);
938 dev_err(smmu
->dev
, "skipping command in error state:\n");
939 for (i
= 0; i
< ARRAY_SIZE(cmd
); ++i
)
940 dev_err(smmu
->dev
, "\t0x%016llx\n", (unsigned long long)cmd
[i
]);
942 /* Convert the erroneous command into a CMD_SYNC */
943 if (arm_smmu_cmdq_build_cmd(cmd
, &cmd_sync
)) {
944 dev_err(smmu
->dev
, "failed to convert to CMD_SYNC\n");
948 queue_write(Q_ENT(q
, cons
), cmd
, q
->ent_dwords
);
951 static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device
*smmu
, u64
*cmd
)
953 struct arm_smmu_queue
*q
= &smmu
->cmdq
.q
;
954 bool wfe
= !!(smmu
->features
& ARM_SMMU_FEAT_SEV
);
956 smmu
->prev_cmd_opcode
= FIELD_GET(CMDQ_0_OP
, cmd
[0]);
958 while (queue_insert_raw(q
, cmd
) == -ENOSPC
) {
959 if (queue_poll_cons(q
, false, wfe
))
960 dev_err_ratelimited(smmu
->dev
, "CMDQ timeout\n");
964 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device
*smmu
,
965 struct arm_smmu_cmdq_ent
*ent
)
967 u64 cmd
[CMDQ_ENT_DWORDS
];
970 if (arm_smmu_cmdq_build_cmd(cmd
, ent
)) {
971 dev_warn(smmu
->dev
, "ignoring unknown CMDQ opcode 0x%x\n",
976 spin_lock_irqsave(&smmu
->cmdq
.lock
, flags
);
977 arm_smmu_cmdq_insert_cmd(smmu
, cmd
);
978 spin_unlock_irqrestore(&smmu
->cmdq
.lock
, flags
);
982 * The difference between val and sync_idx is bounded by the maximum size of
983 * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic.
985 static int __arm_smmu_sync_poll_msi(struct arm_smmu_device
*smmu
, u32 sync_idx
)
990 timeout
= ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US
);
991 val
= smp_cond_load_acquire(&smmu
->sync_count
,
992 (int)(VAL
- sync_idx
) >= 0 ||
993 !ktime_before(ktime_get(), timeout
));
995 return (int)(val
- sync_idx
) < 0 ? -ETIMEDOUT
: 0;
998 static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device
*smmu
)
1000 u64 cmd
[CMDQ_ENT_DWORDS
];
1001 unsigned long flags
;
1002 struct arm_smmu_cmdq_ent ent
= {
1003 .opcode
= CMDQ_OP_CMD_SYNC
,
1005 .msiaddr
= virt_to_phys(&smmu
->sync_count
),
1009 spin_lock_irqsave(&smmu
->cmdq
.lock
, flags
);
1011 /* Piggy-back on the previous command if it's a SYNC */
1012 if (smmu
->prev_cmd_opcode
== CMDQ_OP_CMD_SYNC
) {
1013 ent
.sync
.msidata
= smmu
->sync_nr
;
1015 ent
.sync
.msidata
= ++smmu
->sync_nr
;
1016 arm_smmu_cmdq_build_cmd(cmd
, &ent
);
1017 arm_smmu_cmdq_insert_cmd(smmu
, cmd
);
1020 spin_unlock_irqrestore(&smmu
->cmdq
.lock
, flags
);
1022 return __arm_smmu_sync_poll_msi(smmu
, ent
.sync
.msidata
);
1025 static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device
*smmu
)
1027 u64 cmd
[CMDQ_ENT_DWORDS
];
1028 unsigned long flags
;
1029 bool wfe
= !!(smmu
->features
& ARM_SMMU_FEAT_SEV
);
1030 struct arm_smmu_cmdq_ent ent
= { .opcode
= CMDQ_OP_CMD_SYNC
};
1033 arm_smmu_cmdq_build_cmd(cmd
, &ent
);
1035 spin_lock_irqsave(&smmu
->cmdq
.lock
, flags
);
1036 arm_smmu_cmdq_insert_cmd(smmu
, cmd
);
1037 ret
= queue_poll_cons(&smmu
->cmdq
.q
, true, wfe
);
1038 spin_unlock_irqrestore(&smmu
->cmdq
.lock
, flags
);
1043 static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device
*smmu
)
1046 bool msi
= (smmu
->features
& ARM_SMMU_FEAT_MSI
) &&
1047 (smmu
->features
& ARM_SMMU_FEAT_COHERENCY
);
1049 ret
= msi
? __arm_smmu_cmdq_issue_sync_msi(smmu
)
1050 : __arm_smmu_cmdq_issue_sync(smmu
);
1052 dev_err_ratelimited(smmu
->dev
, "CMD_SYNC timeout\n");
1056 /* Context descriptor manipulation functions */
1057 static u64
arm_smmu_cpu_tcr_to_cd(u64 tcr
)
1061 /* Repack the TCR. Just care about TTBR0 for now */
1062 val
|= ARM_SMMU_TCR2CD(tcr
, T0SZ
);
1063 val
|= ARM_SMMU_TCR2CD(tcr
, TG0
);
1064 val
|= ARM_SMMU_TCR2CD(tcr
, IRGN0
);
1065 val
|= ARM_SMMU_TCR2CD(tcr
, ORGN0
);
1066 val
|= ARM_SMMU_TCR2CD(tcr
, SH0
);
1067 val
|= ARM_SMMU_TCR2CD(tcr
, EPD0
);
1068 val
|= ARM_SMMU_TCR2CD(tcr
, EPD1
);
1069 val
|= ARM_SMMU_TCR2CD(tcr
, IPS
);
1074 static void arm_smmu_write_ctx_desc(struct arm_smmu_device
*smmu
,
1075 struct arm_smmu_s1_cfg
*cfg
)
1080 * We don't need to issue any invalidation here, as we'll invalidate
1081 * the STE when installing the new entry anyway.
1083 val
= arm_smmu_cpu_tcr_to_cd(cfg
->cd
.tcr
) |
1087 CTXDESC_CD_0_R
| CTXDESC_CD_0_A
| CTXDESC_CD_0_ASET
|
1088 CTXDESC_CD_0_AA64
| FIELD_PREP(CTXDESC_CD_0_ASID
, cfg
->cd
.asid
) |
1091 /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
1092 if (smmu
->features
& ARM_SMMU_FEAT_STALL_FORCE
)
1093 val
|= CTXDESC_CD_0_S
;
1095 cfg
->cdptr
[0] = cpu_to_le64(val
);
1097 val
= cfg
->cd
.ttbr
& CTXDESC_CD_1_TTB0_MASK
;
1098 cfg
->cdptr
[1] = cpu_to_le64(val
);
1100 cfg
->cdptr
[3] = cpu_to_le64(cfg
->cd
.mair
);
1103 /* Stream table manipulation functions */
1105 arm_smmu_write_strtab_l1_desc(__le64
*dst
, struct arm_smmu_strtab_l1_desc
*desc
)
1109 val
|= FIELD_PREP(STRTAB_L1_DESC_SPAN
, desc
->span
);
1110 val
|= desc
->l2ptr_dma
& STRTAB_L1_DESC_L2PTR_MASK
;
1112 *dst
= cpu_to_le64(val
);
1115 static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device
*smmu
, u32 sid
)
1117 struct arm_smmu_cmdq_ent cmd
= {
1118 .opcode
= CMDQ_OP_CFGI_STE
,
1125 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1126 arm_smmu_cmdq_issue_sync(smmu
);
1129 static void arm_smmu_write_strtab_ent(struct arm_smmu_master
*master
, u32 sid
,
1133 * This is hideously complicated, but we only really care about
1134 * three cases at the moment:
1136 * 1. Invalid (all zero) -> bypass/fault (init)
1137 * 2. Bypass/fault -> translation/bypass (attach)
1138 * 3. Translation/bypass -> bypass/fault (detach)
1140 * Given that we can't update the STE atomically and the SMMU
1141 * doesn't read the thing in a defined order, that leaves us
1142 * with the following maintenance requirements:
1144 * 1. Update Config, return (init time STEs aren't live)
1145 * 2. Write everything apart from dword 0, sync, write dword 0, sync
1146 * 3. Update Config, sync
1148 u64 val
= le64_to_cpu(dst
[0]);
1149 bool ste_live
= false;
1150 struct arm_smmu_device
*smmu
= NULL
;
1151 struct arm_smmu_s1_cfg
*s1_cfg
= NULL
;
1152 struct arm_smmu_s2_cfg
*s2_cfg
= NULL
;
1153 struct arm_smmu_domain
*smmu_domain
= NULL
;
1154 struct arm_smmu_cmdq_ent prefetch_cmd
= {
1155 .opcode
= CMDQ_OP_PREFETCH_CFG
,
1162 smmu_domain
= master
->domain
;
1163 smmu
= master
->smmu
;
1167 switch (smmu_domain
->stage
) {
1168 case ARM_SMMU_DOMAIN_S1
:
1169 s1_cfg
= &smmu_domain
->s1_cfg
;
1171 case ARM_SMMU_DOMAIN_S2
:
1172 case ARM_SMMU_DOMAIN_NESTED
:
1173 s2_cfg
= &smmu_domain
->s2_cfg
;
1180 if (val
& STRTAB_STE_0_V
) {
1181 switch (FIELD_GET(STRTAB_STE_0_CFG
, val
)) {
1182 case STRTAB_STE_0_CFG_BYPASS
:
1184 case STRTAB_STE_0_CFG_S1_TRANS
:
1185 case STRTAB_STE_0_CFG_S2_TRANS
:
1188 case STRTAB_STE_0_CFG_ABORT
:
1192 BUG(); /* STE corruption */
1196 /* Nuke the existing STE_0 value, as we're going to rewrite it */
1197 val
= STRTAB_STE_0_V
;
1200 if (!smmu_domain
|| !(s1_cfg
|| s2_cfg
)) {
1201 if (!smmu_domain
&& disable_bypass
)
1202 val
|= FIELD_PREP(STRTAB_STE_0_CFG
, STRTAB_STE_0_CFG_ABORT
);
1204 val
|= FIELD_PREP(STRTAB_STE_0_CFG
, STRTAB_STE_0_CFG_BYPASS
);
1206 dst
[0] = cpu_to_le64(val
);
1207 dst
[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG
,
1208 STRTAB_STE_1_SHCFG_INCOMING
));
1209 dst
[2] = 0; /* Nuke the VMID */
1211 * The SMMU can perform negative caching, so we must sync
1212 * the STE regardless of whether the old value was live.
1215 arm_smmu_sync_ste_for_sid(smmu
, sid
);
1221 dst
[1] = cpu_to_le64(
1222 FIELD_PREP(STRTAB_STE_1_S1CIR
, STRTAB_STE_1_S1C_CACHE_WBRA
) |
1223 FIELD_PREP(STRTAB_STE_1_S1COR
, STRTAB_STE_1_S1C_CACHE_WBRA
) |
1224 FIELD_PREP(STRTAB_STE_1_S1CSH
, ARM_SMMU_SH_ISH
) |
1225 FIELD_PREP(STRTAB_STE_1_STRW
, STRTAB_STE_1_STRW_NSEL1
));
1227 if (smmu
->features
& ARM_SMMU_FEAT_STALLS
&&
1228 !(smmu
->features
& ARM_SMMU_FEAT_STALL_FORCE
))
1229 dst
[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD
);
1231 val
|= (s1_cfg
->cdptr_dma
& STRTAB_STE_0_S1CTXPTR_MASK
) |
1232 FIELD_PREP(STRTAB_STE_0_CFG
, STRTAB_STE_0_CFG_S1_TRANS
);
1237 dst
[2] = cpu_to_le64(
1238 FIELD_PREP(STRTAB_STE_2_S2VMID
, s2_cfg
->vmid
) |
1239 FIELD_PREP(STRTAB_STE_2_VTCR
, s2_cfg
->vtcr
) |
1241 STRTAB_STE_2_S2ENDI
|
1243 STRTAB_STE_2_S2PTW
| STRTAB_STE_2_S2AA64
|
1246 dst
[3] = cpu_to_le64(s2_cfg
->vttbr
& STRTAB_STE_3_S2TTB_MASK
);
1248 val
|= FIELD_PREP(STRTAB_STE_0_CFG
, STRTAB_STE_0_CFG_S2_TRANS
);
1251 if (master
->ats_enabled
)
1252 dst
[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS
,
1253 STRTAB_STE_1_EATS_TRANS
));
1255 arm_smmu_sync_ste_for_sid(smmu
, sid
);
1256 dst
[0] = cpu_to_le64(val
);
1257 arm_smmu_sync_ste_for_sid(smmu
, sid
);
1259 /* It's likely that we'll want to use the new STE soon */
1260 if (!(smmu
->options
& ARM_SMMU_OPT_SKIP_PREFETCH
))
1261 arm_smmu_cmdq_issue_cmd(smmu
, &prefetch_cmd
);
1264 static void arm_smmu_init_bypass_stes(u64
*strtab
, unsigned int nent
)
1268 for (i
= 0; i
< nent
; ++i
) {
1269 arm_smmu_write_strtab_ent(NULL
, -1, strtab
);
1270 strtab
+= STRTAB_STE_DWORDS
;
1274 static int arm_smmu_init_l2_strtab(struct arm_smmu_device
*smmu
, u32 sid
)
1278 struct arm_smmu_strtab_cfg
*cfg
= &smmu
->strtab_cfg
;
1279 struct arm_smmu_strtab_l1_desc
*desc
= &cfg
->l1_desc
[sid
>> STRTAB_SPLIT
];
1284 size
= 1 << (STRTAB_SPLIT
+ ilog2(STRTAB_STE_DWORDS
) + 3);
1285 strtab
= &cfg
->strtab
[(sid
>> STRTAB_SPLIT
) * STRTAB_L1_DESC_DWORDS
];
1287 desc
->span
= STRTAB_SPLIT
+ 1;
1288 desc
->l2ptr
= dmam_alloc_coherent(smmu
->dev
, size
, &desc
->l2ptr_dma
,
1289 GFP_KERNEL
| __GFP_ZERO
);
1292 "failed to allocate l2 stream table for SID %u\n",
1297 arm_smmu_init_bypass_stes(desc
->l2ptr
, 1 << STRTAB_SPLIT
);
1298 arm_smmu_write_strtab_l1_desc(strtab
, desc
);
1302 /* IRQ and event handlers */
1303 static irqreturn_t
arm_smmu_evtq_thread(int irq
, void *dev
)
1306 struct arm_smmu_device
*smmu
= dev
;
1307 struct arm_smmu_queue
*q
= &smmu
->evtq
.q
;
1308 u64 evt
[EVTQ_ENT_DWORDS
];
1311 while (!queue_remove_raw(q
, evt
)) {
1312 u8 id
= FIELD_GET(EVTQ_0_ID
, evt
[0]);
1314 dev_info(smmu
->dev
, "event 0x%02x received:\n", id
);
1315 for (i
= 0; i
< ARRAY_SIZE(evt
); ++i
)
1316 dev_info(smmu
->dev
, "\t0x%016llx\n",
1317 (unsigned long long)evt
[i
]);
1322 * Not much we can do on overflow, so scream and pretend we're
1325 if (queue_sync_prod(q
) == -EOVERFLOW
)
1326 dev_err(smmu
->dev
, "EVTQ overflow detected -- events lost\n");
1327 } while (!queue_empty(q
));
1329 /* Sync our overflow flag, as we believe we're up to speed */
1330 q
->cons
= Q_OVF(q
, q
->prod
) | Q_WRP(q
, q
->cons
) | Q_IDX(q
, q
->cons
);
1334 static void arm_smmu_handle_ppr(struct arm_smmu_device
*smmu
, u64
*evt
)
1340 sid
= FIELD_GET(PRIQ_0_SID
, evt
[0]);
1341 ssv
= FIELD_GET(PRIQ_0_SSID_V
, evt
[0]);
1342 ssid
= ssv
? FIELD_GET(PRIQ_0_SSID
, evt
[0]) : 0;
1343 last
= FIELD_GET(PRIQ_0_PRG_LAST
, evt
[0]);
1344 grpid
= FIELD_GET(PRIQ_1_PRG_IDX
, evt
[1]);
1346 dev_info(smmu
->dev
, "unexpected PRI request received:\n");
1348 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
1349 sid
, ssid
, grpid
, last
? "L" : "",
1350 evt
[0] & PRIQ_0_PERM_PRIV
? "" : "un",
1351 evt
[0] & PRIQ_0_PERM_READ
? "R" : "",
1352 evt
[0] & PRIQ_0_PERM_WRITE
? "W" : "",
1353 evt
[0] & PRIQ_0_PERM_EXEC
? "X" : "",
1354 evt
[1] & PRIQ_1_ADDR_MASK
);
1357 struct arm_smmu_cmdq_ent cmd
= {
1358 .opcode
= CMDQ_OP_PRI_RESP
,
1359 .substream_valid
= ssv
,
1364 .resp
= PRI_RESP_DENY
,
1368 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1372 static irqreturn_t
arm_smmu_priq_thread(int irq
, void *dev
)
1374 struct arm_smmu_device
*smmu
= dev
;
1375 struct arm_smmu_queue
*q
= &smmu
->priq
.q
;
1376 u64 evt
[PRIQ_ENT_DWORDS
];
1379 while (!queue_remove_raw(q
, evt
))
1380 arm_smmu_handle_ppr(smmu
, evt
);
1382 if (queue_sync_prod(q
) == -EOVERFLOW
)
1383 dev_err(smmu
->dev
, "PRIQ overflow detected -- requests lost\n");
1384 } while (!queue_empty(q
));
1386 /* Sync our overflow flag, as we believe we're up to speed */
1387 q
->cons
= Q_OVF(q
, q
->prod
) | Q_WRP(q
, q
->cons
) | Q_IDX(q
, q
->cons
);
1388 writel(q
->cons
, q
->cons_reg
);
1392 static int arm_smmu_device_disable(struct arm_smmu_device
*smmu
);
1394 static irqreturn_t
arm_smmu_gerror_handler(int irq
, void *dev
)
1396 u32 gerror
, gerrorn
, active
;
1397 struct arm_smmu_device
*smmu
= dev
;
1399 gerror
= readl_relaxed(smmu
->base
+ ARM_SMMU_GERROR
);
1400 gerrorn
= readl_relaxed(smmu
->base
+ ARM_SMMU_GERRORN
);
1402 active
= gerror
^ gerrorn
;
1403 if (!(active
& GERROR_ERR_MASK
))
1404 return IRQ_NONE
; /* No errors pending */
1407 "unexpected global error reported (0x%08x), this could be serious\n",
1410 if (active
& GERROR_SFM_ERR
) {
1411 dev_err(smmu
->dev
, "device has entered Service Failure Mode!\n");
1412 arm_smmu_device_disable(smmu
);
1415 if (active
& GERROR_MSI_GERROR_ABT_ERR
)
1416 dev_warn(smmu
->dev
, "GERROR MSI write aborted\n");
1418 if (active
& GERROR_MSI_PRIQ_ABT_ERR
)
1419 dev_warn(smmu
->dev
, "PRIQ MSI write aborted\n");
1421 if (active
& GERROR_MSI_EVTQ_ABT_ERR
)
1422 dev_warn(smmu
->dev
, "EVTQ MSI write aborted\n");
1424 if (active
& GERROR_MSI_CMDQ_ABT_ERR
)
1425 dev_warn(smmu
->dev
, "CMDQ MSI write aborted\n");
1427 if (active
& GERROR_PRIQ_ABT_ERR
)
1428 dev_err(smmu
->dev
, "PRIQ write aborted -- events may have been lost\n");
1430 if (active
& GERROR_EVTQ_ABT_ERR
)
1431 dev_err(smmu
->dev
, "EVTQ write aborted -- events may have been lost\n");
1433 if (active
& GERROR_CMDQ_ERR
)
1434 arm_smmu_cmdq_skip_err(smmu
);
1436 writel(gerror
, smmu
->base
+ ARM_SMMU_GERRORN
);
1440 static irqreturn_t
arm_smmu_combined_irq_thread(int irq
, void *dev
)
1442 struct arm_smmu_device
*smmu
= dev
;
1444 arm_smmu_evtq_thread(irq
, dev
);
1445 if (smmu
->features
& ARM_SMMU_FEAT_PRI
)
1446 arm_smmu_priq_thread(irq
, dev
);
1451 static irqreturn_t
arm_smmu_combined_irq_handler(int irq
, void *dev
)
1453 arm_smmu_gerror_handler(irq
, dev
);
1454 return IRQ_WAKE_THREAD
;
1458 arm_smmu_atc_inv_to_cmd(int ssid
, unsigned long iova
, size_t size
,
1459 struct arm_smmu_cmdq_ent
*cmd
)
1463 /* ATC invalidates are always on 4096-bytes pages */
1464 size_t inval_grain_shift
= 12;
1465 unsigned long page_start
, page_end
;
1467 *cmd
= (struct arm_smmu_cmdq_ent
) {
1468 .opcode
= CMDQ_OP_ATC_INV
,
1469 .substream_valid
= !!ssid
,
1474 cmd
->atc
.size
= ATC_INV_SIZE_ALL
;
1478 page_start
= iova
>> inval_grain_shift
;
1479 page_end
= (iova
+ size
- 1) >> inval_grain_shift
;
1482 * In an ATS Invalidate Request, the address must be aligned on the
1483 * range size, which must be a power of two number of page sizes. We
1484 * thus have to choose between grossly over-invalidating the region, or
1485 * splitting the invalidation into multiple commands. For simplicity
1486 * we'll go with the first solution, but should refine it in the future
1487 * if multiple commands are shown to be more efficient.
1489 * Find the smallest power of two that covers the range. The most
1490 * significant differing bit between the start and end addresses,
1491 * fls(start ^ end), indicates the required span. For example:
1493 * We want to invalidate pages [8; 11]. This is already the ideal range:
1494 * x = 0b1000 ^ 0b1011 = 0b11
1495 * span = 1 << fls(x) = 4
1497 * To invalidate pages [7; 10], we need to invalidate [0; 15]:
1498 * x = 0b0111 ^ 0b1010 = 0b1101
1499 * span = 1 << fls(x) = 16
1501 log2_span
= fls_long(page_start
^ page_end
);
1502 span_mask
= (1ULL << log2_span
) - 1;
1504 page_start
&= ~span_mask
;
1506 cmd
->atc
.addr
= page_start
<< inval_grain_shift
;
1507 cmd
->atc
.size
= log2_span
;
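/*
 * Descriptive note (not in the original source): atc.size holds the log2 of
 * the number of 4KB pages covered by the invalidation, matching the
 * CMD_ATC_INV SIZE field, and atc.addr is the span-aligned start address;
 * together they describe the single power-of-two region chosen above.
 */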
1510 static int arm_smmu_atc_inv_master(struct arm_smmu_master
*master
,
1511 struct arm_smmu_cmdq_ent
*cmd
)
1515 if (!master
->ats_enabled
)
1518 for (i
= 0; i
< master
->num_sids
; i
++) {
1519 cmd
->atc
.sid
= master
->sids
[i
];
1520 arm_smmu_cmdq_issue_cmd(master
->smmu
, cmd
);
1523 return arm_smmu_cmdq_issue_sync(master
->smmu
);
1526 static int arm_smmu_atc_inv_domain(struct arm_smmu_domain
*smmu_domain
,
1527 int ssid
, unsigned long iova
, size_t size
)
1530 unsigned long flags
;
1531 struct arm_smmu_cmdq_ent cmd
;
1532 struct arm_smmu_master
*master
;
1534 if (!(smmu_domain
->smmu
->features
& ARM_SMMU_FEAT_ATS
))
1537 arm_smmu_atc_inv_to_cmd(ssid
, iova
, size
, &cmd
);
1539 spin_lock_irqsave(&smmu_domain
->devices_lock
, flags
);
1540 list_for_each_entry(master
, &smmu_domain
->devices
, domain_head
)
1541 ret
|= arm_smmu_atc_inv_master(master
, &cmd
);
1542 spin_unlock_irqrestore(&smmu_domain
->devices_lock
, flags
);
1544 return ret
? -ETIMEDOUT
: 0;
1547 /* IO_PGTABLE API */
1548 static void arm_smmu_tlb_sync(void *cookie
)
1550 struct arm_smmu_domain
*smmu_domain
= cookie
;
1552 arm_smmu_cmdq_issue_sync(smmu_domain
->smmu
);
1555 static void arm_smmu_tlb_inv_context(void *cookie
)
1557 struct arm_smmu_domain
*smmu_domain
= cookie
;
1558 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1559 struct arm_smmu_cmdq_ent cmd
;
1561 if (smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
) {
1562 cmd
.opcode
= CMDQ_OP_TLBI_NH_ASID
;
1563 cmd
.tlbi
.asid
= smmu_domain
->s1_cfg
.cd
.asid
;
1566 cmd
.opcode
= CMDQ_OP_TLBI_S12_VMALL
;
1567 cmd
.tlbi
.vmid
= smmu_domain
->s2_cfg
.vmid
;
1571 * NOTE: when io-pgtable is in non-strict mode, we may get here with
1572 * PTEs previously cleared by unmaps on the current CPU not yet visible
1573 * to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
1574 * to guarantee those are observed before the TLBI. Do be careful, 007.
1576 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1577 arm_smmu_cmdq_issue_sync(smmu
);
1580 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova
, size_t size
,
1581 size_t granule
, bool leaf
, void *cookie
)
1583 struct arm_smmu_domain
*smmu_domain
= cookie
;
1584 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1585 struct arm_smmu_cmdq_ent cmd
= {
1592 if (smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
) {
1593 cmd
.opcode
= CMDQ_OP_TLBI_NH_VA
;
1594 cmd
.tlbi
.asid
= smmu_domain
->s1_cfg
.cd
.asid
;
1596 cmd
.opcode
= CMDQ_OP_TLBI_S2_IPA
;
1597 cmd
.tlbi
.vmid
= smmu_domain
->s2_cfg
.vmid
;
1601 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1602 cmd
.tlbi
.addr
+= granule
;
1603 } while (size
-= granule
);
1606 static const struct iommu_gather_ops arm_smmu_gather_ops
= {
1607 .tlb_flush_all
= arm_smmu_tlb_inv_context
,
1608 .tlb_add_flush
= arm_smmu_tlb_inv_range_nosync
,
1609 .tlb_sync
= arm_smmu_tlb_sync
,
1613 static bool arm_smmu_capable(enum iommu_cap cap
)
1616 case IOMMU_CAP_CACHE_COHERENCY
:
1618 case IOMMU_CAP_NOEXEC
:
1625 static struct iommu_domain
*arm_smmu_domain_alloc(unsigned type
)
1627 struct arm_smmu_domain
*smmu_domain
;
1629 if (type
!= IOMMU_DOMAIN_UNMANAGED
&&
1630 type
!= IOMMU_DOMAIN_DMA
&&
1631 type
!= IOMMU_DOMAIN_IDENTITY
)
1635 * Allocate the domain and initialise some of its data structures.
1636 * We can't really do anything meaningful until we've added a
1639 smmu_domain
= kzalloc(sizeof(*smmu_domain
), GFP_KERNEL
);
1643 if (type
== IOMMU_DOMAIN_DMA
&&
1644 iommu_get_dma_cookie(&smmu_domain
->domain
)) {
1649 mutex_init(&smmu_domain
->init_mutex
);
1650 INIT_LIST_HEAD(&smmu_domain
->devices
);
1651 spin_lock_init(&smmu_domain
->devices_lock
);
1653 return &smmu_domain
->domain
;
1656 static int arm_smmu_bitmap_alloc(unsigned long *map
, int span
)
1658 int idx
, size
= 1 << span
;
1661 idx
= find_first_zero_bit(map
, size
);
1664 } while (test_and_set_bit(idx
, map
));
1669 static void arm_smmu_bitmap_free(unsigned long *map
, int idx
)
1671 clear_bit(idx
, map
);
1674 static void arm_smmu_domain_free(struct iommu_domain
*domain
)
1676 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1677 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1679 iommu_put_dma_cookie(domain
);
1680 free_io_pgtable_ops(smmu_domain
->pgtbl_ops
);
1682 /* Free the CD and ASID, if we allocated them */
1683 if (smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
) {
1684 struct arm_smmu_s1_cfg
*cfg
= &smmu_domain
->s1_cfg
;
1687 dmam_free_coherent(smmu_domain
->smmu
->dev
,
1688 CTXDESC_CD_DWORDS
<< 3,
1692 arm_smmu_bitmap_free(smmu
->asid_map
, cfg
->cd
.asid
);
1695 struct arm_smmu_s2_cfg
*cfg
= &smmu_domain
->s2_cfg
;
1697 arm_smmu_bitmap_free(smmu
->vmid_map
, cfg
->vmid
);
1703 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain
*smmu_domain
,
1704 struct io_pgtable_cfg
*pgtbl_cfg
)
1708 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1709 struct arm_smmu_s1_cfg
*cfg
= &smmu_domain
->s1_cfg
;
1711 asid
= arm_smmu_bitmap_alloc(smmu
->asid_map
, smmu
->asid_bits
);
1715 cfg
->cdptr
= dmam_alloc_coherent(smmu
->dev
, CTXDESC_CD_DWORDS
<< 3,
1717 GFP_KERNEL
| __GFP_ZERO
);
1719 dev_warn(smmu
->dev
, "failed to allocate context descriptor\n");
1724 cfg
->cd
.asid
= (u16
)asid
;
1725 cfg
->cd
.ttbr
= pgtbl_cfg
->arm_lpae_s1_cfg
.ttbr
[0];
1726 cfg
->cd
.tcr
= pgtbl_cfg
->arm_lpae_s1_cfg
.tcr
;
1727 cfg
->cd
.mair
= pgtbl_cfg
->arm_lpae_s1_cfg
.mair
[0];
1731 arm_smmu_bitmap_free(smmu
->asid_map
, asid
);
1735 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain
*smmu_domain
,
1736 struct io_pgtable_cfg
*pgtbl_cfg
)
1739 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1740 struct arm_smmu_s2_cfg
*cfg
= &smmu_domain
->s2_cfg
;
1742 vmid
= arm_smmu_bitmap_alloc(smmu
->vmid_map
, smmu
->vmid_bits
);
1746 cfg
->vmid
= (u16
)vmid
;
1747 cfg
->vttbr
= pgtbl_cfg
->arm_lpae_s2_cfg
.vttbr
;
1748 cfg
->vtcr
= pgtbl_cfg
->arm_lpae_s2_cfg
.vtcr
;
1752 static int arm_smmu_domain_finalise(struct iommu_domain
*domain
)
1755 unsigned long ias
, oas
;
1756 enum io_pgtable_fmt fmt
;
1757 struct io_pgtable_cfg pgtbl_cfg
;
1758 struct io_pgtable_ops
*pgtbl_ops
;
1759 int (*finalise_stage_fn
)(struct arm_smmu_domain
*,
1760 struct io_pgtable_cfg
*);
1761 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1762 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1764 if (domain
->type
== IOMMU_DOMAIN_IDENTITY
) {
1765 smmu_domain
->stage
= ARM_SMMU_DOMAIN_BYPASS
;
1769 /* Restrict the stage to what we can actually support */
1770 if (!(smmu
->features
& ARM_SMMU_FEAT_TRANS_S1
))
1771 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S2
;
1772 if (!(smmu
->features
& ARM_SMMU_FEAT_TRANS_S2
))
1773 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S1
;
1775 switch (smmu_domain
->stage
) {
1776 case ARM_SMMU_DOMAIN_S1
:
1777 ias
= (smmu
->features
& ARM_SMMU_FEAT_VAX
) ? 52 : 48;
1778 ias
= min_t(unsigned long, ias
, VA_BITS
);
1780 fmt
= ARM_64_LPAE_S1
;
1781 finalise_stage_fn
= arm_smmu_domain_finalise_s1
;
1783 case ARM_SMMU_DOMAIN_NESTED
:
1784 case ARM_SMMU_DOMAIN_S2
:
1787 fmt
= ARM_64_LPAE_S2
;
1788 finalise_stage_fn
= arm_smmu_domain_finalise_s2
;
1794 pgtbl_cfg
= (struct io_pgtable_cfg
) {
1795 .pgsize_bitmap
= smmu
->pgsize_bitmap
,
1798 .coherent_walk
= smmu
->features
& ARM_SMMU_FEAT_COHERENCY
,
1799 .tlb
= &arm_smmu_gather_ops
,
1800 .iommu_dev
= smmu
->dev
,
1803 if (smmu_domain
->non_strict
)
1804 pgtbl_cfg
.quirks
|= IO_PGTABLE_QUIRK_NON_STRICT
;
1806 pgtbl_ops
= alloc_io_pgtable_ops(fmt
, &pgtbl_cfg
, smmu_domain
);
1810 domain
->pgsize_bitmap
= pgtbl_cfg
.pgsize_bitmap
;
1811 domain
->geometry
.aperture_end
= (1UL << pgtbl_cfg
.ias
) - 1;
1812 domain
->geometry
.force_aperture
= true;
1814 ret
= finalise_stage_fn(smmu_domain
, &pgtbl_cfg
);
1816 free_io_pgtable_ops(pgtbl_ops
);
1820 smmu_domain
->pgtbl_ops
= pgtbl_ops
;
1824 static __le64
*arm_smmu_get_step_for_sid(struct arm_smmu_device
*smmu
, u32 sid
)
1827 struct arm_smmu_strtab_cfg
*cfg
= &smmu
->strtab_cfg
;
1829 if (smmu
->features
& ARM_SMMU_FEAT_2_LVL_STRTAB
) {
1830 struct arm_smmu_strtab_l1_desc
*l1_desc
;
1833 /* Two-level walk */
1834 idx
= (sid
>> STRTAB_SPLIT
) * STRTAB_L1_DESC_DWORDS
;
1835 l1_desc
= &cfg
->l1_desc
[idx
];
1836 idx
= (sid
& ((1 << STRTAB_SPLIT
) - 1)) * STRTAB_STE_DWORDS
;
1837 step
= &l1_desc
->l2ptr
[idx
];
1839 /* Simple linear lookup */
1840 step
= &cfg
->strtab
[sid
* STRTAB_STE_DWORDS
];
1846 static void arm_smmu_install_ste_for_dev(struct arm_smmu_master
*master
)
1849 struct arm_smmu_device
*smmu
= master
->smmu
;
1851 for (i
= 0; i
< master
->num_sids
; ++i
) {
1852 u32 sid
= master
->sids
[i
];
1853 __le64
*step
= arm_smmu_get_step_for_sid(smmu
, sid
);
1855 /* Bridged PCI devices may end up with duplicated IDs */
1856 for (j
= 0; j
< i
; j
++)
1857 if (master
->sids
[j
] == sid
)
1862 arm_smmu_write_strtab_ent(master
, sid
, step
);
1866 static int arm_smmu_enable_ats(struct arm_smmu_master
*master
)
1870 struct pci_dev
*pdev
;
1871 struct arm_smmu_device
*smmu
= master
->smmu
;
1872 struct iommu_fwspec
*fwspec
= dev_iommu_fwspec_get(master
->dev
);
1874 if (!(smmu
->features
& ARM_SMMU_FEAT_ATS
) || !dev_is_pci(master
->dev
) ||
1875 !(fwspec
->flags
& IOMMU_FWSPEC_PCI_RC_ATS
) || pci_ats_disabled())
1878 pdev
= to_pci_dev(master
->dev
);
1879 if (pdev
->untrusted
)
1882 /* Smallest Translation Unit: log2 of the smallest supported granule */
1883 stu
= __ffs(smmu
->pgsize_bitmap
);
1885 ret
= pci_enable_ats(pdev
, stu
);
1889 master
->ats_enabled
= true;
1893 static void arm_smmu_disable_ats(struct arm_smmu_master
*master
)
1895 struct arm_smmu_cmdq_ent cmd
;
1897 if (!master
->ats_enabled
|| !dev_is_pci(master
->dev
))
1900 arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd
);
1901 arm_smmu_atc_inv_master(master
, &cmd
);
1902 pci_disable_ats(to_pci_dev(master
->dev
));
1903 master
->ats_enabled
= false;
1906 static void arm_smmu_detach_dev(struct arm_smmu_master
*master
)
1908 unsigned long flags
;
1909 struct arm_smmu_domain
*smmu_domain
= master
->domain
;
1914 spin_lock_irqsave(&smmu_domain
->devices_lock
, flags
);
1915 list_del(&master
->domain_head
);
1916 spin_unlock_irqrestore(&smmu_domain
->devices_lock
, flags
);
1918 master
->domain
= NULL
;
1919 arm_smmu_install_ste_for_dev(master
);
1921 arm_smmu_disable_ats(master
);
1924 static int arm_smmu_attach_dev(struct iommu_domain
*domain
, struct device
*dev
)
1927 unsigned long flags
;
1928 struct iommu_fwspec
*fwspec
= dev_iommu_fwspec_get(dev
);
1929 struct arm_smmu_device
*smmu
;
1930 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1931 struct arm_smmu_master
*master
;
1936 master
= fwspec
->iommu_priv
;
1937 smmu
= master
->smmu
;
1939 arm_smmu_detach_dev(master
);
1941 mutex_lock(&smmu_domain
->init_mutex
);
1943 if (!smmu_domain
->smmu
) {
1944 smmu_domain
->smmu
= smmu
;
1945 ret
= arm_smmu_domain_finalise(domain
);
1947 smmu_domain
->smmu
= NULL
;
1950 } else if (smmu_domain
->smmu
!= smmu
) {
1952 "cannot attach to SMMU %s (upstream of %s)\n",
1953 dev_name(smmu_domain
->smmu
->dev
),
1954 dev_name(smmu
->dev
));
1959 master
->domain
= smmu_domain
;
1961 spin_lock_irqsave(&smmu_domain
->devices_lock
, flags
);
1962 list_add(&master
->domain_head
, &smmu_domain
->devices
);
1963 spin_unlock_irqrestore(&smmu_domain
->devices_lock
, flags
);
1965 if (smmu_domain
->stage
!= ARM_SMMU_DOMAIN_BYPASS
)
1966 arm_smmu_enable_ats(master
);
1968 if (smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
)
1969 arm_smmu_write_ctx_desc(smmu
, &smmu_domain
->s1_cfg
);
1971 arm_smmu_install_ste_for_dev(master
);
1973 mutex_unlock(&smmu_domain
->init_mutex
);
1977 static int arm_smmu_map(struct iommu_domain
*domain
, unsigned long iova
,
1978 phys_addr_t paddr
, size_t size
, int prot
)
1980 struct io_pgtable_ops
*ops
= to_smmu_domain(domain
)->pgtbl_ops
;
1985 return ops
->map(ops
, iova
, paddr
, size
, prot
);
1989 arm_smmu_unmap(struct iommu_domain
*domain
, unsigned long iova
, size_t size
)
1992 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1993 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1998 ret
= ops
->unmap(ops
, iova
, size
);
1999 if (ret
&& arm_smmu_atc_inv_domain(smmu_domain
, 0, iova
, size
))
2005 static void arm_smmu_flush_iotlb_all(struct iommu_domain
*domain
)
2007 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
2009 if (smmu_domain
->smmu
)
2010 arm_smmu_tlb_inv_context(smmu_domain
);
2013 static void arm_smmu_iotlb_sync(struct iommu_domain
*domain
)
2015 struct arm_smmu_device
*smmu
= to_smmu_domain(domain
)->smmu
;
2018 arm_smmu_cmdq_issue_sync(smmu
);
2022 arm_smmu_iova_to_phys(struct iommu_domain
*domain
, dma_addr_t iova
)
2024 struct io_pgtable_ops
*ops
= to_smmu_domain(domain
)->pgtbl_ops
;
2026 if (domain
->type
== IOMMU_DOMAIN_IDENTITY
)
2032 return ops
->iova_to_phys(ops
, iova
);
2035 static struct platform_driver arm_smmu_driver
;
2037 static int arm_smmu_match_node(struct device
*dev
, void *data
)
2039 return dev
->fwnode
== data
;
2043 struct arm_smmu_device
*arm_smmu_get_by_fwnode(struct fwnode_handle
*fwnode
)
2045 struct device
*dev
= driver_find_device(&arm_smmu_driver
.driver
, NULL
,
2046 fwnode
, arm_smmu_match_node
);
2048 return dev
? dev_get_drvdata(dev
) : NULL
;
2051 static bool arm_smmu_sid_in_range(struct arm_smmu_device
*smmu
, u32 sid
)
2053 unsigned long limit
= smmu
->strtab_cfg
.num_l1_ents
;
2055 if (smmu
->features
& ARM_SMMU_FEAT_2_LVL_STRTAB
)
2056 limit
*= 1UL << STRTAB_SPLIT
;
2061 static struct iommu_ops arm_smmu_ops
;
2063 static int arm_smmu_add_device(struct device
*dev
)
2066 struct arm_smmu_device
*smmu
;
2067 struct arm_smmu_master
*master
;
2068 struct iommu_fwspec
*fwspec
= dev_iommu_fwspec_get(dev
);
2069 struct iommu_group
*group
;
2071 if (!fwspec
|| fwspec
->ops
!= &arm_smmu_ops
)
2074 * We _can_ actually withstand dodgy bus code re-calling add_device()
2075 * without an intervening remove_device()/of_xlate() sequence, but
2076 * we're not going to do so quietly...
2078 if (WARN_ON_ONCE(fwspec
->iommu_priv
)) {
2079 master
= fwspec
->iommu_priv
;
2080 smmu
= master
->smmu
;
2082 smmu
= arm_smmu_get_by_fwnode(fwspec
->iommu_fwnode
);
2085 master
= kzalloc(sizeof(*master
), GFP_KERNEL
);
2090 master
->smmu
= smmu
;
2091 master
->sids
= fwspec
->ids
;
2092 master
->num_sids
= fwspec
->num_ids
;
2093 fwspec
->iommu_priv
= master
;
2096 /* Check the SIDs are in range of the SMMU and our stream table */
2097 for (i
= 0; i
< master
->num_sids
; i
++) {
2098 u32 sid
= master
->sids
[i
];
2100 if (!arm_smmu_sid_in_range(smmu
, sid
))
2103 /* Ensure l2 strtab is initialised */
2104 if (smmu
->features
& ARM_SMMU_FEAT_2_LVL_STRTAB
) {
2105 ret
= arm_smmu_init_l2_strtab(smmu
, sid
);
2111 group
= iommu_group_get_for_dev(dev
);
2112 if (!IS_ERR(group
)) {
2113 iommu_group_put(group
);
2114 iommu_device_link(&smmu
->iommu
, dev
);
2117 return PTR_ERR_OR_ZERO(group
);
2120 static void arm_smmu_remove_device(struct device
*dev
)
2122 struct iommu_fwspec
*fwspec
= dev_iommu_fwspec_get(dev
);
2123 struct arm_smmu_master
*master
;
2124 struct arm_smmu_device
*smmu
;
2126 if (!fwspec
|| fwspec
->ops
!= &arm_smmu_ops
)
2129 master
= fwspec
->iommu_priv
;
2130 smmu
= master
->smmu
;
2131 arm_smmu_detach_dev(master
);
2132 iommu_group_remove_device(dev
);
2133 iommu_device_unlink(&smmu
->iommu
, dev
);
2135 iommu_fwspec_free(dev
);
2138 static struct iommu_group
*arm_smmu_device_group(struct device
*dev
)
2140 struct iommu_group
*group
;
2143 * We don't support devices sharing stream IDs other than PCI RID
2144 * aliases, since the necessary ID-to-device lookup becomes rather
2145 * impractical given a potential sparse 32-bit stream ID space.
2147 if (dev_is_pci(dev
))
2148 group
= pci_device_group(dev
);
2150 group
= generic_device_group(dev
);
2155 static int arm_smmu_domain_get_attr(struct iommu_domain
*domain
,
2156 enum iommu_attr attr
, void *data
)
2158 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
2160 switch (domain
->type
) {
2161 case IOMMU_DOMAIN_UNMANAGED
:
2163 case DOMAIN_ATTR_NESTING
:
2164 *(int *)data
= (smmu_domain
->stage
== ARM_SMMU_DOMAIN_NESTED
);
2170 case IOMMU_DOMAIN_DMA
:
2172 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
:
2173 *(int *)data
= smmu_domain
->non_strict
;
2184 static int arm_smmu_domain_set_attr(struct iommu_domain
*domain
,
2185 enum iommu_attr attr
, void *data
)
2188 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
2190 mutex_lock(&smmu_domain
->init_mutex
);
2192 switch (domain
->type
) {
2193 case IOMMU_DOMAIN_UNMANAGED
:
2195 case DOMAIN_ATTR_NESTING
:
2196 if (smmu_domain
->smmu
) {
2202 smmu_domain
->stage
= ARM_SMMU_DOMAIN_NESTED
;
2204 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S1
;
2210 case IOMMU_DOMAIN_DMA
:
2212 case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
:
2213 smmu_domain
->non_strict
= *(int *)data
;
2224 mutex_unlock(&smmu_domain
->init_mutex
);
2228 static int arm_smmu_of_xlate(struct device
*dev
, struct of_phandle_args
*args
)
2230 return iommu_fwspec_add_ids(dev
, args
->args
, 1);
2233 static void arm_smmu_get_resv_regions(struct device
*dev
,
2234 struct list_head
*head
)
2236 struct iommu_resv_region
*region
;
2237 int prot
= IOMMU_WRITE
| IOMMU_NOEXEC
| IOMMU_MMIO
;
2239 region
= iommu_alloc_resv_region(MSI_IOVA_BASE
, MSI_IOVA_LENGTH
,
2240 prot
, IOMMU_RESV_SW_MSI
);
	list_add_tail(&region->list, head);
, head
);
2246 iommu_dma_get_resv_regions(dev
, head
);
static void arm_smmu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
	.iotlb_sync		= arm_smmu_iotlb_sync,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.get_resv_regions	= arm_smmu_get_resv_regions,
	.put_resv_regions	= arm_smmu_put_resv_regions,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
/* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
				   struct arm_smmu_queue *q,
				   unsigned long prod_off,
				   unsigned long cons_off,
				   size_t dwords, const char *name)
{
	size_t qsz;

	do {
		qsz = ((1 << q->max_n_shift) * dwords) << 3;
		q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
					      GFP_KERNEL);
		if (q->base || qsz < PAGE_SIZE)
			break;

		q->max_n_shift--;
	} while (1);

	if (!q->base) {
		dev_err(smmu->dev,
			"failed to allocate queue (0x%zx bytes) for %s\n",
			qsz, name);
		return -ENOMEM;
	}

	if (!WARN_ON(q->base_dma & (qsz - 1))) {
		dev_info(smmu->dev, "allocated %u entries for %s\n",
			 1 << q->max_n_shift, name);
	}

	q->prod_reg	= arm_smmu_page1_fixup(prod_off, smmu);
	q->cons_reg	= arm_smmu_page1_fixup(cons_off, smmu);
	q->ent_dwords	= dwords;

	q->q_base  = Q_BASE_RWA;
	q->q_base |= q->base_dma & Q_BASE_ADDR_MASK;
	q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->max_n_shift);

	q->prod = q->cons = 0;
	return 0;
}
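
/*
 * A queue with (1 << max_n_shift) entries of 'dwords' 64-bit words each
 * occupies ((1 << max_n_shift) * dwords) << 3 bytes, as computed above,
 * and its base address and size are advertised to the hardware via the
 * Q_BASE value assembled into q->q_base.
 */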
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
{
	int ret;

	/* cmdq */
	spin_lock_init(&smmu->cmdq.lock);
	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
				      ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
				      "cmdq");
	if (ret)
		return ret;

	/* evtq */
	ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
				      ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
				      "evtq");
	if (ret)
		return ret;

	/* priq */
	if (!(smmu->features & ARM_SMMU_FEAT_PRI))
		return 0;

	return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
				       ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
				       "priq");
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
	void *strtab = smmu->strtab_cfg.strtab;

	cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
	if (!cfg->l1_desc) {
		dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
		return -ENOMEM;
	}

	for (i = 0; i < cfg->num_l1_ents; ++i) {
		arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
		strtab += STRTAB_L1_DESC_DWORDS << 3;
	}

	return 0;
}
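
/*
 * Two-level stream table: the L1 table holds descriptors, each pointing
 * at an L2 table of (1 << STRTAB_SPLIT) STEs that is only populated on
 * demand from arm_smmu_add_device(). The L1 is sized below from the
 * device's SID width, and a warning is printed if it cannot cover the
 * full stream ID space.
 */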
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size, l1size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	/* Calculate the L1 size, capped to the SIDSIZE. */
	size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
	size = min(size, smmu->sid_bits - STRTAB_SPLIT);
	cfg->num_l1_ents = 1 << size;

	size += STRTAB_SPLIT;
	if (size < smmu->sid_bits)
		dev_warn(smmu->dev,
			 "2-level strtab only covers %u/%u bits of SID\n",
			 size, smmu->sid_bits);

	l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate l1 stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;

	/* Configure strtab_base_cfg for 2 levels */
	reg  = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_2LVL);
	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, size);
	reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
	cfg->strtab_base_cfg = reg;

	return arm_smmu_init_l1_strtab(smmu);
}
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
{
	void *strtab;
	u64 reg;
	u32 size;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
	strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!strtab) {
		dev_err(smmu->dev,
			"failed to allocate linear stream table (%u bytes)\n",
			size);
		return -ENOMEM;
	}
	cfg->strtab = strtab;
	cfg->num_l1_ents = 1 << smmu->sid_bits;

	/* Configure strtab_base_cfg for a linear table covering all SIDs */
	reg  = FIELD_PREP(STRTAB_BASE_CFG_FMT, STRTAB_BASE_CFG_FMT_LINEAR);
	reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
	cfg->strtab_base_cfg = reg;

	arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
	return 0;
}
static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
{
	u64 reg;
	int ret;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		ret = arm_smmu_init_strtab_2lvl(smmu);
	else
		ret = arm_smmu_init_strtab_linear(smmu);

	if (ret)
		return ret;

	/* Set the strtab base address */
	reg  = smmu->strtab_cfg.strtab_dma & STRTAB_BASE_ADDR_MASK;
	reg |= STRTAB_BASE_RA;
	smmu->strtab_cfg.strtab_base = reg;

	/* Allocate the first VMID for stage-2 bypass STEs */
	set_bit(0, smmu->vmid_map);
	return 0;
}
static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_init_queues(smmu);
	if (ret)
		return ret;

	return arm_smmu_init_strtab(smmu);
}
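
/*
 * Helper for registers that are paired with an acknowledgement register
 * (e.g. CR0/CR0ACK, IRQ_CTRL/IRQ_CTRLACK): write the new value and poll
 * until the hardware reflects it back, or time out.
 */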
static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
				   unsigned int reg_off, unsigned int ack_off)
{
	u32 reg;

	writel_relaxed(val, smmu->base + reg_off);
	return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
/* GBPA is "special" */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	reg &= ~clr;
	reg |= set;
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);

	if (ret)
		dev_err(smmu->dev, "GBPA not responding to update\n");
	return ret;
}
static void arm_smmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_msi_domain_free_irqs(dev);
}
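
/*
 * When the SMMU supports MSIs, the evtq, gerror and priq interrupts are
 * delivered as message writes rather than wired lines. The callback
 * below programs the doorbell address, payload and memory attributes
 * into the corresponding *_IRQ_CFG0/1/2 registers.
 */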
static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
	phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, smmu->base + cfg[0]);
	writel_relaxed(msg->data, smmu->base + cfg[1]);
	writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
}
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
	struct msi_desc *desc;
	int ret, nvec = ARM_SMMU_MAX_MSIS;
	struct device *dev = smmu->dev;

	/* Clear the MSI address regs */
	writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
	writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
	else
		nvec--;

	if (!(smmu->features & ARM_SMMU_FEAT_MSI))
		return;

	if (!dev->msi_domain) {
		dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
		return;
	}

	/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
	ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
		return;
	}

	for_each_msi_entry(desc, dev) {
		switch (desc->platform.msi_index) {
		case EVTQ_MSI_INDEX:
			smmu->evtq.q.irq = desc->irq;
			break;
		case GERROR_MSI_INDEX:
			smmu->gerr_irq = desc->irq;
			break;
		case PRIQ_MSI_INDEX:
			smmu->priq.q.irq = desc->irq;
			break;
		default:	/* Unknown */
			continue;
		}
	}

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, arm_smmu_free_msis, dev);
}
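
/*
 * Request the individual interrupt lines, preferring MSIs when they were
 * successfully allocated above. A missing line only triggers a warning:
 * the SMMU keeps running, but the corresponding events, errors or PRI
 * requests will not be reported.
 */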
static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
{
	int irq, ret;

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	} else {
		dev_warn(smmu->dev, "no evtq irq - events will not be reported!\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	} else {
		dev_warn(smmu->dev, "no gerr irq - errors will not be reported!\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
		} else {
			dev_warn(smmu->dev, "no priq irq - PRI will be broken\n");
		}
	}
}
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	irq = smmu->combined_irq;
	if (irq) {
		/*
		 * Cavium ThunderX2 implementation doesn't support unique irq
		 * lines. Use a single irq line for all the SMMUv3 interrupts.
		 */
		ret = devm_request_threaded_irq(smmu->dev, irq,
					arm_smmu_combined_irq_handler,
					arm_smmu_combined_irq_thread,
					IRQF_ONESHOT,
					"arm-smmu-v3-combined-irq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable combined irq\n");
	} else
		arm_smmu_setup_unique_irqs(smmu);

	if (smmu->features & ARM_SMMU_FEAT_PRI)
		irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN) {
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
		WARN_ON(is_kdump_kernel() && !disable_bypass);
		arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
	}

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
	      FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
	      FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (random crap) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	arm_smmu_cmdq_issue_sync(smmu);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu));
	writel_relaxed(smmu->evtq.q.cons,
		       arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu));

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu));
		writel_relaxed(smmu->priq.q.cons,
			       arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu));

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	if (smmu->features & ARM_SMMU_FEAT_ATS) {
		enables |= CR0_ATSCHK;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable ATS check\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	if (is_kdump_kernel())
		enables &= ~(CR0_EVTQEN | CR0_PRIQEN);

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret)
			return ret;
	}
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (FIELD_GET(IDR0_TTENDIAN, reg)) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
			 coherent ? "true" : "false");

	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
		/* Fallthrough */
	case IDR0_STALL_MODEL_STALL:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (FIELD_GET(IDR0_TTF, reg)) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped to ensure natural alignment */
	smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
					 FIELD_GET(IDR1_CMDQS, reg));
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
					 FIELD_GET(IDR1_EVTQS, reg));
	smmu->priq.q.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
					 FIELD_GET(IDR1_PRIQS, reg));

	/* SID/SSID sizes */
	smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
	smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);

	/*
	 * If the SMMU supports fewer bits than would fill a single L2 stream
	 * table, use a linear table instead.
	 */
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = FIELD_GET(IDR5_STALL_MAX, reg);

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	/* Input address size */
	if (FIELD_GET(IDR5_VAX, reg) == IDR5_VAX_52_BIT)
		smmu->features |= ARM_SMMU_FEAT_VAX;

	/* Output address size */
	switch (FIELD_GET(IDR5_OAS, reg)) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	case IDR5_OAS_52_BIT:
		smmu->oas = 52;
		smmu->pgsize_bitmap |= 1ULL << 42; /* 4TB */
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}
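
/*
 * Firmware probing: on ACPI systems the IORT SMMUv3 node provides the
 * implementation model (used below for per-implementation quirks) and
 * the coherency override; on DT systems the same information comes from
 * driver options and the dma-coherent property.
 */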
#ifdef CONFIG_ACPI
static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
{
	switch (model) {
	case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
		smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
		break;
	case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
		smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
		break;
	}

	dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options);
}
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	acpi_smmu_get_options(iort_smmu->model, smmu);

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}
static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
{
	if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)
		return SZ_64K;
	else
		return SZ_128K;
}
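
/*
 * Platform driver probe: parse the firmware description, map the MMIO
 * region, read the ID registers, allocate the in-memory structures,
 * reset the device and finally register with the IOMMU core and the
 * PCI/AMBA/platform buses.
 */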
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */

	irq = platform_get_irq_byname(pdev, "combined");
	if (irq > 0)
		smmu->combined_irq = irq;
	else {
		irq = platform_get_irq_byname(pdev, "eventq");
		if (irq > 0)
			smmu->evtq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "priq");
		if (irq > 0)
			smmu->priq.q.irq = irq;

		irq = platform_get_irq_byname(pdev, "gerror");
		if (irq > 0)
			smmu->gerr_irq = irq;
	}
	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	ret = iommu_device_register(&smmu->iommu);
	if (ret) {
		dev_err(dev, "Failed to register iommu\n");
		return ret;
	}

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
	return 0;
}
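
/* Shutdown simply clears CR0, disabling the SMMU and queue processing. */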
static void arm_smmu_device_shutdown(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name			= "arm-smmu-v3",
		.of_match_table		= of_match_ptr(arm_smmu_of_match),
		.suppress_bind_attrs	= true,
	},
	.probe	= arm_smmu_device_probe,
	.shutdown = arm_smmu_device_shutdown,
};
builtin_platform_driver(arm_smmu_driver);