drivers/iommu/arm-smmu.c

/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

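/*
 * Several registers (e.g. the TTBRs) are 64 bits wide. 64-bit kernels
 * write them with a single writeq; 32-bit kernels emulate that with
 * two 32-bit writes, upper half first.
 */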
#ifdef CONFIG_64BIT
#define smmu_writeq		writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

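/*
 * Per the SMMU architecture, an incoming StreamID matches SMR n when
 * SMR_VALID is set and every ID bit not covered by a set MASK bit
 * matches, i.e. ((StreamID ^ SMR.ID) & ~SMR.MASK) == 0. This driver
 * always programs a mask of zero (exact match), since SMRs are not
 * currently shared.
 */
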
#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS	(1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

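/*
 * TLB tags come straight from the context bank index: stage-1 contexts
 * use cbndx as their ASID, and stage-2 contexts use cbndx + 1 as their
 * VMID, which keeps every translation context off VMID 0.
 */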
#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

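/*
 * SMMU masters are identified by device-tree node. PCI devices have no
 * of_node of their own, so walk up to the root bus and use the host
 * controller's node on their behalf.
 */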
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

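/*
 * Masters come from the "mmu-masters" property of the SMMU node, which
 * pairs each master's phandle with its stream IDs. A hypothetical
 * example binding:
 *
 *	smmu {
 *		compatible = "arm,smmu-v1";
 *		...
 *		mmu-masters = <&dma0 0xd01d 0xd01e>,
 *			      <&dma1 0xd11c>;
 *	};
 *
 * where each referenced master node sets #stream-id-cells to the
 * number of IDs it supplies.
 */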
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

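/*
 * Lockless bitmap allocator for context banks and SMRs: if another CPU
 * claims the bit between find_next_zero_bit() and the atomic
 * test_and_set_bit(), simply retry the search.
 */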
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

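/*
 * Invalidate all TLB entries for one translation context: by ASID for
 * stage-1 contexts, or by VMID through the global register space for
 * stage-2 contexts.
 */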
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

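/*
 * By-VA invalidation encodings differ by register format: the 32-bit
 * (and SMMUv1) form packs VA[31:12] together with the ASID in its low
 * byte, while the 64-bit form takes the VA shifted right by 12 with
 * the ASID in bits [63:48].
 */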
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			/* Clear the page-offset bits (not ~12UL, which only
			 * masks bits 2-3) before merging in the ASID. */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/*
		 * CBA2R.
		 * *Must* be initialised before CBAR, thanks to a VMID16
		 * architectural oversight that affected some implementations.
		 */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

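/*
 * SMR setup runs in two phases: first reserve indices in the software
 * bitmap (so an allocation failure can unwind without touching the
 * hardware), then program the SMR_VALID entries into the stream-match
 * registers.
 */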
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

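/*
 * Translate an IOVA with the hardware's own address translation op:
 * write the page-aligned VA to ATS1PR, poll ATSR until the operation
 * completes, then read the resulting PA (or a fault flag) from PAR.
 */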
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

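		/*
		 * Probe how many SMR ID/MASK bits are implemented by
		 * writing all-ones to SMR0 and reading back which bits
		 * stick, then check that the mask can cover every
		 * implemented ID bit.
		 */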
		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version == ARM_SMMU_V1) {
		smmu->va_size = smmu->ipa_size;
		size = SZ_4K | SZ_2M | SZ_1G;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
#ifndef CONFIG_64BIT
		smmu->va_size = min(32UL, smmu->va_size);
#endif
		size = 0;
		if (id & ID2_PTFS_4K)
			size |= SZ_4K | SZ_2M | SZ_1G;
		if (id & ID2_PTFS_16K)
			size |= SZ_16K | SZ_32M;
		if (id & ID2_PTFS_64K)
			size |= SZ_64K | SZ_512M;
	}

	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	smmu->version = (enum arm_smmu_arch_version)of_id->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

	if (smmu->version > ARM_SMMU_V1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");