// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
struct tegra_smmu_group {
	struct list_head list;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned int id;
	u32 attr;
};
static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
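
/*
 * The SMMU maps 32-bit I/O virtual addresses through a two-level page
 * table: bits [31:22] of an IOVA select one of 1024 page directory
 * entries (4 MiB sections) and bits [21:12] select one of 1024 page
 * table entries (4 KiB pages).
 */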
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
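
/*
 * Flush helpers: the PTC caches page table entries and is invalidated
 * either wholesale or by the DMA address of the entry that changed, while
 * the TLB is invalidated per ASID, per 4 MiB section or per 16 KiB group.
 * The read-back in smmu_flush() is presumably there to make sure the
 * preceding register writes have reached the hardware.
 */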
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}
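
/*
 * ASIDs are handed out from a bitmap protected by smmu->lock; each
 * address space (tegra_smmu_as) owns one ASID while it is in use.
 */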
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}
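
/*
 * An unmanaged domain is backed by a tegra_smmu_as: one page worth of
 * page directory entries, a per-PDE use counter and an array of page
 * table pages that are allocated on demand.
 */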
static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}
static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}
static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
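
/*
 * tegra_smmu_as_prepare()/tegra_smmu_as_unprepare() are reference counted:
 * the first user maps the page directory for DMA, allocates an ASID and
 * programs SMMU_PTB_ASID/SMMU_PTB_DATA; the last user undoes all of that.
 */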
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}
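
/*
 * Attach and detach walk the device's "iommus" phandles and enable or
 * disable translation for every SW group that references this SMMU
 * instance.
 */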
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}
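
/*
 * Page directory entries are updated through the CPU mapping, synced for
 * the device and then flushed from the PTC and TLB so the hardware never
 * uses a stale entry.
 */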
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}
static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}
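
/*
 * as_get_pte() returns the PTE slot for an IOVA, allocating, mapping and
 * wiring up a new page table page on first use; *dmap receives the DMA
 * address of the page table so callers can sync their writes.
 */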
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}
static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}
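
/*
 * map/unmap operate on a single 4 KiB page at a time (pgsize_bitmap is
 * SZ_4K); the per-PDE use counters let unmap release a page table page
 * once its last PTE has been cleared.
 */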
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 pte_attrs;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | pte_attrs);

	return 0;
}
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}
static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
				struct of_phandle_args *args)
{
	const struct iommu_ops *ops = smmu->iommu.ops;
	int err;

	err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
	if (err < 0) {
		dev_err(dev, "failed to initialize fwspec: %d\n", err);
		return err;
	}

	err = ops->of_xlate(dev, args);
	if (err < 0) {
		dev_err(dev, "failed to parse SW group ID: %d\n", err);
		iommu_fwspec_free(dev);
		return err;
	}

	return 0;
}
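
/*
 * add_device/remove_device resolve the device's "iommus" phandle to an
 * SMMU instance via the memory controller's drvdata and cache it in
 * dev->archdata.iommu for the other callbacks.
 */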
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = NULL;
	struct iommu_group *group;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			err = tegra_smmu_configure(smmu, dev, &args);
			of_node_put(args.np);

			if (err < 0)
				return err;

			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;

			iommu_device_link(&smmu->iommu, dev);

			break;
		}

		of_node_put(args.np);
		index++;
	}

	if (!smmu)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}
static void tegra_smmu_remove_device(struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;

	if (smmu)
		iommu_device_unlink(&smmu->iommu, dev);

	dev->archdata.iommu = NULL;
	iommu_group_remove_device(dev);
}
static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}
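
/*
 * SoC data may tie several SW groups into a single IOMMU group; the
 * smmu->groups list caches the iommu_group allocated for each of them so
 * that later devices join the existing group instead of getting a new one.
 */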
static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
						unsigned int swgroup)
{
	const struct tegra_smmu_group_soc *soc;
	struct tegra_smmu_group *group;

	soc = tegra_smmu_find_group(smmu, swgroup);
	if (!soc)
		return NULL;

	mutex_lock(&smmu->lock);

	list_for_each_entry(group, &smmu->groups, list)
		if (group->soc == soc) {
			mutex_unlock(&smmu->lock);
			return group->group;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->soc = soc;

	group->group = iommu_group_alloc();
	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct iommu_group *group;

	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
	if (!group)
		group = generic_device_group(dev);

	return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	u32 id = args->args[0];

	return iommu_fwspec_add_ids(dev, &id, 1);
}
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);
static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);
static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
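
/*
 * tegra_smmu_probe() is called by the memory controller driver rather
 * than as a separate platform driver: the SMMU shares the MC register
 * space, so it is configured, registered with the IOMMU core and hooked
 * up to the platform bus from here.
 */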
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}
void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}