// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
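/*
 * Overview (editor's note, derived from the code below): this driver programs
 * the SMMU built into the Tegra memory controller. Each memory client belongs
 * to a "swgroup"; a swgroup is assigned an ASID, and each ASID selects a
 * two-level page table (a 1024-entry page directory of 1024-entry page tables
 * mapping 4 KiB pages). The structures below mirror that layout.
 */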
struct tegra_smmu_group {
	struct list_head list;
	struct tegra_smmu *smmu;
	const struct tegra_smmu_group_soc *soc;
	struct iommu_group *group;
	unsigned int swgroup;
};

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	struct list_head groups;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	spinlock_t lock;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned int id;
	u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}
static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT-1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PFN_PHYS(x)	((phys_addr_t)(x) << SMMU_PTE_SHIFT)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)
#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)
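/*
 * A 32-bit IOVA decomposes as follows (worked example for iova = 0x12345678):
 *
 *   bits 31:22  page directory index  (0x12345678 >> 22 = 0x48)
 *   bits 21:12  page table index      ((0x12345678 >> 12) & 0x3ff = 0x345)
 *   bits 11:0   offset into the page  (0x678)
 *
 * With 1024 PDEs, each covering 1024 pages of 4 KiB, one address space spans
 * the full 4 GiB aperture set up in tegra_smmu_domain_alloc().
 */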
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}
static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}
static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
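/*
 * smmu_flush() below is used as a write barrier: reading an SMMU register
 * back (SMMU_PTB_ASID here) makes sure the PTC/TLB flush writes issued
 * before it have actually reached the hardware before the caller continues.
 */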
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_PTB_ASID);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids)
		return -ENOSPC;

	set_bit(id, smmu->asids);
	*idp = id;

	return 0;
}
static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	clear_bit(id, smmu->asids);
}
static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	spin_lock_init(&as->lock);

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}
static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	WARN_ON_ONCE(as->use_count);
	kfree(as->count);
	kfree(as->pts);
	kfree(as);
}
static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	} else {
		pr_warn("%s group from swgroup %u not found\n", __func__,
			swgroup);
		/* No point moving ahead if group was not found */
		return;
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err = 0;

	mutex_lock(&smmu->lock);

	if (as->use_count > 0) {
		as->use_count++;
		goto unlock;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma)) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	mutex_unlock(&smmu->lock);

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
unlock:
	mutex_unlock(&smmu->lock);

	return err;
}
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	mutex_lock(&smmu->lock);

	if (--as->use_count > 0) {
		mutex_unlock(&smmu->lock);
		return;
	}

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;

	mutex_unlock(&smmu->lock);
}
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned int index;
	int err;

	if (!fwspec)
		return -ENOENT;

	for (index = 0; index < fwspec->num_ids; index++) {
		err = tegra_smmu_as_prepare(smmu, as);
		if (err)
			goto disable;

		tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
	}

	if (index == 0)
		return -ENODEV;

	return 0;

disable:
	while (index--) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}

	return err;
}
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned int index;

	if (!fwspec)
		return;

	for (index = 0; index < fwspec->num_ids; index++) {
		tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}
}
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}
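/*
 * Note the ordering above: the CPU write to the PDE is made visible to the
 * device (dma_sync_single_range_for_device) before the SMMU's page table
 * cache (PTC) and the TLB entries covering that section are invalidated,
 * and the final smmu_flush() read-back ensures the invalidations have
 * reached the hardware.
 */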
static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap, struct page *page)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		dma_addr_t dma;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}
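/*
 * Second-level page tables are allocated on demand the first time a mapping
 * touches a 4 MiB section; as->count[] tracks how many PTEs in each table
 * are in use so that tegra_smmu_pte_put_use() below can hand the page back
 * to the system once the last mapping in that section goes away.
 */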
static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}
static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}
static struct page *as_get_pde_page(struct tegra_smmu_as *as,
				    unsigned long iova, gfp_t gfp,
				    unsigned long *flags)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/* at first check whether allocation needs to be done at all */
	if (page)
		return page;

	/*
	 * In order to prevent exhaustion of the atomic memory pool, we
	 * allocate page in a sleeping context if GFP flags permit. Hence
	 * spinlock needs to be unlocked and re-locked after allocation.
	 */
	if (!(gfp & __GFP_ATOMIC))
		spin_unlock_irqrestore(&as->lock, *flags);

	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

	if (!(gfp & __GFP_ATOMIC))
		spin_lock_irqsave(&as->lock, *flags);

	/*
	 * In a case of blocking allocation, a concurrent mapping may win
	 * the PDE allocation. In this case the allocated page isn't needed
	 * if allocation succeeded and the allocation failure isn't fatal.
	 */
	if (as->pts[pde]) {
		if (page)
			__free_page(page);

		page = as->pts[pde];
	}

	return page;
}
static int
__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
		 phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
		 unsigned long *flags)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	struct page *page;
	u32 pte_attrs;
	u32 *pte;

	page = as_get_pde_page(as, iova, gfp, flags);
	if (!page)
		return -ENOMEM;

	pte = as_get_pte(as, iova, &pte_dma, page);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	pte_attrs = SMMU_PTE_NONSECURE;

	if (prot & IOMMU_READ)
		pte_attrs |= SMMU_PTE_READABLE;

	if (prot & IOMMU_WRITE)
		pte_attrs |= SMMU_PTE_WRITABLE;

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   SMMU_PHYS_PFN(paddr) | pte_attrs);

	return 0;
}
static size_t
__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
		   size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&as->lock, flags);
	ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
	spin_unlock_irqrestore(&as->lock, flags);

	return ret;
}
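/*
 * Illustrative sketch (not part of this driver): client drivers normally
 * reach these callbacks through the generic IOMMU API rather than calling
 * them directly. Assuming a platform device sitting behind this SMMU, a
 * manually managed mapping would look roughly like:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	int err = iommu_attach_device(domain, dev);      // -> tegra_smmu_attach_dev()
 *	if (!err)
 *		err = iommu_map(domain, iova, paddr, SZ_4K,
 *				IOMMU_READ | IOMMU_WRITE);  // -> tegra_smmu_map()
 *
 * dev, iova and paddr above are placeholders for the caller's own values.
 */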
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long flags;

	spin_lock_irqsave(&as->lock, flags);
	size = __tegra_smmu_unmap(domain, iova, size, gather);
	spin_unlock_irqrestore(&as->lock, flags);

	return size;
}
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}
static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);

	if (!smmu)
		return ERR_PTR(-ENODEV);

	return &smmu->iommu;
}

static void tegra_smmu_release_device(struct device *dev) {}
static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
	unsigned int i, j;

	for (i = 0; i < smmu->soc->num_groups; i++)
		for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
			if (smmu->soc->groups[i].swgroups[j] == swgroup)
				return &smmu->soc->groups[i];

	return NULL;
}
static void tegra_smmu_group_release(void *iommu_data)
{
	struct tegra_smmu_group *group = iommu_data;
	struct tegra_smmu *smmu = group->smmu;

	mutex_lock(&smmu->lock);
	list_del(&group->list);
	mutex_unlock(&smmu->lock);
}
static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
	const struct tegra_smmu_group_soc *soc;
	unsigned int swgroup = fwspec->ids[0];
	struct tegra_smmu_group *group;
	struct iommu_group *grp;

	/* Find group_soc associating with swgroup */
	soc = tegra_smmu_find_group(smmu, swgroup);

	mutex_lock(&smmu->lock);

	/* Find existing iommu_group associating with swgroup or group_soc */
	list_for_each_entry(group, &smmu->groups, list)
		if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
			grp = iommu_group_ref_get(group->group);
			mutex_unlock(&smmu->lock);
			return grp;
		}

	group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
	if (!group) {
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	INIT_LIST_HEAD(&group->list);
	group->swgroup = swgroup;
	group->smmu = smmu;
	group->soc = soc;

	if (dev_is_pci(dev))
		group->group = pci_device_group(dev);
	else
		group->group = generic_device_group(dev);

	if (IS_ERR(group->group)) {
		devm_kfree(smmu->dev, group);
		mutex_unlock(&smmu->lock);
		return NULL;
	}

	iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
	if (soc)
		iommu_group_set_name(group->group, soc->name);
	list_add_tail(&group->list, &smmu->groups);
	mutex_unlock(&smmu->lock);

	return group->group;
}
static int tegra_smmu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
	struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
	u32 id = args->args[0];

	/*
	 * Note: we are here releasing the reference of &iommu_pdev->dev, which
	 * is mc->dev. Although some functions in tegra_smmu_ops may keep using
	 * its private data beyond this point, it's still safe to do so because
	 * the SMMU parent device is the same as the MC, so the reference count
	 * isn't strictly necessary.
	 */
	put_device(&iommu_pdev->dev);

	dev_iommu_priv_set(dev, mc->smmu);

	return iommu_fwspec_add_ids(dev, &id, 1);
}
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.probe_device = tegra_smmu_probe_device,
	.release_device = tegra_smmu_release_device,
	.device_group = tegra_smmu_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.iova_to_phys = tegra_smmu_iova_to_phys,
	.of_xlate = tegra_smmu_of_xlate,
	.pgsize_bitmap = SZ_4K,
};
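/*
 * Devices are routed to this driver through the "iommus" device tree
 * property, which references the memory controller node together with a
 * swgroup ID; tegra_smmu_of_xlate() records that ID in the device's
 * iommu_fwspec. An illustrative (not authoritative) client node fragment:
 *
 *	iommus = <&mc TEGRA_SWGROUP_DC>;
 *
 * where TEGRA_SWGROUP_DC stands in for whichever swgroup macro applies to
 * the client on a given SoC.
 */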
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);
static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);
static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}
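/*
 * With CONFIG_DEBUG_FS enabled, the files created above are typically visible
 * as /sys/kernel/debug/smmu/swgroups and /sys/kernel/debug/smmu/clients
 * (assuming debugfs is mounted in its usual location), showing the current
 * per-swgroup ASID assignments and per-client translation enables.
 */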
static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&smmu->groups);
	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask =
		BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	err = iommu_device_register(&smmu->iommu);
	if (err)
		goto remove_sysfs;

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		goto unregister;

#ifdef CONFIG_PCI
	err = bus_set_iommu(&pci_bus_type, &tegra_smmu_ops);
	if (err < 0)
		goto unset_platform_bus;
#endif

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;

unset_platform_bus: __maybe_unused;
	bus_set_iommu(&platform_bus_type, NULL);
unregister:
	iommu_device_unregister(&smmu->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&smmu->iommu);

	return ERR_PTR(err);
}
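/*
 * tegra_smmu_probe()/tegra_smmu_remove() are not a platform driver's
 * probe/remove pair: they are called by the Tegra memory controller driver,
 * which owns the register space (smmu->regs = mc->regs) and passes in the
 * per-SoC description. See soc/tegra/mc.h for the tegra_mc and
 * tegra_smmu_soc definitions used here.
 */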
void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}