/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;
};
struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;
	struct page **pts;
	struct page *pd;
	dma_addr_t pd_dma;
	unsigned id;
	u32 attr;
};
static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}
static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
		       SMMU_PTE_NONSECURE)
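/*
 * Illustrative note (not from the original sources): the SMMU uses a
 * two-level page table. With SMMU_NUM_PDE = SMMU_NUM_PTE = 1024 and 4 KiB
 * pages, one page directory covers 1024 * 1024 * 4 KiB = 4 GiB of IOVA
 * space. An IOVA therefore splits as bits [31:22] -> PDE index,
 * bits [21:12] -> PTE index and bits [11:0] -> page offset, which is what
 * iova_pd_index() and iova_pt_index() below compute.
 */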
static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}
static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;

	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(u32 pde)
{
	return pde << 12;
}
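/*
 * Editor's note: SMMU_MK_PDE() and smmu_pde_to_dma() are inverses of each
 * other. A PDE stores the page-frame number of a page table (its DMA
 * address shifted right by SMMU_PTE_SHIFT) together with attribute bits in
 * the top bits, and smmu_pde_to_dma() recovers the DMA address by shifting
 * the frame number back up.
 */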
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}
static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
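/*
 * Editor's note: smmu_flush() below only reads back SMMU_CONFIG. The intent
 * appears to be to force the preceding register writes to complete before
 * the caller proceeds (a read-back barrier); it does not flush any cache by
 * itself.
 */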
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}
static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}
static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}
static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */
	kfree(as);
}
static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}
static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}
static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}
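/*
 * Editor's note: page tables are allocated lazily. as_get_pte() below
 * installs a new page table page (and its PDE) on first use of a 4 MiB
 * region, whereas tegra_smmu_pte_lookup() above only returns entries that
 * already exist.
 */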
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}
static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}
static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}
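/*
 * Editor's note: PTE updates follow the same ordering as PDE updates above:
 * write the entry in memory, sync it for the device, invalidate the
 * matching page table cache (PTC) line and finally invalidate the TLB entry
 * for the IOVA.
 */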
static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

	return 0;
}
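/*
 * Illustrative (hypothetical) use of these callbacks through the generic
 * IOMMU API, e.g. from a client driver; domain, dev, iova and paddr are
 * placeholders:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 */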
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}
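/*
 * Illustrative device tree fragment (hypothetical node names) showing the
 * "iommus" property that tegra_smmu_add_device() parses; the specifier cell
 * carries the client's swgroup ID and &mc is the memory controller node
 * that registers this SMMU:
 *
 *	host1x@50000000 {
 *		...
 *		iommus = <&mc TEGRA_SWGROUP_HC>;
 *	};
 */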
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}
static void tegra_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};
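/*
 * Editor's note: .pgsize_bitmap is SZ_4K only, so the IOMMU core splits all
 * map/unmap requests into 4 KiB pages, matching the PTE granularity of the
 * hardware page tables above.
 */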
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}
static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}
static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}
static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}
static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		return ERR_PTR(err);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}
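/*
 * Editor's note: tegra_smmu_probe()/tegra_smmu_remove() are not registered
 * as a platform driver of their own; the Tegra memory controller driver
 * calls them directly and hands over its register mapping via mc->regs.
 */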
void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}