/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;
};
struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	struct page *count;
	struct page *pd;
	unsigned long attr;
	unsigned int id;
};
static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}
static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}
static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
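/*
 * Note that SMMU_PTB_DATA_VALUE() and SMMU_MK_PDE() use the same encoding:
 * the physical address of a page-aligned table is shifted right by 12 bits
 * so that only its page frame number remains, and the attribute bits (which
 * live in the top bits of the word) are OR'd on top.
 */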
#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)
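/*
 * The VA-match flushes come in two granularities: SMMU_TLB_FLUSH_VA_SECTION()
 * keeps iova bits 31:22 and thus targets the 4 MiB range covered by a single
 * page directory entry, while SMMU_TLB_FLUSH_VA_GROUP() keeps bits 31:14 and
 * targets a 16 KiB group of four page table entries.
 */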
#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3
/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)
/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
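/*
 * The SMMU uses a two-level table: iova bits 31:22 select one of 1024 PDEs,
 * bits 21:12 select one of 1024 PTEs and bits 11:0 are the offset into a
 * 4 KiB page, for a total of 1024 * 1024 * 4 KiB = 4 GiB, matching the
 * 0x0-0xffffffff aperture set up in tegra_smmu_domain_alloc(). For example,
 * iova 0x12345000 decomposes into PDE index 0x048 and PTE index 0x345.
 */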
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)
#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
		       SMMU_PTE_NONSECURE)
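/*
 * Flush the SMMU's page table cache (PTC). If @page is given, only the part
 * of the cache that holds the updated entry is flushed: @offset is rounded
 * down to the memory controller's atom size and combined with the page's
 * physical address, and on SoCs with more than 32 physical address bits the
 * upper bits are written to SMMU_PTC_FLUSH_HI first. A NULL @page flushes
 * the entire PTC.
 */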
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
				  unsigned long offset)
{
	phys_addr_t phys = page ? page_to_phys(page) : 0;
	u32 value;

	if (page) {
		offset &= ~(smmu->mc->soc->atom_size - 1);

		if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
			value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
			value = 0;
#endif
			smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
		}

		value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	} else {
		value = SMMU_PTC_FLUSH_TYPE_ALL;
	}

	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}
static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
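/*
 * Reading back SMMU_CONFIG serves as a simple barrier: it makes sure that
 * the (posted) register writes issued before it have actually reached the
 * SMMU before the caller continues.
 */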
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}
static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}
static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;
	unsigned int i;
	u32 *pd;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = alloc_page(GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* clear PDEs */
	pd = page_address(as->pd);
	SetPageReserved(as->pd);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	/* clear PDE usage counters */
	pd = page_address(as->count);
	SetPageReserved(as->count);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}
static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */
	ClearPageReserved(as->pd);

	kfree(as);
}
static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}
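/*
 * Enable translation for a software group: set the SMMU enable bit for every
 * memory client that belongs to @swgroup, then program @asid into the
 * per-swgroup ASID register and set SMMU_ASID_ENABLE. tegra_smmu_disable()
 * below undoes both steps in the reverse order.
 */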
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
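/*
 * The first attach of an address space allocates an ASID, flushes the page
 * directory out of the CPU caches and out of the SMMU's PTC and TLB, and
 * programs the directory into SMMU_PTB_DATA for that ASID; later attaches
 * only increment use_count.
 */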
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		return err;

	smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
	smmu_flush_ptc(smmu, as->pd, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;
}
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);
	as->smmu = NULL;
}
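/*
 * Attaching walks the device's "iommus" phandle list; each specifier that
 * points back at this SMMU carries the client's swgroup number in its first
 * cell. Illustratively (the exact node and macro names depend on the SoC's
 * device-tree binding), a client might reference the SMMU as:
 *
 *	iommus = <&mc TEGRA_SWGROUP_DC>;
 */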
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			index++;
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			index++;
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}
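/*
 * Look up the PTE for @iova, allocating, clearing and publishing a new page
 * table (dcache flush, PTC flush, TLB section flush) if the corresponding
 * PDE is still empty. The page backing the PTE is returned through the last
 * argument so that callers can flush exactly the cache line they modify.
 */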
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       struct page **pagep)
{
	u32 *pd = page_address(as->pd), *pt, *count;
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	struct tegra_smmu *smmu = as->smmu;
	struct page *page;
	unsigned int i;

	if (pd[pde] == 0) {
		page = alloc_page(GFP_KERNEL | __GFP_DMA);
		if (!page)
			return NULL;

		pt = page_address(page);
		SetPageReserved(page);

		for (i = 0; i < SMMU_NUM_PTE; i++)
			pt[i] = 0;

		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

		smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
		smmu_flush_ptc(smmu, as->pd, pde << 2);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);
	} else {
		page = pfn_to_page(pd[pde] & smmu->pfn_mask);
		pt = page_address(page);
	}

	*pagep = page;

	/* Keep track of entries in this page table. */
	count = page_address(as->count);
	count[pde]++;

	return &pt[pte];
}
static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
{
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	u32 *count = page_address(as->count);
	u32 *pd = page_address(as->pd), *pt;
	struct page *page;

	page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
	pt = page_address(page);

	/* Clear the entry before the page table can possibly be freed. */
	pt[pte] = 0;

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--count[pde] == 0) {
		ClearPageReserved(page);
		__free_page(page);
		pd[pde] = 0;
	}
}
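/*
 * Mapping writes the PTE and then performs the three flushes needed to make
 * it visible to the hardware: the CPU data cache line holding the PTE, the
 * SMMU's page table cache and the TLB entries for the 16 KiB group that
 * contains @iova. Unmapping clears the entry and issues the same flushes.
 */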
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return -ENOMEM;

	*pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
	offset = offset_in_page(pte);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return 0;
}
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return 0;

	offset = offset_in_page(pte);
	as_put_pte(as, iova);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return size;
}
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *page;
	unsigned long pfn;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}
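/*
 * Resolve an "iommus" phandle target to its SMMU instance: the phandle
 * points at the memory controller, whose driver data is set up by
 * tegra_smmu_probe() to carry the struct tegra_smmu pointer.
 */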
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}
static void tegra_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc(smmu, NULL, 0);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		return ERR_PTR(err);

	return smmu;
}