/* drivers/iommu/tegra-smmu.c */
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>
struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	struct page *count;
	struct page *pd;
	unsigned id;
	u32 attr;
};
static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x)          (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR		(SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
				 SMMU_PTE_NONSECURE)
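
/*
 * The helpers below issue maintenance operations through the SMMU registers:
 * smmu_flush_ptc() invalidates the page table cache (either a single cache
 * line, by address, or the whole cache), the smmu_flush_tlb_*() variants
 * invalidate TLB entries by ASID, section or group, and smmu_flush() reads
 * back SMMU_CONFIG so that the preceding register writes have reached the
 * hardware.
 */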
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
				  unsigned long offset)
{
	phys_addr_t phys = page ? page_to_phys(page) : 0;
	u32 value;

	if (page) {
		offset &= ~(smmu->mc->soc->atom_size - 1);

		if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
			value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
			value = 0;
#endif
			smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
		}

		value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	} else {
		value = SMMU_PTC_FLUSH_TYPE_ALL;
	}

	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
					unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}
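
/*
 * ASIDs are handed out from a small bitmap (smmu->asids) protected by
 * smmu->lock: tegra_smmu_alloc_asid() grabs the first free ID and
 * tegra_smmu_free_asid() returns it when the address space is released.
 */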
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}
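
/*
 * An address space uses a two-level page table: one page directory page
 * (SMMU_NUM_PDE entries) plus one page per page table (SMMU_NUM_PTE entries),
 * covering a 4 GiB aperture in 4 KiB pages. The extra as->count page keeps a
 * per-PDE usage counter so that empty page tables can be freed again.
 */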
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;
	unsigned int i;
	uint32_t *pd;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = alloc_page(GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* clear PDEs */
	pd = page_address(as->pd);
	SetPageReserved(as->pd);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	/* clear PDE usage counters */
	pd = page_address(as->count);
	SetPageReserved(as->count);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */
	ClearPageReserved(as->pd);

	kfree(as);
}
static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
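
/*
 * The first time an address space is attached, an ASID is allocated for it
 * and its page directory is installed by programming SMMU_PTB_ASID and
 * SMMU_PTB_DATA. Subsequent attaches only bump the use count, and the ASID
 * is released again once the last user has detached.
 */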
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		return err;

	smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
	smmu_flush_ptc(smmu, as->pd, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);
	as->smmu = NULL;
}
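
/*
 * Attaching and detaching walk the device's "iommus" property and only act
 * on entries that reference this SMMU, enabling or disabling translation for
 * the corresponding swgroup.
 */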
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}
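
/*
 * as_get_pte() walks the page directory for an IOVA and allocates a page
 * table on demand when the PDE is empty, flushing the new entries out of the
 * CPU cache, the PTC and the TLB before they are used. The per-PDE counter
 * in as->count tracks how many PTEs of each page table are in use so that
 * as_put_pte() can free a page table once it becomes empty.
 */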
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       struct page **pagep)
{
	u32 *pd = page_address(as->pd), *pt, *count;
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	struct tegra_smmu *smmu = as->smmu;
	struct page *page;
	unsigned int i;

	if (pd[pde] == 0) {
		page = alloc_page(GFP_KERNEL | __GFP_DMA);
		if (!page)
			return NULL;

		pt = page_address(page);
		SetPageReserved(page);

		for (i = 0; i < SMMU_NUM_PTE; i++)
			pt[i] = 0;

		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

		smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
		smmu_flush_ptc(smmu, as->pd, pde << 2);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);
	} else {
		page = pfn_to_page(pd[pde] & smmu->pfn_mask);
		pt = page_address(page);
	}

	*pagep = page;

	/* Keep track of entries in this page table. */
	count = page_address(as->count);
	if (pt[pte] == 0)
		count[pde]++;

	return &pt[pte];
}

static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
{
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	u32 *count = page_address(as->count);
	u32 *pd = page_address(as->pd), *pt;
	struct page *page;

	page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
	pt = page_address(page);

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (pt[pte] != 0) {
		if (--count[pde] == 0) {
			ClearPageReserved(page);
			__free_page(page);
			pd[pde] = 0;
		}

		pt[pte] = 0;
	}
}
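
/*
 * Mapping and unmapping update a single 4 KiB PTE and then flush the cache
 * line containing it, the page table cache and the TLB entry for the IOVA so
 * that the hardware observes the change.
 */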
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return -ENOMEM;

	*pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
	offset = offset_in_page(pte);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return 0;

	offset = offset_in_page(pte);
	as_put_pte(as, iova);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct page *page;
	unsigned long pfn;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}
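
/*
 * tegra_smmu_probe() is invoked with the memory controller's data and reuses
 * its register space (smmu->regs = mc->regs). It sets up the ASID bitmap,
 * derives the PFN and TLB masks from the SoC data, programs the PTC and TLB
 * configuration registers, enables the SMMU and finally registers
 * tegra_smmu_ops for the platform bus.
 */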
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc(smmu, NULL, 0);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		return ERR_PTR(err);

	return smmu;
}