/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>

#include <asm/cacheflush.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)
struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;

	struct iommu_device	iommu;		/* IOMMU Core handle */
};

struct gart_domain {
	struct iommu_domain	domain;		/* generic domain handle */
	struct gart_device	*gart;		/* link to gart device */
};

static struct gart_device *gart_handle; /* unique for a system */
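
/*
 * A GART PTE is simply the 4 KiB-aligned physical address of the target
 * page with bit 31 (GART_ENTRY_PHYS_ADDR_VALID) doubling as the valid
 * flag, so a single 32-bit write installs a translation and marks it live.
 */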
#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct gart_domain, domain);
}
/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))
#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)
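
/* Select a remap slot via GART_ENTRY_ADDR, then latch the PTE into it. */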
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}
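
/* Read a PTE back through the same address/data register pair. */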
static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}
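
/*
 * (Re)initialize the aperture: program every PTE from @data, or clear all
 * entries when @data is NULL, then enable the GART and flush the write.
 */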
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}
#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif
static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}
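
/*
 * Track each attached device on the shared client list; the GART has a
 * single aperture, so every attached client sees the same translations.
 */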
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *client, *c;
	int err = 0;

	client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}
static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find\n");
out:
	spin_unlock(&gart->client_lock);
}
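
/*
 * Only unmanaged domains are supported; the domain geometry is clamped to
 * the single hardware aperture described by the device's remap resource.
 */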
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct gart_domain *gart_domain;
	struct gart_device *gart;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	gart = gart_handle;
	if (!gart)
		return NULL;

	gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
	if (!gart_domain)
		return NULL;

	gart_domain->gart = gart;
	gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
	gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	gart_domain->domain.geometry.force_aperture = true;

	return &gart_domain->domain;
}
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;

	if (gart) {
		spin_lock(&gart->client_lock);
		if (!list_empty(&gart->client)) {
			struct gart_client *c;

			list_for_each_entry(c, &gart->client, list)
				gart_iommu_detach_dev(domain, c->dev);
		}
		spin_unlock(&gart->client_lock);
	}

	kfree(gart_domain);
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;
	unsigned long pfn;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}
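
/* Unmapping just clears the PTE: a zero entry has the valid bit unset. */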
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return bytes;
}
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_domain *gart_domain = to_gart_domain(domain);
	struct gart_device *gart = gart_domain->gart;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			(unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}
static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}
static int gart_iommu_add_device(struct device *dev)
{
	struct iommu_group *group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	iommu_device_link(&gart_handle->iommu, dev);

	return 0;
}
static void gart_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
	iommu_device_unlink(&gart_handle->iommu, dev);
}
static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.add_device	= gart_iommu_add_device,
	.remove_device	= gart_iommu_remove_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.map_sg		= default_iommu_map_sg,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};
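
/*
 * A minimal usage sketch (illustrative only, not part of this driver):
 * a client driver on the platform bus reaches these ops through the
 * generic IOMMU API, never by calling them directly. Here dom, dev, iova
 * and phys are placeholder variables for the example:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *	int err;
 *
 *	err = iommu_attach_device(dom, dev);
 *	err = iommu_map(dom, iova, phys, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 * The GART loses its remap table across suspend, so the PM hooks below
 * snapshot every PTE into gart->savedata and replay it on resume.
 */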
static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}
static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}
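
/*
 * Probe: map the GART register window, register with the IOMMU core, and
 * clear the whole remap aperture before publishing the global handle.
 */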
static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	struct device *dev = &pdev->dev;
	int ret;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		return -ENXIO;
	}

	ret = iommu_device_sysfs_add(&gart->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret) {
		dev_err(dev, "Failed to register IOMMU in sysfs\n");
		return ret;
	}

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);

	ret = iommu_device_register(&gart->iommu);
	if (ret) {
		dev_err(dev, "Failed to register IOMMU\n");
		iommu_device_sysfs_remove(&gart->iommu);
		return ret;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;

	return 0;
}
static int tegra_gart_remove(struct platform_device *pdev)
{
	struct gart_device *gart = platform_get_drvdata(pdev);

	iommu_device_unregister(&gart->iommu);
	iommu_device_sysfs_remove(&gart->iommu);

	writel(0, gart->regs + GART_CONFIG);
	if (gart->savedata)
		vfree(gart->savedata);
	gart_handle = NULL;
	return 0;
}
static const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};
static const struct of_device_id tegra_gart_of_match[] = {
	{ .compatible = "nvidia,tegra20-gart", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.remove		= tegra_gart_remove,
	.driver = {
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
		.of_match_table	= tegra_gart_of_match,
	},
};
static int tegra_gart_init(void)
{
	return platform_driver_register(&tegra_gart_driver);
}
static void __exit tegra_gart_exit(void)
{
	platform_driver_unregister(&tegra_gart_driver);
}
subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");