/*
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
19 #include <linux/clk.h>
20 #include <linux/dma-mapping.h>
22 #include <linux/list.h>
23 #include <linux/module.h>
24 #include <linux/of_device.h>
26 #include <linux/slab.h>
28 #define CREATE_TRACE_POINTS
29 #include <trace/events/host1x.h>
30 #undef CREATE_TRACE_POINTS
38 #include "hw/host1x01.h"
39 #include "hw/host1x02.h"
40 #include "hw/host1x04.h"
41 #include "hw/host1x05.h"
42 #include "hw/host1x06.h"
44 void host1x_hypervisor_writel(struct host1x
*host1x
, u32 v
, u32 r
)
46 writel(v
, host1x
->hv_regs
+ r
);
49 u32
host1x_hypervisor_readl(struct host1x
*host1x
, u32 r
)
51 return readl(host1x
->hv_regs
+ r
);
54 void host1x_sync_writel(struct host1x
*host1x
, u32 v
, u32 r
)
56 void __iomem
*sync_regs
= host1x
->regs
+ host1x
->info
->sync_offset
;
58 writel(v
, sync_regs
+ r
);
61 u32
host1x_sync_readl(struct host1x
*host1x
, u32 r
)
63 void __iomem
*sync_regs
= host1x
->regs
+ host1x
->info
->sync_offset
;
65 return readl(sync_regs
+ r
);
68 void host1x_ch_writel(struct host1x_channel
*ch
, u32 v
, u32 r
)
70 writel(v
, ch
->regs
+ r
);
73 u32
host1x_ch_readl(struct host1x_channel
*ch
, u32 r
)
75 return readl(ch
->regs
+ r
);
/*
 * Hardware parameters for host1x v01 (matched below for Tegra20/Tegra30).
 * .sync_offset is the byte offset of the syncpoint register block inside
 * the host1x aperture (used by host1x_sync_writel/readl above);
 * .dma_mask is handed to the DMA API in host1x_probe().
 * NOTE(review): this paste is mangled — several initializers and the
 * closing brace appear elided here; confirm against the complete file.
 */
78 static const struct host1x_info host1x01_info
= {
83 .init
= host1x01_init
,
84 .sync_offset
= 0x3000,
85 .dma_mask
= DMA_BIT_MASK(32),
/*
 * Hardware parameters for host1x v02 (matched below for Tegra114).
 * NOTE(review): mangled paste — some initializers and the closing brace
 * appear elided here; confirm against the complete file.
 */
88 static const struct host1x_info host1x02_info
= {
93 .init
= host1x02_init
,
94 .sync_offset
= 0x3000,
95 .dma_mask
= DMA_BIT_MASK(32),
/*
 * Hardware parameters for host1x v04 (matched below for Tegra124).
 * Note the different syncpoint block offset (0x2100) and the wider
 * 34-bit DMA mask compared to v01/v02.
 * NOTE(review): mangled paste — some initializers and the closing brace
 * appear elided here; confirm against the complete file.
 */
98 static const struct host1x_info host1x04_info
= {
103 .init
= host1x04_init
,
104 .sync_offset
= 0x2100,
105 .dma_mask
= DMA_BIT_MASK(34),
/*
 * Hardware parameters for host1x v05 (matched below for Tegra210).
 * NOTE(review): mangled paste — some initializers and the closing brace
 * appear elided here; confirm against the complete file.
 */
108 static const struct host1x_info host1x05_info
= {
113 .init
= host1x05_init
,
114 .sync_offset
= 0x2100,
115 .dma_mask
= DMA_BIT_MASK(34),
/*
 * Hardware parameters for host1x v06 (matched below for Tegra186).
 * .has_hypervisor = true makes host1x_probe() map the split "vm" and
 * "hypervisor" register apertures instead of a single resource.
 * NOTE(review): mangled paste — some initializers (including sync_offset)
 * and the closing brace appear elided here; confirm against the full file.
 */
118 static const struct host1x_info host1x06_info
= {
123 .init
= host1x06_init
,
125 .dma_mask
= DMA_BIT_MASK(34),
126 .has_hypervisor
= true,
129 static const struct of_device_id host1x_of_match
[] = {
130 { .compatible
= "nvidia,tegra186-host1x", .data
= &host1x06_info
, },
131 { .compatible
= "nvidia,tegra210-host1x", .data
= &host1x05_info
, },
132 { .compatible
= "nvidia,tegra124-host1x", .data
= &host1x04_info
, },
133 { .compatible
= "nvidia,tegra114-host1x", .data
= &host1x02_info
, },
134 { .compatible
= "nvidia,tegra30-host1x", .data
= &host1x01_info
, },
135 { .compatible
= "nvidia,tegra20-host1x", .data
= &host1x01_info
, },
138 MODULE_DEVICE_TABLE(of
, host1x_of_match
);
140 static int host1x_probe(struct platform_device
*pdev
)
143 struct resource
*regs
, *hv_regs
= NULL
;
147 host
= devm_kzalloc(&pdev
->dev
, sizeof(*host
), GFP_KERNEL
);
151 host
->info
= of_device_get_match_data(&pdev
->dev
);
153 if (host
->info
->has_hypervisor
) {
154 regs
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "vm");
156 dev_err(&pdev
->dev
, "failed to get vm registers\n");
160 hv_regs
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
,
164 "failed to get hypervisor registers\n");
168 regs
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
170 dev_err(&pdev
->dev
, "failed to get registers\n");
175 syncpt_irq
= platform_get_irq(pdev
, 0);
176 if (syncpt_irq
< 0) {
177 dev_err(&pdev
->dev
, "failed to get IRQ: %d\n", syncpt_irq
);
181 mutex_init(&host
->devices_lock
);
182 INIT_LIST_HEAD(&host
->devices
);
183 INIT_LIST_HEAD(&host
->list
);
184 host
->dev
= &pdev
->dev
;
186 /* set common host1x device data */
187 platform_set_drvdata(pdev
, host
);
189 host
->regs
= devm_ioremap_resource(&pdev
->dev
, regs
);
190 if (IS_ERR(host
->regs
))
191 return PTR_ERR(host
->regs
);
193 if (host
->info
->has_hypervisor
) {
194 host
->hv_regs
= devm_ioremap_resource(&pdev
->dev
, hv_regs
);
195 if (IS_ERR(host
->hv_regs
))
196 return PTR_ERR(host
->hv_regs
);
199 dma_set_mask_and_coherent(host
->dev
, host
->info
->dma_mask
);
201 if (host
->info
->init
) {
202 err
= host
->info
->init(host
);
207 host
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
208 if (IS_ERR(host
->clk
)) {
209 dev_err(&pdev
->dev
, "failed to get clock\n");
210 err
= PTR_ERR(host
->clk
);
214 host
->rst
= devm_reset_control_get(&pdev
->dev
, "host1x");
215 if (IS_ERR(host
->rst
)) {
216 err
= PTR_ERR(host
->rst
);
217 dev_err(&pdev
->dev
, "failed to get reset: %d\n", err
);
221 host
->group
= iommu_group_get(&pdev
->dev
);
223 struct iommu_domain_geometry
*geometry
;
226 host
->domain
= iommu_domain_alloc(&platform_bus_type
);
232 err
= iommu_attach_group(host
->domain
, host
->group
);
234 if (err
== -ENODEV
) {
235 iommu_domain_free(host
->domain
);
237 iommu_group_put(host
->group
);
242 goto fail_free_domain
;
245 geometry
= &host
->domain
->geometry
;
247 order
= __ffs(host
->domain
->pgsize_bitmap
);
248 init_iova_domain(&host
->iova
, 1UL << order
,
249 geometry
->aperture_start
>> order
);
250 host
->iova_end
= geometry
->aperture_end
;
254 err
= host1x_channel_list_init(&host
->channel_list
,
255 host
->info
->nb_channels
);
257 dev_err(&pdev
->dev
, "failed to initialize channel list\n");
258 goto fail_detach_device
;
261 err
= clk_prepare_enable(host
->clk
);
263 dev_err(&pdev
->dev
, "failed to enable clock\n");
264 goto fail_free_channels
;
267 err
= reset_control_deassert(host
->rst
);
269 dev_err(&pdev
->dev
, "failed to deassert reset: %d\n", err
);
270 goto fail_unprepare_disable
;
273 err
= host1x_syncpt_init(host
);
275 dev_err(&pdev
->dev
, "failed to initialize syncpts\n");
276 goto fail_reset_assert
;
279 err
= host1x_intr_init(host
, syncpt_irq
);
281 dev_err(&pdev
->dev
, "failed to initialize interrupts\n");
282 goto fail_deinit_syncpt
;
285 host1x_debug_init(host
);
287 err
= host1x_register(host
);
289 goto fail_deinit_intr
;
294 host1x_intr_deinit(host
);
296 host1x_syncpt_deinit(host
);
298 reset_control_assert(host
->rst
);
299 fail_unprepare_disable
:
300 clk_disable_unprepare(host
->clk
);
302 host1x_channel_list_free(&host
->channel_list
);
304 if (host
->group
&& host
->domain
) {
305 put_iova_domain(&host
->iova
);
306 iommu_detach_group(host
->domain
, host
->group
);
310 iommu_domain_free(host
->domain
);
312 iommu_group_put(host
->group
);
317 static int host1x_remove(struct platform_device
*pdev
)
319 struct host1x
*host
= platform_get_drvdata(pdev
);
321 host1x_unregister(host
);
322 host1x_intr_deinit(host
);
323 host1x_syncpt_deinit(host
);
324 reset_control_assert(host
->rst
);
325 clk_disable_unprepare(host
->clk
);
328 put_iova_domain(&host
->iova
);
329 iommu_detach_group(host
->domain
, host
->group
);
330 iommu_domain_free(host
->domain
);
331 iommu_group_put(host
->group
);
337 static struct platform_driver tegra_host1x_driver
= {
339 .name
= "tegra-host1x",
340 .of_match_table
= host1x_of_match
,
342 .probe
= host1x_probe
,
343 .remove
= host1x_remove
,
346 static struct platform_driver
* const drivers
[] = {
347 &tegra_host1x_driver
,
351 static int __init
tegra_host1x_init(void)
355 err
= bus_register(&host1x_bus_type
);
359 err
= platform_register_drivers(drivers
, ARRAY_SIZE(drivers
));
361 bus_unregister(&host1x_bus_type
);
365 module_init(tegra_host1x_init
);
367 static void __exit
tegra_host1x_exit(void)
369 platform_unregister_drivers(drivers
, ARRAY_SIZE(drivers
));
370 bus_unregister(&host1x_bus_type
);
372 module_exit(tegra_host1x_exit
);
374 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
375 MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
376 MODULE_DESCRIPTION("Host1x driver for Tegra products");
377 MODULE_LICENSE("GPL");