// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"
struct vic_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct vic {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;
	struct reset_control *rst;

	/* Platform configuration */
	const struct vic_config *config;
};
static inline struct vic *to_vic(struct tegra_drm_client *client)
{
	return container_of(client, struct vic, client);
}
static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
{
	writel(value, vic->regs + offset);
}
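
/*
 * Runtime PM callbacks: power the unit up by enabling its clock and
 * deasserting the module reset, and power it down again by asserting the
 * reset and gating the clock.
 */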
static int vic_runtime_resume(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(vic->clk);
	if (err < 0)
		return err;

	err = reset_control_deassert(vic->rst);
	if (err < 0)
		goto disable;

	return 0;

disable:
	clk_disable_unprepare(vic->clk);
	return err;
}
static int vic_runtime_suspend(struct device *dev)
{
	struct vic *vic = dev_get_drvdata(dev);
	int err;

	err = reset_control_assert(vic->rst);
	if (err < 0)
		return err;

	usleep_range(2000, 4000);

	clk_disable_unprepare(vic->clk);

	return 0;
}
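
/*
 * Bring up the Falcon microcontroller: program the stream ID registers when
 * the SoC supports SIDs, set up clock gating, boot the Falcon and point it
 * at the FCE microcode embedded in the firmware image.
 */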
static int vic_boot(struct vic *vic)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
#endif
	u32 fce_ucode_size, fce_bin_data_offset;
	void *hdr;
	int err = 0;

#ifdef CONFIG_IOMMU_API
	if (vic->config->supports_sid && spec) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
			TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		vic_writel(vic, value, VIC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			value = spec->ids[0] & 0xffff;

			vic_writel(vic, value, VIC_THI_STREAMID0);
			vic_writel(vic, value, VIC_THI_STREAMID1);
		}
	}
#endif

	/* setup clockgating registers */
	vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
			CG_WAKEUP_DLY_CNT(4),
		   NV_PVIC_MISC_PRI_VIC_CG);

	err = falcon_boot(&vic->falcon);
	if (err < 0)
		return err;

	hdr = vic->falcon.firmware.virt;
	fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
	hdr = vic->falcon.firmware.virt +
	      *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
	fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);

	falcon_execute_method(&vic->falcon, VIC_SET_APPLICATION_ID, 1);
	falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
			      fce_ucode_size);
	falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
			      (vic->falcon.firmware.iova + fce_bin_data_offset)
				>> 8);

	err = falcon_wait_idle(&vic->falcon);
	if (err < 0) {
		dev_err(vic->dev,
			"failed to set application ID and FCE base\n");
		return err;
	}

	return 0;
}
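
/*
 * host1x client initialization: attach to the IOMMU domain, request a
 * channel and a syncpoint, register with the Tegra DRM core and inherit
 * the DMA parameters of the parent host1x device.
 */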
static int vic_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(vic->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	vic->channel = host1x_channel_request(client);
	if (!vic->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto free_syncpt;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

free_syncpt:
	host1x_syncpt_free(client->syncpts[0]);
free_channel:
	host1x_channel_put(vic->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}
static int vic_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct vic *vic = to_vic(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	host1x_syncpt_free(client->syncpts[0]);
	host1x_channel_put(vic->channel);
	host1x_client_iommu_detach(client);

	if (client->group) {
		dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
				 vic->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, vic->falcon.firmware.size,
			       vic->falcon.firmware.virt,
			       vic->falcon.firmware.iova);
	} else {
		dma_free_coherent(vic->dev, vic->falcon.firmware.size,
				  vic->falcon.firmware.virt,
				  vic->falcon.firmware.iova);
	}

	return 0;
}
static const struct host1x_client_ops vic_client_ops = {
	.init = vic_init,
	.exit = vic_exit,
};
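
/*
 * Read and load the firmware image. Without an IOMMU group the image is
 * placed in a coherent DMA buffer; with a shared IOMMU domain it is
 * allocated through the Tegra DRM allocator and additionally mapped with
 * dma_map_single() so that the DMA API can maintain the CPU caches.
 */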
static int vic_load_firmware(struct vic *vic)
{
	struct host1x_client *client = &vic->client.base;
	struct tegra_drm *tegra = vic->client.drm;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	if (vic->falcon.firmware.virt)
		return 0;

	err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
	if (err < 0)
		return err;

	size = vic->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);

		err = dma_mapping_error(vic->dev, iova);
		if (err < 0)
			return err;
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
	}

	vic->falcon.firmware.virt = virt;
	vic->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&vic->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(vic->dev, phys);
		if (err < 0)
			goto cleanup;

		vic->falcon.firmware.phys = phys;
	}

	return 0;

cleanup:
	if (!client->group)
		dma_free_coherent(vic->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	return err;
}
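
/*
 * Opening a channel takes a runtime PM reference and lazily loads the
 * firmware and boots the Falcon before handing out a host1x channel
 * reference; closing the channel drops both references again.
 */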
static int vic_open_channel(struct tegra_drm_client *client,
			    struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(client);
	int err;

	err = pm_runtime_get_sync(vic->dev);
	if (err < 0)
		return err;

	err = vic_load_firmware(vic);
	if (err < 0)
		goto rpm_put;

	err = vic_boot(vic);
	if (err < 0)
		goto rpm_put;

	context->channel = host1x_channel_get(vic->channel);
	if (!context->channel) {
		err = -ENOMEM;
		goto rpm_put;
	}

	return 0;

rpm_put:
	pm_runtime_put(vic->dev);
	return err;
}
static void vic_close_channel(struct tegra_drm_context *context)
{
	struct vic *vic = to_vic(context->client);

	host1x_channel_put(context->channel);

	pm_runtime_put(vic->dev);
}
static const struct tegra_drm_client_ops vic_ops = {
	.open_channel = vic_open_channel,
	.close_channel = vic_close_channel,
	.submit = tegra_drm_submit,
};
#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"

static const struct vic_config vic_t124_config = {
	.firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"

static const struct vic_config vic_t210_config = {
	.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"

static const struct vic_config vic_t186_config = {
	.firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"

static const struct vic_config vic_t194_config = {
	.firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
	.supports_sid = true,
};

static const struct of_device_id tegra_vic_of_match[] = {
	{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
	{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
	{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
	{ .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vic_of_match);
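
/*
 * Probe: inherit the DMA mask from the host1x parent, map the registers,
 * look up the clock and (when not in a PM domain) the "vic" reset,
 * initialize the Falcon helper and register as a host1x client before
 * enabling runtime PM.
 */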
static int vic_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct resource *regs;
	struct vic *vic;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
	if (!vic)
		return -ENOMEM;

	vic->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	vic->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(vic->regs))
		return PTR_ERR(vic->regs);

	vic->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vic->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(vic->clk);
	}

	if (!dev->pm_domain) {
		vic->rst = devm_reset_control_get(dev, "vic");
		if (IS_ERR(vic->rst)) {
			dev_err(&pdev->dev, "failed to get reset\n");
			return PTR_ERR(vic->rst);
		}
	}

	vic->falcon.dev = dev;
	vic->falcon.regs = vic->regs;

	err = falcon_init(&vic->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, vic);

	INIT_LIST_HEAD(&vic->client.base.list);
	vic->client.base.ops = &vic_client_ops;
	vic->client.base.dev = dev;
	vic->client.base.class = HOST1X_CLASS_VIC;
	vic->client.base.syncpts = syncpts;
	vic->client.base.num_syncpts = 1;
	vic->dev = dev;

	INIT_LIST_HEAD(&vic->client.list);
	vic->client.version = vic->config->version;
	vic->client.ops = &vic_ops;

	err = host1x_client_register(&vic->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		err = vic_runtime_resume(&pdev->dev);
		if (err < 0)
			goto unregister_client;
	}

	return 0;

unregister_client:
	host1x_client_unregister(&vic->client.base);
exit_falcon:
	falcon_exit(&vic->falcon);

	return err;
}
static int vic_remove(struct platform_device *pdev)
{
	struct vic *vic = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&vic->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	if (pm_runtime_enabled(&pdev->dev))
		pm_runtime_disable(&pdev->dev);
	else
		vic_runtime_suspend(&pdev->dev);

	falcon_exit(&vic->falcon);

	return 0;
}
static const struct dev_pm_ops vic_pm_ops = {
	SET_RUNTIME_PM_OPS(vic_runtime_suspend, vic_runtime_resume, NULL)
};
struct platform_driver tegra_vic_driver = {
	.driver = {
		.name = "tegra-vic",
		.of_match_table = tegra_vic_of_match,
		.pm = &vic_pm_ops
	},
	.probe = vic_probe,
	.remove = vic_remove,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
#endif