// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
 */

#include "adreno_gpu.h"
11 bool hang_debug
= false;
12 MODULE_PARM_DESC(hang_debug
, "Dump registers when hang is detected (can be slow!)");
13 module_param_named(hang_debug
, hang_debug
, bool, 0600);
15 bool snapshot_debugbus
= false;
16 MODULE_PARM_DESC(snapshot_debugbus
, "Include debugbus sections in GPU devcoredump (if not fused off)");
17 module_param_named(snapshot_debugbus
, snapshot_debugbus
, bool, 0600);
19 bool allow_vram_carveout
= false;
20 MODULE_PARM_DESC(allow_vram_carveout
, "Allow using VRAM Carveout, in place of IOMMU");
21 module_param_named(allow_vram_carveout
, allow_vram_carveout
, bool, 0600);
23 int enable_preemption
= -1;
24 MODULE_PARM_DESC(enable_preemption
, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))");
25 module_param(enable_preemption
, int, 0600);
/* Per-generation GPU catalogs, defined in the aNxx_catalog.c files. */
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
extern const struct adreno_gpulist a5xx_gpulist;
extern const struct adreno_gpulist a6xx_gpulist;
extern const struct adreno_gpulist a7xx_gpulist;
34 static const struct adreno_gpulist
*gpulists
[] = {
43 static const struct adreno_info
*adreno_info(uint32_t chip_id
)
46 for (int i
= 0; i
< ARRAY_SIZE(gpulists
); i
++) {
47 for (int j
= 0; j
< gpulists
[i
]->gpus_count
; j
++) {
48 const struct adreno_info
*info
= &gpulists
[i
]->gpus
[j
];
50 if (info
->machine
&& !of_machine_is_compatible(info
->machine
))
53 for (int k
= 0; info
->chip_ids
[k
]; k
++)
54 if (info
->chip_ids
[k
] == chip_id
)
62 struct msm_gpu
*adreno_load_gpu(struct drm_device
*dev
)
64 struct msm_drm_private
*priv
= dev
->dev_private
;
65 struct platform_device
*pdev
= priv
->gpu_pdev
;
66 struct msm_gpu
*gpu
= NULL
;
67 struct adreno_gpu
*adreno_gpu
;
71 gpu
= dev_to_gpu(&pdev
->dev
);
74 dev_err_once(dev
->dev
, "no GPU device was found\n");
78 adreno_gpu
= to_adreno_gpu(gpu
);
81 * The number one reason for HW init to fail is if the firmware isn't
82 * loaded yet. Try that first and don't bother continuing on
86 ret
= adreno_load_fw(adreno_gpu
);
90 if (gpu
->funcs
->ucode_load
) {
91 ret
= gpu
->funcs
->ucode_load(gpu
);
97 * Now that we have firmware loaded, and are ready to begin
98 * booting the gpu, go ahead and enable runpm:
100 pm_runtime_enable(&pdev
->dev
);
102 ret
= pm_runtime_get_sync(&pdev
->dev
);
104 pm_runtime_put_noidle(&pdev
->dev
);
105 DRM_DEV_ERROR(dev
->dev
, "Couldn't power up the GPU: %d\n", ret
);
106 goto err_disable_rpm
;
109 mutex_lock(&gpu
->lock
);
110 ret
= msm_gpu_hw_init(gpu
);
111 mutex_unlock(&gpu
->lock
);
113 DRM_DEV_ERROR(dev
->dev
, "gpu hw init failed: %d\n", ret
);
117 pm_runtime_put_autosuspend(&pdev
->dev
);
119 #ifdef CONFIG_DEBUG_FS
120 if (gpu
->funcs
->debugfs_init
) {
121 gpu
->funcs
->debugfs_init(gpu
, dev
->primary
);
122 gpu
->funcs
->debugfs_init(gpu
, dev
->render
);
129 pm_runtime_put_sync_suspend(&pdev
->dev
);
131 pm_runtime_disable(&pdev
->dev
);
136 static int find_chipid(struct device
*dev
, uint32_t *chipid
)
138 struct device_node
*node
= dev
->of_node
;
142 /* first search the compat strings for qcom,adreno-XYZ.W: */
143 ret
= of_property_read_string_index(node
, "compatible", 0, &compat
);
145 unsigned int r
, patch
;
147 if (sscanf(compat
, "qcom,adreno-%u.%u", &r
, &patch
) == 2 ||
148 sscanf(compat
, "amd,imageon-%u.%u", &r
, &patch
) == 2) {
149 uint32_t core
, major
, minor
;
157 *chipid
= (core
<< 24) |
165 if (sscanf(compat
, "qcom,adreno-%08x", chipid
) == 1)
169 /* and if that fails, fall back to legacy "qcom,chipid" property: */
170 ret
= of_property_read_u32(node
, "qcom,chipid", chipid
);
172 DRM_DEV_ERROR(dev
, "could not parse qcom,chipid: %d\n", ret
);
176 dev_warn(dev
, "Using legacy qcom,chipid binding!\n");
181 static int adreno_bind(struct device
*dev
, struct device
*master
, void *data
)
183 static struct adreno_platform_config config
= {};
184 const struct adreno_info
*info
;
185 struct msm_drm_private
*priv
= dev_get_drvdata(master
);
186 struct drm_device
*drm
= priv
->dev
;
190 ret
= find_chipid(dev
, &config
.chip_id
);
194 dev
->platform_data
= &config
;
195 priv
->gpu_pdev
= to_platform_device(dev
);
197 info
= adreno_info(config
.chip_id
);
199 dev_warn(drm
->dev
, "Unknown GPU revision: %"ADRENO_CHIPID_FMT
"\n",
200 ADRENO_CHIPID_ARGS(config
.chip_id
));
206 DBG("Found GPU: %"ADRENO_CHIPID_FMT
, ADRENO_CHIPID_ARGS(config
.chip_id
));
208 priv
->is_a2xx
= info
->family
< ADRENO_3XX
;
209 priv
->has_cached_coherent
=
210 !!(info
->quirks
& ADRENO_QUIRK_HAS_CACHED_COHERENT
);
212 gpu
= info
->init(drm
);
214 dev_warn(drm
->dev
, "failed to load adreno gpu\n");
218 ret
= dev_pm_opp_of_find_icc_paths(dev
, NULL
);
225 static int adreno_system_suspend(struct device
*dev
);
226 static void adreno_unbind(struct device
*dev
, struct device
*master
,
229 struct msm_drm_private
*priv
= dev_get_drvdata(master
);
230 struct msm_gpu
*gpu
= dev_to_gpu(dev
);
232 if (pm_runtime_enabled(dev
))
233 WARN_ON_ONCE(adreno_system_suspend(dev
));
234 gpu
->funcs
->destroy(gpu
);
236 priv
->gpu_pdev
= NULL
;
239 static const struct component_ops a3xx_ops
= {
241 .unbind
= adreno_unbind
,
244 static void adreno_device_register_headless(void)
246 /* on imx5, we don't have a top-level mdp/dpu node
247 * this creates a dummy node for the driver for that case
249 struct platform_device_info dummy_info
= {
259 platform_device_register_full(&dummy_info
);
262 static int adreno_probe(struct platform_device
*pdev
)
267 ret
= component_add(&pdev
->dev
, &a3xx_ops
);
271 if (of_device_is_compatible(pdev
->dev
.of_node
, "amd,imageon"))
272 adreno_device_register_headless();
277 static void adreno_remove(struct platform_device
*pdev
)
279 component_del(&pdev
->dev
, &a3xx_ops
);
282 static void adreno_shutdown(struct platform_device
*pdev
)
284 WARN_ON_ONCE(adreno_system_suspend(&pdev
->dev
));
287 static const struct of_device_id dt_match
[] = {
288 { .compatible
= "qcom,adreno" },
289 { .compatible
= "qcom,adreno-3xx" },
290 /* for compatibility with imx5 gpu: */
291 { .compatible
= "amd,imageon" },
292 /* for backwards compat w/ downstream kgsl DT files: */
293 { .compatible
= "qcom,kgsl-3d0" },
297 static int adreno_runtime_resume(struct device
*dev
)
299 struct msm_gpu
*gpu
= dev_to_gpu(dev
);
301 return gpu
->funcs
->pm_resume(gpu
);
304 static int adreno_runtime_suspend(struct device
*dev
)
306 struct msm_gpu
*gpu
= dev_to_gpu(dev
);
309 * We should be holding a runpm ref, which will prevent
310 * runtime suspend. In the system suspend path, we've
311 * already waited for active jobs to complete.
313 WARN_ON_ONCE(gpu
->active_submits
);
315 return gpu
->funcs
->pm_suspend(gpu
);
318 static void suspend_scheduler(struct msm_gpu
*gpu
)
323 * Shut down the scheduler before we force suspend, so that
324 * suspend isn't racing with scheduler kthread feeding us
327 * Note, we just want to park the thread, and let any jobs
328 * that are already on the hw queue complete normally, as
329 * opposed to the drm_sched_stop() path used for handling
330 * faulting/timed-out jobs. We can't really cancel any jobs
331 * already on the hw queue without racing with the GPU.
333 for (i
= 0; i
< gpu
->nr_rings
; i
++) {
334 struct drm_gpu_scheduler
*sched
= &gpu
->rb
[i
]->sched
;
336 drm_sched_wqueue_stop(sched
);
340 static void resume_scheduler(struct msm_gpu
*gpu
)
344 for (i
= 0; i
< gpu
->nr_rings
; i
++) {
345 struct drm_gpu_scheduler
*sched
= &gpu
->rb
[i
]->sched
;
347 drm_sched_wqueue_start(sched
);
351 static int adreno_system_suspend(struct device
*dev
)
353 struct msm_gpu
*gpu
= dev_to_gpu(dev
);
359 suspend_scheduler(gpu
);
361 remaining
= wait_event_timeout(gpu
->retire_event
,
362 gpu
->active_submits
== 0,
363 msecs_to_jiffies(1000));
364 if (remaining
== 0) {
365 dev_err(dev
, "Timeout waiting for GPU to suspend\n");
370 ret
= pm_runtime_force_suspend(dev
);
373 resume_scheduler(gpu
);
/* System-sleep resume: restart the schedulers, then force runtime resume. */
static int adreno_system_resume(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	if (!gpu)
		return 0;

	resume_scheduler(gpu);
	return pm_runtime_force_resume(dev);
}
389 static const struct dev_pm_ops adreno_pm_ops
= {
390 SYSTEM_SLEEP_PM_OPS(adreno_system_suspend
, adreno_system_resume
)
391 RUNTIME_PM_OPS(adreno_runtime_suspend
, adreno_runtime_resume
, NULL
)
394 static struct platform_driver adreno_driver
= {
395 .probe
= adreno_probe
,
396 .remove
= adreno_remove
,
397 .shutdown
= adreno_shutdown
,
400 .of_match_table
= dt_match
,
401 .pm
= &adreno_pm_ops
,
405 void __init
adreno_register(void)
407 platform_driver_register(&adreno_driver
);
410 void __exit
adreno_unregister(void)
412 platform_driver_unregister(&adreno_driver
);