treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] drivers/gpu/drm/msm/adreno/a6xx_gmu.c
blob 983afeaee737ea27593f988ae5eb00a68e0a1c76
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
4 #include <linux/clk.h>
5 #include <linux/interconnect.h>
6 #include <linux/pm_domain.h>
7 #include <linux/pm_opp.h>
8 #include <soc/qcom/cmd-db.h>
10 #include "a6xx_gpu.h"
11 #include "a6xx_gmu.xml.h"
13 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
15 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
16 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
17 struct msm_gpu *gpu = &adreno_gpu->base;
18 struct drm_device *dev = gpu->dev;
19 struct msm_drm_private *priv = dev->dev_private;
21 /* FIXME: add a banner here */
22 gmu->hung = true;
24 /* Turn off the hangcheck timer while we are resetting */
25 del_timer(&gpu->hangcheck_timer);
27 /* Queue the GPU handler because we need to treat this as a recovery */
28 queue_work(priv->wq, &gpu->recover_work);
31 static irqreturn_t a6xx_gmu_irq(int irq, void *data)
33 struct a6xx_gmu *gmu = data;
34 u32 status;
36 status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
37 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
39 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
40 dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
42 a6xx_gmu_fault(gmu);
45 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
46 dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
48 if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
49 dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
50 gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
52 return IRQ_HANDLED;
55 static irqreturn_t a6xx_hfi_irq(int irq, void *data)
57 struct a6xx_gmu *gmu = data;
58 u32 status;
60 status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
61 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
63 if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
64 dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
66 a6xx_gmu_fault(gmu);
69 return IRQ_HANDLED;
72 bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
74 u32 val;
76 /* This can be called from gpu state code so make sure GMU is valid */
77 if (!gmu->initialized)
78 return false;
80 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
82 return !(val &
83 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
84 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
87 /* Check to see if the GX rail is still powered */
88 bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
90 u32 val;
92 /* This can be called from gpu state code so make sure GMU is valid */
93 if (!gmu->initialized)
94 return false;
96 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
98 return !(val &
99 (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
100 A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
103 static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
105 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
106 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
107 struct msm_gpu *gpu = &adreno_gpu->base;
108 int ret;
110 gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
112 gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
113 ((3 & 0xf) << 28) | index);
115 /*
116 * Send an invalid index as a vote for the bus bandwidth and let the
117 * firmware decide on the right vote
118 */
119 gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
121 /* Set and clear the OOB for DCVS to trigger the GMU */
122 a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
123 a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
125 ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
126 if (ret)
127 dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
129 gmu->freq = gmu->gpu_freqs[index];
131 /*
132 * Eventually we will want to scale the path vote with the frequency but
133 * for now leave it at max so that the performance is nominal.
134 */
135 icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
138 void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
140 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
141 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
142 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
143 u32 perf_index = 0;
145 if (freq == gmu->freq)
146 return;
148 for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
149 if (freq == gmu->gpu_freqs[perf_index])
150 break;
152 gmu->current_perf_index = perf_index;
154 __a6xx_gmu_set_freq(gmu, perf_index);
157 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
159 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
160 struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
161 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
163 return gmu->freq;
166 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
168 u32 val;
169 int local = gmu->idle_level;
171 /* SPTP and IFPC both report as IFPC */
172 if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
173 local = GMU_IDLE_STATE_IFPC;
175 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
177 if (val == local) {
178 if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
179 !a6xx_gmu_gx_is_on(gmu))
180 return true;
183 return false;
186 /* Wait for the GMU to get to its most idle state */
187 int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
189 return spin_until(a6xx_gmu_check_idle_level(gmu));
192 static int a6xx_gmu_start(struct a6xx_gmu *gmu)
194 int ret;
195 u32 val;
197 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
198 gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
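/*
* The GMU firmware is expected to write the magic value 0xbabeface to
* CM3_FW_INIT_RESULT once its CM3 core has finished booting; this is what
* the poll below waits for.
*/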
200 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
201 val == 0xbabeface, 100, 10000);
203 if (ret)
204 DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
206 return ret;
209 static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
211 u32 val;
212 int ret;
214 gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
216 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
217 val & 1, 100, 10000);
218 if (ret)
219 DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
221 return ret;
224 /* Trigger an OOB (out of band) request to the GMU */
225 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
227 int ret;
228 u32 val;
229 int request, ack;
230 const char *name;
232 switch (state) {
233 case GMU_OOB_GPU_SET:
234 request = GMU_OOB_GPU_SET_REQUEST;
235 ack = GMU_OOB_GPU_SET_ACK;
236 name = "GPU_SET";
237 break;
238 case GMU_OOB_BOOT_SLUMBER:
239 request = GMU_OOB_BOOT_SLUMBER_REQUEST;
240 ack = GMU_OOB_BOOT_SLUMBER_ACK;
241 name = "BOOT_SLUMBER";
242 break;
243 case GMU_OOB_DCVS_SET:
244 request = GMU_OOB_DCVS_REQUEST;
245 ack = GMU_OOB_DCVS_ACK;
246 name = "GPU_DCVS";
247 break;
248 default:
249 return -EINVAL;
252 /* Trigger the requested OOB operation */
253 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
255 /* Wait for the acknowledge interrupt */
256 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
257 val & (1 << ack), 100, 10000);
259 if (ret)
260 DRM_DEV_ERROR(gmu->dev,
261 "Timeout waiting for GMU OOB set %s: 0x%x\n",
262 name,
263 gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
265 /* Clear the acknowledge interrupt */
266 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
268 return ret;
271 /* Clear a pending OOB state in the GMU */
272 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
274 switch (state) {
275 case GMU_OOB_GPU_SET:
276 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
277 1 << GMU_OOB_GPU_SET_CLEAR);
278 break;
279 case GMU_OOB_BOOT_SLUMBER:
280 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
281 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
282 break;
283 case GMU_OOB_DCVS_SET:
284 gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
285 1 << GMU_OOB_DCVS_CLEAR);
286 break;
290 /* Enable CPU control of SPTP power collapse */
291 static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
293 int ret;
294 u32 val;
296 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
298 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
299 (val & 0x38) == 0x28, 1, 100);
301 if (ret) {
302 DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
303 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
306 return 0;
309 /* Disable CPU control of SPTP power collapse */
310 static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
312 u32 val;
313 int ret;
315 /* Make sure retention is on */
316 gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
318 gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
320 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
321 (val & 0x04), 100, 10000);
323 if (ret)
324 DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
325 gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
328 /* Let the GMU know we are starting a boot sequence */
329 static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
331 u32 vote;
333 /* Let the GMU know we are getting ready for boot */
334 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
336 /* Choose the "default" power level as the highest available */
337 vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
339 gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
340 gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
342 /* Let the GMU know the boot sequence has started */
343 return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
346 /* Let the GMU know that we are about to go into slumber */
347 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
349 int ret;
351 /* Disable the power counter so the GMU isn't busy */
352 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
354 /* Disable SPTP_PC if the CPU is responsible for it */
355 if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
356 a6xx_sptprac_disable(gmu);
358 /* Tell the GMU to get ready to slumber */
359 gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
361 ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
362 a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
364 if (!ret) {
365 /* Check to see if the GMU really did slumber */
366 if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
367 != 0x0f) {
368 DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
369 ret = -ETIMEDOUT;
373 /* Put fence into allow mode */
374 gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
375 return ret;
378 static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
380 int ret;
381 u32 val;
383 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
384 /* Wait for the register to finish posting */
385 wmb();
387 ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
388 val & (1 << 1), 100, 10000);
389 if (ret) {
390 DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
391 return ret;
394 ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
395 !val, 100, 10000);
397 if (ret) {
398 DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
399 return ret;
402 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
404 /* Set up CX GMU counter 0 to count busy ticks */
405 gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
406 gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
408 /* Enable the power counter */
409 gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
410 return 0;
413 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
415 int ret;
416 u32 val;
418 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
420 ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
421 val, val & (1 << 16), 100, 10000);
422 if (ret)
423 DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
425 gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
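/* PDC register offsets are dword indices, so shift by 2 to get the byte offset for msm_writel() */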
428 static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
430 return msm_writel(value, ptr + (offset << 2));
433 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
434 const char *name);
436 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
438 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
439 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
440 struct platform_device *pdev = to_platform_device(gmu->dev);
441 void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
442 void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
444 if (!pdcptr || !seqptr)
445 goto err;
447 /* Disable SDE clock gating */
448 gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
450 /* Setup RSC PDC handshake for sleep and wakeup */
451 gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
452 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
453 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
454 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
455 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
456 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
457 gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
458 gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
459 gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
460 gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
461 gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
463 /* Load RSC sequencer uCode for sleep and wakeup */
464 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
465 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
466 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
467 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
468 gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
470 /* Load PDC sequencer uCode for power up and power down sequence */
471 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
472 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
473 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
474 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
475 pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
477 /* Set TCS commands used by PDC sequence for low power modes */
478 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
479 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
480 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
481 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
482 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
483 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
484 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
485 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
486 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
488 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
489 if (adreno_is_a618(adreno_gpu))
490 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
491 else
492 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
493 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
495 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
496 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
497 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
498 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
499 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
500 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
502 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
503 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
504 if (adreno_is_a618(adreno_gpu))
505 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
506 else
507 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
510 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
511 if (adreno_is_a618(adreno_gpu))
512 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
513 else
514 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
515 pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
517 /* Setup GPU PDC */
518 pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
519 pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
521 /* ensure no writes happen before the uCode is fully written */
522 wmb();
524 err:
525 if (!IS_ERR_OR_NULL(pdcptr))
526 iounmap(pdcptr);
527 if (!IS_ERR_OR_NULL(seqptr))
528 iounmap(seqptr);
531 /*
532 * The lowest 16 bits of this value are the number of XO clock cycles for main
533 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
534 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
535 */
537 #define GMU_PWR_COL_HYST 0x000a1680
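/*
* Sanity check, assuming the usual 19.2 MHz XO clock: 0x1680 = 5760 cycles,
* i.e. 5760 / 19.2 MHz = 300 us for the main hysteresis, and 0xa = 10 cycles,
* i.e. roughly 0.52 us for the short hysteresis that follows it.
*/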
539 /* Set up the idle state for the GMU */
540 static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
542 /* Disable GMU WB/RB buffer */
543 gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
545 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
547 switch (gmu->idle_level) {
548 case GMU_IDLE_STATE_IFPC:
549 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
550 GMU_PWR_COL_HYST);
551 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
552 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
553 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
554 /* Fall through */
555 case GMU_IDLE_STATE_SPTP:
556 gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
557 GMU_PWR_COL_HYST);
558 gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
559 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
560 A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
563 /* Enable RPMh GPU client */
564 gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
565 A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
566 A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
567 A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
568 A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
569 A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
570 A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
573 static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
575 static bool rpmh_init;
576 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
577 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
578 int i, ret;
579 u32 chipid;
580 u32 *image;
582 if (state == GMU_WARM_BOOT) {
583 ret = a6xx_rpmh_start(gmu);
584 if (ret)
585 return ret;
586 } else {
587 if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
588 "GMU firmware is not loaded\n"))
589 return -ENOENT;
591 /* Sanity check the size of the firmware that was loaded */
592 if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
593 DRM_DEV_ERROR(gmu->dev,
594 "GMU firmware is bigger than the available region\n");
595 return -EINVAL;
598 /* Turn on register retention */
599 gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
601 /* We only need to load the RPMh microcode once */
602 if (!rpmh_init) {
603 a6xx_gmu_rpmh_init(gmu);
604 rpmh_init = true;
605 } else {
606 ret = a6xx_rpmh_start(gmu);
607 if (ret)
608 return ret;
611 image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
613 for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
614 gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
615 image[i]);
618 gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
619 gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
621 /* Write the iova of the HFI table */
622 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
623 gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
625 gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
626 (1 << 31) | (0xa << 18) | (0xa0));
628 chipid = adreno_gpu->rev.core << 24;
629 chipid |= adreno_gpu->rev.major << 16;
630 chipid |= adreno_gpu->rev.minor << 12;
631 chipid |= adreno_gpu->rev.patchid << 8;
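/* Example: rev 6.3.0.0 (an A630) packs to chipid 0x06030000 */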
633 gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
635 /* Set up the lowest idle level on the GMU */
636 a6xx_gmu_power_config(gmu);
638 ret = a6xx_gmu_start(gmu);
639 if (ret)
640 return ret;
642 ret = a6xx_gmu_gfx_rail_on(gmu);
643 if (ret)
644 return ret;
646 /* Enable SPTP_PC if the CPU is responsible for it */
647 if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
648 ret = a6xx_sptprac_enable(gmu);
649 if (ret)
650 return ret;
653 ret = a6xx_gmu_hfi_start(gmu);
654 if (ret)
655 return ret;
657 /* FIXME: Do we need this wmb() here? */
658 wmb();
660 return 0;
663 #define A6XX_HFI_IRQ_MASK \
664 (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
666 #define A6XX_GMU_IRQ_MASK \
667 (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
668 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
669 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
671 static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
673 disable_irq(gmu->gmu_irq);
674 disable_irq(gmu->hfi_irq);
676 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
677 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
680 static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
682 u32 val;
684 /* Make sure there are no outstanding RPMh votes */
685 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
686 (val & 1), 100, 10000);
687 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
688 (val & 1), 100, 10000);
689 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
690 (val & 1), 100, 10000);
691 gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
692 (val & 1), 100, 1000);
695 /* Force the GMU off in case it isn't responsive */
696 static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
698 /* Flush all the queues */
699 a6xx_hfi_stop(gmu);
701 /* Stop the interrupts */
702 a6xx_gmu_irq_disable(gmu);
704 /* Force off SPTP in case the GMU is managing it */
705 a6xx_sptprac_disable(gmu);
707 /* Make sure there are no outstanding RPMh votes */
708 a6xx_gmu_rpmh_off(gmu);
711 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
713 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
714 struct msm_gpu *gpu = &adreno_gpu->base;
715 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
716 int status, ret;
718 if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
719 return 0;
721 gmu->hung = false;
723 /* Turn on the resources */
724 pm_runtime_get_sync(gmu->dev);
726 /* Use a known rate to bring up the GMU */
727 clk_set_rate(gmu->core_clk, 200000000);
728 ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
729 if (ret) {
730 pm_runtime_put(gmu->dev);
731 return ret;
734 /* Set the bus quota to a reasonable value for boot */
735 icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
737 /* Enable the GMU interrupt */
738 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
739 gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
740 enable_irq(gmu->gmu_irq);
742 /* Check to see if we are doing a cold or warm boot */
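/*
* a6xx_gmu_fw_start() writes 1 to REG_A6XX_GMU_GENERAL_7 when it turns on
* register retention, so reading 1 back here suggests the GMU state was
* retained and a warm boot is sufficient.
*/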
743 status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
744 GMU_WARM_BOOT : GMU_COLD_BOOT;
746 ret = a6xx_gmu_fw_start(gmu, status);
747 if (ret)
748 goto out;
750 ret = a6xx_hfi_start(gmu, status);
751 if (ret)
752 goto out;
754 /*
755 * Turn on the GMU firmware fault interrupt after we know the boot
756 * sequence is successful
757 */
758 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
759 gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
760 enable_irq(gmu->hfi_irq);
762 /* Set the GPU to the current freq */
763 __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
766 * "enable" the GX power domain which won't actually do anything but it
767 * will make sure that the refcounting is correct in case we need to
768 * bring down the GX after a GMU failure
770 if (!IS_ERR_OR_NULL(gmu->gxpd))
771 pm_runtime_get(gmu->gxpd);
773 out:
774 /* On failure, shut down the GMU to leave it in a good state */
775 if (ret) {
776 disable_irq(gmu->gmu_irq);
777 a6xx_rpmh_stop(gmu);
778 pm_runtime_put(gmu->dev);
781 return ret;
784 bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
786 u32 reg;
788 if (!gmu->initialized)
789 return true;
791 reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
793 if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
794 return false;
796 return true;
799 /* Gracefully try to shut down the GMU and by extension the GPU */
800 static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
802 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
803 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
804 struct msm_gpu *gpu = &adreno_gpu->base;
805 u32 val;
807 /*
808 * The GMU may still be in slumber unless the GPU started so check and
809 * skip putting it back into slumber if so
810 */
811 val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
813 if (val != 0xf) {
814 int ret = a6xx_gmu_wait_for_idle(gmu);
816 /* If the GMU isn't responding assume it is hung */
817 if (ret) {
818 a6xx_gmu_force_off(gmu);
819 return;
822 /* Clear the VBIF pipe before shutting down */
823 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
824 spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
825 == 0xf);
826 gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
828 /* tell the GMU we want to slumber */
829 a6xx_gmu_notify_slumber(gmu);
831 ret = gmu_poll_timeout(gmu,
832 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
833 !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
834 100, 10000);
836 /*
837 * Let the user know we failed to slumber but don't worry too
838 * much because we are powering down anyway
839 */
841 if (ret)
842 DRM_DEV_ERROR(gmu->dev,
843 "Unable to slumber GMU: status = 0%x/0%x\n",
844 gmu_read(gmu,
845 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
846 gmu_read(gmu,
847 REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
850 /* Turn off HFI */
851 a6xx_hfi_stop(gmu);
853 /* Stop the interrupts and mask the hardware */
854 a6xx_gmu_irq_disable(gmu);
856 /* Tell RPMh to power off the GPU */
857 a6xx_rpmh_stop(gmu);
861 int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
863 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
864 struct msm_gpu *gpu = &a6xx_gpu->base.base;
866 if (!pm_runtime_active(gmu->dev))
867 return 0;
869 /*
870 * Force the GMU off if we detected a hang, otherwise try to shut it
871 * down gracefully
872 */
873 if (gmu->hung)
874 a6xx_gmu_force_off(gmu);
875 else
876 a6xx_gmu_shutdown(gmu);
878 /* Remove the bus vote */
879 icc_set_bw(gpu->icc_path, 0, 0);
881 /*
882 * Make sure the GX domain is off before turning off the GMU (CX)
883 * domain. Usually the GMU does this but only if the shutdown sequence
884 * was successful
885 */
886 if (!IS_ERR_OR_NULL(gmu->gxpd))
887 pm_runtime_put_sync(gmu->gxpd);
889 clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
891 pm_runtime_put_sync(gmu->dev);
893 return 0;
896 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
898 int count, i;
899 u64 iova;
901 if (IS_ERR_OR_NULL(bo))
902 return;
904 count = bo->size >> PAGE_SHIFT;
905 iova = bo->iova;
907 for (i = 0; i < count; i++, iova += PAGE_SIZE) {
908 iommu_unmap(gmu->domain, iova, PAGE_SIZE);
909 __free_pages(bo->pages[i], 0);
912 kfree(bo->pages);
913 kfree(bo);
916 static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
917 size_t size)
919 struct a6xx_gmu_bo *bo;
920 int ret, count, i;
922 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
923 if (!bo)
924 return ERR_PTR(-ENOMEM);
926 bo->size = PAGE_ALIGN(size);
928 count = bo->size >> PAGE_SHIFT;
930 bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
931 if (!bo->pages) {
932 kfree(bo);
933 return ERR_PTR(-ENOMEM);
936 for (i = 0; i < count; i++) {
937 bo->pages[i] = alloc_page(GFP_KERNEL);
938 if (!bo->pages[i])
939 goto err;
942 bo->iova = gmu->uncached_iova_base;
944 for (i = 0; i < count; i++) {
945 ret = iommu_map(gmu->domain,
946 bo->iova + (PAGE_SIZE * i),
947 page_to_phys(bo->pages[i]), PAGE_SIZE,
948 IOMMU_READ | IOMMU_WRITE);
950 if (ret) {
951 DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
953 for (i = i - 1 ; i >= 0; i--)
954 iommu_unmap(gmu->domain,
955 bo->iova + (PAGE_SIZE * i),
956 PAGE_SIZE);
958 goto err;
962 bo->virt = vmap(bo->pages, count, VM_IOREMAP,
963 pgprot_writecombine(PAGE_KERNEL));
964 if (!bo->virt)
965 goto err;
967 /* Align future IOVA addresses on 1MB boundaries */
968 gmu->uncached_iova_base += ALIGN(size, SZ_1M);
970 return bo;
972 err:
973 for (i = 0; i < count; i++) {
974 if (bo->pages[i])
975 __free_pages(bo->pages[i], 0);
978 kfree(bo->pages);
979 kfree(bo);
981 return ERR_PTR(-ENOMEM);
984 static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
986 int ret;
988 /*
989 * The GMU address space is hardcoded to treat the range
990 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
991 * between the GMU and the CPU will live in this space
992 */
993 gmu->uncached_iova_base = 0x60000000;
996 gmu->domain = iommu_domain_alloc(&platform_bus_type);
997 if (!gmu->domain)
998 return -ENODEV;
1000 ret = iommu_attach_device(gmu->domain, gmu->dev);
1002 if (ret) {
1003 iommu_domain_free(gmu->domain);
1004 gmu->domain = NULL;
1007 return ret;
1010 /* Return the 'arc-level' for the given frequency */
1011 static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
1012 unsigned long freq)
1014 struct dev_pm_opp *opp;
1015 unsigned int val;
1017 if (!freq)
1018 return 0;
1020 opp = dev_pm_opp_find_freq_exact(dev, freq, true);
1021 if (IS_ERR(opp))
1022 return 0;
1024 val = dev_pm_opp_get_level(opp);
1026 dev_pm_opp_put(opp);
1028 return val;
1031 static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
1032 unsigned long *freqs, int freqs_count, const char *id)
1034 int i, j;
1035 const u16 *pri, *sec;
1036 size_t pri_count, sec_count;
1038 pri = cmd_db_read_aux_data(id, &pri_count);
1039 if (IS_ERR(pri))
1040 return PTR_ERR(pri);
1041 /*
1042 * The data comes back as an array of unsigned shorts so adjust the
1043 * count accordingly
1044 */
1045 pri_count >>= 1;
1046 if (!pri_count)
1047 return -EINVAL;
1049 sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
1050 if (IS_ERR(sec))
1051 return PTR_ERR(sec);
1053 sec_count >>= 1;
1054 if (!sec_count)
1055 return -EINVAL;
1057 /* Construct a vote for each frequency */
1058 for (i = 0; i < freqs_count; i++) {
1059 u8 pindex = 0, sindex = 0;
1060 unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
1062 /* Get the primary index that matches the arc level */
1063 for (j = 0; j < pri_count; j++) {
1064 if (pri[j] >= level) {
1065 pindex = j;
1066 break;
1070 if (j == pri_count) {
1071 DRM_DEV_ERROR(dev,
1072 "Level %u not found in in the RPMh list\n",
1073 level);
1074 DRM_DEV_ERROR(dev, "Available levels:\n");
1075 for (j = 0; j < pri_count; j++)
1076 DRM_DEV_ERROR(dev, " %u\n", pri[j]);
1078 return -EINVAL;
1081 /*
1082 * Look for a level in the secondary list that matches. If
1083 * nothing fits, use the maximum non-zero vote
1084 */
1086 for (j = 0; j < sec_count; j++) {
1087 if (sec[j] >= level) {
1088 sindex = j;
1089 break;
1090 } else if (sec[j]) {
1091 sindex = j;
1095 /* Construct the vote */
1096 votes[i] = ((pri[pindex] & 0xffff) << 16) |
1097 (sindex << 8) | pindex;
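/*
* Resulting layout: bits 31:16 hold the primary arc level value, bits 15:8
* the secondary (mx.lvl) table index and bits 7:0 the primary table index,
* matching how a6xx_gmu_gfx_rail_on() unpacks the GX vote.
*/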
1100 return 0;
1103 /*
1104 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1105 * to construct the list of votes on the CPU and send it over. Query the RPMh
1106 * voltage levels and build the votes
1107 */
1109 static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
1111 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1112 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1113 struct msm_gpu *gpu = &adreno_gpu->base;
1114 int ret;
1116 /* Build the GX votes */
1117 ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
1118 gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
1120 /* Build the CX votes */
1121 ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
1122 gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
1124 return ret;
1127 static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
1128 u32 size)
1130 int count = dev_pm_opp_get_opp_count(dev);
1131 struct dev_pm_opp *opp;
1132 int i, index = 0;
1133 unsigned long freq = 1;
1135 /*
1136 * The OPP table doesn't contain the "off" frequency level so we need to
1137 * add 1 to the table size to account for it
1138 */
1140 if (WARN(count + 1 > size,
1141 "The GMU frequency table is being truncated\n"))
1142 count = size - 1;
1144 /* Set the "off" frequency */
1145 freqs[index++] = 0;
1147 for (i = 0; i < count; i++) {
1148 opp = dev_pm_opp_find_freq_ceil(dev, &freq);
1149 if (IS_ERR(opp))
1150 break;
1152 dev_pm_opp_put(opp);
1153 freqs[index++] = freq++;
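/*
* dev_pm_opp_find_freq_ceil() rounds freq up in place, so bumping it by one
* after recording it makes the next iteration find the next higher OPP.
*/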
1156 return index;
1159 static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
1161 struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1162 struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1163 struct msm_gpu *gpu = &adreno_gpu->base;
1165 int ret = 0;
1167 /*
1168 * The GMU handles its own frequency switching so build a list of
1169 * available frequencies to send during initialization
1170 */
1171 ret = dev_pm_opp_of_add_table(gmu->dev);
1172 if (ret) {
1173 DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
1174 return ret;
1177 gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
1178 gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
1180 /*
1181 * The GMU also handles GPU frequency switching so build a list
1182 * from the GPU OPP table
1183 */
1184 gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
1185 gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
1187 gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
1189 /* Build the list of RPMh votes that we'll send to the GMU */
1190 return a6xx_gmu_rpmh_votes_init(gmu);
1193 static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
1195 int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
1197 if (ret < 1)
1198 return ret;
1200 gmu->nr_clocks = ret;
1202 gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
1203 gmu->nr_clocks, "gmu");
1205 return 0;
1208 static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
1209 const char *name)
1211 void __iomem *ret;
1212 struct resource *res = platform_get_resource_byname(pdev,
1213 IORESOURCE_MEM, name);
1215 if (!res) {
1216 DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
1217 return ERR_PTR(-EINVAL);
1220 ret = ioremap(res->start, resource_size(res));
1221 if (!ret) {
1222 DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
1223 return ERR_PTR(-EINVAL);
1226 return ret;
1229 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
1230 const char *name, irq_handler_t handler)
1232 int irq, ret;
1234 irq = platform_get_irq_byname(pdev, name);
1236 ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
1237 if (ret) {
1238 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
1239 name, ret);
1240 return ret;
1243 disable_irq(irq);
1245 return irq;
1248 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
1250 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1252 if (!gmu->initialized)
1253 return;
1255 pm_runtime_force_suspend(gmu->dev);
1257 if (!IS_ERR_OR_NULL(gmu->gxpd)) {
1258 pm_runtime_disable(gmu->gxpd);
1259 dev_pm_domain_detach(gmu->gxpd, false);
1262 iounmap(gmu->mmio);
1263 gmu->mmio = NULL;
1265 a6xx_gmu_memory_free(gmu, gmu->hfi);
1267 iommu_detach_device(gmu->domain, gmu->dev);
1269 iommu_domain_free(gmu->domain);
1271 free_irq(gmu->gmu_irq, gmu);
1272 free_irq(gmu->hfi_irq, gmu);
1274 /* Drop reference taken in of_find_device_by_node */
1275 put_device(gmu->dev);
1277 gmu->initialized = false;
1280 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1282 struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1283 struct platform_device *pdev = of_find_device_by_node(node);
1284 int ret;
1286 if (!pdev)
1287 return -ENODEV;
1289 gmu->dev = &pdev->dev;
1291 of_dma_configure(gmu->dev, node, true);
1293 /* For now, don't do anything fancy until we get our feet under us */
1294 gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
1296 pm_runtime_enable(gmu->dev);
1298 /* Get the list of clocks */
1299 ret = a6xx_gmu_clocks_probe(gmu);
1300 if (ret)
1301 goto err_put_device;
1303 /* Set up the IOMMU context bank */
1304 ret = a6xx_gmu_memory_probe(gmu);
1305 if (ret)
1306 goto err_put_device;
1308 /* Allocate memory for the HFI queues */
1309 gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1310 if (IS_ERR(gmu->hfi))
1311 goto err_memory;
1313 /* Allocate memory for the GMU debug region */
1314 gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1315 if (IS_ERR(gmu->debug))
1316 goto err_memory;
1318 /* Map the GMU registers */
1319 gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1320 if (IS_ERR(gmu->mmio))
1321 goto err_memory;
1323 /* Get the HFI and GMU interrupts */
1324 gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
1325 gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
1327 if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
1328 goto err_mmio;
1330 /*
1331 * Get a link to the GX power domain to reset the GPU in case of GMU
1332 * crash
1333 */
1334 gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
1336 /* Get the power levels for the GMU and GPU */
1337 a6xx_gmu_pwrlevels_probe(gmu);
1339 /* Set up the HFI queues */
1340 a6xx_hfi_init(gmu);
1342 gmu->initialized = true;
1344 return 0;
1346 err_mmio:
1347 iounmap(gmu->mmio);
1348 free_irq(gmu->gmu_irq, gmu);
1349 free_irq(gmu->hfi_irq, gmu);
1350 err_memory:
1351 a6xx_gmu_memory_free(gmu, gmu->hfi);
1353 if (gmu->domain) {
1354 iommu_detach_device(gmu->domain, gmu->dev);
1356 iommu_domain_free(gmu->domain);
1358 ret = -ENODEV;
1360 err_put_device:
1361 /* Drop reference taken in of_find_device_by_node */
1362 put_device(gmu->dev);
1364 return ret;