// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
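
/*
 * The GMU is a small microcontroller that sits alongside the a6xx GPU and
 * manages its power states, clocks and bandwidth votes on behalf of the
 * host.  This file boots the GMU firmware, drives the HFI queues and the
 * out-of-band (OOB) handshakes, and builds the RPMh votes that the firmware
 * uses for DCVS.
 */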
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;

	/* FIXME: add a banner here */
	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	queue_work(priv->wq, &gpu->recover_work);
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
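
	/*
	 * Request the new performance level.  As written below, bits 31:28
	 * carry a constant field of 3 and the low bits carry the index into
	 * the GPU frequency table that was shared with the GMU; the precise
	 * meaning of the upper field is not documented here.
	 */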
	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
		((3 & 0xf) << 28) | index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	gmu->freq = gmu->gpu_freqs[index];

	/*
	 * Eventually we will want to scale the path vote with the frequency but
	 * for now leave it at max so that the performance is nominal.
	 */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index = 0;

	if (freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (freq == gmu->gpu_freqs[perf_index])
			break;

	gmu->current_perf_index = perf_index;

	__a6xx_gmu_set_freq(gmu, perf_index);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
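
	/*
	 * Once released from reset the CM3 core starts executing the GMU
	 * firmware, which reports success by writing the magic value
	 * 0xbabeface to CM3_FW_INIT_RESULT; poll for that below (every 100us,
	 * for up to 10ms).
	 */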
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		val == 0xbabeface, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
/* Trigger a OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;
	const char *name;

	switch (state) {
	case GMU_OOB_GPU_SET:
		request = GMU_OOB_GPU_SET_REQUEST;
		ack = GMU_OOB_GPU_SET_ACK;
		name = "GPU_SET";
		break;
	case GMU_OOB_BOOT_SLUMBER:
		request = GMU_OOB_BOOT_SLUMBER_REQUEST;
		ack = GMU_OOB_BOOT_SLUMBER_ACK;
		name = "BOOT_SLUMBER";
		break;
	case GMU_OOB_DCVS_SET:
		request = GMU_OOB_DCVS_REQUEST;
		ack = GMU_OOB_DCVS_ACK;
		name = "GPU_DCVS";
		break;
	default:
		return -EINVAL;
	}
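
	/*
	 * The OOB handshake is a simple request/ack protocol: set the request
	 * bit in HOST2GMU_INTR_SET, wait for the GMU to raise the matching
	 * ack bit in GMU2HOST_INTR_INFO, then clear the ack.
	 */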
	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
				name,
				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	switch (state) {
	case GMU_OOB_GPU_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_GPU_SET_CLEAR);
		break;
	case GMU_OOB_BOOT_SLUMBER:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
		break;
	case GMU_OOB_DCVS_SET:
		gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
			1 << GMU_OOB_DCVS_CLEAR);
		break;
	}
}

/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}

/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
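
	/*
	 * The packed arc vote carries the GX vote index in its low byte and
	 * the MX vote index in the byte above it; split it across the two
	 * index registers.
	 */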
	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	return msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");

	if (!pdcptr || !seqptr)
		goto err;

	/* Disable SDE clock gating */
	gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
	gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	if (adreno_is_a618(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	if (adreno_is_a618(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	if (adreno_is_a618(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}
/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680
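
/*
 * For reference: 0x1680 is 5760 cycles, which matches 300us only if the XO
 * runs at 19.2 MHz; at that rate the short hysteresis of 0xa (10 cycles)
 * works out to roughly 0.5us, as the comment above says.
 */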
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		/* Fall through */
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int i, ret;
	u32 chipid;
	u32 *image;

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Sanity check the size of the firmware that was loaded */
		if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;

		for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
			gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
				image[i]);
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));
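
	/*
	 * Pack the chip id (core.major.minor.patchid) into the layout the GMU
	 * firmware expects and hand it over through the HFI SFR address
	 * register below.
	 */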
	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_gmu_gfx_rail_on(gmu);
	if (ret)
		return ret;

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}
#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
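
/*
 * These masks list the interrupt sources the driver cares about; the mask
 * registers appear to use 1 = masked, so a6xx_gmu_resume() writes the
 * complement (~MASK) to unmask only these bits while a6xx_gmu_irq_disable()
 * writes ~0 to mask everything.
 */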
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	u32 val;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 1000);
}

/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the current freq */
	__a6xx_gmu_set_freq(gmu, gmu->current_perf_index);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get(gmu->gxpd);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		/* Clear the VBIF pipe before shutting down */
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
			== 0xf);
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0%x/0%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	icc_set_bw(gpu->icc_path, 0, 0);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
{
	int count, i;
	u64 iova;

	if (IS_ERR_OR_NULL(bo))
		return;

	count = bo->size >> PAGE_SHIFT;
	iova = bo->iova;

	/* Undo the IOMMU mappings and release the backing pages */
	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
		__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);
}
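
/*
 * Allocate a buffer that is shared with the GMU: the pages are allocated one
 * at a time, mapped into the GMU IOMMU domain at the next free "uncached"
 * IOVA, and then vmap()ed write-combined so the CPU side can fill them in.
 */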
static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
		size_t size)
{
	struct a6xx_gmu_bo *bo;
	int ret, count, i;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->size = PAGE_ALIGN(size);

	count = bo->size >> PAGE_SHIFT;

	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
	if (!bo->pages) {
		kfree(bo);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < count; i++) {
		bo->pages[i] = alloc_page(GFP_KERNEL);
		if (!bo->pages[i])
			goto err;
	}

	bo->iova = gmu->uncached_iova_base;

	for (i = 0; i < count; i++) {
		ret = iommu_map(gmu->domain,
			bo->iova + (PAGE_SIZE * i),
			page_to_phys(bo->pages[i]), PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);

		if (ret) {
			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");

			for (i = i - 1 ; i >= 0; i--)
				iommu_unmap(gmu->domain,
					bo->iova + (PAGE_SIZE * i),
					PAGE_SIZE);

			goto err;
		}
	}

	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
		pgprot_writecombine(PAGE_KERNEL));
	if (!bo->virt)
		goto err;

	/* Align future IOVA addresses on 1MB boundaries */
	gmu->uncached_iova_base += ALIGN(size, SZ_1M);

	return bo;

err:
	for (i = 0; i < count; i++) {
		if (bo->pages[i])
			__free_pages(bo->pages[i], 0);
	}

	kfree(bo->pages);
	kfree(bo);

	return ERR_PTR(-ENOMEM);
}
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
					   unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);

	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;
	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
				level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
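		/*
		 * The vote word below packs the primary arc value into bits
		 * 31:16, the secondary (mx) index into bits 15:8 and the
		 * primary index into the low byte.
		 */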
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}
static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		/* Step past the frequency we just found to get the next one */
		freqs[index++] = freq++;
	}

	return index;
}
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = dev_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	return 0;
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			      name, ret);
		return ret;
	}

	/* Leave the interrupt disabled until resume enables it */
	disable_irq(irq);

	return irq;
}
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (!gmu->initialized)
		return;

	pm_runtime_force_suspend(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	gmu->mmio = NULL;

	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err_memory;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err_memory;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;

err_mmio:
	iounmap(gmu->mmio);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);
err_memory:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);
		iommu_domain_free(gmu->domain);
	}
	ret = -ENODEV;

err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}