/* Linux 4.19.133 - drivers/gpu/drm/msm/adreno/a5xx_gpu.c */

/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"

extern bool hang_debug;
static void a5xx_dump(struct msm_gpu *gpu);

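/*
 * Peripheral Authentication Service ID for the GPU: this is how the zap
 * shader image is identified to the secure world in the qcom_mdt_load() and
 * qcom_scm calls below.
 */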
#define GPU_PAS_ID 13

static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
{
        struct device *dev = &gpu->pdev->dev;
        const struct firmware *fw;
        struct device_node *np, *mem_np;
        struct resource r;
        phys_addr_t mem_phys;
        ssize_t mem_size;
        void *mem_region = NULL;
        int ret;

        if (!IS_ENABLED(CONFIG_ARCH_QCOM))
                return -EINVAL;

        np = of_get_child_by_name(dev->of_node, "zap-shader");
        if (!np)
                return -ENODEV;

        mem_np = of_parse_phandle(np, "memory-region", 0);
        of_node_put(np);
        if (!mem_np)
                return -EINVAL;

        ret = of_address_to_resource(mem_np, 0, &r);
        of_node_put(mem_np);
        if (ret)
                return ret;

        mem_phys = r.start;
        mem_size = resource_size(&r);

        /* Request the MDT file for the firmware */
        fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
        if (IS_ERR(fw)) {
                DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
                return PTR_ERR(fw);
        }

        /* Figure out how much memory we need */
        mem_size = qcom_mdt_get_size(fw);
        if (mem_size < 0) {
                ret = mem_size;
                goto out;
        }

        /* Allocate memory for the firmware image */
        mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
        if (!mem_region) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Load the rest of the MDT
         *
         * Note that we could be dealing with two different paths, since
         * with upstream linux-firmware it would be in a qcom/ subdir..
         * adreno_request_fw() handles this, but qcom_mdt_load() does
         * not. But since we've already gotten through adreno_request_fw()
         * we know which of the two cases it is:
         */
        if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
                ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
                                mem_region, mem_phys, mem_size, NULL);
        } else {
                char *newname;

                newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
                if (!newname) {
                        ret = -ENOMEM;
                        goto out;
                }

                ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
                                mem_region, mem_phys, mem_size, NULL);
                kfree(newname);
        }
        if (ret)
                goto out;

        /* Send the image to the secure world */
        ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
        if (ret)
                DRM_DEV_ERROR(dev, "Unable to authorize the image\n");

out:
        if (mem_region)
                memunmap(mem_region);

        release_firmware(fw);

        return ret;
}

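/*
 * Kick the ringbuffer: publish the new write pointer. The CP_RB_WPTR
 * register is only poked when this ring is the one the hardware is
 * currently executing and no preemption is in flight; otherwise the
 * preemption code (see a5xx_preempt.c) updates the wptr once the ring
 * switch completes.
 */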
static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        uint32_t wptr;
        unsigned long flags;

        spin_lock_irqsave(&ring->lock, flags);

        /* Copy the shadow to the actual register */
        ring->cur = ring->next;

        /* Make sure to wrap wptr if we need to */
        wptr = get_wptr(ring);

        spin_unlock_irqrestore(&ring->lock, flags);

        /* Make sure everything is posted before making a decision */
        mb();

        /* Update HW if this is the current ring and we are not in preempt */
        if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
                gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}

static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_file_private *ctx)
{
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = submit->ring;
        struct msm_gem_object *obj;
        uint32_t *ptr, dwords;
        unsigned int i, j;

        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
                        /* fall through */
                case MSM_SUBMIT_CMD_BUF:
                        /* copy commands into RB: */
                        obj = submit->bos[submit->cmd[i].idx].obj;
                        dwords = submit->cmd[i].size;

                        ptr = msm_gem_get_vaddr(&obj->base);

                        /* _get_vaddr() shouldn't fail at this point,
                         * since we've already mapped it once in
                         * submit_reloc()
                         */
                        if (WARN_ON(!ptr))
                                return;

                        /* use a separate counter so we don't clobber
                         * the outer loop index:
                         */
                        for (j = 0; j < dwords; j++) {
                                /* normally the OUT_PKTn() would wait
                                 * for space for the packet. But since
                                 * we just OUT_RING() the whole thing,
                                 * need to call adreno_wait_ring()
                                 * ourself:
                                 */
                                adreno_wait_ring(ring, 1);
                                OUT_RING(ring, ptr[j]);
                        }

                        msm_gem_put_vaddr(&obj->base);

                        break;
                }
        }

        a5xx_flush(gpu, ring);
        a5xx_preempt_trigger(gpu);

        /* we might not necessarily have a cmd from userspace to
         * trigger an event to know that submit has completed, so
         * do this manually:
         */
        a5xx_idle(gpu, ring);
        ring->memptrs->fence = submit->seqno;
        msm_gpu_retire(gpu);
}

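/*
 * Normal submission path: set up the preemption save record for this ring,
 * emit the user IBs, then write the fence twice - once to a CP scratch
 * register (dumped by a5xx_recover() when debugging hangs) and once through
 * a CACHE_FLUSH_TS event, which also raises the interrupt that retires the
 * submit - and finally offer the CP a yield point for preemption.
 */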
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_file_private *ctx)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = submit->ring;
        unsigned int i, ibs = 0;

        if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
                priv->lastctx = NULL;
                a5xx_submit_in_rb(gpu, submit, ctx);
                return;
        }

        OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
        OUT_RING(ring, 0x02);

        /* Turn off protected mode to write to special registers */
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 0);

        /* Set the save preemption record for the ring/command */
        OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
        OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
        OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));

        /* Turn back on protected mode */
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 1);

        /* Enable local preemption for finegrain preemption */
        OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
        OUT_RING(ring, 0x02);

        /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
        OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
        OUT_RING(ring, 0x02);

        /* Submit the commands */
        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
                        /* fall through */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
                        OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
                        OUT_RING(ring, submit->cmd[i].size);
                        ibs++;
                        break;
                }
        }

        /*
         * Write the render mode to NULL (0) to indicate to the CP that the IBs
         * are done rendering - otherwise a lucky preemption would start
         * replaying from the last checkpoint
         */
        OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
        OUT_RING(ring, 0);
        OUT_RING(ring, 0);
        OUT_RING(ring, 0);
        OUT_RING(ring, 0);
        OUT_RING(ring, 0);

        /* Turn off IB level preemptions */
        OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
        OUT_RING(ring, 0x01);

        /* Write the fence to the scratch register */
        OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
        OUT_RING(ring, submit->seqno);

        /*
         * Execute a CACHE_FLUSH_TS event. This will ensure that the
         * timestamp is written to the memory and then triggers the interrupt
         */
        OUT_PKT7(ring, CP_EVENT_WRITE, 4);
        OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
        OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, submit->seqno);

        /* Yield the floor on command completion */
        OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
        /*
         * If dword[2:1] are non zero, they specify an address for the CP to
         * write the value of dword[3] to on preemption complete. Write 0 to
         * skip the write
         */
        OUT_RING(ring, 0x00);
        OUT_RING(ring, 0x00);
        /* Data value - not used if the address above is 0 */
        OUT_RING(ring, 0x01);
        /* Set bit 0 to trigger an interrupt on preempt complete */
        OUT_RING(ring, 0x01);

        a5xx_flush(gpu, ring);

        /* Check to see if we need to start preemption */
        a5xx_preempt_trigger(gpu);
}

static const struct {
        u32 offset;
        u32 value;
} a5xx_hwcg[] = {
        {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
        {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
        {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
        {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
        {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
        {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
        {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
        {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
        {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
        {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
        {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
        {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
        {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
        {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
        {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
        {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};

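/*
 * Toggle hardware clock gating: program the per-block CNTL/HYST/DELAY values
 * from the table above (or zero them to disable gating), then flip the
 * global RBBM clock control. The crash state code turns HWCG off while it
 * reads registers and back on afterwards.
 */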
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
                gpu_write(gpu, a5xx_hwcg[i].offset,
                        state ? a5xx_hwcg[i].value : 0);

        gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
        gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}

static int a5xx_me_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct msm_ringbuffer *ring = gpu->rb[0];

        OUT_PKT7(ring, CP_ME_INIT, 8);

        OUT_RING(ring, 0x0000002F);

        /* Enable multiple hardware contexts */
        OUT_RING(ring, 0x00000003);

        /* Enable error detection */
        OUT_RING(ring, 0x20000000);

        /* Don't enable header dump */
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);

        /* Specify workarounds for various microcode issues */
        if (adreno_is_a530(adreno_gpu)) {
                /* Workaround for token end syncs
                 * Force a WFI after every direct-render 3D mode draw and every
                 * 2D mode 3 draw
                 */
                OUT_RING(ring, 0x0000000B);
        } else {
                /* No workarounds enabled */
                OUT_RING(ring, 0x00000000);
        }

        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);

        gpu->funcs->flush(gpu, ring);
        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}

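/*
 * Prime the CP with a preemption save record for ring 0 so the first real
 * preemption has valid state to work with. With a single ring there is no
 * preemption at all, so there is nothing to set up.
 */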
static int a5xx_preempt_start(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        struct msm_ringbuffer *ring = gpu->rb[0];

        if (gpu->nr_rings == 1)
                return 0;

        /* Turn off protected mode to write to special registers */
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 0);

        /* Set the save preemption record for the ring/command */
        OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
        OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
        OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));

        /* Turn back on protected mode */
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 1);

        OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
        OUT_RING(ring, 0x00);

        OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
        OUT_RING(ring, 0x01);

        OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
        OUT_RING(ring, 0x01);

        /* Yield the floor on command completion */
        OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
        OUT_RING(ring, 0x00);
        OUT_RING(ring, 0x00);
        OUT_RING(ring, 0x01);
        OUT_RING(ring, 0x01);

        gpu->funcs->flush(gpu, ring);

        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}

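/*
 * Pin the PM4 and PFP microcode into GEM objects (created once, on first
 * init) and point the CP instruction base registers at them; subsequent
 * resumes reuse the same buffers.
 */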
static int a5xx_ucode_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        int ret;

        if (!a5xx_gpu->pm4_bo) {
                a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
                        adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);

                if (IS_ERR(a5xx_gpu->pm4_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pm4_bo);
                        a5xx_gpu->pm4_bo = NULL;
                        dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
                                ret);
                        return ret;
                }
        }

        if (!a5xx_gpu->pfp_bo) {
                a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
                        adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);

                if (IS_ERR(a5xx_gpu->pfp_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pfp_bo);
                        a5xx_gpu->pfp_bo = NULL;
                        dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
                                ret);
                        return ret;
                }
        }

        gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
                REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);

        gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
                REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);

        return 0;
}

#define SCM_GPU_ZAP_SHADER_RESUME 0

static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
{
        int ret;

        ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
        if (ret)
                DRM_ERROR("%s: zap-shader resume failed: %d\n",
                        gpu->name, ret);

        return ret;
}

static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
        static bool loaded;
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct platform_device *pdev = gpu->pdev;
        int ret;

        /*
         * If the zap shader is already loaded into memory we just need to kick
         * the remote processor to reinitialize it
         */
        if (loaded)
                return a5xx_zap_shader_resume(gpu);

        /* We need SCM to be able to load the firmware */
        if (!qcom_scm_is_available()) {
                DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
                return -EPROBE_DEFER;
        }

        /* Each GPU has a target specific zap shader firmware name to use */
        if (!adreno_gpu->info->zapfw) {
                DRM_DEV_ERROR(&pdev->dev,
                        "Zap shader firmware file not specified for this target\n");
                return -ENODEV;
        }

        ret = zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw);

        loaded = !ret;

        return ret;
}

#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
          A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
          A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
          A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
          A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
          A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
          A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
          A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
          A5XX_RBBM_INT_0_MASK_CP_SW | \
          A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
          A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
          A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)

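/*
 * Bring the hardware up: program bus QoS and cache behavior, enable error
 * reporting and fault detection, set up CP register protection, load the
 * microcode, start the micro engine and the GPMU, and finally leave secure
 * mode - via the zap shader when one is available.
 */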
static int a5xx_hw_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        int ret;

        gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);

        /* Make all blocks contribute to the GPU BUSY perf counter */
        gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);

        /* Enable RBBM error reporting bits */
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);

        if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
                /*
                 * Mask out the activity signals from RB1-3 to avoid false
                 * positives
                 */
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
                        0xF0000000);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
                        0xFFFFFFFF);
                gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
                        0xFFFFFFFF);
        }

        /* Enable fault detection */
        gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
                (1 << 30) | 0xFFFF);

        /* Turn on performance counters */
        gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);

        /* Select CP0 to always count cycles */
        gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);

        /* Select RBBM0 to countable 6 to get the busy status for devfreq */
        gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);

        /* Increase VFD cache access so LRZ and other data gets evicted less */
        gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);

        /* Disable L2 bypass in the UCHE */
        gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
        gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
        gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
        gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);

        /* Set the GMEM VA range (0 to gpu->gmem) */
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
                0x00100000 + adreno_gpu->gmem - 1);
        gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);

        gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
        gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
        gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
        gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);

        gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));

        if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
                gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));

        gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);

        /* Enable USE_RETENTION_FLOPS */
        gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);

        /* Enable ME/PFP split notification */
        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);

        /* Enable HWCG */
        a5xx_set_hwcg(gpu, true);

        gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);

        /* Set the highest bank bit */
        gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, 2 << 7);
        gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, 2 << 1);

        /* Protect registers from the CP */
        gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);

        /* RBBM */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));

        /* Content protect */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
                ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
                        16));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
                ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));

        /* CP */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));

        /* RB */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));

        /* VPC */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
        gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));

        /* UCHE */
        gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));

        if (adreno_is_a530(adreno_gpu))
                gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
                        ADRENO_PROTECT_RW(0x10000, 0x8000));

        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
        /*
         * Disable the trusted memory range - we don't actually support secure
         * memory rendering at this point in time and we don't want to block off
         * part of the virtual memory space.
         */
        gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
                REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
        gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);

        ret = adreno_hw_init(gpu);
        if (ret)
                return ret;

        a5xx_preempt_hw_init(gpu);

        a5xx_gpmu_ucode_init(gpu);

        ret = a5xx_ucode_init(gpu);
        if (ret)
                return ret;

        /* Enable the interrupts we expect to service during bringup and runtime */
        gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);

        /* Clear ME_HALT to start the micro engine */
        gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
        ret = a5xx_me_init(gpu);
        if (ret)
                return ret;

        ret = a5xx_power_init(gpu);
        if (ret)
                return ret;

        /*
         * Send a pipeline event stat to get misbehaving counters to start
         * ticking correctly
         */
        if (adreno_is_a530(adreno_gpu)) {
                OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
                OUT_RING(gpu->rb[0], 0x0F);

                gpu->funcs->flush(gpu, gpu->rb[0]);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        }

        /*
         * Try to load a zap shader into the secure world. If successful
         * we can use the CP to switch out of secure mode. If not then we
         * have no recourse but to try to switch ourselves out manually. If we
         * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
         * be blocked and a permissions violation will soon follow.
         */
        ret = a5xx_zap_shader_init(gpu);
        if (!ret) {
                OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
                OUT_RING(gpu->rb[0], 0x00000000);

                gpu->funcs->flush(gpu, gpu->rb[0]);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        } else {
                /* Print a warning so if we die, we know why */
                dev_warn_once(gpu->dev->dev,
                        "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
                gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
        }

        /* Last step - yield the ringbuffer */
        a5xx_preempt_start(gpu);

        return 0;
}

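/*
 * Hang recovery: dump the CP scratch registers (SCRATCH_REG2 holds the last
 * fence written by a5xx_submit()), optionally a full register dump, then
 * pulse a software reset before handing off to the common adreno recovery.
 */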
static void a5xx_recover(struct msm_gpu *gpu)
{
        int i;

        adreno_dump_info(gpu);

        for (i = 0; i < 8; i++) {
                printk("CP_SCRATCH_REG%d: %u\n", i,
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
        }

        if (hang_debug)
                a5xx_dump(gpu);

        gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
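        /*
         * Read the register back to make sure the posted reset write has
         * landed before the reset is released
         */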
        gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
        gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
        adreno_recover(gpu);
}

static void a5xx_destroy(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        DBG("%s", gpu->name);

        a5xx_preempt_fini(gpu);

        if (a5xx_gpu->pm4_bo) {
                if (a5xx_gpu->pm4_iova)
                        msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
                drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
        }

        if (a5xx_gpu->pfp_bo) {
                if (a5xx_gpu->pfp_iova)
                        msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
                drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
        }

        if (a5xx_gpu->gpmu_bo) {
                if (a5xx_gpu->gpmu_iova)
                        msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
                drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
        }

        adreno_gpu_cleanup(adreno_gpu);
        kfree(a5xx_gpu);
}

static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
{
        if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
                return false;

        /*
         * Nearly every abnormality ends up pausing the GPU and triggering a
         * fault so we can safely just watch for this one interrupt to fire
         */
        return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
                A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}

bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        if (ring != a5xx_gpu->cur_ring) {
                WARN(1, "Tried to idle a non-current ringbuffer\n");
                return false;
        }

        /* wait for CP to drain ringbuffer: */
        if (!adreno_idle(gpu, ring))
                return false;

        if (spin_until(_a5xx_check_idle(gpu))) {
                DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
                        gpu->name, __builtin_return_address(0),
                        gpu_read(gpu, REG_A5XX_RBBM_STATUS),
                        gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
                        gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
                        gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
                return false;
        }

        return true;
}

static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
{
        struct msm_gpu *gpu = arg;

        pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
                        iova, flags,
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
                        gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));

        return -EFAULT;
}

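/*
 * Decode and log CP interrupt sources. For opcode errors the indexed
 * PFP_STAT_DATA FIFO is read to recover the likely offending opcode;
 * protection and AHB faults carry an encoded register address plus a
 * read/write flag in the status value.
 */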
static void a5xx_cp_err_irq(struct msm_gpu *gpu)
{
        u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);

        if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
                u32 val;

                gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);

                /*
                 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
                 * read it twice
                 */
                gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
                val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);

                dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
                        val);
        }

        if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
                dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
                        gpu_read(gpu, REG_A5XX_CP_HW_FAULT));

        if (status & A5XX_CP_INT_CP_DMA_ERROR)
                dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");

        if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
                u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);

                dev_err_ratelimited(gpu->dev->dev,
                        "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
                        val & (1 << 24) ? "WRITE" : "READ",
                        (val & 0xFFFFF) >> 2, val);
        }

        if (status & A5XX_CP_INT_CP_AHB_ERROR) {
                u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
                const char *access[16] = { "reserved", "reserved",
                        "timestamp lo", "timestamp hi", "pfp read", "pfp write",
                        "", "", "me read", "me write", "", "", "crashdump read",
                        "crashdump write" };

                dev_err_ratelimited(gpu->dev->dev,
                        "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
                        status & 0xFFFFF, access[(status >> 24) & 0xF],
                        !!(status & (1 << 31)), status);
        }
}

static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
        if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
                u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);

                dev_err_ratelimited(gpu->dev->dev,
                        "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
                        val & (1 << 28) ? "WRITE" : "READ",
                        (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
                        (val >> 24) & 0xF);

                /* Clear the error */
                gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));

                /* Clear the interrupt */
                gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
                        A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
        }

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
                        gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
                        gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
                        gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");

        if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
                dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
}

static void a5xx_uche_err_irq(struct msm_gpu *gpu)
{
        uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;

        addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);

        dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
                addr);
}

static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
{
        dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}

static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);

        dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
                ring ? ring->id : -1, ring ? ring->seqno : 0,
                gpu_read(gpu, REG_A5XX_RBBM_STATUS),
                gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
                gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
                gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
                gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
                gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
                gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));

        /* Turn off the hangcheck timer to keep it from bothering us */
        del_timer(&gpu->hangcheck_timer);

        queue_work(priv->wq, &gpu->recover_work);
}

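/*
 * Top level IRQ dispatch. Every source except RBBM_AHB_ERROR is acked up
 * front; the AHB error is only acked from a5xx_rbbm_err_irq() after the
 * underlying error source has been cleared, since acking it first would
 * make the interrupt storm.
 */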
#define RBBM_ERROR_MASK \
        (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
        A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
        A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)

static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
        u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);

        /*
         * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
         * before the source is cleared the interrupt will storm.
         */
        gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
                status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);

        /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
        if (status & RBBM_ERROR_MASK)
                a5xx_rbbm_err_irq(gpu, status);

        if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
                a5xx_cp_err_irq(gpu);

        if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
                a5xx_fault_detect_irq(gpu);

        if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
                a5xx_uche_err_irq(gpu);

        if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
                a5xx_gpmu_err_irq(gpu);

        if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
                a5xx_preempt_trigger(gpu);
                msm_gpu_retire(gpu);
        }

        if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
                a5xx_preempt_irq(gpu);

        return IRQ_HANDLED;
}

static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
                REG_A5XX_CP_RB_RPTR_ADDR_HI),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
};

static const u32 a5xx_registers[] = {
        0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
        0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
        0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
        0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
        0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
        0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
        0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
        0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
        0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
        0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
        0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
        0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
        0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
        0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
        0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
        0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
        0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
        0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
        0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
        0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
        0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
        0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
        0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
        0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
        0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
        0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
        0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
        0xAC60, 0xAC60, ~0,
};

static void a5xx_dump(struct msm_gpu *gpu)
{
        dev_info(gpu->dev->dev, "status:   %08x\n",
                gpu_read(gpu, REG_A5XX_RBBM_STATUS));
        adreno_dump(gpu);
}

static int a5xx_pm_resume(struct msm_gpu *gpu)
{
        int ret;

        /* Turn on the core power */
        ret = msm_gpu_pm_resume(gpu);
        if (ret)
                return ret;

        /* Turn on the RBCCU domain first to limit the chances of voltage droop */
        gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);

        /* Wait 3 usecs before polling */
        udelay(3);

        ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
                (1 << 20), (1 << 20));
        if (ret) {
                DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
                        gpu->name,
                        gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
                return ret;
        }

        /* Turn on the SP domain */
        gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
        ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
                (1 << 20), (1 << 20));
        if (ret)
                DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
                        gpu->name);

        return ret;
}

static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
        /* Clear the VBIF pipe before shutting down */
        gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
        spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);

        gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);

        /*
         * Reset the VBIF before power collapse to avoid issue with FIFO
         * entries
         */
        gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
        gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);

        return msm_gpu_pm_suspend(gpu);
}

static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
        *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
                REG_A5XX_RBBM_PERFCTR_CP_0_HI);

        return 0;
}

struct a5xx_crashdumper {
        void *ptr;
        struct drm_gem_object *bo;
        u64 iova;
};

struct a5xx_gpu_state {
        struct msm_gpu_state base;
        u32 *hlsqregs;
};

#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
        readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
                interval, timeout)

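/*
 * The crashdumper is a small CP-side DMA engine that executes a script from
 * a GEM buffer and writes register contents back into it. The 1MB buffer
 * allocated here holds the script at offset 0 and the captured data starting
 * at offset 256k; it is how we read registers behind the HLSQ aperture that
 * the CPU cannot touch directly on secure platforms.
 */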
static int a5xx_crashdumper_init(struct msm_gpu *gpu,
                struct a5xx_crashdumper *dumper)
{
        dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
                SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
                &dumper->bo, &dumper->iova);

        if (IS_ERR(dumper->ptr))
                return PTR_ERR(dumper->ptr);

        return 0;
}

static void a5xx_crashdumper_free(struct msm_gpu *gpu,
                struct a5xx_crashdumper *dumper)
{
        msm_gem_put_iova(dumper->bo, gpu->aspace);
        msm_gem_put_vaddr(dumper->bo);

        drm_gem_object_unreference(dumper->bo);
}

static int a5xx_crashdumper_run(struct msm_gpu *gpu,
                struct a5xx_crashdumper *dumper)
{
        u32 val;

        if (IS_ERR_OR_NULL(dumper->ptr))
                return -EINVAL;

        gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
                REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);

        gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);

        return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
                val & 0x04, 100, 10000);
}

/*
 * This is a list of the registers that need to be read through the HLSQ
 * aperture through the crashdumper. These are not nominally accessible from
 * the CPU on a secure platform.
 */
static const struct {
        u32 type;
        u32 regoffset;
        u32 count;
} a5xx_hlsq_aperture_regs[] = {
        { 0x35, 0xe00, 0x32 },   /* HLSQ non-context */
        { 0x31, 0x2080, 0x1 },   /* HLSQ 2D context 0 */
        { 0x33, 0x2480, 0x1 },   /* HLSQ 2D context 1 */
        { 0x32, 0xe780, 0x62 },  /* HLSQ 3D context 0 */
        { 0x34, 0xef80, 0x62 },  /* HLSQ 3D context 1 */
        { 0x3f, 0x0ec0, 0x40 },  /* SP non-context */
        { 0x3d, 0x2040, 0x1 },   /* SP 2D context 0 */
        { 0x3b, 0x2440, 0x1 },   /* SP 2D context 1 */
        { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
        { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
        { 0x3a, 0x0f00, 0x1c },  /* TP non-context */
        { 0x38, 0x2000, 0xa },   /* TP 2D context 0 */
        { 0x36, 0x2400, 0xa },   /* TP 2D context 1 */
        { 0x39, 0xe700, 0x80 },  /* TP 3D context 0 */
        { 0x37, 0xef00, 0x80 },  /* TP 3D context 1 */
};

static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
                struct a5xx_gpu_state *a5xx_state)
{
        struct a5xx_crashdumper dumper = { 0 };
        u32 offset, count = 0;
        u64 *ptr;
        int i;

        if (a5xx_crashdumper_init(gpu, &dumper))
                return;

        /* The script will be written at offset 0 */
        ptr = dumper.ptr;

        /* Start writing the data at offset 256k */
        offset = dumper.iova + (256 * SZ_1K);

        /* Count how many additional registers to get from the HLSQ aperture */
        for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
                count += a5xx_hlsq_aperture_regs[i].count;

        a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
        if (!a5xx_state->hlsqregs)
                return;

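        /*
         * Script entries are pairs of 64-bit words, as built below: a
         * register write is encoded as (value, (reg << 44) | (1 << 21) |
         * count) and a read back to memory as (destination iova,
         * (reg << 44) | count); two zero words terminate the script.
         */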
        /* Build the crashdump script */
        for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
                u32 type = a5xx_hlsq_aperture_regs[i].type;
                u32 c = a5xx_hlsq_aperture_regs[i].count;

                /* Write the register to select the desired bank */
                *ptr++ = ((u64) type << 8);
                *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
                        (1 << 21) | 1;

                *ptr++ = offset;
                *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
                        | c;

                offset += c * sizeof(u32);
        }

        /* Write two zeros to close off the script */
        *ptr++ = 0;
        *ptr++ = 0;

        if (a5xx_crashdumper_run(gpu, &dumper)) {
                kfree(a5xx_state->hlsqregs);
                a5xx_state->hlsqregs = NULL;
                a5xx_crashdumper_free(gpu, &dumper);
                return;
        }

        /* Copy the data from the crashdumper to the state */
        memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
                count * sizeof(u32));

        a5xx_crashdumper_free(gpu, &dumper);
}

static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
{
        struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
                        GFP_KERNEL);

        if (!a5xx_state)
                return ERR_PTR(-ENOMEM);

        /* Temporarily disable hardware clock gating before reading the hw */
        a5xx_set_hwcg(gpu, false);

        /* First get the generic state from the adreno core */
        adreno_gpu_state_get(gpu, &(a5xx_state->base));

        a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);

        /* Get the HLSQ regs with the help of the crashdumper */
        a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);

        a5xx_set_hwcg(gpu, true);

        return &a5xx_state->base;
}

static void a5xx_gpu_state_destroy(struct kref *kref)
{
        struct msm_gpu_state *state = container_of(kref,
                struct msm_gpu_state, ref);
        struct a5xx_gpu_state *a5xx_state = container_of(state,
                struct a5xx_gpu_state, base);

        kfree(a5xx_state->hlsqregs);

        adreno_gpu_state_destroy(state);
        kfree(a5xx_state);
}

int a5xx_gpu_state_put(struct msm_gpu_state *state)
{
        if (IS_ERR_OR_NULL(state))
                return 1;

        return kref_put(&state->ref, a5xx_gpu_state_destroy);
}

#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
                struct drm_printer *p)
{
        int i, j;
        u32 pos = 0;
        struct a5xx_gpu_state *a5xx_state = container_of(state,
                struct a5xx_gpu_state, base);

        if (IS_ERR_OR_NULL(state))
                return;

        adreno_show(gpu, state, p);

        /* Dump the additional a5xx HLSQ registers */
        if (!a5xx_state->hlsqregs)
                return;

        drm_printf(p, "registers-hlsq:\n");

        for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
                u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
                u32 c = a5xx_hlsq_aperture_regs[i].count;

                for (j = 0; j < c; j++, pos++, o++) {
                        /*
                         * To keep the crashdump simple we pull the entire range
                         * for each register type but not all of the registers
                         * in the range are valid. Fortunately invalid registers
                         * stick out like a sore thumb with a value of
                         * 0xdeadbeef
                         */
                        if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
                                continue;

                        drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
                                o << 2, a5xx_state->hlsqregs[pos]);
                }
        }
}
#endif

static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

        return a5xx_gpu->cur_ring;
}

static int a5xx_gpu_busy(struct msm_gpu *gpu, uint64_t *value)
{
        *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
                REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);

        return 0;
}

static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
                .hw_init = a5xx_hw_init,
                .pm_suspend = a5xx_pm_suspend,
                .pm_resume = a5xx_pm_resume,
                .recover = a5xx_recover,
                .submit = a5xx_submit,
                .flush = a5xx_flush,
                .active_ring = a5xx_active_ring,
                .irq = a5xx_irq,
                .destroy = a5xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
                .show = a5xx_show,
#endif
#if defined(CONFIG_DEBUG_FS)
                .debugfs_init = a5xx_debugfs_init,
#endif
                .gpu_busy = a5xx_gpu_busy,
                .gpu_state_get = a5xx_gpu_state_get,
                .gpu_state_put = a5xx_gpu_state_put,
        },
        .get_timestamp = a5xx_get_timestamp,
};

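/*
 * Read the GPU speed bin from the "speed_bin" nvmem cell (if the device tree
 * provides one) and limit the OPP table to the frequencies this bin
 * supports.
 */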
static void check_speed_bin(struct device *dev)
{
        struct nvmem_cell *cell;
        u32 bin, val;
        void *buf;

        cell = nvmem_cell_get(dev, "speed_bin");

        /* If a nvmem cell isn't defined, nothing to do */
        if (IS_ERR(cell))
                return;

        buf = nvmem_cell_read(cell, NULL);
        nvmem_cell_put(cell);

        if (IS_ERR(buf))
                return;

        bin = *((u32 *) buf);
        kfree(buf);

        val = (1 << bin);

        dev_pm_opp_set_supported_hw(dev, &val, 1);
}

struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct platform_device *pdev = priv->gpu_pdev;
        struct a5xx_gpu *a5xx_gpu = NULL;
        struct adreno_gpu *adreno_gpu;
        struct msm_gpu *gpu;
        int ret;

        if (!pdev) {
                dev_err(dev->dev, "No A5XX device is defined\n");
                return ERR_PTR(-ENXIO);
        }

        a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
        if (!a5xx_gpu)
                return ERR_PTR(-ENOMEM);

        adreno_gpu = &a5xx_gpu->base;
        gpu = &adreno_gpu->base;

        adreno_gpu->registers = a5xx_registers;
        adreno_gpu->reg_offsets = a5xx_register_offsets;

        a5xx_gpu->lm_leakage = 0x4E001A;

        check_speed_bin(&pdev->dev);

        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
        if (ret) {
                a5xx_destroy(&(a5xx_gpu->base.base));
                return ERR_PTR(ret);
        }

        if (gpu->aspace)
                msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);

        /* Set up the preemption specific bits and pieces for each ringbuffer */
        a5xx_preempt_init(gpu);

        return gpu;
}