/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
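/*
 * rbmemptr() maps a member of the shared adreno_rbmemptrs block to the GPU
 * address the CP should write it at: the block's iova plus the member's
 * offset within the struct.
 */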
#define rbmemptr(adreno_gpu, member) \
	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
		return ret;
	}

	/* Setup REG_CP_RB_CNTL: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));

	/* Setup ringbuffer address: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
			rbmemptr(adreno_gpu, rptr));

	/* Setup scratch/timestamp: */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
			rbmemptr(adreno_gpu, fence));

	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);

	return 0;
}
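/* CPU-side write pointer, as a dword offset from the start of the ring: */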
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return ring->cur - ring->start;
}
uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	return adreno_gpu->memptrs->fence;
}
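/*
 * Recover from a hang: power-cycle the GPU, rewind the ringbuffer, discard
 * any pending work by bumping the completed fence, and re-run hw_init.
 */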
void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	int ret;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno, just discard anything pending: */
	adreno_gpu->memptrs->fence = gpu->submitted_fence;
	adreno_gpu->memptrs->rptr  = 0;
	adreno_gpu->memptrs->wptr  = 0;

	gpu->funcs->pm_resume(gpu);
	ret = gpu->funcs->hw_init(gpu);
	if (ret)
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
}
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	/* on a320, at least, we seem to need to pad things out to an
	 * even number of qwords to avoid issue w/ CP hanging on wrap-
	 * around:
	 */
	if (ibs % 2)
		OUT_PKT2(ring);

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence);

	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence);

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

	/* Workaround for missing irq issue on 8x16/a306.  Unsure if the
	 * root cause is a platform issue or some a306 quirk, but this
	 * keeps things humming along:
	 */
	if (adreno_is_a306(adreno_gpu)) {
		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
		OUT_PKT3(ring, CP_INTERRUPT, 1);
		OUT_RING(ring, 0x80000000);
	}

	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}

	gpu->funcs->flush(gpu);

	return 0;
}
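/*
 * Kick the CP: make sure everything written to the ring is visible in
 * memory, then advance the hardware write pointer past it.
 */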
void adreno_flush(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
void adreno_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(gpu->rb);

	/* wait for CP to drain ringbuffer: */
	if (spin_until(adreno_gpu->memptrs->rptr == wptr))
		DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
	seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));

	gpu->funcs->pm_resume(gpu);

	/* dump these out in a form that can be parsed by demsm: */
	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end   = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
		}
	}

	gpu->funcs->pm_suspend(gpu);
}
#endif
/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	printk("rptr: %d\n", adreno_gpu->memptrs->rptr);
	printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
	printk("rb wptr: %d\n", get_wptr(gpu->rb));

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
	}
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end   = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr<<2, val);
		}
	}
}
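/*
 * Free space in the ring, in dwords.  One dword is intentionally kept
 * unused so a completely full ring (wptr just behind rptr) is never
 * confused with an empty one (wptr == rptr).
 */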
static uint32_t ring_freewords(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t size = gpu->rb->size / 4;
	uint32_t wptr = get_wptr(gpu->rb);
	uint32_t rptr = adreno_gpu->memptrs->rptr;
	return (rptr + (size - 1) - wptr) % size;
}
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
	if (spin_until(ring_freewords(gpu) >= ndwords))
		DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}
static const char *iommu_ports[] = {
		"gfx3d_user", "gfx3d_priv",
		"gfx3d1_user", "gfx3d1_priv",
};
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct msm_mmu *mmu;
	int ret;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	gpu->fast_rate = config->fast_rate;
	gpu->slow_rate = config->slow_rate;
	gpu->bus_freq = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
	gpu->bus_scale_table = config->bus_scale_table;
#endif

	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);

	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
			RB_SIZE);
	if (ret)
		return ret;

	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				adreno_gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				adreno_gpu->info->pfpfw, ret);
		return ret;
	}

	mmu = gpu->mmu;
	if (mmu) {
		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			return ret;
	}

	mutex_lock(&drm->struct_mutex);
	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
			MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);
	if (IS_ERR(adreno_gpu->memptrs_bo)) {
		ret = PTR_ERR(adreno_gpu->memptrs_bo);
		adreno_gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
	if (!adreno_gpu->memptrs) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
			&adreno_gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}
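/* Undo adreno_gpu_init(): drop the memptrs bo, the firmware, and the base
 * gpu state.
 */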
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
	if (gpu->memptrs_bo) {
		if (gpu->memptrs_iova)
			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
	}
	release_firmware(gpu->pm4);
	release_firmware(gpu->pfp);
	msm_gpu_cleanup(&gpu->base);
}