/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16
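/* Note: CP_RB_CNTL takes the ringbuffer and block sizes as log2 of the
 * size in quad-words (8 bytes), hence the ilog2(x / 8) expressions in
 * adreno_hw_init() below.
 */
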
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        switch (param) {
        case MSM_PARAM_GPU_ID:
                *value = adreno_gpu->info->revn;
                return 0;
        case MSM_PARAM_GMEM_SIZE:
                *value = adreno_gpu->gmem;
                return 0;
        case MSM_PARAM_CHIP_ID:
                *value = adreno_gpu->rev.patchid |
                                (adreno_gpu->rev.minor << 8) |
                                (adreno_gpu->rev.major << 16) |
                                (adreno_gpu->rev.core << 24);
                return 0;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
                return -EINVAL;
        }
}

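/* rbmemptr() gives the GPU (iova) address of one field of the shared
 * adreno_rbmemptrs struct, so the CP can be pointed at the right
 * location to write back rptr and fence values.
 */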
#define rbmemptr(adreno_gpu, member) \
        ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))

int adreno_hw_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        int ret;

        ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
        if (ret) {
                dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
                return ret;
        }

        /* Setup REG_CP_RB_CNTL: */
        adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
                        /* size is log2(quad-words): */
                        AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
                        AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));

        /* Setup ringbuffer address: */
        adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
        adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
                        rbmemptr(adreno_gpu, rptr));

        /* Setup scratch/timestamp: */
        adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
                        rbmemptr(adreno_gpu, fence));

        adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);

        return 0;
}

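/* ring->cur and ring->start are uint32_t pointers, so the difference is
 * the write position in dwords, the unit CP_RB_WPTR and memptrs->rptr
 * are expressed in.
 */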
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
        return ring->cur - ring->start;
}

uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        return adreno_gpu->memptrs->fence;
}

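/* Called after a hang: power-cycle the GPU, reset the ringbuffer, and
 * fast-forward the completed fence past anything that was in flight.
 */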
void adreno_recover(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct drm_device *dev = gpu->dev;
        int ret;

        gpu->funcs->pm_suspend(gpu);

        /* reset ringbuffer: */
        gpu->rb->cur = gpu->rb->start;

        /* reset completed fence seqno, just discard anything pending: */
        adreno_gpu->memptrs->fence = gpu->submitted_fence;
        adreno_gpu->memptrs->rptr = 0;
        adreno_gpu->memptrs->wptr = 0;

        gpu->funcs->pm_resume(gpu);

        ret = gpu->funcs->hw_init(gpu);
        if (ret)
                dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
}

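/* Build the CP command stream for a submit: each cmd buffer goes out as
 * an indirect-buffer packet, followed by a fence write and a CP
 * interrupt so completion can be observed.
 */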
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = gpu->rb;
        unsigned i, ibs = 0;

        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        /* ignore IB-targets */
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        /* ignore if there has not been a ctx switch: */
                        if (priv->lastctx == ctx)
                                break;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
                        OUT_RING(ring, submit->cmd[i].iova);
                        OUT_RING(ring, submit->cmd[i].size);
                        ibs++;
                        break;
                }
        }

        /* on a320, at least, we seem to need to pad things out to an
         * even number of qwords to avoid issue w/ CP hanging on wrap-
         * around:
         */
        if (ibs % 2)
                OUT_PKT2(ring);

        OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
        OUT_RING(ring, submit->fence);

        if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
                /* Flush HLSQ lazy updates to make sure there is nothing
                 * pending for indirect loads after the timestamp has
                 * passed:
                 */
                OUT_PKT3(ring, CP_EVENT_WRITE, 1);
                OUT_RING(ring, HLSQ_FLUSH);

                OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
                OUT_RING(ring, 0x00000000);
        }

        OUT_PKT3(ring, CP_EVENT_WRITE, 3);
        OUT_RING(ring, CACHE_FLUSH_TS);
        OUT_RING(ring, rbmemptr(adreno_gpu, fence));
        OUT_RING(ring, submit->fence);

        /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
        OUT_PKT3(ring, CP_INTERRUPT, 1);
        OUT_RING(ring, 0x80000000);

        if (adreno_is_a3xx(adreno_gpu)) {
                /* Dummy set-constant to trigger context rollover */
                OUT_PKT3(ring, CP_SET_CONSTANT, 2);
                OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
                OUT_RING(ring, 0x00000000);
        }

        gpu->funcs->flush(gpu);

        return 0;
}

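/* Publish the new write pointer to the CP so it starts consuming the
 * commands written above.
 */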
void adreno_flush(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t wptr = get_wptr(gpu->rb);

        /* ensure writes to ringbuffer have hit system memory: */
        mb();

        adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}

void adreno_idle(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t wptr = get_wptr(gpu->rb);

        /* wait for CP to drain ringbuffer: */
        if (spin_until(adreno_gpu->memptrs->rptr == wptr))
                DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);

        /* TODO maybe we need to reset GPU here to recover from hang? */
}

#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        int i;

        seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
                        adreno_gpu->info->revn, adreno_gpu->rev.core,
                        adreno_gpu->rev.major, adreno_gpu->rev.minor,
                        adreno_gpu->rev.patchid);

        seq_printf(m, "fence:    %d/%d\n", adreno_gpu->memptrs->fence,
                        gpu->submitted_fence);
        seq_printf(m, "rptr:     %d\n", adreno_gpu->memptrs->rptr);
        seq_printf(m, "wptr:     %d\n", adreno_gpu->memptrs->wptr);
        seq_printf(m, "rb wptr:  %d\n", get_wptr(gpu->rb));

        gpu->funcs->pm_resume(gpu);

        /* dump these out in a form that can be parsed by demsm: */
        seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
        for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
                uint32_t start = adreno_gpu->registers[i];
                uint32_t end   = adreno_gpu->registers[i+1];
                uint32_t addr;

                for (addr = start; addr <= end; addr++) {
                        uint32_t val = gpu_read(gpu, addr);
                        seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
                }
        }

        gpu->funcs->pm_suspend(gpu);
}
#endif

/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        int i;

        printk("revision: %d (%d.%d.%d.%d)\n",
                        adreno_gpu->info->revn, adreno_gpu->rev.core,
                        adreno_gpu->rev.major, adreno_gpu->rev.minor,
                        adreno_gpu->rev.patchid);

        printk("fence:    %d/%d\n", adreno_gpu->memptrs->fence,
                        gpu->submitted_fence);
        printk("rptr:     %d\n", adreno_gpu->memptrs->rptr);
        printk("wptr:     %d\n", adreno_gpu->memptrs->wptr);
        printk("rb wptr:  %d\n", get_wptr(gpu->rb));

        /* dump these out in a form that can be parsed by demsm: */
        printk("IO:region %s 00000000 00020000\n", gpu->name);
        for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
                uint32_t start = adreno_gpu->registers[i];
                uint32_t end   = adreno_gpu->registers[i+1];
                uint32_t addr;

                for (addr = start; addr <= end; addr++) {
                        uint32_t val = gpu_read(gpu, addr);
                        printk("IO:R %08x %08x\n", addr<<2, val);
                }
        }
}

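/* Free space in the ringbuffer, in dwords.  One slot is kept unused so
 * that a completely full ring (wptr == rptr) is not confused with an
 * empty one.
 */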
static uint32_t ring_freewords(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t size = gpu->rb->size / 4;
        uint32_t wptr = get_wptr(gpu->rb);
        uint32_t rptr = adreno_gpu->memptrs->rptr;
        return (rptr + (size - 1) - wptr) % size;
}

void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
        if (spin_until(ring_freewords(gpu) >= ndwords))
                DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}

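/* IOMMU ports the GPU issues translations through (the gfx3d1_* names
 * presumably cover SoCs with a second set of GPU contexts).
 */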
static const char *iommu_ports[] = {
                "gfx3d_user", "gfx3d_priv",
                "gfx3d1_user", "gfx3d1_priv",
};

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{
        struct adreno_platform_config *config = pdev->dev.platform_data;
        struct msm_gpu *gpu = &adreno_gpu->base;
        struct msm_mmu *mmu;
        int ret;

        adreno_gpu->funcs = funcs;
        adreno_gpu->info = adreno_info(config->rev);
        adreno_gpu->gmem = adreno_gpu->info->gmem;
        adreno_gpu->revn = adreno_gpu->info->revn;
        adreno_gpu->rev = config->rev;

        gpu->fast_rate = config->fast_rate;
        gpu->slow_rate = config->slow_rate;
        gpu->bus_freq  = config->bus_freq;
#ifdef CONFIG_MSM_BUS_SCALING
        gpu->bus_scale_table = config->bus_scale_table;
#endif

        DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
                        gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);

        ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
                        adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
                        RB_SIZE);
        if (ret)
                return ret;

        ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
        if (ret) {
                dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
                                adreno_gpu->info->pm4fw, ret);
                return ret;
        }

        ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
        if (ret) {
                dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
                                adreno_gpu->info->pfpfw, ret);
                return ret;
        }

        mmu = gpu->mmu;
        if (mmu) {
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
                        return ret;
        }

        mutex_lock(&drm->struct_mutex);
        adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
                        MSM_BO_UNCACHED);
        mutex_unlock(&drm->struct_mutex);
        if (IS_ERR(adreno_gpu->memptrs_bo)) {
                ret = PTR_ERR(adreno_gpu->memptrs_bo);
                adreno_gpu->memptrs_bo = NULL;
                dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
                return ret;
        }

        adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
        if (!adreno_gpu->memptrs) {
                dev_err(drm->dev, "could not vmap memptrs\n");
                return -ENOMEM;
        }

        ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
                        &adreno_gpu->memptrs_iova);
        if (ret) {
                dev_err(drm->dev, "could not map memptrs: %d\n", ret);
                return ret;
        }

        return 0;
}

void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
        if (gpu->memptrs_bo) {
                if (gpu->memptrs_iova)
                        msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
                drm_gem_object_unreference(gpu->memptrs_bo);
        }
        release_firmware(gpu->pm4);
        release_firmware(gpu->pfp);
        msm_gpu_cleanup(&gpu->base);
}