/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "adreno_gpu.h"
#include "msm_gem.h"
struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *name;
	const char *pm4fw, *pfpfw;
	uint32_t gmem;
};
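/* Table of known GPU revisions; the probed hardware revision is matched
 * against these entries in adreno_gpu_init() to pick the firmware names
 * and per-GPU parameters.
 */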
static const struct adreno_info gpulist[] = {
	{
		.rev   = ADRENO_REV(3, 0, 5, ANY_ID),
		.revn  = 305,
		.name  = "A305",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_256K,
	}, {
		.rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
		.revn  = 320,
		.name  = "A320",
		.pm4fw = "a300_pm4.fw",
		.pfpfw = "a300_pfp.fw",
		.gmem  = SZ_512K,
	}, {
		.rev   = ADRENO_REV(3, 3, 0, 0),
		.revn  = 330,
		.name  = "A330",
		.pm4fw = "a330_pm4.fw",
		.pfpfw = "a330_pfp.fw",
		.gmem  = SZ_1M,
	},
};
#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16
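/* Answer MSM_PARAM_* queries from userspace about this GPU. */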
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->info->gmem;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
#define rbmemptr(adreno_gpu, member) \
	((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
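/* Common hardware init: program the CP ringbuffer registers and point the
 * scratch/timestamp writeback at the shared memptrs buffer.
 */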
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	/* Setup REG_CP_RB_CNTL: */
	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
			/* size is log2(quad-words): */
			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
			AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));

	/* Setup ringbuffer address: */
	gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
	gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));

	/* Setup scratch/timestamp: */
	gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
	gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);

	return 0;
}
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return ring->cur - ring->start;
}
uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	return adreno_gpu->memptrs->fence;
}
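/* Recover from a GPU hang: power-cycle the GPU, reset the ringbuffer and
 * the fence/rptr/wptr writeback values, then re-run hw_init.
 */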
void adreno_recover(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct drm_device *dev = gpu->dev;
	int ret;

	gpu->funcs->pm_suspend(gpu);

	/* reset ringbuffer: */
	gpu->rb->cur = gpu->rb->start;

	/* reset completed fence seqno, just discard anything pending: */
	adreno_gpu->memptrs->fence = gpu->submitted_fence;
	adreno_gpu->memptrs->rptr  = 0;
	adreno_gpu->memptrs->wptr  = 0;

	gpu->funcs->pm_resume(gpu);
	ret = gpu->funcs->hw_init(gpu);
	if (ret)
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
}
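/* Write a submit into the ringbuffer: emit the IBs, a fence writeback via
 * CACHE_FLUSH_TS, and a CP_INTERRUPT so the IRQ handler can signal fence
 * completion.
 */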
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = gpu->rb;
	unsigned i, ibs = 0;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, submit->cmd[i].iova);
			OUT_RING(ring, submit->cmd[i].size);
			ibs++;
			break;
		}
	}

	/* on a320, at least, we seem to need to pad things out to an
	 * even number of qwords to avoid issue w/ CP hanging on wrap-
	 * around:
	 */
	if (ibs % 2)
		OUT_PKT2(ring);

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->fence);

	if (adreno_is_a3xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS);
	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
	OUT_RING(ring, submit->fence);

	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
	OUT_PKT3(ring, CP_INTERRUPT, 1);
	OUT_RING(ring, 0x80000000);

	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}

	gpu->funcs->flush(gpu);

	return 0;
}
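/* Kick the CP by updating the ringbuffer write pointer register. */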
void adreno_flush(struct msm_gpu *gpu)
{
	uint32_t wptr = get_wptr(gpu->rb);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
}
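/* Busy-wait (up to ADRENO_IDLE_TIMEOUT) for the CP read pointer to catch
 * up with our write pointer, i.e. for the ringbuffer to drain.
 */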
void adreno_idle(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t rptr, wptr = get_wptr(gpu->rb);
	unsigned long t;

	t = jiffies + ADRENO_IDLE_TIMEOUT;

	/* then wait for CP to drain ringbuffer: */
	do {
		rptr = adreno_gpu->memptrs->rptr;
		if (rptr == wptr)
			return;
	} while (time_before(jiffies, t));

	DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
			gpu->submitted_fence);
	seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
	seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
	seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
}
#endif
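/* Wait until at least 'ndwords' of space is available in the ringbuffer,
 * based on the rptr value the CP writes back to memptrs.
 */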
void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t freedwords;
	unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;

	do {
		uint32_t size = gpu->rb->size / 4;
		uint32_t wptr = get_wptr(gpu->rb);
		uint32_t rptr = adreno_gpu->memptrs->rptr;
		freedwords = (rptr + (size - 1) - wptr) % size;

		if (time_after(jiffies, t)) {
			DRM_ERROR("%s: timeout waiting for ringbuffer space\n",
					gpu->name);
			break;
		}
	} while (freedwords < ndwords);
}
static const char *iommu_ports[] = {
		"gfx3d_user", "gfx3d_priv",
		"gfx3d1_user", "gfx3d1_priv",
};
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
	return (entry == ANY_ID) || (entry == id);
}
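/* Common per-GPU init: identify the GPU from its revision, load PM4/PFP
 * firmware, set up the base msm_gpu, attach the IOMMU, and allocate the
 * shared memptrs buffer used for rptr/fence writeback.
 */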
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		struct adreno_rev rev)
{
	int i, ret;

	/* identify gpu: */
	for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
		const struct adreno_info *info = &gpulist[i];
		if (_rev_match(info->rev.core, rev.core) &&
				_rev_match(info->rev.major, rev.major) &&
				_rev_match(info->rev.minor, rev.minor) &&
				_rev_match(info->rev.patchid, rev.patchid)) {
			gpu->info = info;
			gpu->revn = info->revn;
			break;
		}
	}

	if (i == ARRAY_SIZE(gpulist)) {
		dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
				rev.core, rev.major, rev.minor, rev.patchid);
		return -ENXIO;
	}

	DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
			rev.core, rev.major, rev.minor, rev.patchid);

	gpu->funcs = funcs;
	gpu->rev = rev;

	ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				gpu->info->pfpfw, ret);
		return ret;
	}

	ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
			gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
			RB_SIZE);
	if (ret)
		return ret;

	ret = msm_iommu_attach(drm, gpu->base.iommu,
			iommu_ports, ARRAY_SIZE(iommu_ports));
	if (ret)
		return ret;

	gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
			MSM_BO_UNCACHED);
	if (IS_ERR(gpu->memptrs_bo)) {
		ret = PTR_ERR(gpu->memptrs_bo);
		gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
	if (!gpu->memptrs) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
			&gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}
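/* Undo adreno_gpu_init(): release the memptrs buffer, the loaded firmware,
 * and the base msm_gpu state.
 */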
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
	if (gpu->memptrs_bo) {
		if (gpu->memptrs_iova)
			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
		drm_gem_object_unreference(gpu->memptrs_bo);
	}
	if (gpu->pm4)
		release_firmware(gpu->pm4);
	if (gpu->pfp)
		release_firmware(gpu->pfp);
	msm_gpu_cleanup(&gpu->base);
}