/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "adreno_gpu.h"

struct adreno_info {
        struct adreno_rev rev;
        uint32_t revn;
        const char *name;
        const char *pm4fw, *pfpfw;
        uint32_t gmem;
};

#define ANY_ID 0xff
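
/* Table of supported GPUs; ANY_ID fields act as wildcards when the probed
 * revision is matched against this table in adreno_gpu_init():
 */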
static const struct adreno_info gpulist[] = {
        {
                .rev   = ADRENO_REV(3, 0, 5, ANY_ID),
                .revn  = 305,
                .name  = "A305",
                .pm4fw = "a300_pm4.fw",
                .pfpfw = "a300_pfp.fw",
                .gmem  = SZ_256K,
        }, {
                .rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
                .revn  = 320,
                .name  = "A320",
                .pm4fw = "a300_pm4.fw",
                .pfpfw = "a300_pfp.fw",
                .gmem  = SZ_512K,
        }, {
                .rev   = ADRENO_REV(3, 3, 0, ANY_ID),
                .revn  = 330,
                .name  = "A330",
                .pm4fw = "a330_pm4.fw",
                .pfpfw = "a330_pfp.fw",
                .gmem  = SZ_1M,
        },
};

MODULE_FIRMWARE("a300_pm4.fw");
MODULE_FIRMWARE("a300_pfp.fw");
MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");

#define RB_SIZE    SZ_32K
#define RB_BLKSIZE 16 /* assumed value; only referenced via ilog2(RB_BLKSIZE / 8) below */

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        switch (param) {
        case MSM_PARAM_GPU_ID:
                *value = adreno_gpu->info->revn;
                return 0;
        case MSM_PARAM_GMEM_SIZE:
                *value = adreno_gpu->gmem;
                return 0;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
                return -EINVAL;
        }
}
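
/* rbmemptr() resolves to the GPU (iova) address of one member of the shared
 * adreno_rbmemptrs buffer, so the CP can write rptr/fence updates to memory
 * that the driver later reads back through adreno_gpu->memptrs:
 */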
#define rbmemptr(adreno_gpu, member) \
        ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))

int adreno_hw_init(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        /* Setup REG_CP_RB_CNTL: */
        gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
                        /* size is log2(quad-words): */
                        AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
                        AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));

        /* Setup ringbuffer address: */
        gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
        gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));

        /* Setup scratch/timestamp: */
        gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));

        gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);

        return 0;
}
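
/* get_wptr() returns the CPU-side write pointer as a dword offset from the
 * start of the ringbuffer; this is the value adreno_flush() later writes to
 * REG_AXXX_CP_RB_WPTR:
 */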
static uint32_t get_wptr(struct msm_ringbuffer *ring)
{
        return ring->cur - ring->start;
}

uint32_t adreno_last_fence(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        return adreno_gpu->memptrs->fence;
}

void adreno_recover(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct drm_device *dev = gpu->dev;
        int ret;

        gpu->funcs->pm_suspend(gpu);

        /* reset ringbuffer: */
        gpu->rb->cur = gpu->rb->start;

        /* reset completed fence seqno, just discard anything pending: */
        adreno_gpu->memptrs->fence = gpu->submitted_fence;
        adreno_gpu->memptrs->rptr  = 0;
        adreno_gpu->memptrs->wptr  = 0;

        gpu->funcs->pm_resume(gpu);
        ret = gpu->funcs->hw_init(gpu);
        if (ret)
                dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
}
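
/* adreno_submit() emits a CP_INDIRECT_BUFFER_PFD packet for each command
 * buffer it does not skip, then writes the submit fence (to a scratch reg and,
 * via CACHE_FLUSH_TS, to the memptrs fence slot) and raises a CP_INTERRUPT so
 * the CPU can notice retirement:
 */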
int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = gpu->rb;
        unsigned i, ibs = 0;

        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        /* ignore IB-targets */
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        /* ignore if there has not been a ctx switch: */
                        if (priv->lastctx == ctx)
                                break;
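                        /* otherwise fall through and emit it like a normal
                         * cmd buffer:
                         */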
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
                        OUT_RING(ring, submit->cmd[i].iova);
                        OUT_RING(ring, submit->cmd[i].size);
                        ibs++;
                        break;
                }
        }

        /* on a320, at least, we seem to need to pad things out to an
         * even number of qwords to avoid issue w/ CP hanging on wrap-
         * around:
         */
        if (ibs % 2)
                OUT_PKT2(ring);

        OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
        OUT_RING(ring, submit->fence);

        if (adreno_is_a3xx(adreno_gpu)) {
                /* Flush HLSQ lazy updates to make sure there is nothing
                 * pending for indirect loads after the timestamp has
                 * passed:
                 */
                OUT_PKT3(ring, CP_EVENT_WRITE, 1);
                OUT_RING(ring, HLSQ_FLUSH);

                OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
                OUT_RING(ring, 0x00000000);
        }

        OUT_PKT3(ring, CP_EVENT_WRITE, 3);
        OUT_RING(ring, CACHE_FLUSH_TS);
        OUT_RING(ring, rbmemptr(adreno_gpu, fence));
        OUT_RING(ring, submit->fence);

        /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
        OUT_PKT3(ring, CP_INTERRUPT, 1);
        OUT_RING(ring, 0x80000000);

        if (adreno_is_a3xx(adreno_gpu)) {
                /* Dummy set-constant to trigger context rollover */
                OUT_PKT3(ring, CP_SET_CONSTANT, 2);
                OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
                OUT_RING(ring, 0x00000000);
        }

        gpu->funcs->flush(gpu);

        return 0;
}

void adreno_flush(struct msm_gpu *gpu)
{
        uint32_t wptr = get_wptr(gpu->rb);

        /* ensure writes to ringbuffer have hit system memory: */
        mb();

        gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
}
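
/* adreno_idle() busy-waits until the CP read pointer catches up with the last
 * write pointer, or until ADRENO_IDLE_TIMEOUT expires:
 */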
void adreno_idle(struct msm_gpu *gpu)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t rptr, wptr = get_wptr(gpu->rb);
        unsigned long t;

        t = jiffies + ADRENO_IDLE_TIMEOUT;

        /* then wait for CP to drain ringbuffer: */
        do {
                rptr = adreno_gpu->memptrs->rptr;
                if (rptr == wptr)
                        return;
        } while(time_before(jiffies, t));

        DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);

        /* TODO maybe we need to reset GPU here to recover from hang? */
}

#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

        seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
                        adreno_gpu->info->revn, adreno_gpu->rev.core,
                        adreno_gpu->rev.major, adreno_gpu->rev.minor,
                        adreno_gpu->rev.patchid);

        seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
                        gpu->submitted_fence);
        seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
        seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
        seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
}
#endif

void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
{
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t freedwords;
        unsigned long t = jiffies + ADRENO_IDLE_TIMEOUT;
        do {
                uint32_t size = gpu->rb->size / 4;
                uint32_t wptr = get_wptr(gpu->rb);
                uint32_t rptr = adreno_gpu->memptrs->rptr;
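                /* free dwords between wptr and rptr, keeping one slot unused
                 * so a completely full ring is distinguishable from an empty
                 * one:
                 */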
                freedwords = (rptr + (size - 1) - wptr) % size;

                if (time_after(jiffies, t)) {
                        DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
                        break;
                }
        } while(freedwords < ndwords);
}

static const char *iommu_ports[] = {
                "gfx3d_user", "gfx3d_priv",
                "gfx3d1_user", "gfx3d1_priv",
};
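
/* _rev_match() compares one field of the probed adreno_rev against a gpulist
 * entry; ANY_ID in the table acts as a wildcard:
 */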
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
        return (entry == ANY_ID) || (entry == id);
}

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
                struct adreno_rev rev)
{
        struct msm_mmu *mmu;
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
                const struct adreno_info *info = &gpulist[i];
                if (_rev_match(info->rev.core, rev.core) &&
                                _rev_match(info->rev.major, rev.major) &&
                                _rev_match(info->rev.minor, rev.minor) &&
                                _rev_match(info->rev.patchid, rev.patchid)) {
                        gpu->info = info;
                        gpu->revn = info->revn;
                        break;
                }
        }

        if (i == ARRAY_SIZE(gpulist)) {
                dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
                                rev.core, rev.major, rev.minor, rev.patchid);
                return -ENXIO;
        }

        DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
                        rev.core, rev.major, rev.minor, rev.patchid);

        gpu->funcs = funcs;
        gpu->gmem = gpu->info->gmem;
        gpu->rev = rev;

        ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
        if (ret) {
                dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
                                gpu->info->pm4fw, ret);
                return ret;
        }

        ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
        if (ret) {
                dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
                                gpu->info->pfpfw, ret);
                return ret;
        }

        ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
                        gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
                        RB_SIZE);
        if (ret)
                return ret;

        mmu = gpu->base.mmu;
        if (mmu) {
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
                        return ret;
        }
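
        /* The memptrs buffer object backs struct adreno_rbmemptrs (rptr, wptr,
         * fence).  It is vmap'd for the CPU below and also mapped into the GPU
         * address space, which is what the rbmemptr() offsets point into:
         */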
        gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
                        MSM_BO_UNCACHED);
        if (IS_ERR(gpu->memptrs_bo)) {
                ret = PTR_ERR(gpu->memptrs_bo);
                gpu->memptrs_bo = NULL;
                dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
                return ret;
        }

        gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
        if (!gpu->memptrs) {
                dev_err(drm->dev, "could not vmap memptrs\n");
                return -ENOMEM;
        }

        ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
                        &gpu->memptrs_iova);
        if (ret) {
                dev_err(drm->dev, "could not map memptrs: %d\n", ret);
                return ret;
        }

        return 0;
}

void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
        if (gpu->memptrs_bo) {
                if (gpu->memptrs_iova)
                        msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
                drm_gem_object_unreference(gpu->memptrs_bo);
        }
        if (gpu->pm4)
                release_firmware(gpu->pm4);
        if (gpu->pfp)
                release_firmware(gpu->pfp);
        msm_gpu_cleanup(&gpu->base);
}