/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_RINGBUFFER_H__
#define __MSM_RINGBUFFER_H__

#include "msm_drv.h"

#define rbmemptr(ring, member) \
	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))

#define rbmemptr_stats(ring, index, member) \
	(rbmemptr((ring), stats) + \
	 ((index) * sizeof(struct msm_gpu_submit_stats)) + \
	 offsetof(struct msm_gpu_submit_stats, member))
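
/*
 * The macros above compute addresses within the shared msm_rbmemptrs block
 * relative to its iova.  For example, rbmemptr(ring, fence) expands to
 * ring->memptrs_iova + offsetof(struct msm_rbmemptrs, fence), and
 * rbmemptr_stats(ring, i, member) additionally indexes into the per-submit
 * stats[] array by slot i.
 */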

struct msm_gpu_submit_stats {
	u64 cpcycles_start;
	u64 cpcycles_end;
	u64 alwayson_start;
	u64 alwayson_end;
};

#define MSM_GPU_SUBMIT_STATS_COUNT 64
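
/*
 * msm_rbmemptrs lives in a buffer shared with the GPU (hence the volatile
 * qualifiers below); the rbmemptr()/rbmemptr_stats() macros above compute
 * the GPU-visible addresses of its fields via memptrs_iova.
 */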
struct msm_rbmemptrs {
	volatile uint32_t rptr;
	volatile uint32_t fence;

	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
};

struct msm_ringbuffer {
	struct msm_gpu *gpu;
	int id;
	struct drm_gem_object *bo;
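
	/*
	 * start/end bound the ring contents; next is where new commands are
	 * written (see OUT_RING() below) and only becomes visible as cur once
	 * the ring is flushed.
	 */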
	uint32_t *start, *end, *cur, *next;

	/*
	 * List of in-flight submits on this ring.  Protected by submit_lock.
	 */
	struct list_head submits;
	spinlock_t submit_lock;

	uint32_t hangcheck_fence;
	struct msm_rbmemptrs *memptrs;
	uint64_t memptrs_iova;
	struct msm_fence_context *fctx;

	/*
	 * preempt_lock protects preemption and serializes wptr updates against
	 * preemption.  Can be acquired from irq context.
	 */
	spinlock_t preempt_lock;
};

struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
		void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
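
/*
 * Illustrative sketch (not from this header): a GPU backend would typically
 * create its ring(s) at init time and tear them down on destroy, roughly:
 *
 *	ring = msm_ringbuffer_new(gpu, id, memptrs_ptr, memptrs_iova);
 *	if (IS_ERR(ring))
 *		return PTR_ERR(ring);
 *	...
 *	msm_ringbuffer_destroy(ring);
 *
 * memptrs_ptr/memptrs_iova here are placeholder names for the CPU pointer
 * and GPU address of the ring's msm_rbmemptrs slot, and the ERR_PTR() return
 * convention on failure is an assumption.
 */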

/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */

static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
	/*
	 * ring->next points to the current command being written - it won't be
	 * committed as ring->cur until the flush
	 */
	if (ring->next == ring->end)
		ring->next = ring->start;
	*(ring->next++) = data;
}
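
/*
 * Illustrative sketch (not from this header): emitting a command is just a
 * short run of OUT_RING() calls, e.g.:
 *
 *	OUT_RING(ring, pkt_header);
 *	OUT_RING(ring, payload0);
 *	OUT_RING(ring, payload1);
 *
 * pkt_header/payload0/payload1 are placeholder values; the words land at
 * ring->next and are only committed as ring->cur when the ring is flushed.
 */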

#endif /* __MSM_RINGBUFFER_H__ */