/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

#include "etnaviv_drv.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

struct etnaviv_chip_identity {
	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;

	/* Supported minor feature 1 fields. */
	u32 minor_features1;

	/* Supported minor feature 2 fields. */
	u32 minor_features2;

	/* Supported minor feature 3 fields. */
	u32 minor_features3;

	/* Supported minor feature 4 fields. */
	u32 minor_features4;

	/* Supported minor feature 5 fields. */
	u32 minor_features5;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Number of varyings */
	u8 varyings_count;
};

struct etnaviv_event {
	struct dma_fence *fence;
};

struct etnaviv_cmdbuf;

struct etnaviv_gpu {
	struct drm_device *drm;
	struct etnaviv_chip_identity identity;
	struct etnaviv_file_private *lastctx;

	struct etnaviv_cmdbuf *buffer;

	/* bus base address of memory */
	u32 memory_base;

	/* event management: */
	struct etnaviv_event event[30];
	struct completion event_free;
	spinlock_t event_spinlock;

	/* list of currently in-flight command buffers */
	struct list_head active_cmd_list;

	/* Fencing support */
	u32 completed_fence;
	u32 retired_fence;
	wait_queue_head_t fence_event;
	spinlock_t fence_spinlock;

	/* worker for handling active-list retiring: */
	struct work_struct retire_work;

	void __iomem *mmio;

	struct etnaviv_iommu *mmu;

	struct clk *clk_core;
	struct clk *clk_shader;

#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;
	u32 hangcheck_dma_addr;
	struct work_struct recover_work;
};

struct etnaviv_cmdbuf {
	/* device this cmdbuf is allocated for */
	struct etnaviv_gpu *gpu;
	/* user context key, must be unique between all active users */
	struct etnaviv_file_private *ctx;
	/* cmdbuf properties */
	void *vaddr;
	dma_addr_t paddr;
	u32 size;
	u32 user_size;
	/* vram node used if the cmdbuf is mapped through the MMUv2 */
	struct drm_mm_node vram_node;
	/* fence after which this buffer is to be disposed */
	struct dma_fence *fence;
	/* target exec state */
	u32 exec_state;
	/* per GPU in-flight list */
	struct list_head node;
	/* BOs attached to this command buffer */
	unsigned int nr_bos;
	struct etnaviv_vram_mapping *bo_map[0];
};

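/*
 * Note on the flexible bo_map[] array above: etnaviv_gpu_cmdbuf_new()
 * (declared below) takes the number of BOs as 'nr_bos' and is expected to
 * allocate the struct with room for that many mapping pointers in addition
 * to the command stream buffer itself; the exact allocation logic lives in
 * etnaviv_gpu.c.
 */
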
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	etnaviv_writel(data, gpu->mmio + reg);
}

static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return etnaviv_readl(gpu->mmio + reg);
}

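/*
 * Usage sketch for the accessors above (illustrative only; the register and
 * bit names come from the generated state_hi.xml.h headers and are an
 * assumption here, not part of this file):
 *
 *	u32 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 *
 *	control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
 *	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
 */
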
static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
{
	return fence_after_eq(gpu->completed_fence, fence);
}

static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
{
	return fence_after_eq(gpu->retired_fence, fence);
}

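/*
 * fence_after_eq() is the seqno comparison helper from etnaviv_drv.h; it
 * compares the 32-bit fence numbers using signed arithmetic, so
 * fence_completed()/fence_retired() keep giving the right answer when the
 * fence counter wraps around.
 */
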
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
	unsigned int context, bool exclusive);

void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
	u32 size, size_t nr_bos);
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */