/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

struct etnaviv_chip_identity {
	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;
	u32 minor_features1;
	u32 minor_features2;
	u32 minor_features3;
	u32 minor_features4;
	u32 minor_features5;
	u32 minor_features6;
	u32 minor_features7;
	u32 minor_features8;
	u32 minor_features9;
	u32 minor_features10;
	u32 minor_features11;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Number of varyings */
	u8 varyings_count;
};

enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};

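/*
 * Book-keeping for one hardware event slot: the fence and submit that the
 * event completes, plus an optional sync point callback invoked when the
 * event fires.
 */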
struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};

struct etnaviv_cmdbuf_suballoc;

#define ETNA_NR_EVENTS 30

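/* Per-core GPU instance state. */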
struct etnaviv_gpu {
	struct drm_device *drm;
	struct thermal_cooling_device *cooling;
	struct etnaviv_chip_identity identity;
	enum etnaviv_sec_mode sec_mode;
	struct workqueue_struct *wq;
	struct drm_gpu_scheduler sched;

	struct etnaviv_cmdbuf buffer;

	/* event management: */
	DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
	struct etnaviv_event event[ETNA_NR_EVENTS];
	struct completion event_free;
	spinlock_t event_spinlock;

	/* Fencing support */
	struct mutex fence_lock;
	struct idr fence_idr;
	wait_queue_head_t fence_event;
	spinlock_t fence_spinlock;

	/* worker for handling 'sync' points: */
	struct work_struct sync_point_work;
	int sync_point_event;

	/* hang detection */
	u32 hangcheck_dma_addr;

	/* mapped register window, used by gpu_read()/gpu_write() below */
	void __iomem *mmio;

	struct etnaviv_iommu_context *mmu_context;
	unsigned int flush_seq;

	/* clocks */
	struct clk *clk_core;
	struct clk *clk_shader;

	unsigned int freq_scale;
	unsigned long base_rate_core;
	unsigned long base_rate_shader;
};

static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + reg);
}

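/*
 * Usage sketch (illustrative only): reading a status register through the
 * accessor above. The register offset comes from the generated
 * state_hi.xml.h header, not from this file.
 *
 *	u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 */
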
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */