2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #ifndef __AMDGPU_GFX_H__
25 #define __AMDGPU_GFX_H__
30 #include "clearstate_defs.h"
31 #include "amdgpu_ring.h"
32 #include "amdgpu_rlc.h"
/* GFX current status — values recorded in amdgpu_gfx.gfx_current_status
 * (power-of-two flag values; NORMAL_MODE is the all-clear state).
 */
#define AMDGPU_GFX_NORMAL_MODE			0x00000000L
#define AMDGPU_GFX_SAFE_MODE			0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE		0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L

/* Upper bounds for GFX/compute queue bookkeeping (bitmaps, MQD backups);
 * both reuse the KFD limit KGD_MAX_QUEUES, defined elsewhere.
 */
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
/*
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the text, wrapped declarations are split across lines, and numbering gaps
 * (44, 46, 48-50, 53, 56-57) show the enclosing struct's opening line and
 * several fields are missing.  Field names suggest this is the compute MEC
 * (micro-engine compute) bookkeeping state — confirm against upstream.
 */
45 struct amdgpu_bo
*hpd_eop_obj
;
/* BO backing the MEC firmware image */
47 struct amdgpu_bo
*mec_fw_obj
;
51 u32 num_queue_per_pipe
;
/* one MQD backup slot per compute ring, plus one spare */
52 void *mqd_backup
[AMDGPU_MAX_COMPUTE_RINGS
+ 1];
54 /* These are the resources for which amdgpu takes ownership */
55 DECLARE_BITMAP(queue_bitmap
, AMDGPU_MAX_COMPUTE_QUEUES
);
/*
 * Action codes passed to kiq_pm4_funcs.kiq_unmap_queues (declared below).
 * NOTE(review): numbering skips original lines 59-60 and the closing "};",
 * so the leading enumerators are missing from this extraction — do not
 * assume these two are the first values.
 */
58 enum amdgpu_unmap_queues_action
{
61 DISABLE_PROCESS_QUEUES
,
62 PREEMPT_QUEUES_NO_UNMAP
,
/*
 * Per-ASIC emitters for KIQ (kernel interface queue) PM4 packets, plus the
 * size bookkeeping for each packet type.
 * NOTE(review): garbled extraction — trailing parameter lists (original
 * lines 68, 77-78, 81-82), the map_queues size field (84) and the closing
 * "};" are missing here; each callback takes the KIQ ring to emit on.
 */
65 struct kiq_pm4_funcs
{
66 /* Support ASIC-specific kiq pm4 packets*/
67 void (*kiq_set_resources
)(struct amdgpu_ring
*kiq_ring
,
/* map/unmap a target ring's queue via the KIQ */
69 void (*kiq_map_queues
)(struct amdgpu_ring
*kiq_ring
,
70 struct amdgpu_ring
*ring
);
71 void (*kiq_unmap_queues
)(struct amdgpu_ring
*kiq_ring
,
72 struct amdgpu_ring
*ring
,
73 enum amdgpu_unmap_queues_action action
,
74 u64 gpu_addr
, u64 seq
);
75 void (*kiq_query_status
)(struct amdgpu_ring
*kiq_ring
,
76 struct amdgpu_ring
*ring
,
/* TLB invalidation keyed by process address-space id */
79 void (*kiq_invalidate_tlbs
)(struct amdgpu_ring
*kiq_ring
,
80 uint16_t pasid
, uint32_t flush_type
,
/* per-packet sizes — units not stated here; presumably dwords, confirm */
83 int set_resources_size
;
85 int unmap_queues_size
;
86 int query_status_size
;
87 int invalidate_tlbs_size
;
/*
 * NOTE(review): interior of the KIQ state struct — its opening line is not
 * visible in this extraction.  pmf dispatches to the ASIC-specific PM4
 * packet builders declared in struct kiq_pm4_funcs above.
 */
92 struct amdgpu_bo
*eop_obj
;
94 struct amdgpu_ring ring
;
95 struct amdgpu_irq_src irq
;
96 const struct kiq_pm4_funcs
*pmf
;
/*
 * NOTE(review): fragment — the comment-block delimiters around original
 * line 100 and the entire body/closing of struct amdgpu_scratch are missing
 * from this extraction.
 */
100 * GPU scratch registers structures, functions & helpers
102 struct amdgpu_scratch
{
/* Maximum shader-engine / shader-array dimensions; used to size the
 * per-SE/per-SH tables (see amdgpu_gfx_config.rb_config below).
 */
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2
/* Per-SE/per-SH render-backend configuration snapshot; one instance per
 * shader-engine/shader-array pair (see amdgpu_gfx_config.rb_config).
 * All four members were visible in the damaged extraction; only the
 * closing brace and formatting are restored here.
 */
struct amdgpu_rb_config {
	uint32_t rb_backend_disable;
	uint32_t user_rb_backend_disable;
	uint32_t raster_config;
	uint32_t raster_config_1;
};
/*
 * Decoded GB_ADDR_CONFIG fields (cf. the packed "unsigned gb_addr_config"
 * register value in struct amdgpu_gfx_config below).
 * NOTE(review): garbled extraction — numbering skips original lines 123 and
 * 125-126, so several members and the closing "};" are missing; do not
 * treat this field list as complete.
 */
121 struct gb_addr_config
{
122 uint16_t pipe_interleave_size
;
124 uint8_t max_compress_frags
;
127 uint8_t num_rb_per_se
;
/*
 * ASIC geometry / tiling / FIFO-size configuration snapshot.
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the text, and numbering gaps (137, 144, 150, 154, 157, 160, 163, 167,
 * 173-174) show that several members (including the DB_DEBUG2 cache field
 * its orphaned comment below refers to) and the closing "};" are missing.
 */
130 struct amdgpu_gfx_config
{
/* hardware topology limits */
131 unsigned max_shader_engines
;
132 unsigned max_tile_pipes
;
133 unsigned max_cu_per_sh
;
134 unsigned max_sh_per_se
;
135 unsigned max_backends_per_se
;
136 unsigned max_texture_channel_caches
;
138 unsigned max_gs_threads
;
139 unsigned max_hw_contexts
;
/* scan-converter primitive / HiZ / early-Z FIFO sizing */
140 unsigned sc_prim_fifo_size_frontend
;
141 unsigned sc_prim_fifo_size_backend
;
142 unsigned sc_hiz_tile_fifo_size
;
143 unsigned sc_earlyz_tile_fifo_size
;
145 unsigned num_tile_pipes
;
146 unsigned backend_enable_mask
;
147 unsigned mem_max_burst_length_bytes
;
148 unsigned mem_row_size_in_kb
;
149 unsigned shader_engine_tile_size
;
151 unsigned multi_gpu_tile_size
;
152 unsigned mc_arb_ramcfg
;
/* raw register value; decoded copy lives in gb_addr_config_fields below */
153 unsigned gb_addr_config
;
155 unsigned gs_vgt_table_depth
;
156 unsigned gs_prim_buffer_depth
;
158 uint32_t tile_mode_array
[32];
159 uint32_t macrotile_mode_array
[16];
161 struct gb_addr_config gb_addr_config_fields
;
/* per-SE / per-SH render-backend config (dimensions: AMDGPU_GFX_MAX_*) */
162 struct amdgpu_rb_config rb_config
[AMDGPU_GFX_MAX_SE
][AMDGPU_GFX_MAX_SH_PER_SE
];
164 /* gfx configure feature */
165 uint32_t double_offchip_lds_buf
;
166 /* cached value of DB_DEBUG2 */
168 /* gfx10 specific config */
169 uint32_t num_sc_per_sh
;
170 uint32_t num_packer_per_sc
;
171 uint32_t pa_sc_tile_steering_override
;
172 uint64_t tcc_disabled_mask
;
/*
 * Compute-unit topology information.
 * NOTE(review): garbled extraction — numbering gaps (180-181, 183-184,
 * 187-188) show several members (e.g. the total-active-CU count that the
 * orphaned comment below refers to) and the closing "};" are missing.
 */
175 struct amdgpu_cu_info
{
176 uint32_t simd_per_cu
;
177 uint32_t max_waves_per_simd
;
178 uint32_t wave_front_size
;
179 uint32_t max_scratch_slots_per_cu
;
182 /* total active CU number */
/* per-SE/per-SH CU bitmaps: always-on CUs vs. all enabled CUs */
185 uint32_t ao_cu_bitmap
[4][4];
186 uint32_t bitmap
[4][4];
/*
 * Per-ASIC GFX callback table (installed at adev->gfx.funcs; dispatched via
 * the amdgpu_gfx_* convenience macros below).
 * NOTE(review): garbled extraction — the tail of read_wave_sgprs (original
 * line 201), lines 206-208 and the closing "};" are missing.
 */
189 struct amdgpu_gfx_funcs
{
190 /* get the gpu clock counter */
191 uint64_t (*get_gpu_clock_counter
)(struct amdgpu_device
*adev
);
/* steer subsequent register access to a shader engine / shader array */
192 void (*select_se_sh
)(struct amdgpu_device
*adev
, u32 se_num
,
193 u32 sh_num
, u32 instance
);
/* read wave state / VGPRs / SGPRs for a given simd+wave into *dst */
194 void (*read_wave_data
)(struct amdgpu_device
*adev
, uint32_t simd
,
195 uint32_t wave
, uint32_t *dst
, int *no_fields
);
196 void (*read_wave_vgprs
)(struct amdgpu_device
*adev
, uint32_t simd
,
197 uint32_t wave
, uint32_t thread
, uint32_t start
,
198 uint32_t size
, uint32_t *dst
);
199 void (*read_wave_sgprs
)(struct amdgpu_device
*adev
, uint32_t simd
,
200 uint32_t wave
, uint32_t start
, uint32_t size
,
201 void (*select_me_pipe_q — NOTE(review): prototype below is separate */
202 void (*select_me_pipe_q
)(struct amdgpu_device
*adev
, u32 me
, u32 pipe
,
203 u32 queue
, u32 vmid
);
/* RAS error-injection / error-count hooks */
204 int (*ras_error_inject
)(struct amdgpu_device
*adev
, void *inject_if
);
205 int (*query_ras_error_count
) (struct amdgpu_device
*adev
, void *ras_error_status
);
/*
 * NOTE(review): orphaned fragment — presumably a member of the SQ interrupt
 * work struct (cf. "struct sq_work sq_work" below); its enclosing struct's
 * opening and closing lines are missing from this extraction.
 */
209 struct work_struct work
;
/*
 * NOTE(review): garbled extraction — interiors of the PFP/CE/ME firmware
 * bookkeeping structs, with their opening lines, closing braces and several
 * members missing (numbering gaps 217-219, 222-225, 228-229, 233).
 */
214 struct amdgpu_bo
*pfp_fw_obj
;
215 uint64_t pfp_fw_gpu_addr
;
216 uint32_t *pfp_fw_ptr
;
220 struct amdgpu_bo
*ce_fw_obj
;
221 uint64_t ce_fw_gpu_addr
;
226 struct amdgpu_bo
*me_fw_obj
;
227 uint64_t me_fw_gpu_addr
;
/* graphics-queue bookkeeping (mirrors the compute-side fields earlier) */
230 uint32_t num_pipe_per_me
;
231 uint32_t num_queue_per_pipe
;
232 void *mqd_backup
[AMDGPU_MAX_GFX_RINGS
];
234 /* These are the resources for which amdgpu takes ownership */
235 DECLARE_BITMAP(queue_bitmap
, AMDGPU_MAX_GFX_QUEUES
);
/*
 * Interior of the main per-device GFX state container.
 * NOTE(review): garbled extraction — the struct's opening line (original
 * 238), several members (gaps at 243-244, 289-290, 292, 296-297, 300-301,
 * 306, 310-311, 313-314) and the closing "};" are missing.
 */
239 struct mutex gpu_clock_mutex
;
240 struct amdgpu_gfx_config config
;
241 struct amdgpu_rlc rlc
;
242 struct amdgpu_pfp pfp
;
245 struct amdgpu_mec mec
;
246 struct amdgpu_kiq kiq
;
247 struct amdgpu_scratch scratch
;
/* CP/RLC firmware images with their firmware- and feature-version stamps */
248 const struct firmware
*me_fw
; /* ME firmware */
249 uint32_t me_fw_version
;
250 const struct firmware
*pfp_fw
; /* PFP firmware */
251 uint32_t pfp_fw_version
;
252 const struct firmware
*ce_fw
; /* CE firmware */
253 uint32_t ce_fw_version
;
254 const struct firmware
*rlc_fw
; /* RLC firmware */
255 uint32_t rlc_fw_version
;
256 const struct firmware
*mec_fw
; /* MEC firmware */
257 uint32_t mec_fw_version
;
258 const struct firmware
*mec2_fw
; /* MEC2 firmware */
259 uint32_t mec2_fw_version
;
260 uint32_t me_feature_version
;
261 uint32_t ce_feature_version
;
262 uint32_t pfp_feature_version
;
263 uint32_t rlc_feature_version
;
/* RLC save/restore-list firmware versions (srlc/srlg/srls variants) */
264 uint32_t rlc_srlc_fw_version
;
265 uint32_t rlc_srlc_feature_version
;
266 uint32_t rlc_srlg_fw_version
;
267 uint32_t rlc_srlg_feature_version
;
268 uint32_t rlc_srls_fw_version
;
269 uint32_t rlc_srls_feature_version
;
270 uint32_t mec_feature_version
;
271 uint32_t mec2_feature_version
;
272 bool mec_fw_write_wait
;
273 bool me_fw_write_wait
;
274 bool cp_fw_write_wait
;
/* GFX and compute rings with their schedulers and active counts */
275 struct amdgpu_ring gfx_ring
[AMDGPU_MAX_GFX_RINGS
];
276 struct drm_gpu_scheduler
*gfx_sched
[AMDGPU_MAX_GFX_RINGS
];
277 uint32_t num_gfx_sched
;
278 unsigned num_gfx_rings
;
279 struct amdgpu_ring compute_ring
[AMDGPU_MAX_COMPUTE_RINGS
];
280 struct drm_gpu_scheduler
*compute_sched
[AMDGPU_MAX_COMPUTE_RINGS
];
281 uint32_t num_compute_sched
;
282 unsigned num_compute_rings
;
/* interrupt sources */
283 struct amdgpu_irq_src eop_irq
;
284 struct amdgpu_irq_src priv_reg_irq
;
285 struct amdgpu_irq_src priv_inst_irq
;
286 struct amdgpu_irq_src cp_ecc_error_irq
;
287 struct amdgpu_irq_src sq_irq
;
288 struct sq_work sq_work
;
/* current mode flags (AMDGPU_GFX_*_MODE defines above) */
291 uint32_t gfx_current_status
;
293 unsigned ce_ram_size
;
294 struct amdgpu_cu_info cu_info
;
/* per-ASIC callback table (struct amdgpu_gfx_funcs above) */
295 const struct amdgpu_gfx_funcs
*funcs
;
/* soft-reset bookkeeping */
298 uint32_t grbm_soft_reset
;
299 uint32_t srbm_soft_reset
;
/* GFXOFF control state */
302 bool gfx_off_state
; /* true: enabled, false: disabled */
303 struct mutex gfx_off_mutex
;
304 uint32_t gfx_off_req_count
; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
305 struct delayed_work gfx_off_delay_work
;
307 /* pipe reservation */
308 struct mutex pipe_reserve_mutex
;
309 DECLARE_BITMAP (pipe_reserve_bitmap
, AMDGPU_MAX_COMPUTE_QUEUES
);
/* RAS interface handle */
312 struct ras_common_if
*ras_if
;
/* Convenience wrappers dispatching through the per-ASIC callback table at
 * adev->gfx.funcs (struct amdgpu_gfx_funcs above).
 */
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid))
320 * amdgpu_gfx_create_bitmask - create a bitmask
322 * @bit_width: length of the mask
324 * create a variable length bit mask.
325 * Returns the bitmask.
327 static inline u32
amdgpu_gfx_create_bitmask(u32 bit_width
)
329 return (u32
)((1ULL << bit_width
) - 1);
/*
 * Public amdgpu_gfx helper API.
 * NOTE(review): garbled extraction — fused line numbers, wrapped parameter
 * lists, and missing continuation lines (e.g. originals 336-337, 346-347,
 * 349, 373) leave several prototypes below truncated.
 */
/* scratch-register allocation helpers */
332 int amdgpu_gfx_scratch_get(struct amdgpu_device
*adev
, uint32_t *reg
);
333 void amdgpu_gfx_scratch_free(struct amdgpu_device
*adev
, uint32_t reg
);
335 void amdgpu_gfx_parse_disable_cu(unsigned *mask
, unsigned max_se
,
/* KIQ ring lifecycle (init_ring/free_ring/init/fini) and MQD management */
338 int amdgpu_gfx_kiq_init_ring(struct amdgpu_device
*adev
,
339 struct amdgpu_ring
*ring
,
340 struct amdgpu_irq_src
*irq
);
342 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring
*ring
);
344 void amdgpu_gfx_kiq_fini(struct amdgpu_device
*adev
);
345 int amdgpu_gfx_kiq_init(struct amdgpu_device
*adev
,
348 int amdgpu_gfx_mqd_sw_init(struct amdgpu_device
*adev
,
350 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device
*adev
);
351 int amdgpu_gfx_disable_kcq(struct amdgpu_device
*adev
);
352 int amdgpu_gfx_enable_kcq(struct amdgpu_device
*adev
);
/* queue ownership acquisition and (me|mec, pipe, queue) <-> bit mapping */
354 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device
*adev
);
355 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device
*adev
);
357 int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device
*adev
, int mec
,
358 int pipe
, int queue
);
359 void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device
*adev
, int bit
,
360 int *mec
, int *pipe
, int *queue
);
361 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device
*adev
, int mec
,
362 int pipe
, int queue
);
363 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device
*adev
, int me
,
364 int pipe
, int queue
);
365 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device
*adev
, int bit
,
366 int *me
, int *pipe
, int *queue
);
367 bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device
*adev
, int me
,
368 int pipe
, int queue
);
/* GFXOFF control and RAS (error handling) entry points */
369 void amdgpu_gfx_off_ctrl(struct amdgpu_device
*adev
, bool enable
);
370 int amdgpu_gfx_ras_late_init(struct amdgpu_device
*adev
);
371 void amdgpu_gfx_ras_fini(struct amdgpu_device
*adev
);
372 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device
*adev
,
374 struct amdgpu_iv_entry
*entry
);
375 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device
*adev
,
376 struct amdgpu_irq_src
*source
,
377 struct amdgpu_iv_entry
*entry
);