/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "cik_structs.h"
#include "oss/oss_2_4_sh_mask.h"

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

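/*
 * A user-supplied CU mask is spread symmetrically across the shader engines
 * (at most 4 on these ASICs); each per-SE mask gates which compute units
 * the queue's wavefronts may run on.
 */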
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct cik_mqd *m;
	uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */

	if (q->cu_mask_count == 0)
		return;

	mqd_symmetrically_map_cu_mask(mm,
		q->cu_mask, q->cu_mask_count, se_mask);

	m = get_mqd(mqd);
	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];

	pr_debug("Update cu mask to %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3);
}

static int init_mqd(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	uint64_t addr;
	struct cik_mqd *m;
	int retval;

	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
					mqd_mem_obj);

	if (retval != 0)
		return -ENOMEM;

	m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
	addr = (*mqd_mem_obj)->gpu_addr;

	memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;

	/*
	 * Make sure to use the last queue state saved on the MQD when the CP
	 * reassigns the queue, so that when the queue is switched on/off
	 * (e.g. oversubscription or quantum timeout) the context stays
	 * consistent.
	 */
	m->cp_hqd_persistent_state =
				DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;

	m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi = upper_32_bits(addr);

	m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
				QUANTUM_DURATION(10);
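	/*
	 * The quantum above arms a per-queue time slice: QUANTUM_SCALE_1MS
	 * with a duration of 10, i.e. roughly 10 ms before the CP may switch
	 * out an oversubscribed queue.
	 */
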
	/*
	 * Pipe Priority
	 * Identifies the pipe relative priority when this queue is connected
	 * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
	 * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
	 * 0 = CS_LOW (typically below GFX)
	 * 1 = CS_MEDIUM (typically between HP3D and GFX)
	 * 2 = CS_HIGH (typically above HP3D)
	 */
	m->cp_hqd_pipe_priority = 1;
	m->cp_hqd_queue_priority = 15;

	if (q->format == KFD_QUEUE_FORMAT_AQL)
		m->cp_hqd_iq_rptr = AQL_ENABLE;

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	retval = mm->update_mqd(mm, m, q);

	return retval;
}

static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	int retval;
	struct cik_sdma_rlc_registers *m;

	retval = kfd_gtt_sa_allocate(mm->dev,
					sizeof(struct cik_sdma_rlc_registers),
					mqd_mem_obj);

	if (retval != 0)
		return -ENOMEM;

	m = (struct cik_sdma_rlc_registers *) (*mqd_mem_obj)->cpu_ptr;

	memset(m, 0, sizeof(struct cik_sdma_rlc_registers));

	*mqd = m;
	if (gart_addr)
		*gart_addr = (*mqd_mem_obj)->gpu_addr;

	retval = mm->update_mqd(mm, m, q);

	return retval;
}

static void uninit_mqd(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}

static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}

static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
		    uint32_t queue_id, struct queue_properties *p,
		    struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
	uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
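	/*
	 * E.g. a 4 KiB ring holds 1024 dwords, giving wptr_mask 0x3FF; the
	 * AQL shift of 4 converts a packet count to dwords (one 64-byte AQL
	 * packet is 16 dwords).
	 */
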
	return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, wptr_mask, mms);
}

static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
			 uint32_t pipe_id, uint32_t queue_id,
			 struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
					       (uint32_t __user *)p->write_ptr,
					       mms);
}

static int __update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q, unsigned int atc_bit)
{
	struct cik_mqd *m;

	m = get_mqd(mqd);
	m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
				DEFAULT_MIN_AVAIL_SIZE;
	m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
	if (atc_bit) {
		m->cp_hqd_pq_control |= PQ_ATC_EN;
		m->cp_hqd_ib_control |= IB_ATC_EN;
	}

	/*
	 * The queue size field encodes log2 of the ring size in dwords,
	 * minus 1.
	 */
	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
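	/* E.g. a 4 KiB ring is 1024 dwords: order_base_2(1024) - 1 = 9. */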
	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL)
		m->cp_hqd_pq_control |= NO_UPDATE_RPTR;

	update_cu_mask(mm, mqd, q);

	q->is_active = (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0 &&
			!q->is_evicted);

	return 0;
}

static int update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	return __update_mqd(mm, mqd, q, 1);
}

static int update_mqd_hawaii(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	return __update_mqd(mm, mqd, q, 0);
}

static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct cik_sdma_rlc_registers *m;

	m = get_sdma_mqd(mqd);
	m->sdma_rlc_rb_cntl = order_base_2(q->queue_size / 4)
			<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
			q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
			1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
			6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
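	/*
	 * RB_CNTL packs four fields via the register header's __SHIFT
	 * constants: RB_SIZE (log2 of the ring size in dwords), the owning
	 * VMID, and read-pointer writeback enable plus its timer interval.
	 */
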
	m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdma_rlc_doorbell =
		q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;

	m->sdma_rlc_virtual_addr = q->sdma_vm_addr;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;

	q->is_active = (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0 &&
			!q->is_evicted);

	return 0;
}

static int destroy_mqd(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type,
			unsigned int timeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
					pipe_id, queue_id);
}

/*
 * The preempt type is ignored here because there is only one way to
 * preempt an SDMA queue.
 */
static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type,
			unsigned int timeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
}

static bool is_occupied(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
					pipe_id, queue_id);
}

static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
}

/*
 * HIQ MQD implementation. The HIQ queue in Kaveri uses the same MQD
 * structure as all the user-mode queues, but with different initial values.
 */

static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
}

static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct cik_mqd *m;

	m = get_mqd(mqd);
	m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
				DEFAULT_MIN_AVAIL_SIZE |
				PRIV_STATE |
				KMD_QUEUE;

	/*
	 * The queue size field encodes log2 of the ring size in dwords,
	 * minus 1.
	 */
	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);

	m->cp_hqd_vmid = q->vmid;

	q->is_active = (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0 &&
			!q->is_evicted);

	return 0;
}

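/*
 * debugfs hooks: dump the raw MQD with seq_hex_dump(), 32 bytes per row
 * grouped into 4-byte words, each row prefixed by its offset, without an
 * ASCII column.
 */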
#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct cik_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct cik_sdma_rlc_registers), false);
	return 0;
}

#endif

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
	case KFD_MQD_TYPE_COMPUTE:
		mqd->init_mqd = init_mqd;
		mqd->uninit_mqd = uninit_mqd;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_HIQ:
		mqd->init_mqd = init_mqd_hiq;
		mqd->uninit_mqd = uninit_mqd;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd_hiq;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		mqd->init_mqd = init_mqd_sdma;
		mqd->uninit_mqd = uninit_mqd_sdma;
		mqd->load_mqd = load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = destroy_mqd_sdma;
		mqd->is_occupied = is_occupied_sdma;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}

struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev)
{
	struct mqd_manager *mqd;

	mqd = mqd_manager_init_cik(type, dev);
	if (!mqd)
		return NULL;
	if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
		mqd->update_mqd = update_mqd_hawaii;
	return mqd;
}
