/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "kfd_device_queue_manager.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_sh_mask.h"
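
/*
 * ASIC-specific device queue manager hooks for VI (GFX8). The _vi_tonga
 * variants are the dGPU paths (no ATC, always 64-bit GPUVM addressing,
 * per the comments further down); the plain _vi variants keep the
 * ATC-based configuration used on APUs.
 */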
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
				       struct qcm_process_device *qpd,
				       enum cache_policy default_policy,
				       enum cache_policy alternate_policy,
				       void __user *alternate_aperture_base,
				       uint64_t alternate_aperture_size);
static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
					     struct qcm_process_device *qpd,
					     enum cache_policy default_policy,
					     enum cache_policy alternate_policy,
					     void __user *alternate_aperture_base,
					     uint64_t alternate_aperture_size);
static int update_qpd_vi(struct device_queue_manager *dqm,
			 struct qcm_process_device *qpd);
static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
			       struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
			 struct qcm_process_device *qpd);
static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
			       struct queue *q,
			       struct qcm_process_device *qpd);
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
	asic_ops->update_qpd = update_qpd_vi;
	asic_ops->init_sdma_vm = init_sdma_vm;
}
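
/*
 * The Tonga table below differs only in the callbacks it installs: the
 * dGPU implementations drop the ATC-related bits and use uncached
 * MTYPEs, as sketched in the function bodies further down.
 */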
void device_queue_manager_init_vi_tonga(
		struct device_queue_manager_asic_ops *asic_ops)
{
	asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi_tonga;
	asic_ops->update_qpd = update_qpd_vi_tonga;
	asic_ops->init_sdma_vm = init_sdma_vm_tonga;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
	 * scratch and GPUVM apertures.
	 * The hardware fills in the remaining 59 bits according to the
	 * following pattern:
	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
	 *
	 * (where X/Y is the configurable nybble with the low-bit 0)
	 *
	 * LDS and scratch will have the same top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
	 * GPUVM can have a different top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
	 * We don't bother to support different top nybbles
	 * for LDS/Scratch and GPUVM.
	 */

	WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
		top_address_nybble == 0);

	return top_address_nybble << 12 |
	       (top_address_nybble << 12) << SH_MEM_BASES__SHARED_BASE__SHIFT;
}
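
/*
 * Worked example (illustrative, assuming GFX8's SH_MEM_BASES layout with
 * PRIVATE_BASE in bits 15:0 and SHARED_BASE in bits 31:16): passing
 * top_address_nybble == 0x8 yields 0x8000 | (0x8000 << 16) == 0x80008000,
 * i.e. both aperture bases get top nybble 0x8.
 */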
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
				       struct qcm_process_device *qpd,
				       enum cache_policy default_policy,
				       enum cache_policy alternate_policy,
				       void __user *alternate_aperture_base,
				       uint64_t alternate_aperture_size)
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_CC : MTYPE_NC;

	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
			MTYPE_CC : MTYPE_NC;

	qpd->sh_mem_config = (qpd->sh_mem_config &
			SH_MEM_CONFIG__ADDRESS_MODE_MASK) |
		SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
		default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
		ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
		SH_MEM_CONFIG__PRIVATE_ATC_MASK;

	return true;
}
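
/*
 * dGPU variant: Tonga has no ATC, so a "coherent" policy is mapped to
 * MTYPE_UC (uncached) rather than MTYPE_CC, and neither PRIVATE_ATC nor
 * any ADDRESS_MODE bits are set in sh_mem_config here.
 */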
static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
					     struct qcm_process_device *qpd,
					     enum cache_policy default_policy,
					     enum cache_policy alternate_policy,
					     void __user *alternate_aperture_base,
					     uint64_t alternate_aperture_size)
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_UC : MTYPE_NC;

	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
			MTYPE_UC : MTYPE_NC;

	qpd->sh_mem_config =
		SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
		default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
		ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;

	return true;
}
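
/*
 * update_qpd fills in the per-process SH_MEM state (config, bases and
 * APE1 aperture) cached in the qcm_process_device; those values are
 * later programmed into hardware when the process's queues are mapped.
 */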
static int update_qpd_vi(struct device_queue_manager *dqm,
			 struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
			MTYPE_CC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT |
			SH_MEM_CONFIG__PRIVATE_ATC_MASK;

		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	if (qpd->pqm->process->is_32bit_user_mode) {
		temp = get_sh_mem_bases_32(pdd);
		qpd->sh_mem_bases = temp << SH_MEM_BASES__SHARED_BASE__SHIFT;
		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA32 <<
			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
	} else {
		temp = get_sh_mem_bases_nybble_64(pdd);
		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
		qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
		qpd->sh_mem_config |= 1 <<
			SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
	}

	pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		 qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);

	return 0;
}
static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
			       struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
				SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
			MTYPE_UC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
			MTYPE_UC << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;

		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses.
	 */
	temp = get_sh_mem_bases_nybble_64(pdd);
	qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);

	pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
		 temp, qpd->sh_mem_bases);

	return 0;
}
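
/*
 * init_sdma_vm seeds the per-queue SDMA0_RLC0_VIRTUAL_ADDR value so the
 * SDMA engine resolves addresses the same way the shaders do: ATC
 * translation plus either 32-bit pointer mode (32-bit processes) or the
 * 64-bit shared-base nybble, mirroring the SH_MEM_BASES setup above.
 */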
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
			 struct qcm_process_device *qpd)
{
	uint32_t value = (1 << SDMA0_RLC0_VIRTUAL_ADDR__ATC__SHIFT);

	if (q->process->is_32bit_user_mode)
		value |= (1 << SDMA0_RLC0_VIRTUAL_ADDR__PTR32__SHIFT) |
			 get_sh_mem_bases_32(qpd_to_pdd(qpd));
	else
		value |= ((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
			  SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
			 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;

	q->properties.sdma_vm_addr = value;
}
static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
			       struct queue *q,
			       struct qcm_process_device *qpd)
{
	/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
	 * aperture addresses.
	 */
	q->properties.sdma_vm_addr =
		((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
		 SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
		SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
}