2 * Copyright 2017 Valve Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: Andres Rodriguez
26 #include "amdgpu_ring.h"
28 static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper
*mapper
,
34 if (hw_ip
> AMDGPU_MAX_IP_NUM
)
37 mapper
->hw_ip
= hw_ip
;
38 mutex_init(&mapper
->lock
);
40 memset(mapper
->queue_map
, 0, sizeof(mapper
->queue_map
));
45 static struct amdgpu_ring
*amdgpu_get_cached_map(struct amdgpu_queue_mapper
*mapper
,
48 return mapper
->queue_map
[ring
];
51 static int amdgpu_update_cached_map(struct amdgpu_queue_mapper
*mapper
,
52 int ring
, struct amdgpu_ring
*pring
)
54 if (WARN_ON(mapper
->queue_map
[ring
])) {
55 DRM_ERROR("Un-expected ring re-map\n");
59 mapper
->queue_map
[ring
] = pring
;
64 static int amdgpu_identity_map(struct amdgpu_device
*adev
,
65 struct amdgpu_queue_mapper
*mapper
,
67 struct amdgpu_ring
**out_ring
)
69 switch (mapper
->hw_ip
) {
70 case AMDGPU_HW_IP_GFX
:
71 *out_ring
= &adev
->gfx
.gfx_ring
[ring
];
73 case AMDGPU_HW_IP_COMPUTE
:
74 *out_ring
= &adev
->gfx
.compute_ring
[ring
];
76 case AMDGPU_HW_IP_DMA
:
77 *out_ring
= &adev
->sdma
.instance
[ring
].ring
;
79 case AMDGPU_HW_IP_UVD
:
80 *out_ring
= &adev
->uvd
.inst
[0].ring
;
82 case AMDGPU_HW_IP_VCE
:
83 *out_ring
= &adev
->vce
.ring
[ring
];
85 case AMDGPU_HW_IP_UVD_ENC
:
86 *out_ring
= &adev
->uvd
.inst
[0].ring_enc
[ring
];
88 case AMDGPU_HW_IP_VCN_DEC
:
89 *out_ring
= &adev
->vcn
.ring_dec
;
91 case AMDGPU_HW_IP_VCN_ENC
:
92 *out_ring
= &adev
->vcn
.ring_enc
[ring
];
94 case AMDGPU_HW_IP_VCN_JPEG
:
95 *out_ring
= &adev
->vcn
.ring_jpeg
;
99 DRM_ERROR("unknown HW IP type: %d\n", mapper
->hw_ip
);
103 return amdgpu_update_cached_map(mapper
, ring
, *out_ring
);
106 static enum amdgpu_ring_type
amdgpu_hw_ip_to_ring_type(int hw_ip
)
109 case AMDGPU_HW_IP_GFX
:
110 return AMDGPU_RING_TYPE_GFX
;
111 case AMDGPU_HW_IP_COMPUTE
:
112 return AMDGPU_RING_TYPE_COMPUTE
;
113 case AMDGPU_HW_IP_DMA
:
114 return AMDGPU_RING_TYPE_SDMA
;
115 case AMDGPU_HW_IP_UVD
:
116 return AMDGPU_RING_TYPE_UVD
;
117 case AMDGPU_HW_IP_VCE
:
118 return AMDGPU_RING_TYPE_VCE
;
120 DRM_ERROR("Invalid HW IP specified %d\n", hw_ip
);
125 static int amdgpu_lru_map(struct amdgpu_device
*adev
,
126 struct amdgpu_queue_mapper
*mapper
,
127 u32 user_ring
, bool lru_pipe_order
,
128 struct amdgpu_ring
**out_ring
)
131 int ring_type
= amdgpu_hw_ip_to_ring_type(mapper
->hw_ip
);
132 int ring_blacklist
[AMDGPU_MAX_RINGS
];
133 struct amdgpu_ring
*ring
;
135 /* 0 is a valid ring index, so initialize to -1 */
136 memset(ring_blacklist
, 0xff, sizeof(ring_blacklist
));
138 for (i
= 0, j
= 0; i
< AMDGPU_MAX_RINGS
; i
++) {
139 ring
= mapper
->queue_map
[i
];
141 ring_blacklist
[j
++] = ring
->idx
;
144 r
= amdgpu_ring_lru_get(adev
, ring_type
, ring_blacklist
,
145 j
, lru_pipe_order
, out_ring
);
149 return amdgpu_update_cached_map(mapper
, user_ring
, *out_ring
);
153 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
155 * @adev: amdgpu_device pointer
156 * @mgr: amdgpu_queue_mgr structure holding queue information
 * Initialize the selected @mgr (all asics).
160 * Returns 0 on success, error on failure.
162 int amdgpu_queue_mgr_init(struct amdgpu_device
*adev
,
163 struct amdgpu_queue_mgr
*mgr
)
170 memset(mgr
, 0, sizeof(*mgr
));
172 for (i
= 0; i
< AMDGPU_MAX_IP_NUM
; ++i
) {
173 r
= amdgpu_queue_mapper_init(&mgr
->mapper
[i
], i
);
182 * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
184 * @adev: amdgpu_device pointer
185 * @mgr: amdgpu_queue_mgr structure holding queue information
 * De-initialize the selected @mgr (all asics).
189 * Returns 0 on success, error on failure.
191 int amdgpu_queue_mgr_fini(struct amdgpu_device
*adev
,
192 struct amdgpu_queue_mgr
*mgr
)
198 * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
200 * @adev: amdgpu_device pointer
201 * @mgr: amdgpu_queue_mgr structure holding queue information
203 * @instance: HW instance
204 * @ring: user ring id
 * @out_ring: pointer to mapped amdgpu_ring
207 * Map a userspace ring id to an appropriate kernel ring. Different
208 * policies are configurable at a HW IP level.
210 * Returns 0 on success, error on failure.
212 int amdgpu_queue_mgr_map(struct amdgpu_device
*adev
,
213 struct amdgpu_queue_mgr
*mgr
,
214 u32 hw_ip
, u32 instance
, u32 ring
,
215 struct amdgpu_ring
**out_ring
)
217 int i
, r
, ip_num_rings
= 0;
218 struct amdgpu_queue_mapper
*mapper
= &mgr
->mapper
[hw_ip
];
220 if (!adev
|| !mgr
|| !out_ring
)
223 if (hw_ip
>= AMDGPU_MAX_IP_NUM
)
226 if (ring
>= AMDGPU_MAX_RINGS
)
229 /* Right now all IPs have only one instance - multiple rings. */
231 DRM_DEBUG("invalid ip instance: %d\n", instance
);
236 case AMDGPU_HW_IP_GFX
:
237 ip_num_rings
= adev
->gfx
.num_gfx_rings
;
239 case AMDGPU_HW_IP_COMPUTE
:
240 ip_num_rings
= adev
->gfx
.num_compute_rings
;
242 case AMDGPU_HW_IP_DMA
:
243 ip_num_rings
= adev
->sdma
.num_instances
;
245 case AMDGPU_HW_IP_UVD
:
246 for (i
= 0; i
< adev
->uvd
.num_uvd_inst
; i
++) {
247 if (!(adev
->uvd
.harvest_config
& (1 << i
)))
251 case AMDGPU_HW_IP_VCE
:
252 ip_num_rings
= adev
->vce
.num_rings
;
254 case AMDGPU_HW_IP_UVD_ENC
:
255 for (i
= 0; i
< adev
->uvd
.num_uvd_inst
; i
++) {
256 if (!(adev
->uvd
.harvest_config
& (1 << i
)))
260 adev
->uvd
.num_enc_rings
* ip_num_rings
;
262 case AMDGPU_HW_IP_VCN_DEC
:
265 case AMDGPU_HW_IP_VCN_ENC
:
266 ip_num_rings
= adev
->vcn
.num_enc_rings
;
268 case AMDGPU_HW_IP_VCN_JPEG
:
272 DRM_DEBUG("unknown ip type: %d\n", hw_ip
);
276 if (ring
>= ip_num_rings
) {
277 DRM_DEBUG("Ring index:%d exceeds maximum:%d for ip:%d\n",
278 ring
, ip_num_rings
, hw_ip
);
282 mutex_lock(&mapper
->lock
);
284 *out_ring
= amdgpu_get_cached_map(mapper
, ring
);
291 switch (mapper
->hw_ip
) {
292 case AMDGPU_HW_IP_GFX
:
293 case AMDGPU_HW_IP_UVD
:
294 case AMDGPU_HW_IP_VCE
:
295 case AMDGPU_HW_IP_UVD_ENC
:
296 case AMDGPU_HW_IP_VCN_DEC
:
297 case AMDGPU_HW_IP_VCN_ENC
:
298 case AMDGPU_HW_IP_VCN_JPEG
:
299 r
= amdgpu_identity_map(adev
, mapper
, ring
, out_ring
);
301 case AMDGPU_HW_IP_DMA
:
302 r
= amdgpu_lru_map(adev
, mapper
, ring
, false, out_ring
);
304 case AMDGPU_HW_IP_COMPUTE
:
305 r
= amdgpu_lru_map(adev
, mapper
, ring
, true, out_ring
);
310 DRM_DEBUG("unknown HW IP type: %d\n", mapper
->hw_ip
);
314 mutex_unlock(&mapper
->lock
);