/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
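
/*
 * Initialize a context: zero it, set up refcounting and per-ring fence
 * bookkeeping, and, when the GPU scheduler is enabled, create one
 * scheduler entity per ring. Entities created before a failure are torn
 * down again.
 */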
int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
                    struct amdgpu_ctx *ctx)
{
        unsigned i, j;
        int r;

        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                ctx->rings[i].sequence = 1;

        if (amdgpu_enable_scheduler) {
                /* create context entity for each ring */
                for (i = 0; i < adev->num_rings; i++) {
                        struct amd_sched_rq *rq;
                        if (kernel)
                                rq = &adev->rings[i]->sched.kernel_rq;
                        else
                                rq = &adev->rings[i]->sched.sched_rq;
                        r = amd_sched_entity_init(&adev->rings[i]->sched,
                                                  &ctx->rings[i].entity,
                                                  rq, amdgpu_sched_jobs);
                        if (r)
                                break;
                }

                if (i < adev->num_rings) {
                        for (j = 0; j < i; j++)
                                amd_sched_entity_fini(&adev->rings[j]->sched,
                                                      &ctx->rings[j].entity);
                        kfree(ctx);
                        return r;
                }
        }
        return 0;
}
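
/*
 * Tear down a context: drop any fences still tracked per ring and destroy
 * the scheduler entities that amdgpu_ctx_init() created.
 */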
void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
                        fence_put(ctx->rings[i].fences[j]);

        if (amdgpu_enable_scheduler) {
                for (i = 0; i < adev->num_rings; i++)
                        amd_sched_entity_fini(&adev->rings[i]->sched,
                                              &ctx->rings[i].entity);
        }
}
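
/*
 * Allocate a new context, publish it in the file-private handle IDR, and
 * return its handle through @id. The IDR slot is claimed first, so the
 * handle is known before the context is initialized.
 */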
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            uint32_t *id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;
        int r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        mutex_lock(&mgr->lock);
        r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
                return r;
        }
        *id = (uint32_t)r;
        r = amdgpu_ctx_init(adev, false, ctx);
        mutex_unlock(&mgr->lock);

        return r;
}
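
/* Final kref release callback: finalize and free the context. */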
static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);

        amdgpu_ctx_fini(ctx);

        kfree(ctx);
}
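
/* Remove the handle's IDR entry and drop its reference on the context. */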
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx) {
                idr_remove(&mgr->ctx_handles, id);
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
                mutex_unlock(&mgr->lock);
                return 0;
        }
        mutex_unlock(&mgr->lock);
        return -EINVAL;
}
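
/*
 * Report context state to userspace. A change in the global GPU reset
 * counter since the last query is reported as an (unclassified) reset.
 */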
static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}
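
/* DRM_AMDGPU_CTX ioctl entry point: dispatch alloc, free and query ops. */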
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}
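
/* Look up a context by handle and take a reference; NULL if not found. */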
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}
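
/* Drop a reference obtained with amdgpu_ctx_get(). */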
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}
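
/*
 * Store @fence in the per-ring fence history and return the sequence
 * number assigned to it. Only AMDGPU_CTX_MAX_CS_PENDING fences are kept
 * per ring, so the fence previously occupying this slot is waited on
 * before it is replaced and released.
 */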
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct fence *fence)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        uint64_t seq = cring->sequence;
        unsigned idx = 0;
        struct fence *other = NULL;

        idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
        other = cring->fences[idx];
        if (other) {
                signed long r;
                r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
                if (r < 0)
                        DRM_ERROR("Error (%ld) waiting for fence!\n", r);
        }

        fence_get(fence);

        spin_lock(&ctx->ring_lock);
        cring->fences[idx] = fence;
        cring->sequence++;
        spin_unlock(&ctx->ring_lock);

        fence_put(other);

        return seq;
}
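
/*
 * Look up the fence for sequence number @seq on @ring. Returns
 * ERR_PTR(-EINVAL) for sequence numbers not submitted yet and NULL for
 * fences that have already aged out of the history.
 */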
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                   struct amdgpu_ring *ring, uint64_t seq)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        struct fence *fence;

        spin_lock(&ctx->ring_lock);

        if (seq >= cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}
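
/* Initialize the per-file context manager: the handle IDR and its lock. */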
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}
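
/*
 * Destroy the context manager on file close, releasing every context that
 * is still registered. A context with outstanding references at this
 * point is a bug and is reported.
 */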
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}