/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/seq_file.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
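
/*
 * Emit a fence: grab the next sequence number and move the fence to the
 * emitted list.  If the CP ring is not ready, the sequence number is
 * written straight to the scratch register so the fence signals
 * immediately.
 */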
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	/* a fence can only be emitted once */
	if (fence->emited) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
	if (!rdev->cp.ready) {
		/* FIXME: cp is not running, assume everything is done right
		 * away */
		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
	} else {
		radeon_fence_ring_emit(rdev, fence);
	}
	fence->emited = true;
	fence->timeout = jiffies + ((2000 * HZ) / 1000);
	list_del(&fence->list);
	list_add_tail(&fence->list, &rdev->fence_drv.emited);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
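
/*
 * Scan for newly signaled fences; the caller must hold fence_drv.lock.
 * Every fence up to the sequence number read from the scratch register
 * is moved onto the signaled list.  Returns true when waiters should be
 * woken.
 */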
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;

	if (rdev == NULL || rdev->shutdown) {
		return true;
	}
	seq = RREG32(rdev->fence_drv.scratch_reg);
	rdev->fence_drv.last_seq = seq;
	n = NULL;
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_del(i);
			list_add_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}
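
/*
 * kref release callback: unlink the fence from whatever list it is on
 * and free it.  Reached only through radeon_fence_unref(), never called
 * directly.
 */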
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
	list_del(&fence->list);
	fence->emited = false;
	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
	kfree(fence);
}
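
/*
 * Allocate a fence and queue it on the created list.  The caller owns
 * the initial reference and must drop it with radeon_fence_unref().
 */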
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emited = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return 0;
}
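
/*
 * Test whether a fence has signaled, polling the hardware if necessary.
 * NULL fences, GPU lockup and driver shutdown all report as signaled so
 * that callers cannot wait forever on them.
 */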
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	struct radeon_device *rdev;
	unsigned long irq_flags;
	bool signaled = false;

	/* check the fence before dereferencing it */
	if (fence == NULL) {
		return true;
	}
	rdev = fence->rdev;
	if (rdev->gpu_lockup) {
		return true;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down report all fences as signaled */
	if (rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emited) {
		WARN(1, "Querying an unemitted fence: %p!\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(rdev);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	return signaled;
}
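
/*
 * Busy-wait path used on CHIP_R600 and newer: loop until the fence
 * signals, returning -EBUSY on fence timeout or -ERESTARTSYS when an
 * interruptible wait is broken by a pending signal.
 */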
int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
{
	struct radeon_device *rdev;
	int ret = 0;

	rdev = fence->rdev;

	__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

	while (1) {
		if (radeon_fence_signaled(fence))
			break;

		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		if (lazy)
			schedule_timeout(1);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}
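
/*
 * Block until the fence signals, retrying with a bounded timeout and
 * resetting the GPU when a fence is long overdue.  A typical caller
 * lifecycle looks roughly like this (sketch, error handling omitted):
 *
 *	struct radeon_fence *fence;
 *
 *	radeon_fence_create(rdev, &fence);
 *	... write commands to the ring ...
 *	radeon_fence_emit(rdev, fence);
 *	radeon_fence_wait(fence, true);
 *	radeon_fence_unref(&fence);
 */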
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long cur_jiffies;
	unsigned long timeout;
	bool expired = false;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	if (rdev->family >= CHIP_R600) {
		r = r600_fence_wait(fence, intr, 0);
		if (r == -ERESTARTSYS)
			return -EBUSY;
		return r;
	}

retry:
	cur_jiffies = jiffies;
	/* sleep no longer than the time left before the fence times out */
	timeout = HZ / 100;
	if (time_after(fence->timeout, cur_jiffies)) {
		timeout = fence->timeout - cur_jiffies;
	}

	if (intr) {
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		if (unlikely(r == -ERESTARTSYS)) {
			return -EBUSY;
		}
	} else {
		r = wait_event_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
	}
	if (unlikely(!radeon_fence_signaled(fence))) {
		if (unlikely(r == 0)) {
			expired = true;
		}
		if (unlikely(expired)) {
			timeout = 1;
			if (time_after(cur_jiffies, fence->timeout)) {
				timeout = cur_jiffies - fence->timeout;
			}
			timeout = jiffies_to_msecs(timeout);
			if (timeout > 500) {
				DRM_ERROR("fence(%p:0x%08X) %lums timeout "
					  "going to reset GPU\n",
					  fence, fence->seq, timeout);
				radeon_gpu_reset(rdev);
				WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			}
		}
		goto retry;
	}
	if (unlikely(expired)) {
		rdev->fence_drv.count_timeout++;
		cur_jiffies = jiffies;
		timeout = 1;
		if (time_after(cur_jiffies, fence->timeout)) {
			timeout = cur_jiffies - fence->timeout;
		}
		timeout = jiffies_to_msecs(timeout);
		DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
			  fence, fence->seq, timeout);
		DRM_ERROR("last signaled fence(0x%08X)\n",
			  rdev->fence_drv.last_seq);
	}
	return 0;
}
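
/*
 * Wait for the oldest emitted fence.  A reference is taken under the
 * lock so the fence cannot be freed while we sleep.
 */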
int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
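
/*
 * Wait for the most recently emitted fence, i.e. roughly until the GPU
 * has caught up with everything submitted so far.
 */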
int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
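
/*
 * Reference counting: radeon_fence_ref() takes an extra reference,
 * radeon_fence_unref() drops one and clears the caller's pointer; the
 * fence is destroyed when the last reference goes away.
 */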
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, &radeon_fence_destroy);
	}
}
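
/*
 * Poll for newly signaled fences and, if any signaled, wake everybody
 * sleeping on the fence queue.
 */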
void radeon_fence_process(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv.queue);
	}
}
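
/*
 * One-time setup: reserve the scratch register used to publish the last
 * signaled sequence number and initialize the bookkeeping lists and wait
 * queue.
 */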
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		DRM_ERROR("Fence failed to get a scratch register.");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	WREG32(rdev->fence_drv.scratch_reg, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	rdev->fence_drv.count_timeout = 0;
	init_waitqueue_head(&rdev->fence_drv.queue);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for fence!\n");
	}
	return 0;
}
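
/*
 * Teardown: kick any remaining waiters out of their sleep and give the
 * scratch register back.
 */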
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;

	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	DRM_INFO("radeon: fence finalized\n");
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emitted fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}