/*
 * Copyright 2011 Christian König.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_trace.h"
int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore)
{
	int i, r;

	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
	if (*semaphore == NULL) {
		return -ENOMEM;
	}
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
			     &(*semaphore)->sa_bo, 8, 8, true);
	if (r) {
		kfree(*semaphore);
		*semaphore = NULL;
		return r;
	}
	/* the hardware semaphore is a single, zero-initialized 64bit word */
	(*semaphore)->waiters = 0;
	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;

	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		(*semaphore)->sync_to[i] = NULL;

	return 0;
}
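
/*
 * Usage sketch (editorial illustration, not driver code): a caller typically
 * creates the semaphore per submission, registers the fences it depends on,
 * syncs, and frees it with the fence of the new submission:
 *
 *	struct radeon_semaphore *sem;
 *	int r = radeon_semaphore_create(rdev, &sem);
 *	if (r)
 *		return r;
 *	radeon_semaphore_sync_to(sem, dependency_fence);
 *	r = radeon_semaphore_sync_rings(rdev, sem, ring_index);
 *	...
 *	radeon_semaphore_free(rdev, &sem, new_fence);
 *
 * "dependency_fence", "ring_index" and "new_fence" are placeholders for
 * values from the caller's context.
 */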
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
				  struct radeon_semaphore *semaphore)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	trace_radeon_semaphore_signale(ridx, semaphore);

	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
		--semaphore->waiters;

		/* for debugging lockup only, used by sysfs debug files */
		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
		return true;
	}
	return false;
}
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
				struct radeon_semaphore *semaphore)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	trace_radeon_semaphore_wait(ridx, semaphore);

	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
		++semaphore->waiters;

		/* for debugging lockup only, used by sysfs debug files */
		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
		return true;
	}
	return false;
}
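
/*
 * Note (editorial): emit_wait increments "waiters" and emit_signal
 * decrements it, so every wait emitted on one ring must be balanced by a
 * signal on another. radeon_semaphore_free() warns when the count is still
 * positive, because an unpaired wait never completes and locks up its ring.
 */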
/**
 * radeon_semaphore_sync_to - use the semaphore to sync to a fence
 *
 * @semaphore: semaphore object to add fence to
 * @fence: fence to sync to
 *
 * Sync to the fence using this semaphore object
 */
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
			      struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	/* only the latest fence per ring needs to be remembered */
	other = semaphore->sync_to[fence->ring];
	semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
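
/*
 * Usage sketch (editorial illustration): dependencies from several rings can
 * be registered back to back; only the latest fence per ring is kept:
 *
 *	radeon_semaphore_sync_to(sem, gfx_fence);  // e.g. from the GFX ring
 *	radeon_semaphore_sync_to(sem, dma_fence);  // e.g. from a DMA ring
 *	radeon_semaphore_sync_to(sem, gfx_fence2); // replaces gfx_fence if later
 *
 * The fence names are placeholders; any radeon_fence (or NULL, which is
 * ignored) may be passed.
 */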
/**
 * radeon_semaphore_sync_rings - sync ring to all registered fences
 *
 * @rdev: radeon_device pointer
 * @semaphore: semaphore object to use for sync
 * @ring: ring that needs sync
 *
 * Ensure that all registered fences are signaled before letting
 * the ring continue. The caller must hold the ring lock.
 */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
				struct radeon_semaphore *semaphore,
				int ring)
{
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_fence *fence = semaphore->sync_to[i];

		/* check if we really need to sync */
		if (!radeon_fence_need_sync(fence, ring))
			continue;

		/* prevent GPU deadlocks */
		if (!rdev->ring[i].ready) {
			dev_err(rdev->dev, "Syncing to a disabled ring!\n");
			return -EINVAL;
		}

		/* allocate enough space for sync command */
		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
		if (r) {
			return r;
		}

		/* emit the signal semaphore */
		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
			/* signaling wasn't successful, wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			radeon_fence_wait_locked(fence);
			continue;
		}

		/* we assume caller has already allocated space on waiters ring */
		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
			/* waiting wasn't successful, wait manually */
			radeon_ring_undo(&rdev->ring[i]);
			radeon_fence_wait_locked(fence);
			continue;
		}

		radeon_ring_commit(rdev, &rdev->ring[i]);
		radeon_fence_note_sync(fence, ring);
	}

	return 0;
}
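
/*
 * Note (editorial): for every ring we depend on, the loop above emits a
 * signal command on *that* ring and a matching wait command on the ring
 * being scheduled; the 16 dwords passed to radeon_ring_alloc() leave room
 * for the signal packet. If semaphores cannot be emitted on a ring, the
 * emit helpers return false and we fall back to a blocking CPU wait on the
 * fence instead.
 */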
void radeon_semaphore_free(struct radeon_device *rdev,
			   struct radeon_semaphore **semaphore,
			   struct radeon_fence *fence)
{
	if (semaphore == NULL || *semaphore == NULL) {
		return;
	}
	if ((*semaphore)->waiters > 0) {
		dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
			" hardware lockup imminent!\n", *semaphore);
	}
	radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
	kfree(*semaphore);
	*semaphore = NULL;
}
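
/*
 * Note (editorial): the fence passed to radeon_sa_bo_free() keeps the
 * semaphore's backing memory from being reused by the suballocator until
 * that fence has signaled, so the semaphore can be freed immediately after
 * scheduling the commands that use it.
 */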