/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/fence.h>
/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releaseables */
/* stack them 16 high for now -drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
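
/*
 * One slot per release type, indexed by the cur_idx picked in
 * qxl_alloc_release_reserved(): 0 = drawable, 1 = surface cmd,
 * 2 = cursor cmd.
 */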
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE,
					   RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO,
				       RELEASES_PER_BO };
static const char *qxl_get_driver_name(struct fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct fence *fence)
{
	return "release";
}

static bool qxl_nop_signaling(struct fence *fence)
{
	/* fences are always automatically signaled, so just pretend we did this.. */
	return true;
}
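
/*
 * There is no interrupt-driven signaling for these fences (see
 * qxl_nop_signaling() above), so waiting is done by polling: nudge the
 * host with qxl_io_notify_oom(), run the garbage collector, and keep
 * checking fence_is_signaled() until the timeout runs out.
 */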
static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (fence_is_signaled(fence))
			goto signaled;
	}

	if (fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			FENCE_WARN(fence, "failed to wait on release %d "
				   "after spincount %d\n",
				   fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}
static const struct fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.enable_signaling = qxl_nop_signaling,
	.wait = qxl_fence_wait,
};
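
/*
 * qxl_release_alloc() - allocate a qxl_release and an idr handle for it.
 * The idr handle becomes release->id, and the fence seqno is taken while
 * release_idr_lock is held so ids and seqnos advance together.
 */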
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;
	return handle;
}
static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
}
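
/*
 * qxl_release_free() - drop the idr handle and free every BO on the list.
 * If the fence was initialised (base.ops set) the release goes away via
 * fence_signal()/fence_put(); otherwise it is freed directly.
 */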
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		fence_signal(&release->base);
		fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}
static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	int ret;

	/* pin releases bo's they are too messy to evict */
	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
			    QXL_GEM_DOMAIN_VRAM, NULL,
			    bo);
	return ret;
}
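
/*
 * qxl_release_list_add() - track a BO on this release.  A BO that is
 * already on the list is not added twice.
 */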
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.shared = false;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	ret = reservation_object_reserve_shared(bo->tbo.resv);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}
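
/*
 * qxl_release_reserve_list() - reserve and validate every BO on the
 * release under a ww acquire ticket.  If any BO fails validation the
 * whole reservation is backed off.
 */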
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object on the release its the release itself
	   since these objects are pinned no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}
void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object on the release its the release itself
	   since these objects are pinned no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
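
/*
 * For surface destroy commands the release is stashed in the same BO as
 * the matching create command, 64 bytes further in, so tearing a surface
 * down never needs a fresh release BO.
 */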
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = to_qxl_bo(entry->tv.bo);

		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}
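
/*
 * qxl_alloc_release_reserved() - suballocate a release out of the
 * per-type "current" release BO.  Roughly, callers are expected to do
 * something like (sketch only, see the actual callers for details):
 *
 *	qxl_alloc_release_reserved(qdev, size, type, &release, &cmd_bo);
 *	qxl_release_reserve_list(release, false);
 *	info = qxl_release_map(qdev, release);
 *	... fill in the command ...
 *	qxl_release_unmap(qdev, release, info);
 *	qxl_release_fence_buffer_objects(release);
 */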
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_SIZE);
	return info;
}
void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
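
/*
 * qxl_release_fence_buffer_objects() - initialise the release's fence
 * (context release->id | 0xf0000000, see the comment below), attach it as
 * a shared fence to every reserved BO, then put the BOs back on the LRU
 * and unreserve them.
 */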
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if only one object on the release its the release itself
	   since these objects are pinned no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		   release->id | 0xf0000000, release->base.seqno);
	trace_fence_emit(&release->base);

	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		reservation_object_add_shared_fence(bo->resv, &release->base);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}