/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>

#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/dma_fence.h>

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
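/*
 * Note: both tables are indexed by the cur_idx picked in
 * qxl_alloc_release_reserved() - 0 for drawable, 1 for surface cmd and
 * 2 for cursor cmd releases.
 */
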
static const char *qxl_get_driver_name(struct dma_fence *fence)
{
        return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
        return "release";
}

static bool qxl_nop_signaling(struct dma_fence *fence)
{
        /* fences are always automatically signaled, so just pretend we did this.. */
        return true;
}

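/*
 * Releases are only signaled once the host has processed them and the
 * garbage collector has run (see qxl_release_free()), so this wait
 * polls the fence and keeps nudging the host via notify_oom and the
 * garbage collect queue instead of relying on enable_signaling.
 */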
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
                           signed long timeout)
{
        struct qxl_device *qdev;
        struct qxl_release *release;
        int count = 0, sc = 0;
        bool have_drawable_releases;
        unsigned long cur, end = jiffies + timeout;

        qdev = container_of(fence->lock, struct qxl_device, release_lock);
        release = container_of(fence, struct qxl_release, base);
        have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
        sc++;

        if (dma_fence_is_signaled(fence))
                goto signaled;

        qxl_io_notify_oom(qdev);

        for (count = 0; count < 11; count++) {
                if (!qxl_queue_garbage_collect(qdev, true))
                        break;

                if (dma_fence_is_signaled(fence))
                        goto signaled;
        }

        if (dma_fence_is_signaled(fence))
                goto signaled;

        if (have_drawable_releases || sc < 4) {
                if (sc > 2)
                        /* back off */
                        usleep_range(500, 1000);

                if (time_after(jiffies, end))
                        return 0;

                if (have_drawable_releases && sc > 300) {
                        DMA_FENCE_WARN(fence,
                                       "failed to wait on release %llu after spincount %d\n",
                                       fence->context & ~0xf0000000, sc);
                        goto signaled;
                }
                goto retry;
        }
        /*
         * yeah, original sync_obj_wait gave up after 3 spins when
         * have_drawable_releases is not set.
         */

signaled:
        cur = jiffies;
        if (time_after(cur, end))
                return 0;
        return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
        .get_driver_name = qxl_get_driver_name,
        .get_timeline_name = qxl_get_timeline_name,
        .enable_signaling = qxl_nop_signaling,
        .wait = qxl_fence_wait,
};

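/*
 * A qxl_release ties together the command memory handed to the host, the
 * buffer objects referenced by that command, and a dma_fence.  The idr
 * handle allocated here doubles as the release id (release->id) that
 * qxl_release_from_id_locked() later resolves back to the release.
 */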
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
                  struct qxl_release **ret)
{
        struct qxl_release *release;
        int handle;
        size_t size = sizeof(*release);

        release = kmalloc(size, GFP_KERNEL);
        if (!release) {
                DRM_ERROR("Out of memory\n");
                return -ENOMEM;
        }
        release->base.ops = NULL;
        release->type = type;
        release->release_offset = 0;
        release->surface_release_id = 0;
        INIT_LIST_HEAD(&release->bos);

        idr_preload(GFP_KERNEL);
        spin_lock(&qdev->release_idr_lock);
        handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
        release->base.seqno = ++qdev->release_seqno;
        spin_unlock(&qdev->release_idr_lock);
        idr_preload_end();
        if (handle < 0) {
                kfree(release);
                *ret = NULL;
                return handle;
        }
        *ret = release;
        DRM_DEBUG_DRIVER("allocated release %d\n", handle);
        release->id = handle;
        return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
        while (!list_empty(&release->bos)) {
                struct qxl_bo_list *entry;
                struct qxl_bo *bo;

                entry = container_of(release->bos.next,
                                     struct qxl_bo_list, tv.head);
                bo = to_qxl_bo(entry->tv.bo);
                qxl_bo_unref(&bo);
                list_del(&entry->tv.head);
                kfree(entry);
        }
}

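/*
 * Once the fence has been initialised (base.ops set) the release is
 * reclaimed through the fence: the final dma_fence_put() frees the
 * embedded fence and with it the qxl_release that contains it.  A
 * release that never got fenced is simply kfree()d here.
 */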
void
qxl_release_free(struct qxl_device *qdev,
                 struct qxl_release *release)
{
        DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

        if (release->surface_release_id)
                qxl_surface_id_dealloc(qdev, release->surface_release_id);

        spin_lock(&qdev->release_idr_lock);
        idr_remove(&qdev->release_idr, release->id);
        spin_unlock(&qdev->release_idr_lock);

        if (release->base.ops) {
                WARN_ON(list_empty(&release->bos));
                qxl_release_free_list(release);

                dma_fence_signal(&release->base);
                dma_fence_put(&release->base);
        } else {
                qxl_release_free_list(release);
                kfree(release);
        }
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
                                struct qxl_bo **bo)
{
        /* pin the release bo's - they are too messy to evict */
        return qxl_bo_create(qdev, PAGE_SIZE, false, true,
                             QXL_GEM_DOMAIN_VRAM, NULL, bo);
}

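/*
 * Track every buffer object a command touches on the release's
 * ttm_validate_buffer list.  Each bo is referenced while it sits on the
 * list; qxl_release_free_list() drops the references again.
 */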
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
        struct qxl_bo_list *entry;

        list_for_each_entry(entry, &release->bos, tv.head) {
                if (entry->tv.bo == &bo->tbo)
                        return 0;
        }

        entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        qxl_bo_ref(bo);
        entry->tv.bo = &bo->tbo;
        entry->tv.shared = false;
        list_add_tail(&entry->tv.head, &release->bos);
        return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
        struct ttm_operation_ctx ctx = { true, false };
        int ret;

        if (!bo->pin_count) {
                qxl_ttm_placement_from_domain(bo, bo->type, false);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (ret)
                        return ret;
        }

        ret = reservation_object_reserve_shared(bo->tbo.resv);
        if (ret)
                return ret;

        /* allocate a surface for reserved + validated buffers */
        ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
        if (ret)
                return ret;
        return 0;
}

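/*
 * Reserve all buffer objects on the release with a single ww acquire
 * ticket, then validate each one (placement, shared fence slot and
 * surface id).  On a validation error the whole reservation is backed
 * off again.
 */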
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
        int ret;
        struct qxl_bo_list *entry;

        /* if only one object is on the release it is the release itself,
           and since these objects are pinned there is no need to reserve */
        if (list_is_singular(&release->bos))
                return 0;

        ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
                                     !no_intr, NULL);
        if (ret)
                return ret;

        list_for_each_entry(entry, &release->bos, tv.head) {
                struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

                ret = qxl_release_validate_bo(bo);
                if (ret) {
                        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
                        return ret;
                }
        }
        return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
        /* if only one object is on the release it is the release itself,
           and since these objects are pinned there is no need to reserve */
        if (list_is_singular(&release->bos))
                return;

        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

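/*
 * Surface create/destroy commands share one 128 byte slot
 * (SURFACE_RELEASE_SIZE): the destroy release is stashed 64 bytes after
 * the create release in the same bo, so destroying a surface never has
 * to allocate new release space.
 */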
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                                       enum qxl_surface_cmd_type surface_cmd_type,
                                       struct qxl_release *create_rel,
                                       struct qxl_release **release)
{
        if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
                int idr_ret;
                struct qxl_bo_list *entry = list_first_entry(&create_rel->bos,
                                                             struct qxl_bo_list, tv.head);
                struct qxl_bo *bo;
                union qxl_release_info *info;

                /* stash the release after the create command */
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                if (idr_ret < 0)
                        return idr_ret;
                bo = to_qxl_bo(entry->tv.bo);

                (*release)->release_offset = create_rel->release_offset + 64;

                qxl_release_list_add(*release, bo);

                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);
                return 0;
        }

        return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
                                          QXL_RELEASE_SURFACE_CMD, release, NULL);
}

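/*
 * Suballocate release space: each release type draws fixed size slots
 * from a pinned, page sized "current" bo (see release_size_per_bo /
 * releases_per_bo above).  When the current bo is full it is dropped
 * and a fresh one is allocated under release_mutex.
 */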
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
                               int type, struct qxl_release **release,
                               struct qxl_bo **rbo)
{
        struct qxl_bo *bo;
        int idr_ret;
        int ret = 0;
        union qxl_release_info *info;
        int cur_idx;

        if (type == QXL_RELEASE_DRAWABLE)
                cur_idx = 0;
        else if (type == QXL_RELEASE_SURFACE_CMD)
                cur_idx = 1;
        else if (type == QXL_RELEASE_CURSOR_CMD)
                cur_idx = 2;
        else {
                DRM_ERROR("got illegal type: %d\n", type);
                return -EINVAL;
        }

        idr_ret = qxl_release_alloc(qdev, type, release);
        if (idr_ret < 0) {
                if (rbo)
                        *rbo = NULL;
                return idr_ret;
        }

        mutex_lock(&qdev->release_mutex);
        if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
                qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
                qdev->current_release_bo_offset[cur_idx] = 0;
                qdev->current_release_bo[cur_idx] = NULL;
        }
        if (!qdev->current_release_bo[cur_idx]) {
                ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
                if (ret) {
                        mutex_unlock(&qdev->release_mutex);
                        qxl_release_free(qdev, *release);
                        return ret;
                }
        }

        bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

        (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] *
                                     release_size_per_bo[cur_idx];
        qdev->current_release_bo_offset[cur_idx]++;

        if (rbo)
                *rbo = bo;

        mutex_unlock(&qdev->release_mutex);

        ret = qxl_release_list_add(*release, bo);
        qxl_bo_unref(&bo);
        if (ret) {
                qxl_release_free(qdev, *release);
                return ret;
        }

        info = qxl_release_map(qdev, *release);
        info->id = idr_ret;
        qxl_release_unmap(qdev, *release, info);

        return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
                                               uint64_t id)
{
        struct qxl_release *release;

        spin_lock(&qdev->release_idr_lock);
        release = idr_find(&qdev->release_idr, id);
        spin_unlock(&qdev->release_idr_lock);
        if (!release) {
                DRM_ERROR("failed to find id in release_idr\n");
                return NULL;
        }

        return release;
}

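/*
 * qxl_release_map()/qxl_release_unmap() give short lived, atomic-kmap
 * access to the qxl_release_info slot inside the release bo; callers
 * must unmap again before sleeping.
 */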
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
                                        struct qxl_release *release)
{
        void *ptr;
        union qxl_release_info *info;
        struct qxl_bo_list *entry = list_first_entry(&release->bos,
                                                     struct qxl_bo_list, tv.head);
        struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

        ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
        if (!ptr)
                return NULL;
        info = ptr + (release->release_offset & ~PAGE_SIZE);
        return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
                       struct qxl_release *release,
                       union qxl_release_info *info)
{
        struct qxl_bo_list *entry = list_first_entry(&release->bos,
                                                     struct qxl_bo_list, tv.head);
        struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
        void *ptr;

        ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
        qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

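/*
 * Counterpart to qxl_release_reserve_list(): initialise and emit the
 * release's fence, attach it as a shared fence to every reserved bo,
 * put the bos back on the LRU, and drop the reservations and the ww
 * acquire ticket.
 */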
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;
        struct ttm_validate_buffer *entry;
        struct qxl_device *qdev;

        /* if only one object is on the release it is the release itself,
           and since these objects are pinned there is no need to reserve */
        if (list_is_singular(&release->bos) || list_empty(&release->bos))
                return;

        bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        qdev = container_of(bdev, struct qxl_device, mman.bdev);

        /*
         * Since we never really allocated a context and we don't want to conflict,
         * set the highest bits. This will break if we really allow exporting of dma-bufs.
         */
        dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
                       release->id | 0xf0000000, release->base.seqno);
        trace_dma_fence_emit(&release->base);

        driver = bdev->driver;
        glob = bo->glob;

        spin_lock(&glob->lru_lock);

        list_for_each_entry(entry, &release->bos, head) {
                bo = entry->bo;

                reservation_object_add_shared_fence(bo->resv, &release->base);
                ttm_bo_add_to_lru(bo);
                reservation_object_unlock(bo->resv);
        }
        spin_unlock(&glob->lru_lock);
        ww_acquire_fini(&release->ticket);
}