/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"
/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256-byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
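/*
 * The three entries above are indexed by release type: 0 for drawables,
 * 1 for surface commands, 2 for cursor commands - see the cur_idx
 * selection in qxl_alloc_release_reserved() below.
 */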
static const char *qxl_get_driver_name(struct dma_fence *fence)
{
        return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
        return "release";
}
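/*
 * Custom wait callback for release fences: prod the device to release
 * resources (qxl_io_notify_oom), run the garbage collector a few times,
 * and spin with a short back-off until the fence signals or the timeout
 * expires.
 */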
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
                           signed long timeout)
{
        struct qxl_device *qdev;
        struct qxl_release *release;
        int count = 0, sc = 0;
        bool have_drawable_releases;
        unsigned long cur, end = jiffies + timeout;

        qdev = container_of(fence->lock, struct qxl_device, release_lock);
        release = container_of(fence, struct qxl_release, base);
        have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
        sc++;

        if (dma_fence_is_signaled(fence))
                goto signaled;

        qxl_io_notify_oom(qdev);

        for (count = 0; count < 11; count++) {
                if (!qxl_queue_garbage_collect(qdev, true))
                        break;

                if (dma_fence_is_signaled(fence))
                        goto signaled;
        }

        if (dma_fence_is_signaled(fence))
                goto signaled;

        if (have_drawable_releases || sc < 4) {
                if (sc > 2)
                        /* back off */
                        usleep_range(500, 1000);

                if (time_after(jiffies, end))
                        return 0;

                if (have_drawable_releases && sc > 300) {
                        DMA_FENCE_WARN(fence, "failed to wait on release %llu "
                                       "after spincount %d\n",
                                       fence->context & ~0xf0000000, sc);
                        goto signaled;
                }
                goto retry;
        }
        /*
         * yeah, original sync_obj_wait gave up after 3 spins when
         * have_drawable_releases is not set.
         */

signaled:
        cur = jiffies;
        if (time_after(cur, end))
                return 0;
        return end - cur;
}
static const struct dma_fence_ops qxl_fence_ops = {
        .get_driver_name = qxl_get_driver_name,
        .get_timeline_name = qxl_get_timeline_name,
        .wait = qxl_fence_wait,
};
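/*
 * Allocate a qxl_release and an idr handle for it; the handle is stored
 * as release->id and is later used (with the high bits set) as the fence
 * context in qxl_release_fence_buffer_objects().
 */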
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
                  struct qxl_release **ret)
{
        struct qxl_release *release;
        int handle;
        size_t size = sizeof(*release);

        release = kmalloc(size, GFP_KERNEL);
        if (!release) {
                DRM_ERROR("Out of memory\n");
                return -ENOMEM;
        }
        release->base.ops = NULL;
        release->type = type;
        release->release_offset = 0;
        release->surface_release_id = 0;
        INIT_LIST_HEAD(&release->bos);

        idr_preload(GFP_KERNEL);
        spin_lock(&qdev->release_idr_lock);
        handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
        release->base.seqno = ++qdev->release_seqno;
        spin_unlock(&qdev->release_idr_lock);
        idr_preload_end();
        if (handle < 0) {
                kfree(release);
                *ret = NULL;
                return handle;
        }
        *ret = release;
        DRM_DEBUG_DRIVER("allocated release %d\n", handle);
        release->id = handle;
        return handle;
}
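/* Drop every buffer object still tracked on the release's bo list. */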
static void
qxl_release_free_list(struct qxl_release *release)
{
        while (!list_empty(&release->bos)) {
                struct qxl_bo_list *entry;
                struct qxl_bo *bo;

                entry = container_of(release->bos.next,
                                     struct qxl_bo_list, tv.head);
                bo = to_qxl_bo(entry->tv.bo);
                qxl_bo_unref(&bo);
                list_del(&entry->tv.head);
                kfree(entry);
        }
        release->release_bo = NULL;
}
void
qxl_release_free(struct qxl_device *qdev,
                 struct qxl_release *release)
{
        DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

        if (release->surface_release_id)
                qxl_surface_id_dealloc(qdev, release->surface_release_id);

        spin_lock(&qdev->release_idr_lock);
        idr_remove(&qdev->release_idr, release->id);
        spin_unlock(&qdev->release_idr_lock);

        if (release->base.ops) {
                WARN_ON(list_empty(&release->bos));
                qxl_release_free_list(release);

                dma_fence_signal(&release->base);
                dma_fence_put(&release->base);
        } else {
                qxl_release_free_list(release);
                kfree(release);
        }
}
static int qxl_release_bo_alloc(struct qxl_device *qdev,
                                struct qxl_bo **bo)
{
        /* pin release bo's - they are too messy to evict */
        return qxl_bo_create(qdev, PAGE_SIZE, false, true,
                             QXL_GEM_DOMAIN_VRAM, NULL, bo);
}
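/*
 * Track a buffer object on the release's bo list; a reference is taken
 * here and dropped again in qxl_release_free_list().
 */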
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
        struct qxl_bo_list *entry;

        list_for_each_entry(entry, &release->bos, tv.head) {
                if (entry->tv.bo == &bo->tbo)
                        return 0;
        }

        entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        qxl_bo_ref(bo);
        entry->tv.bo = &bo->tbo;
        entry->tv.num_shared = 0;
        list_add_tail(&entry->tv.head, &release->bos);
        return 0;
}
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
        struct ttm_operation_ctx ctx = { true, false };
        int ret;

        if (!bo->tbo.pin_count) {
                qxl_ttm_placement_from_domain(bo, bo->type);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (ret)
                        return ret;
        }

        ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
        if (ret)
                return ret;

        /* allocate a surface for reserved + validated buffers */
        ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
        if (ret)
                return ret;
        return 0;
}
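/*
 * Reserve all buffer objects tracked on the release and validate their
 * placement, reserving a shared-fence slot on each so the release fence
 * can be attached later in qxl_release_fence_buffer_objects().
 */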
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
        int ret;
        struct qxl_bo_list *entry;

        /* if only one object on the release it is the release itself,
           since these objects are pinned no need to reserve */
        if (list_is_singular(&release->bos))
                return 0;

        ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
                                     !no_intr, NULL);
        if (ret)
                return ret;

        list_for_each_entry(entry, &release->bos, tv.head) {
                struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

                ret = qxl_release_validate_bo(bo);
                if (ret) {
                        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
                        return ret;
                }
        }
        return 0;
}
void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
        /* if only one object on the release it is the release itself,
           since these objects are pinned no need to reserve */
        if (list_is_singular(&release->bos))
                return;

        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
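/*
 * For QXL_SURFACE_CMD_DESTROY with an existing create release, the
 * destroy release is stashed in the same bo, 64 bytes after the create
 * command; otherwise a fresh surface-cmd release is allocated.
 */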
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                                       enum qxl_surface_cmd_type surface_cmd_type,
                                       struct qxl_release *create_rel,
                                       struct qxl_release **release)
{
        if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
                int idr_ret;
                struct qxl_bo *bo;
                union qxl_release_info *info;

                /* stash the release after the create command */
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                if (idr_ret < 0)
                        return idr_ret;
                bo = create_rel->release_bo;

                (*release)->release_bo = bo;
                (*release)->release_offset = create_rel->release_offset + 64;

                qxl_release_list_add(*release, bo);

                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);
                return 0;
        }

        return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
                                          QXL_RELEASE_SURFACE_CMD, release, NULL);
}
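/*
 * Allocate a release of the given type and suballocate a command slot for
 * it out of the per-type "current" release bo, moving on to a fresh page
 * when the current one is full. The release id is written into the mapped
 * slot so the device can refer back to this release.
 */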
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
                               int type, struct qxl_release **release,
                               struct qxl_bo **rbo)
{
        struct qxl_bo *bo;
        int idr_ret;
        int ret = 0;
        union qxl_release_info *info;
        int cur_idx;

        if (type == QXL_RELEASE_DRAWABLE)
                cur_idx = 0;
        else if (type == QXL_RELEASE_SURFACE_CMD)
                cur_idx = 1;
        else if (type == QXL_RELEASE_CURSOR_CMD)
                cur_idx = 2;
        else {
                DRM_ERROR("got illegal type: %d\n", type);
                return -EINVAL;
        }

        idr_ret = qxl_release_alloc(qdev, type, release);
        if (idr_ret < 0) {
                if (rbo)
                        *rbo = NULL;
                return idr_ret;
        }

        mutex_lock(&qdev->release_mutex);
        if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
                qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
                qdev->current_release_bo_offset[cur_idx] = 0;
                qdev->current_release_bo[cur_idx] = NULL;
        }
        if (!qdev->current_release_bo[cur_idx]) {
                ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
                if (ret) {
                        mutex_unlock(&qdev->release_mutex);
                        qxl_release_free(qdev, *release);
                        return ret;
                }
        }

        bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

        (*release)->release_bo = bo;
        (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
        qdev->current_release_bo_offset[cur_idx]++;

        if (rbo)
                *rbo = bo;

        mutex_unlock(&qdev->release_mutex);

        ret = qxl_release_list_add(*release, bo);
        qxl_bo_unref(&bo);
        if (ret) {
                qxl_release_free(qdev, *release);
                return ret;
        }

        info = qxl_release_map(qdev, *release);
        info->id = idr_ret;
        qxl_release_unmap(qdev, *release, info);

        return ret;
}
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
                                               uint64_t id)
{
        struct qxl_release *release;

        spin_lock(&qdev->release_idr_lock);
        release = idr_find(&qdev->release_idr, id);
        spin_unlock(&qdev->release_idr_lock);
        if (!release) {
                DRM_ERROR("failed to find id in release_idr\n");
                return NULL;
        }

        return release;
}
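/*
 * Map the page containing this release's command slot and return a
 * pointer to the slot; must be paired with qxl_release_unmap().
 */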
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
                                        struct qxl_release *release)
{
        void *ptr;
        union qxl_release_info *info;
        struct qxl_bo *bo = release->release_bo;

        ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
        if (!ptr)
                return NULL;
        info = ptr + (release->release_offset & ~PAGE_MASK);
        return info;
}
void qxl_release_unmap(struct qxl_device *qdev,
                       struct qxl_release *release,
                       union qxl_release_info *info)
{
        struct qxl_bo *bo = release->release_bo;
        void *ptr;

        ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
        qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
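/*
 * Initialise the release's dma_fence (using release->id with the high
 * bits set as the fence context), attach it as a shared fence to every
 * buffer object on the release, and drop the reservations taken in
 * qxl_release_reserve_list().
 */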
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_device *bdev;
        struct ttm_validate_buffer *entry;
        struct qxl_device *qdev;

        /* if only one object on the release it is the release itself,
           since these objects are pinned no need to reserve */
        if (list_is_singular(&release->bos) || list_empty(&release->bos))
                return;

        bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        qdev = container_of(bdev, struct qxl_device, mman.bdev);

        /*
         * Since we never really allocated a context and we don't want to conflict,
         * set the highest bits. This will break if we really allow exporting of dma-bufs.
         */
        dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
                       release->id | 0xf0000000, release->base.seqno);
        trace_dma_fence_emit(&release->base);

        spin_lock(&ttm_bo_glob.lru_lock);

        list_for_each_entry(entry, &release->bos, head) {
                bo = entry->bo;

                dma_resv_add_shared_fence(bo->base.resv, &release->base);
                ttm_bo_move_to_lru_tail(bo, NULL);
                dma_resv_unlock(bo->base.resv);
        }
        spin_unlock(&ttm_bo_glob.lru_lock);
        ww_acquire_fini(&release->ticket);
}