/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
/* QXL cmd/ring handling */

#include <linux/delay.h>

#include <drm/drm_util.h>

#include "qxl_drv.h"
#include "qxl_object.h"
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
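
/*
 * The ring itself lives in memory shared with the device: a
 * qxl_ring_header followed by a flexible array of fixed-size
 * elements.  struct qxl_ring below is the driver-side wrapper that
 * carries the element size and count, the notify port used to ring
 * the device, and a lock serializing producer/consumer updates.
 */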
struct ring {
	struct qxl_ring_header	header;
	uint8_t			elements[];
};

struct qxl_ring {
	struct ring	       *ring;
	int			element_size;
	int			n_elements;
	int			prod_notify;
	wait_queue_head_t      *push_event;
	spinlock_t		lock;
};
void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}
void qxl_ring_init_hdr(struct qxl_ring *ring)
{
	ring->ring->header.notify_on_prod = ring->n_elements;
}
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	qxl_ring_init_hdr(ring);
	spin_lock_init(&ring->lock);
	return ring;
}
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}
		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		schedule_work(&qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		DRM_DEBUG_DRIVER("popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
					 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	DRM_DEBUG_DRIVER("%d\n", i);

	return i;
}
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
			  struct qxl_release *release,
			  unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_release_list_add(release, bo);
	if (ret)
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}
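
/*
 * Issue an async I/O port command and wait for its completion
 * interrupt.  Comparing last_sent_io_cmd with irq_received_io_cmd
 * tells us whether a previous command is still in flight; if so,
 * wait for it first.  Both waits time out after five seconds, in
 * which case we assume the "hw" has gone away.
 */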
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 is timeout, just bail the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}
static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
		       const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height)
		return -EINVAL;

	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}
void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}
void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}
void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
	qdev->primary_bo->is_primary = false;
	drm_gem_object_put(&qdev->primary_bo->tbo.base);
	qdev->primary_bo = NULL;
}
void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	if (WARN_ON(qdev->primary_bo))
		return;

	DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, 0);

	DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
	qdev->primary_bo = bo;
	qdev->primary_bo->is_primary = true;
	drm_gem_object_get(&qdev->primary_bo->tbo.base);
}
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}
void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}
void qxl_io_monitors_config(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;

again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret) {
		qxl_release_free(qdev, release);
		return ret;
	}
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this surface bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	return 0;
}
static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/* if we are evicting, we need to make sure the surface is up
	   to date */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}
static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf);
	if (ret)
		return ret;

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	ret = ttm_bo_wait(&surf->tbo, true, !stall);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret) {
		qxl_bo_unreserve(surf);
		return ret;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the object is in the
		   idr but has been evicted half way - it makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}