/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
/* QXL cmd/ring handling */
#include <drm/drm_util.h>

#include "qxl_drv.h"
#include "qxl_object.h"
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
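/*
 * The shared ring memory is laid out as a qxl_ring_header followed by
 * the ring elements; struct qxl_ring is the driver-side wrapper that
 * tracks the element geometry, the notify io port and a lock.
 */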
struct ring {
	struct qxl_ring_header      header;
	uint8_t                     elements[0];
};

struct qxl_ring {
	struct ring	       *ring;
	int			element_size;
	int			n_elements;
	int			prod_notify;
	wait_queue_head_t      *push_event;
	spinlock_t		lock;
};
void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}
void qxl_ring_init_hdr(struct qxl_ring *ring)
{
	ring->ring->header.notify_on_prod = ring->n_elements;
}
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		bool set_prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	if (set_prod_notify)
		qxl_ring_init_hdr(ring);
	spin_lock_init(&ring->lock);
	return ring;
}
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
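/*
 * Push one element onto the ring.  If the ring is full, arm
 * notify_on_cons and wait for the device to consume an entry
 * (busy-waiting when sleeping is not allowed); after copying the
 * element in, kick the device via the notify port when it asked to
 * be notified at this producer position.
 */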
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}
		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}
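/*
 * Pop one element from the ring into *element.  Returns false if the
 * ring is empty, after arming notify_on_prod so the device signals us
 * when it produces more entries.
 */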
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}
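/*
 * Wrap a release into a qxl_command and push it onto the device's
 * command (or, below, cursor) ring.
 */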
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		schedule_work(&qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}
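/*
 * Process the release ring: every id popped from the ring starts a
 * chain of releases linked through info->next; free each one and
 * return how many were collected.
 */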
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		DRM_DEBUG_DRIVER("popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
					 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	DRM_DEBUG_DRIVER("%d\n", i);

	return i;
}
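/*
 * Allocate a VRAM BO and add it to the release's reservation list so
 * it is fenced together with the command that references it.
 */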
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
			  struct qxl_release *release,
			  unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_release_list_add(release, bo);
	if (ret)
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}
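/*
 * Write a byte to an async io port and wait (with a 5 second timeout)
 * for the completion interrupt.  async_io_mutex ensures only one async
 * command is outstanding at a time.
 */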
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 is timeout, just bail, the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}
static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}
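/*
 * Ask the device to redraw an area of a surface.  The area is
 * validated against the surface dimensions before the async update
 * command is issued under update_area_mutex.
 */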
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
		       const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height)
		return -EINVAL;

	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}
void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}

void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}

void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
	qdev->primary_bo->is_primary = false;
	drm_gem_object_put_unlocked(&qdev->primary_bo->gem_base);
	qdev->primary_bo = NULL;
}
void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	if (WARN_ON(qdev->primary_bo))
		return;

	DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, 0);

	DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
	qdev->primary_bo = bo;
	qdev->primary_bo->is_primary = true;
	drm_gem_object_get(&qdev->primary_bo->gem_base);
}
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}

void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}

void qxl_io_monitors_config(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}
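/*
 * Allocate a surface id from the idr.  Ids beyond what the device
 * supports (rom->n_surfaces) are released again and we try to reap
 * existing surfaces before retrying.
 */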
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;

again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}
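/*
 * Tell the hardware about a surface: emit a QXL_SURFACE_CMD_CREATE on
 * the command ring and publish the BO in the surface idr.
 */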
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret)
		return ret;

	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this surface bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
	qxl_release_fence_buffer_objects(release);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_fence_buffer_objects(release);

	return 0;
}
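/*
 * Flush the full surface area to the device; used before eviction so
 * the surface contents are up to date.
 */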
static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/* if we are evicting, we need to make sure the surface is up
	   to date */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}
static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret)
		return ret;

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	ret = ttm_bo_wait(&surf->tbo, true, !stall);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret) {
		qxl_bo_unreserve(surf);
		return ret;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}
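/*
 * Walk the surface idr, starting just past the most recently allocated
 * id, and evict up to max_to_reap surfaces.  If nothing could be
 * reaped without stalling, retry once allowing stalls.
 */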
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the object is in the
		   idr but has been evicted halfway - it makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}