/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
/* QXL cmd/ring handling */

#include "qxl_drv.h"
#include "qxl_object.h"
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
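
/*
 * A ring as shared with the device: a qxl_ring_header followed by a
 * power-of-two array of fixed-size elements.  struct qxl_ring is the
 * driver-side wrapper that tracks the element size and count, the notify
 * port and the wait queue used when the ring is full.
 */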
struct ring {
	struct qxl_ring_header      header;
	uint8_t                     elements[0];
};

struct qxl_ring {
	struct ring	*ring;
	int		element_size;
	int		n_elements;
	int		prod_notify;
	wait_queue_head_t *push_event;
	spinlock_t lock;
};
void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}
void qxl_ring_init_hdr(struct qxl_ring *ring)
{
	ring->ring->header.notify_on_prod = ring->n_elements;
}
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	qxl_ring_init_hdr(ring);
	spin_lock_init(&ring->lock);
	return ring;
}
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
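
/*
 * Push one element onto a device ring.  If the ring is full, arm
 * notify_on_cons so the device tells us when a slot frees up, then
 * either busy-wait (when we cannot sleep) or sleep on push_event.
 * The element is copied into the next producer slot and the device is
 * kicked via the notify port once prod reaches notify_on_prod.
 */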
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		header->notify_on_cons = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}
		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}
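
/*
 * Pop one element from a device ring.  Returns false if the ring is
 * empty, after arming notify_on_prod so the device interrupts us when
 * it produces the next entry.
 */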
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		queue_work(qdev->gc_queue, &qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}
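
/*
 * Drain the release ring: each popped entry is the head of a chain of
 * releases (linked through info->next) that the device has finished
 * with; free them all and return how many were reclaimed.
 */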
int qxl_garbage_collect(struct qxl_device *qdev)
{
	struct qxl_release *release;
	uint64_t id, next_id;
	int i = 0;
	union qxl_release_info *info;

	while (qxl_ring_pop(qdev->release_ring, &id)) {
		QXL_INFO(qdev, "popped %lld\n", id);
		while (id) {
			release = qxl_release_from_id_locked(qdev, id);
			if (release == NULL)
				break;

			info = qxl_release_map(qdev, release);
			next_id = info->next;
			qxl_release_unmap(qdev, release, info);

			QXL_INFO(qdev, "popped %lld, next %lld\n", id,
				 next_id);

			switch (release->type) {
			case QXL_RELEASE_DRAWABLE:
			case QXL_RELEASE_SURFACE_CMD:
			case QXL_RELEASE_CURSOR_CMD:
				break;
			default:
				DRM_ERROR("unexpected release type\n");
				break;
			}
			id = next_id;

			qxl_release_free(qdev, release);
			++i;
		}
	}

	QXL_INFO(qdev, "%s: %lld\n", __func__, i);

	return i;
}
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
			  struct qxl_release *release,
			  unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_release_list_add(release, bo);
	if (ret) {
		qxl_bo_unref(&bo);
		return ret;
	}

	*_bo = bo;
	return 0;
}
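
/*
 * Issue an asynchronous I/O port command and wait (with a 5 second
 * timeout) for the completion interrupt, counted in irq_received_io_cmd.
 * If a previous async command is still outstanding, wait for it first;
 * async_io_mutex and last_sent_io_cmd keep a single command in flight.
 */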
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 is timeout, just bail the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}
static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
		       const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height) {
		qxl_io_log(qdev, "%s: not doing area update for "
			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
			   area->top, area->right, area->bottom, surface_width, surface_height);
		return -EINVAL;
	}
	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}
void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}
void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}
void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
}
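
/*
 * Describe the primary surface (format, size, stride and physical
 * address of the backing bo) in the ram header and ask the device to
 * create it.
 */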
void qxl_io_create_primary(struct qxl_device *qdev,
			   unsigned offset, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
		 qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, offset);

	QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
		 bo->kptr);

	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
}
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
	va_end(args);
	/*
	 * Do not do a DRM output here - this will call printk, which will
	 * call back into qxl for rendering (qxl_fb)
	 */
	outb(0, qdev->io_base + QXL_IO_LOG);
}
void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}
void qxl_io_monitors_config(struct qxl_device *qdev)
{
	qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
		   qdev->monitors_config ?
		   qdev->monitors_config->count : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].width : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].height : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].x : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].y : -1
		   );

	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}
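
/*
 * Allocate a surface id from the idr.  Ids beyond the number of
 * surfaces the device advertises are handed back, a couple of surfaces
 * are reaped to make room, and the allocation is retried.
 */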
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;

again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}
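
/*
 * Tell the device about a surface: build a QXL_SURFACE_CMD_CREATE
 * command describing the bo (via the memory slot for the new placement
 * when one is given, otherwise via its current physical address) and
 * push it on the command ring.
 */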
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf,
			 struct ttm_mem_reg *new_mem)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL, &release);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret)
		return ret;

	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	if (new_mem) {
		int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
		struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);

		/* TODO - need to hold one of the locks to read tbo.offset */
		cmd->u.surface_create.data = slot->high_bits;

		cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
	} else {
		cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	}
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this surface bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
	qxl_release_fence_buffer_objects(release);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
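
/*
 * Destroy the surface on the device side: drop it from the idr, clear
 * the driver-side state, and push a QXL_SURFACE_CMD_DESTROY command
 * whose release also hands the surface id back once the device acks it.
 */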
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create, &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_fence_buffer_objects(release);

	return 0;
}
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/* if we are evicting, we need to make sure the surface is up
	   to date */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}
static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}
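
/*
 * Try to reclaim a single surface.  Without stalling we only touch
 * surfaces that have no active releases; with stalling we drop the
 * eviction mutex and wait for the bo to go idle before evicting it.
 */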
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret)
		return ret;

	if (surf->fence.num_active_releases > 0 && stall == false) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	spin_lock(&surf->tbo.bdev->fence_lock);
	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
	spin_unlock(&surf->tbo.bdev->fence_lock);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret) {
		qxl_bo_unreserve(surf);
		return ret;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}
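
/*
 * Walk the surface idr starting just after the most recently allocated
 * id and evict surfaces until max_to_reap have been reclaimed.  If
 * nothing could be reclaimed without stalling, retry in stalling mode,
 * then kick the garbage collector to recycle the freed releases.
 */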
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int start = 0, ret = 0;
	bool stall = false;
	int i;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the object is in the
		   idr but has been evicted half way - it makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}