/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/* Originally a fake version of the buffer manager, written so that we could
 * prototype changes in a driver fairly quickly; it has since been fleshed
 * out into a fully functional interim solution.
 *
 * Basically it wraps the old-style memory management in the new
 * programming interface, but is more expressive and avoids many of
 * the bugs in the old texture manager.
 */
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>

#include <xf86drm.h>

#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "drm.h"
#include "i915_drm.h"
#include "mm.h"
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#define DBG(...) do {					\
	if (bufmgr_fake->bufmgr.debug)			\
		drmMsg(__VA_ARGS__);			\
} while (0)
/* Driver-private buffer flags */
#define BM_NO_BACKING_STORE	0x00000001
#define BM_NO_FENCE_SUBDATA	0x00000002
#define BM_PINNED		0x00000004
/* Wrapper around mm.c's mem_block, which understands that you must
 * wait for fences to expire before memory can be freed.  This is
 * specific to our use of memcpy for uploads - an upload that was
 * processed through the command queue wouldn't need to care about
 * fences.
 */
#define MAX_RELOCS 4096
struct fake_buffer_reloc {
	/** Buffer object that the relocation points at. */
	drm_intel_bo *target_buf;
	/** Offset of the relocation entry within reloc_buf. */
	uint32_t offset;
	/** Cached value of the offset when we last performed this relocation. */
	uint32_t last_target_offset;
	/** Value added to target_buf's offset to get the relocation entry. */
	uint32_t delta;
	/** Cache domains the target buffer is read into. */
	uint32_t read_domains;
	/** Cache domain the target buffer will have dirty cachelines in. */
	uint32_t write_domain;
};
struct block {
	struct block *next, *prev;
	struct mem_block *mem;	/* BM_MEM_AGP */

	/**
	 * Marks that the block is currently in the aperture and has yet to be
	 * fenced.
	 */
	unsigned on_hardware:1;
	/**
	 * Marks that the block is currently fenced (being used by rendering)
	 * and can't be freed until @fence is passed.
	 */
	unsigned fenced:1;

	/** Fence cookie for the block. */
	unsigned fence;		/* Split to read_fence, write_fence */

	drm_intel_bo *bo;
	void *virtual;
};
typedef struct _bufmgr_fake {
	drm_intel_bufmgr bufmgr;

	pthread_mutex_t lock;

	unsigned long low_offset;
	unsigned long size;
	void *virtual;

	struct mem_block *heap;

	unsigned buf_nr;	/* for generating ids */

	/**
	 * List of blocks which are currently in the GART but haven't been
	 * fenced yet.
	 */
	struct block on_hardware;
	/**
	 * List of blocks which are in the GART and have an active fence on
	 * them.
	 */
	struct block fenced;
	/**
	 * List of blocks which have an expired fence and are ready to be
	 * evicted.
	 */
	struct block lru;

	unsigned int last_fence;

	unsigned fail:1;
	unsigned need_fence:1;
	int thrashing;

	/**
	 * Driver callback to emit a fence, returning the cookie.
	 *
	 * This allows the driver to hook in a replacement for the DRM usage in
	 * bufmgr_fake.
	 *
	 * Currently, this also requires that a write flush be emitted before
	 * emitting the fence, but this should change.
	 */
	unsigned int (*fence_emit) (void *private);
	/** Driver callback to wait for a fence cookie to have passed. */
	void (*fence_wait) (unsigned int fence, void *private);
	void *fence_priv;

	/**
	 * Driver callback to execute a buffer.
	 *
	 * This allows the driver to hook in a replacement for the DRM usage in
	 * bufmgr_fake.
	 */
	int (*exec) (drm_intel_bo *bo, unsigned int used, void *priv);
	void *exec_priv;

	/** Driver-supplied argument to driver callbacks */
	void *driver_priv;
	/**
	 * Pointer to kernel-updated sarea data for the last completed user irq
	 */
	volatile int *last_dispatch;

	int fd;

	int debug;

	int performed_rendering;
} drm_intel_bufmgr_fake;
typedef struct _drm_intel_bo_fake {
	drm_intel_bo bo;

	unsigned id;		/* debug only */
	const char *name;

	unsigned dirty:1;
	/**
	 * Has the card written to this buffer - we may need to copy it back.
	 */
	unsigned card_dirty:1;
	unsigned int refcount;
	/* Flags may consist of any of the DRM_BO flags, plus
	 * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
	 * first two driver private flags.
	 */
	uint64_t flags;
	/** Cache domains the target buffer is read into. */
	uint32_t read_domains;
	/** Cache domain the target buffer will have dirty cachelines in. */
	uint32_t write_domain;

	unsigned int alignment;
	int is_static, validated;
	unsigned int map_count;

	/** relocation list */
	struct fake_buffer_reloc *relocs;
	int nr_relocs;
	/**
	 * Total size of the target_bos of this buffer.
	 *
	 * Used for estimation in check_aperture.
	 */
	unsigned int child_size;

	struct block *block;
	void *backing_store;
	void (*invalidate_cb) (drm_intel_bo *bo, void *ptr);
	void *invalidate_ptr;
} drm_intel_bo_fake;
static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
			unsigned int fence_cookie);

#define MAXFENCE 0x7fffffff
/* Returns whether fence cookie a is "less than or equal to" b, allowing for
 * wrap-around of the hardware sequence number within a window of
 * outstanding fences.
 */
static int
FENCE_LTE(unsigned a, unsigned b)
{
	if (a == b)
		return 1;

	if (a < b && b - a < (1 << 24))
		return 1;

	if (a > b && MAXFENCE - a + b < (1 << 24))
		return 1;

	return 0;
}
drm_public void
drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
					 unsigned int (*emit) (void *priv),
					 void (*wait) (unsigned int fence,
						       void *priv),
					 void *priv)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	bufmgr_fake->fence_emit = emit;
	bufmgr_fake->fence_wait = wait;
	bufmgr_fake->fence_priv = priv;
}
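/* Illustrative sketch (not part of the original file): wiring a driver's own
 * fence primitives into the fake bufmgr via the callback above.  The my_*
 * names are hypothetical stand-ins for whatever the driver already uses to
 * emit and wait on hardware fences.
 */
#if 0
static unsigned int
my_fence_emit(void *priv)
{
	struct my_context *ctx = priv;	/* hypothetical driver context */

	/* Flush and emit a fence through the driver's own command stream,
	 * returning the new fence cookie.
	 */
	return my_context_emit_fence(ctx);
}

static void
my_fence_wait(unsigned int fence, void *priv)
{
	struct my_context *ctx = priv;

	my_context_wait_fence(ctx, fence);	/* block until the cookie passes */
}

static void
my_setup_fences(drm_intel_bufmgr *bufmgr, struct my_context *ctx)
{
	drm_intel_bufmgr_fake_set_fence_callback(bufmgr, my_fence_emit,
						 my_fence_wait, ctx);
}
#endif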
static unsigned int
_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
{
	struct drm_i915_irq_emit ie;
	int ret, seq = 1;

	if (bufmgr_fake->fence_emit != NULL) {
		seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
		return seq;
	}

	ie.irq_seq = &seq;
	ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		drmMsg("%s: drm_i915_irq_emit: %d\n", __func__, ret);
		abort();
	}

	DBG("emit 0x%08x\n", seq);
	return seq;
}
static void
_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
{
	struct drm_i915_irq_wait iw;
	int hw_seq, busy_count = 0;
	int ret;
	int kernel_lied;

	if (bufmgr_fake->fence_wait != NULL) {
		bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
		clear_fenced(bufmgr_fake, seq);
		return;
	}

	iw.irq_seq = seq;

	DBG("wait 0x%08x\n", iw.irq_seq);
	/* The kernel IRQ_WAIT implementation is all sorts of broken.
	 * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
	 *    unsigned range.
	 * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
	 *    signed range.
	 * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
	 *    signed range.
	 * 4) It returns -EBUSY in 3 seconds even if the hardware is still
	 *    successfully chewing through buffers.
	 *
	 * Assume that in userland we treat sequence numbers as ints, which
	 * makes some of the comparisons convenient, since the sequence
	 * numbers are all positive signed integers.
	 *
	 * From this we get several cases we need to handle.  Here's a timeline
	 * with example sequence values 0x2, 0x7, 0x7ffffff8 and 0x7ffffffd.
	 *
	 * A) Normal wait for hw to catch up
	 *    hw_seq is a little behind seq.
	 *    seq - hw_seq = 5.  If we call IRQ_WAIT, it will wait for hw to
	 *    catch up.
	 *
	 * B) Normal wait for a sequence number that's already passed.
	 *    seq - hw_seq = -5.  If we call IRQ_WAIT, it returns 0 quickly.
	 *
	 * C) Hardware has already wrapped around ahead of us
	 *    seq - hw_seq = 0x80000000 - 5.  If we called IRQ_WAIT, it would wait
	 *    for hw_seq >= seq, which may never occur.  Thus, we want to catch
	 *    this in userland and return 0.
	 *
	 * D) We've wrapped around ahead of the hardware.
	 *    seq - hw_seq = -(0x80000000 - 5).  If we called IRQ_WAIT, it would
	 *    return 0 quickly because hw_seq >= seq, even though the hardware
	 *    isn't caught up.  Thus, we need to catch this early return in
	 *    userland and bother the kernel until the hardware really does
	 *    catch up.
	 *
	 * E) Hardware might wrap after we test in userland.
	 *    seq - hw_seq = 5.  If we call IRQ_WAIT, it will likely see seq >=
	 *    hw_seq and wait.  However, suppose hw_seq wraps before we make it
	 *    into the kernel.  The kernel sees hw_seq >= seq and waits for 3
	 *    seconds then returns -EBUSY.  This is case C).  We should catch
	 *    this and then return successfully.
	 *
	 * F) Hardware might take a long time on a buffer.
	 *    seq - hw_seq = 5.  If we call IRQ_WAIT, if sequence 2 through 5
	 *    take too long, it will return -EBUSY.  Batchbuffers in the
	 *    gltestperf demo were seen to take up to 7 seconds.  We should
	 *    catch early -EBUSY return and keep trying.
	 */
	do {
		/* Keep a copy of last_dispatch so that if the wait -EBUSYs
		 * because the hardware didn't catch up in 3 seconds, we can
		 * see if it at least made progress and retry.
		 */
		hw_seq = *bufmgr_fake->last_dispatch;

		/* Catch case C */
		if (seq - hw_seq > 0x40000000)
			return;

		ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
				      &iw, sizeof(iw));
		/* Catch case D */
		kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
					     -0x40000000);

		/* Catch case E */
		if (ret == -EBUSY
		    && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
			ret = 0;

		/* Catch case F: Allow up to 15 seconds chewing on one buffer. */
		if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
			busy_count = 0;
		else
			busy_count++;
	} while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
		 (ret == -EBUSY && busy_count < 5));

	if (ret != 0) {
		drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
		       __LINE__, strerror(-ret));
		abort();
	}
	clear_fenced(bufmgr_fake, seq);
}
static int
_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
	/* Slight problem with wrap-around:
	 */
	return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
}
/**
 * Allocate a memory manager block for the buffer.
 */
static int
alloc_block(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	struct block *block = (struct block *)calloc(sizeof *block, 1);
	unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
	unsigned int sz;

	if (!block)
		return 0;

	sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);

	block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
	if (!block->mem) {
		free(block);
		return 0;
	}

	DRMINITLISTHEAD(block);

	/* Insert at head or at tail???   */
	DRMLISTADDTAIL(block, &bufmgr_fake->lru);

	block->virtual = (uint8_t *) bufmgr_fake->virtual +
	    block->mem->ofs - bufmgr_fake->low_offset;
	block->bo = bo;

	bo_fake->block = block;

	return 1;
}
/* Release the card storage associated with buf:
 */
static void
free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
	   int skip_dirty_copy)
{
	drm_intel_bo_fake *bo_fake;

	if (!block)
		return;

	DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
	    block->on_hardware, block->fenced);

	bo_fake = (drm_intel_bo_fake *) block->bo;

	if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
		skip_dirty_copy = 1;

	if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
		memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
		bo_fake->card_dirty = 0;
		bo_fake->dirty = 1;
	}

	if (block->on_hardware) {
		block->bo = NULL;
	} else if (block->fenced) {
		block->bo = NULL;
	} else {
		DBG("    - free immediately\n");
		DRMLISTDEL(block);

		mmFreeMem(block->mem);
		free(block);
	}
}
static void
alloc_backing_store(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	assert(!bo_fake->backing_store);
	assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));

	bo_fake->backing_store = malloc(bo->size);

	DBG("alloc_backing - buf %d %p %lu\n", bo_fake->id,
	    bo_fake->backing_store, bo->size);
	assert(bo_fake->backing_store);
}
static void
free_backing_store(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	if (bo_fake->backing_store) {
		assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
		free(bo_fake->backing_store);
		bo_fake->backing_store = NULL;
	}
}
static void
set_dirty(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	if (bo_fake->flags & BM_NO_BACKING_STORE
	    && bo_fake->invalidate_cb != NULL)
		bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);

	assert(!(bo_fake->flags & BM_PINNED));

	DBG("set_dirty - buf %d\n", bo_fake->id);
	bo_fake->dirty = 1;
}
static int
evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
{
	struct block *block, *tmp;

	DBG("%s\n", __func__);

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

		if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
			continue;

		if (block->fence && max_fence && !FENCE_LTE(block->fence,
							    max_fence))
			return 0;

		set_dirty(&bo_fake->bo);
		bo_fake->block = NULL;

		free_block(bufmgr_fake, block, 0);
		return 1;
	}

	return 0;
}
static int
evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
{
	struct block *block, *tmp;

	DBG("%s\n", __func__);

	DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

		if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
			continue;

		set_dirty(&bo_fake->bo);
		bo_fake->block = NULL;

		free_block(bufmgr_fake, block, 0);
		return 1;
	}

	return 0;
}
/**
 * Removes all objects from the fenced list older than the given fence.
 */
static int
clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int fence_cookie)
{
	struct block *block, *tmp;
	int ret = 0;

	bufmgr_fake->last_fence = fence_cookie;
	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
		assert(block->fenced);

		if (_fence_test(bufmgr_fake, block->fence)) {

			block->fenced = 0;

			if (!block->bo) {
				DBG("delayed free: offset %x sz %x\n",
				    block->mem->ofs, block->mem->size);
				DRMLISTDEL(block);
				mmFreeMem(block->mem);
				free(block);
			} else {
				DBG("return to lru: offset %x sz %x\n",
				    block->mem->ofs, block->mem->size);
				DRMLISTDEL(block);
				DRMLISTADDTAIL(block, &bufmgr_fake->lru);
			}

			ret = 1;
		} else {
			/* Blocks are ordered by fence, so if one fails, all
			 * from here will fail also:
			 */
			DBG("fence not passed: offset %x sz %x %d %d\n",
			    block->mem->ofs, block->mem->size, block->fence,
			    bufmgr_fake->last_fence);
			break;
		}
	}

	DBG("%s: %d\n", __func__, ret);
	return ret;
}
static void
fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
	struct block *block, *tmp;

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
		DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
		    block, block->mem->size, block->mem->ofs, block->bo, fence);
		block->fence = fence;

		block->on_hardware = 0;
		block->fenced = 1;

		/* Move to tail of pending list here
		 */
		DRMLISTDEL(block);
		DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
	}

	assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
}
static int
evict_and_alloc_block(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	assert(bo_fake->block == NULL);

	/* Search for already free memory:
	 */
	if (alloc_block(bo))
		return 1;

	/* If we're not thrashing, allow lru eviction to dig deeper into
	 * recently used textures.  We'll probably be thrashing soon:
	 */
	if (!bufmgr_fake->thrashing) {
		while (evict_lru(bufmgr_fake, 0))
			if (alloc_block(bo))
				return 1;
	}

	/* Keep thrashing counter alive?
	 */
	if (bufmgr_fake->thrashing)
		bufmgr_fake->thrashing = 20;

	/* Wait on any already pending fences - here we are waiting for any
	 * freed memory that has been submitted to hardware and fenced to
	 * become available:
	 */
	while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
		uint32_t fence = bufmgr_fake->fenced.next->fence;
		_fence_wait_internal(bufmgr_fake, fence);

		if (alloc_block(bo))
			return 1;
	}

	if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
		while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
			uint32_t fence = bufmgr_fake->fenced.next->fence;
			_fence_wait_internal(bufmgr_fake, fence);
		}

		if (!bufmgr_fake->thrashing) {
			DBG("thrashing\n");
		}
		bufmgr_fake->thrashing = 20;

		if (alloc_block(bo))
			return 1;
	}

	while (evict_mru(bufmgr_fake))
		if (alloc_block(bo))
			return 1;

	DBG("%s 0x%lx bytes failed\n", __func__, bo->size);

	return 0;
}
/***********************************************************************
 * Public functions
 */

/**
 * Wait for hardware idle by emitting a fence and waiting for it.
 */
static void
drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
{
	unsigned int cookie;

	cookie = _fence_emit_internal(bufmgr_fake);
	_fence_wait_internal(bufmgr_fake, cookie);
}
/**
 * Wait for rendering to a buffer to complete.
 *
 * It is assumed that the batchbuffer which performed the rendering included
 * the necessary flushing.
 */
static void
drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	if (bo_fake->block == NULL || !bo_fake->block->fenced)
		return;

	_fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
}

static void
drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;

	pthread_mutex_lock(&bufmgr_fake->lock);
	drm_intel_fake_bo_wait_rendering_locked(bo);
	pthread_mutex_unlock(&bufmgr_fake->lock);
}
/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
drm_public void
drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
	struct block *block, *tmp;

	pthread_mutex_lock(&bufmgr_fake->lock);

	bufmgr_fake->need_fence = 1;
	bufmgr_fake->fail = 0;

	/* Wait for hardware idle.  We don't know where acceleration has been
	 * happening, so we'll need to wait anyway before letting anything get
	 * put on the card again.
	 */
	drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

	/* Check that we haven't released the lock without having fenced the
	 * last set of buffers.
	 */
	assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
	assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
		assert(_fence_test(bufmgr_fake, block->fence));
		set_dirty(block->bo);
	}

	pthread_mutex_unlock(&bufmgr_fake->lock);
}
static drm_intel_bo *
drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr,
			const char *name,
			unsigned long size,
			unsigned int alignment)
{
	drm_intel_bufmgr_fake *bufmgr_fake;
	drm_intel_bo_fake *bo_fake;

	bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	assert(size != 0);

	bo_fake = calloc(1, sizeof(*bo_fake));
	if (!bo_fake)
		return NULL;

	bo_fake->bo.size = size;
	bo_fake->bo.offset = -1;
	bo_fake->bo.virtual = NULL;
	bo_fake->bo.bufmgr = bufmgr;
	bo_fake->refcount = 1;

	/* Alignment must be a power of two */
	assert((alignment & (alignment - 1)) == 0);
	if (alignment == 0)
		alignment = 1;
	bo_fake->alignment = alignment;
	bo_fake->id = ++bufmgr_fake->buf_nr;
	bo_fake->name = name;
	bo_fake->flags = 0;
	bo_fake->is_static = 0;

	DBG("drm_bo_alloc: (buf %d: %s, %lu kb)\n", bo_fake->id, bo_fake->name,
	    bo_fake->bo.size / 1024);

	return &bo_fake->bo;
}
static drm_intel_bo *
drm_intel_fake_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
			      const char *name,
			      int x, int y, int cpp,
			      uint32_t *tiling_mode,
			      unsigned long *pitch,
			      unsigned long flags)
{
	unsigned long stride, aligned_y;

	/* No runtime tiling support for fake. */
	*tiling_mode = I915_TILING_NONE;

	/* Align it for being a render target.  Shouldn't need anything else. */
	stride = x * cpp;
	stride = ROUND_UP_TO(stride, 64);

	/* 965 subspan loading alignment */
	aligned_y = ALIGN(y, 2);

	*pitch = stride;

	return drm_intel_fake_bo_alloc(bufmgr, name, stride * aligned_y,
				       4096);
}
drm_public drm_intel_bo *
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
			       const char *name,
			       unsigned long offset,
			       unsigned long size, void *virtual)
{
	drm_intel_bufmgr_fake *bufmgr_fake;
	drm_intel_bo_fake *bo_fake;

	bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	assert(size != 0);

	bo_fake = calloc(1, sizeof(*bo_fake));
	if (!bo_fake)
		return NULL;

	bo_fake->bo.size = size;
	bo_fake->bo.offset = offset;
	bo_fake->bo.virtual = virtual;
	bo_fake->bo.bufmgr = bufmgr;
	bo_fake->refcount = 1;
	bo_fake->id = ++bufmgr_fake->buf_nr;
	bo_fake->name = name;
	bo_fake->flags = BM_PINNED;
	bo_fake->is_static = 1;

	DBG("drm_bo_alloc_static: (buf %d: %s, %lu kb)\n", bo_fake->id,
	    bo_fake->name, bo_fake->bo.size / 1024);

	return &bo_fake->bo;
}
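/* Illustrative sketch (not part of the original file): how a caller such as
 * the X server might wrap an already-placed, fixed allocation (e.g. the
 * scanout buffer) in a pinned BO.  The offset, size and mapping used here
 * are hypothetical placeholders.
 */
#if 0
static drm_intel_bo *
my_wrap_front_buffer(drm_intel_bufmgr *bufmgr, void *aperture_map)
{
	unsigned long offset = 0;		/* start of the managed range */
	unsigned long size = 4 * 1024 * 1024;	/* hypothetical 4 MB scanout */

	/* The returned BO is BM_PINNED: it is never evicted or relocated. */
	return drm_intel_bo_fake_alloc_static(bufmgr, "front buffer",
					      offset, size,
					      (char *)aperture_map + offset);
}
#endif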
static void
drm_intel_fake_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	pthread_mutex_lock(&bufmgr_fake->lock);
	bo_fake->refcount++;
	pthread_mutex_unlock(&bufmgr_fake->lock);
}

static void
drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	bo_fake->refcount++;
}

static void
drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	int i;

	if (--bo_fake->refcount == 0) {
		assert(bo_fake->map_count == 0);
		/* No remaining references, so free it */
		if (bo_fake->block)
			free_block(bufmgr_fake, bo_fake->block, 1);
		free_backing_store(bo);

		for (i = 0; i < bo_fake->nr_relocs; i++)
			drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].
							     target_buf);

		DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
		    bo_fake->name);

		free(bo_fake->relocs);
		free(bo);
	}
}

static void
drm_intel_fake_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;

	pthread_mutex_lock(&bufmgr_fake->lock);
	drm_intel_fake_bo_unreference_locked(bo);
	pthread_mutex_unlock(&bufmgr_fake->lock);
}
/**
 * Set the buffer as not requiring backing store, and instead get the callback
 * invoked whenever it would be set dirty.
 */
drm_public void
drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
					void (*invalidate_cb) (drm_intel_bo *bo,
							       void *ptr),
					void *ptr)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	pthread_mutex_lock(&bufmgr_fake->lock);

	if (bo_fake->backing_store)
		free_backing_store(bo);

	bo_fake->flags |= BM_NO_BACKING_STORE;

	DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
	bo_fake->dirty = 1;
	bo_fake->invalidate_cb = invalidate_cb;
	bo_fake->invalidate_ptr = ptr;

	/* Note that it is invalid right from the start.  Also note
	 * invalidate_cb is called with the bufmgr locked, so cannot
	 * itself make bufmgr calls.
	 */
	if (invalidate_cb != NULL)
		invalidate_cb(bo, ptr);

	pthread_mutex_unlock(&bufmgr_fake->lock);
}
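/* Illustrative sketch (not part of the original file): a scratch buffer whose
 * contents can be regenerated on demand, so backing store is unnecessary.
 * The invalidate callback just flags the owner to re-fill the buffer; the
 * my_* names and struct are hypothetical.
 */
#if 0
static void
my_scratch_invalidated(drm_intel_bo *bo, void *ptr)
{
	struct my_scratch *scratch = ptr;

	/* Called (with the bufmgr lock held) whenever the BO loses its
	 * contents; do not call back into the bufmgr from here.
	 */
	scratch->needs_refill = 1;
}

static void
my_init_scratch(struct my_scratch *scratch)
{
	drm_intel_bo_fake_disable_backing_store(scratch->bo,
						my_scratch_invalidated,
						scratch);
}
#endif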
/**
 * Map a buffer into bo->virtual, allocating either card memory space (if
 * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
 */
static int
drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	/* Static buffers are always mapped. */
	if (bo_fake->is_static) {
		if (bo_fake->card_dirty) {
			drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
			bo_fake->card_dirty = 0;
		}
		return 0;
	}

	/* Allow recursive mapping.  Mesa may recursively map buffers with
	 * nested display loops, and it is used internally in bufmgr_fake
	 * for relocation.
	 */
	if (bo_fake->map_count++ != 0)
		return 0;

	DBG("drm_bo_map: (buf %d: %s, %lu kb)\n", bo_fake->id,
	    bo_fake->name, bo_fake->bo.size / 1024);

	if (bo->virtual != NULL) {
		drmMsg("%s: already mapped\n", __func__);
		abort();
	} else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {

		if (!bo_fake->block && !evict_and_alloc_block(bo)) {
			DBG("%s: alloc failed\n", __func__);
			bufmgr_fake->fail = 1;
			return 1;
		} else {
			assert(bo_fake->block);
			bo_fake->dirty = 0;

			if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
			    bo_fake->block->fenced) {
				drm_intel_fake_bo_wait_rendering_locked(bo);
			}

			bo->virtual = bo_fake->block->virtual;
		}
	} else {
		if (write_enable)
			set_dirty(bo);

		if (bo_fake->backing_store == 0)
			alloc_backing_store(bo);

		if ((bo_fake->card_dirty == 1) && bo_fake->block) {
			if (bo_fake->block->fenced)
				drm_intel_fake_bo_wait_rendering_locked(bo);

			memcpy(bo_fake->backing_store,
			       bo_fake->block->virtual,
			       bo_fake->block->bo->size);
			bo_fake->card_dirty = 0;
		}

		bo->virtual = bo_fake->backing_store;
	}

	return 0;
}
static int
drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	int ret;

	pthread_mutex_lock(&bufmgr_fake->lock);
	ret = drm_intel_fake_bo_map_locked(bo, write_enable);
	pthread_mutex_unlock(&bufmgr_fake->lock);

	return ret;
}
static int
drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	/* Static buffers are always mapped. */
	if (bo_fake->is_static)
		return 0;

	assert(bo_fake->map_count != 0);
	if (--bo_fake->map_count != 0)
		return 0;

	DBG("drm_bo_unmap: (buf %d: %s, %lu kb)\n", bo_fake->id, bo_fake->name,
	    bo_fake->bo.size / 1024);

	bo->virtual = NULL;

	return 0;
}
static int drm_intel_fake_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	int ret;

	pthread_mutex_lock(&bufmgr_fake->lock);
	ret = drm_intel_fake_bo_unmap_locked(bo);
	pthread_mutex_unlock(&bufmgr_fake->lock);

	return ret;
}
static int
drm_intel_fake_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			  unsigned long size, const void *data)
{
	int ret;

	if (size == 0 || data == NULL)
		return 0;

	ret = drm_intel_bo_map(bo, 1);
	if (ret)
		return ret;
	memcpy((unsigned char *)bo->virtual + offset, data, size);
	drm_intel_bo_unmap(bo);
	return 0;
}
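/* Illustrative sketch (not part of the original file): uploading a small
 * array of constants with bo_subdata, which maps for write, copies the data
 * and unmaps in one call.  The function and parameter names are hypothetical.
 */
#if 0
static int
my_upload_constants(drm_intel_bo *bo, const float *consts, int count)
{
	/* Write "count" floats at the start of the buffer. */
	return drm_intel_bo_subdata(bo, 0, count * sizeof(*consts), consts);
}
#endif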
static void
drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
{
	struct block *block, *tmp;

	bufmgr_fake->performed_rendering = 0;
	/* For every BO that is currently on the hardware, kick it off and
	 * mark it for revalidation.
	 */
	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;

		block->on_hardware = 0;
		free_block(bufmgr_fake, block, 0);
		bo_fake->block = NULL;
		bo_fake->validated = 0;
		if (!(bo_fake->flags & BM_NO_BACKING_STORE))
			bo_fake->dirty = 1;
	}
}
static int
drm_intel_fake_bo_validate(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;

	bufmgr_fake = (drm_intel_bufmgr_fake *) bo->bufmgr;

	DBG("drm_bo_validate: (buf %d: %s, %lu kb)\n", bo_fake->id,
	    bo_fake->name, bo_fake->bo.size / 1024);

	/* Sanity check: Buffers should be unmapped before being validated.
	 * This is not so much of a problem for bufmgr_fake, but TTM refuses,
	 * and the problem is harder to debug there.
	 */
	assert(bo_fake->map_count == 0);

	if (bo_fake->is_static) {
		/* Add it to the needs-fence list */
		bufmgr_fake->need_fence = 1;
		return 0;
	}

	/* Allocate the card memory */
	if (!bo_fake->block && !evict_and_alloc_block(bo)) {
		bufmgr_fake->fail = 1;
		DBG("Failed to validate buf %d:%s\n", bo_fake->id,
		    bo_fake->name);
		return -1;
	}

	assert(bo_fake->block);
	assert(bo_fake->block->bo == &bo_fake->bo);

	bo->offset = bo_fake->block->mem->ofs;

	/* Upload the buffer contents if necessary */
	if (bo_fake->dirty) {
		DBG("Upload dirty buf %d:%s, sz %lu offset 0x%x\n", bo_fake->id,
		    bo_fake->name, bo->size, bo_fake->block->mem->ofs);

		assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));

		/* Actually, should be able to just wait for a fence on the
		 * memory, which we would be tracking when we free it.  Waiting
		 * for idle is a sufficiently large hammer for now.
		 */
		drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

		/* We may never have mapped this BO, so it might not have any
		 * backing store.  If this happens it should be rare, but zero
		 * the card memory in any case.
		 */
		if (bo_fake->backing_store)
			memcpy(bo_fake->block->virtual, bo_fake->backing_store,
			       bo->size);
		else
			memset(bo_fake->block->virtual, 0, bo->size);

		bo_fake->dirty = 0;
	}

	bo_fake->block->fenced = 0;
	bo_fake->block->on_hardware = 1;
	DRMLISTDEL(bo_fake->block);
	DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);

	bo_fake->validated = 1;
	bufmgr_fake->need_fence = 1;

	return 0;
}
static void
drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
	unsigned int cookie;

	cookie = _fence_emit_internal(bufmgr_fake);
	fence_blocks(bufmgr_fake, cookie);

	DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
}
static void
drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	pthread_mutex_destroy(&bufmgr_fake->lock);
	mmDestroy(bufmgr_fake->heap);
	free(bufmgr);
}
static int
drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			  drm_intel_bo *target_bo, uint32_t target_offset,
			  uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	struct fake_buffer_reloc *r;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *) target_bo;
	int i;

	pthread_mutex_lock(&bufmgr_fake->lock);

	if (bo_fake->relocs == NULL) {
		bo_fake->relocs =
		    malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
	}

	r = &bo_fake->relocs[bo_fake->nr_relocs++];

	assert(bo_fake->nr_relocs <= MAX_RELOCS);

	drm_intel_fake_bo_reference_locked(target_bo);

	if (!target_fake->is_static) {
		bo_fake->child_size +=
		    ALIGN(target_bo->size, target_fake->alignment);
		bo_fake->child_size += target_fake->child_size;
	}
	r->target_buf = target_bo;
	r->offset = offset;
	r->last_target_offset = target_bo->offset;
	r->delta = target_offset;
	r->read_domains = read_domains;
	r->write_domain = write_domain;

	if (bufmgr_fake->debug) {
		/* Check that a conflicting relocation hasn't already been
		 * emitted.
		 */
		for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
			struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];

			assert(r->offset != r2->offset);
		}
	}

	pthread_mutex_unlock(&bufmgr_fake->lock);

	return 0;
}
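/* Illustrative sketch (not part of the original file): recording a relocation
 * while building a batchbuffer.  The dword at batch_offset will be patched
 * with target_bo->offset + delta when the batch is validated; the function
 * and variable names here are hypothetical.
 */
#if 0
static void
my_emit_surface_pointer(drm_intel_bo *batch_bo, uint32_t batch_offset,
			drm_intel_bo *target_bo, uint32_t delta)
{
	drm_intel_bo_emit_reloc(batch_bo, batch_offset,
				target_bo, delta,
				I915_GEM_DOMAIN_RENDER,
				I915_GEM_DOMAIN_RENDER);
}
#endif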
/**
 * Incorporates the validation flags associated with each relocation into
 * the combined validation flags for the buffer on this batchbuffer submission.
 */
static void
drm_intel_fake_calculate_domains(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	int i;

	for (i = 0; i < bo_fake->nr_relocs; i++) {
		struct fake_buffer_reloc *r = &bo_fake->relocs[i];
		drm_intel_bo_fake *target_fake =
		    (drm_intel_bo_fake *) r->target_buf;

		/* Do the same for the tree of buffers we depend on */
		drm_intel_fake_calculate_domains(r->target_buf);

		target_fake->read_domains |= r->read_domains;
		target_fake->write_domain |= r->write_domain;
	}
}
static int
drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	int i, ret;

	assert(bo_fake->map_count == 0);

	for (i = 0; i < bo_fake->nr_relocs; i++) {
		struct fake_buffer_reloc *r = &bo_fake->relocs[i];
		drm_intel_bo_fake *target_fake =
		    (drm_intel_bo_fake *) r->target_buf;
		uint32_t reloc_data;

		/* Validate the target buffer if that hasn't been done. */
		if (!target_fake->validated) {
			ret =
			    drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
			if (ret != 0) {
				if (bo->virtual != NULL)
					drm_intel_fake_bo_unmap_locked(bo);
				return ret;
			}
		}

		/* Calculate the value of the relocation entry. */
		if (r->target_buf->offset != r->last_target_offset) {
			reloc_data = r->target_buf->offset + r->delta;

			if (bo->virtual == NULL)
				drm_intel_fake_bo_map_locked(bo, 1);

			*(uint32_t *) ((uint8_t *) bo->virtual + r->offset) =
			    reloc_data;

			r->last_target_offset = r->target_buf->offset;
		}
	}

	if (bo->virtual != NULL)
		drm_intel_fake_bo_unmap_locked(bo);

	if (bo_fake->write_domain != 0) {
		if (!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))) {
			if (bo_fake->backing_store == 0)
				alloc_backing_store(bo);
		}
		bo_fake->card_dirty = 1;
		bufmgr_fake->performed_rendering = 1;
	}

	return drm_intel_fake_bo_validate(bo);
}
static void
drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
	int i;

	for (i = 0; i < bo_fake->nr_relocs; i++) {
		struct fake_buffer_reloc *r = &bo_fake->relocs[i];
		drm_intel_bo_fake *target_fake =
		    (drm_intel_bo_fake *) r->target_buf;

		if (target_fake->validated)
			drm_intel_bo_fake_post_submit(r->target_buf);

		DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
		    bo_fake->name, (uint32_t) bo->offset, r->offset,
		    target_fake->name, (uint32_t) r->target_buf->offset,
		    r->delta);
	}

	assert(bo_fake->map_count == 0);
	bo_fake->validated = 0;
	bo_fake->read_domains = 0;
	bo_fake->write_domain = 0;
}
drm_public void
drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
					int (*exec) (drm_intel_bo *bo,
						     unsigned int used,
						     void *priv),
					void *priv)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	bufmgr_fake->exec = exec;
	bufmgr_fake->exec_priv = priv;
}
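/* Illustrative sketch (not part of the original file): replacing the
 * DRM_I915_BATCHBUFFER submission with a driver-provided hook.  The
 * my_submit_batch call and my_context struct are hypothetical.
 */
#if 0
static int
my_exec(drm_intel_bo *bo, unsigned int used, void *priv)
{
	struct my_context *ctx = priv;

	/* bo->offset is already valid here; hand it to the driver's own
	 * submission path and return 0 on success.
	 */
	return my_submit_batch(ctx, bo->offset, used);
}

static void
my_setup_exec(drm_intel_bufmgr *bufmgr, struct my_context *ctx)
{
	drm_intel_bufmgr_fake_set_exec_callback(bufmgr, my_exec, ctx);
}
#endif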
static int
drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo->bufmgr;
	drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *) bo;
	struct drm_i915_batchbuffer batch;
	int ret;
	int retry_count = 0;

	pthread_mutex_lock(&bufmgr_fake->lock);

	bufmgr_fake->performed_rendering = 0;

	drm_intel_fake_calculate_domains(bo);

	batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;

	/* We've run out of RAM, so blow the whole lot away and retry. */
restart:
	ret = drm_intel_fake_reloc_and_validate_buffer(bo);
	if (bufmgr_fake->fail == 1) {
		if (retry_count == 0) {
			retry_count++;
			drm_intel_fake_kick_all_locked(bufmgr_fake);
			bufmgr_fake->fail = 0;
			goto restart;
		} else		/* dump out the memory here */
			mmDumpMemInfo(bufmgr_fake->heap);
	}

	assert(ret == 0);

	if (bufmgr_fake->exec != NULL) {
		ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
		if (ret != 0) {
			pthread_mutex_unlock(&bufmgr_fake->lock);
			return ret;
		}
	} else {
		batch.start = bo->offset;
		batch.used = used;
		batch.cliprects = cliprects;
		batch.num_cliprects = num_cliprects;
		batch.DR1 = 0;
		batch.DR4 = DR4;

		if (drmCommandWrite
		    (bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
		     sizeof(batch))) {
			drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
			pthread_mutex_unlock(&bufmgr_fake->lock);
			return -errno;
		}
	}

	drm_intel_fake_fence_validated(bo->bufmgr);

	drm_intel_bo_fake_post_submit(bo);

	pthread_mutex_unlock(&bufmgr_fake->lock);

	return 0;
}
/**
 * Return an error if the list of BOs will exceed the aperture size.
 *
 * This is a rough guess and likely to fail, as during the validate sequence we
 * may place a buffer in an inopportune spot early on and then fail to fit
 * a set smaller than the aperture.
 */
static int
drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
	    (drm_intel_bufmgr_fake *) bo_array[0]->bufmgr;
	unsigned int sz = 0;
	int i;

	for (i = 0; i < count; i++) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo_array[i];

		if (bo_fake == NULL)
			continue;

		if (!bo_fake->is_static)
			sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
		sz += bo_fake->child_size;
	}

	if (sz > bufmgr_fake->size) {
		DBG("check_space: overflowed bufmgr size, %ukb vs %lukb\n",
		    sz / 1024, bufmgr_fake->size / 1024);
		return -1;
	}

	DBG("drm_check_space: sz %ukb vs bufmgr %lukb\n", sz / 1024,
	    bufmgr_fake->size / 1024);
	return 0;
}
/**
 * Evicts all buffers, waiting for fences to pass and copying contents out
 * as necessary.
 *
 * Used by the X Server on LeaveVT, when the card memory is no longer our
 * own.
 */
drm_public void
drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
	struct block *block, *tmp;

	pthread_mutex_lock(&bufmgr_fake->lock);

	bufmgr_fake->need_fence = 1;
	bufmgr_fake->fail = 0;

	/* Wait for hardware idle.  We don't know where acceleration has been
	 * happening, so we'll need to wait anyway before letting anything get
	 * put on the card again.
	 */
	drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

	/* Check that we haven't released the lock without having fenced the
	 * last set of buffers.
	 */
	assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
	assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
		/* Releases the memory, and memcpys dirty contents out if
		 * necessary.
		 */
		free_block(bufmgr_fake, block, 0);
		bo_fake->block = NULL;
	}

	pthread_mutex_unlock(&bufmgr_fake->lock);
}
drm_public void
drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
					volatile unsigned int
					*last_dispatch)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;

	bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}
drm_public drm_intel_bufmgr *
drm_intel_bufmgr_fake_init(int fd, unsigned long low_offset,
			   void *low_virtual, unsigned long size,
			   volatile unsigned int *last_dispatch)
{
	drm_intel_bufmgr_fake *bufmgr_fake;

	bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));

	if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
		free(bufmgr_fake);
		return NULL;
	}

	/* Initialize allocator */
	DRMINITLISTHEAD(&bufmgr_fake->fenced);
	DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
	DRMINITLISTHEAD(&bufmgr_fake->lru);

	bufmgr_fake->low_offset = low_offset;
	bufmgr_fake->virtual = low_virtual;
	bufmgr_fake->size = size;
	bufmgr_fake->heap = mmInit(low_offset, size);

	/* Hook in methods */
	bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
	bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
	bufmgr_fake->bufmgr.bo_alloc_tiled = drm_intel_fake_bo_alloc_tiled;
	bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
	bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
	bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
	bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
	bufmgr_fake->bufmgr.bo_subdata = drm_intel_fake_bo_subdata;
	bufmgr_fake->bufmgr.bo_wait_rendering =
	    drm_intel_fake_bo_wait_rendering;
	bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
	bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
	bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
	bufmgr_fake->bufmgr.check_aperture_space =
	    drm_intel_fake_check_aperture_space;
	bufmgr_fake->bufmgr.debug = 0;

	bufmgr_fake->fd = fd;
	bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;

	return &bufmgr_fake->bufmgr;
}
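/* Illustrative sketch (not part of the original file): typical lifecycle of
 * the fake bufmgr.  The aperture range, its CPU mapping and the sarea
 * last-dispatch pointer are hypothetical values supplied by the caller.
 */
#if 0
static void
my_use_fake_bufmgr(int fd, unsigned long gtt_offset, void *gtt_map,
		   unsigned long gtt_size,
		   volatile unsigned int *last_dispatch)
{
	drm_intel_bufmgr *bufmgr;
	drm_intel_bo *bo;
	int ret;

	bufmgr = drm_intel_bufmgr_fake_init(fd, gtt_offset, gtt_map,
					    gtt_size, last_dispatch);

	bo = drm_intel_bo_alloc(bufmgr, "vertices", 16384, 4096);

	ret = drm_intel_bo_map(bo, 1);	/* map for write */
	if (ret == 0) {
		memset(bo->virtual, 0, 16384);
		drm_intel_bo_unmap(bo);
	}

	drm_intel_bo_unreference(bo);
	drm_intel_bufmgr_destroy(bufmgr);
}
#endif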