/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/* Originally a fake version of the buffer manager so that we could
 * prototype changes in a driver fairly quickly; it has since been fleshed
 * out into a fully functional interim solution.
 *
 * Basically wraps the old-style memory management in the new
 * programming interface, but is more expressive and avoids many of
 * the bugs in the old texture manager.
 */
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <xf86drm.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "drm.h"
#include "i915_drm.h"
#include "mm.h"
#include "libdrm_lists.h"
#define ALIGN(value, alignment)  ((value + alignment - 1) & ~(alignment - 1))
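/*
 * Illustrative note (not part of the original source): ALIGN rounds "value"
 * up to the next multiple of "alignment", which must be a power of two, e.g.
 *
 *	ALIGN(13, 8)      == 16
 *	ALIGN(4096, 4096) == 4096
 *
 * Adding (alignment - 1) and masking off the low bits leaves already-aligned
 * values unchanged while bumping everything else upward.
 */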
#define DBG(...) do {					\
	if (bufmgr_fake->bufmgr.debug)			\
		drmMsg(__VA_ARGS__);			\
} while (0)
#define BM_NO_BACKING_STORE	0x00000001
#define BM_NO_FENCE_SUBDATA	0x00000002
#define BM_PINNED		0x00000004
/* Wrapper around mm.c's mem_block, which understands that you must
 * wait for fences to expire before memory can be freed.  This is
 * specific to our use of memcpy for uploads - an upload that was
 * processed through the command queue wouldn't need to care about
 * fences.
 */
#define MAX_RELOCS 4096

struct fake_buffer_reloc {
	/** Buffer object that the relocation points at. */
	drm_intel_bo *target_buf;
	/** Offset of the relocation entry within reloc_buf. */
	uint32_t offset;
	/** Cached value of the offset when we last performed this relocation. */
	uint32_t last_target_offset;
	/** Value added to target_buf's offset to get the relocation entry. */
	uint32_t delta;
	/** Cache domains the target buffer is read into. */
	uint32_t read_domains;
	/** Cache domain the target buffer will have dirty cachelines in. */
	uint32_t write_domain;
};
struct block {
	struct block *next, *prev;
	struct mem_block *mem;	/* BM_MEM_AGP */

	/**
	 * Marks that the block is currently in the aperture and has yet to be
	 * fenced.
	 */
	unsigned on_hardware:1;
	/**
	 * Marks that the block is currently fenced (being used by rendering)
	 * and can't be freed until @fence is passed.
	 */
	unsigned fenced:1;

	/** Fence cookie for the block. */
	unsigned fence;	/* Split to read_fence, write_fence */

	drm_intel_bo *bo;
	void *virtual;
};
typedef struct _bufmgr_fake {
	drm_intel_bufmgr bufmgr;

	pthread_mutex_t lock;

	unsigned long low_offset;
	unsigned long size;
	void *virtual;

	struct mem_block *heap;

	unsigned buf_nr;	/* for generating ids */

	/**
	 * List of blocks which are currently in the GART but haven't been
	 * fenced yet.
	 */
	struct block on_hardware;
	/**
	 * List of blocks which are in the GART and have an active fence on them.
	 */
	struct block fenced;
	/**
	 * List of blocks which have an expired fence and are ready to be evicted.
	 */
	struct block lru;

	unsigned int last_fence;

	unsigned fail:1;
	unsigned need_fence:1;
	int thrashing;

	/**
	 * Driver callback to emit a fence, returning the cookie.
	 *
	 * This allows the driver to hook in a replacement for the DRM usage in
	 * bufmgr_fake.
	 *
	 * Currently, this also requires that a write flush be emitted before
	 * emitting the fence, but this should change.
	 */
	unsigned int (*fence_emit)(void *private);
	/** Driver callback to wait for a fence cookie to have passed. */
	void (*fence_wait)(unsigned int fence, void *private);

	/**
	 * Driver callback to execute a buffer.
	 *
	 * This allows the driver to hook in a replacement for the DRM usage in
	 * bufmgr_fake.
	 */
	int (*exec)(drm_intel_bo *bo, unsigned int used, void *priv);

	/** Driver-supplied argument to driver callbacks */
	void *fence_priv;
	void *exec_priv;

	/* Pointer to kernel-updated sarea data for the last completed user irq */
	volatile int *last_dispatch;

	int fd;

	int debug;

	int performed_rendering;
} drm_intel_bufmgr_fake;
typedef struct _drm_intel_bo_fake {
	drm_intel_bo bo;

	unsigned id;	/* debug only */
	const char *name;

	unsigned dirty:1;
	/** has the card written to this buffer - we may need to copy it back */
	unsigned card_dirty:1;
	unsigned int refcount;
	/* Flags may consist of any of the DRM_BO flags, plus
	 * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
	 * first two driver private flags.
	 */
	uint64_t flags;
	/** Cache domains the target buffer is read into. */
	uint32_t read_domains;
	/** Cache domain the target buffer will have dirty cachelines in. */
	uint32_t write_domain;

	unsigned int alignment;
	int is_static, validated;
	unsigned int map_count;

	/** relocation list */
	struct fake_buffer_reloc *relocs;
	int nr_relocs;
	/**
	 * Total size of the target_bos of this buffer.
	 *
	 * Used for estimation in check_aperture.
	 */
	unsigned int child_size;

	struct block *block;
	void *backing_store;
	void (*invalidate_cb)(drm_intel_bo *bo, void *ptr);
	void *invalidate_ptr;
} drm_intel_bo_fake;
static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
			unsigned int fence_cookie);

#define MAXFENCE 0x7fffffff
static int FENCE_LTE(unsigned a, unsigned b)
{
	if (a == b)
		return 1;

	if (a < b && b - a < (1 << 24))
		return 1;

	if (a > b && MAXFENCE - a + b < (1 << 24))
		return 1;

	return 0;
}
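/*
 * Illustrative note (not part of the original source): FENCE_LTE is a
 * wrap-aware "a <= b" for fence cookies that count up toward MAXFENCE and
 * then start over, e.g.
 *
 *	FENCE_LTE(10, 20)            -> 1   (plain ordering)
 *	FENCE_LTE(MAXFENCE - 5, 10)  -> 1   (b has wrapped past a)
 *	FENCE_LTE(20, 10)            -> 0
 *
 * The (1 << 24) window assumes far fewer than ~16M fences are ever
 * outstanding at once, so a very large numeric gap can only mean a wrap.
 */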
void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
					      unsigned int (*emit)(void *priv),
					      void (*wait)(unsigned int fence,
							   void *priv),
					      void *priv)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

	bufmgr_fake->fence_emit = emit;
	bufmgr_fake->fence_wait = wait;
	bufmgr_fake->fence_priv = priv;
}
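/*
 * Usage sketch (hypothetical driver code, not from this file): a driver that
 * manages its own breadcrumb sequence numbers can route fencing through these
 * hooks instead of the DRM_I915_IRQ_* ioctls used below.  my_fence_emit,
 * my_fence_wait and ctx are placeholders.
 *
 *	static unsigned int my_fence_emit(void *priv)
 *	{
 *		struct my_context *ctx = priv;
 *		return my_emit_breadcrumb(ctx);		// driver-specific
 *	}
 *
 *	static void my_fence_wait(unsigned int fence, void *priv)
 *	{
 *		struct my_context *ctx = priv;
 *		my_wait_breadcrumb(ctx, fence);		// driver-specific
 *	}
 *
 *	drm_intel_bufmgr_fake_set_fence_callback(bufmgr, my_fence_emit,
 *						 my_fence_wait, ctx);
 */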
static unsigned int
_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
{
	struct drm_i915_irq_emit ie;
	int ret, seq = 1;

	if (bufmgr_fake->fence_emit != NULL) {
		seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
		return seq;
	}

	ie.irq_seq = &seq;
	ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
				  &ie, sizeof(ie));
	if (ret) {
		drmMsg("%s: drm_i915_irq_emit: %d\n", __FUNCTION__, ret);
		abort();
	}

	DBG("emit 0x%08x\n", seq);
	return seq;
}
static void
_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
{
	struct drm_i915_irq_wait iw;
	int hw_seq, busy_count = 0;
	int ret;
	int kernel_lied;

	if (bufmgr_fake->fence_wait != NULL) {
		bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
		clear_fenced(bufmgr_fake, seq);
		return;
	}

	iw.irq_seq = seq;

	DBG("wait 0x%08x\n", iw.irq_seq);
	/* The kernel IRQ_WAIT implementation is all sorts of broken.
	 * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
	 *    unsigned range.
	 * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
	 *    signed range.
	 * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
	 *    signed range.
	 * 4) It returns -EBUSY in 3 seconds even if the hardware is still
	 *    successfully chewing through buffers.
	 *
	 * Assume that in userland we treat sequence numbers as ints, which
	 * makes some of the comparisons convenient, since the sequence numbers
	 * are all positive signed integers.
	 *
	 * From this we get several cases we need to handle, over a sequence
	 * range that runs from 0x2 up to 0x7ffffffd:
	 *
	 * A) Normal wait for hw to catch up.
	 *    seq - hw_seq = 5.  If we call IRQ_WAIT, it will wait for hw to
	 *    catch up.
	 *
	 * B) Normal wait for a sequence number that's already passed.
	 *    seq - hw_seq = -5.  If we call IRQ_WAIT, it returns 0 quickly.
	 *
	 * C) Hardware has already wrapped around ahead of us.
	 *    seq - hw_seq = 0x80000000 - 5.  If we called IRQ_WAIT, it would
	 *    wait for hw_seq >= seq, which may never occur.  Thus, we want to
	 *    catch this in userland and return 0.
	 *
	 * D) We've wrapped around ahead of the hardware.
	 *    seq - hw_seq = -(0x80000000 - 5).  If we called IRQ_WAIT, it
	 *    would return 0 quickly because hw_seq >= seq, even though the
	 *    hardware isn't caught up.  Thus, we need to catch this early
	 *    return in userland and bother the kernel until the hardware
	 *    really does catch up.
	 *
	 * E) Hardware might wrap after we test in userland.
	 *    seq - hw_seq = 5.  If we call IRQ_WAIT, it will likely see
	 *    seq >= hw_seq and wait.  However, suppose hw_seq wraps before we
	 *    make it into the kernel.  The kernel sees hw_seq >= seq and waits
	 *    for 3 seconds then returns -EBUSY.  This is case C).  We should
	 *    catch this and then return successfully.
	 *
	 * F) Hardware might take a long time on a buffer.
	 *    seq - hw_seq = 5.  If we call IRQ_WAIT and sequences 2 through 5
	 *    take too long, it will return -EBUSY.  Batchbuffers in the
	 *    gltestperf demo were seen to take up to 7 seconds.  We should
	 *    catch early -EBUSY return and keep trying.
	 */
	do {
		/* Keep a copy of last_dispatch so that if the wait -EBUSYs
		 * because the hardware didn't catch up in 3 seconds, we can
		 * see if it at least made progress and retry.
		 */
		hw_seq = *bufmgr_fake->last_dispatch;

		/* Catch case C */
		if (seq - hw_seq > 0x40000000)
			return;

		ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
				      &iw, sizeof(iw));

		/* Catch case D */
		kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
					     -0x40000000);

		/* Catch case E */
		if (ret == -EBUSY
		    && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
			ret = 0;

		/* Catch case F: Allow up to 15 seconds chewing on one buffer. */
		if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
			busy_count = 0;
		else
			busy_count++;
	} while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
		 (ret == -EBUSY && busy_count < 5));

	if (ret != 0) {
		drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
		       __LINE__, strerror(-ret));
		abort();
	}

	clear_fenced(bufmgr_fake, seq);
}
static int
_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
	/* Slight problem with wrap-around:
	 */
	return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
}
/**
 * Allocate a memory manager block for the buffer.
 */
static int
alloc_block(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	struct block *block = (struct block *)calloc(sizeof *block, 1);
	unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
	unsigned int sz;

	if (!block)
		return 0;

	sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);

	block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
	if (!block->mem) {
		free(block);
		return 0;
	}

	DRMINITLISTHEAD(block);

	/* Insert at head or at tail???
	 */
	DRMLISTADDTAIL(block, &bufmgr_fake->lru);

	block->virtual = (uint8_t *)bufmgr_fake->virtual +
		block->mem->ofs - bufmgr_fake->low_offset;
	block->bo = bo;

	bo_fake->block = block;

	return 1;
}
/* Release the card storage associated with buf:
 */
static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
		       int skip_dirty_copy)
{
	drm_intel_bo_fake *bo_fake;
	DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
	    block->on_hardware, block->fenced);

	if (!block)
		return;

	bo_fake = (drm_intel_bo_fake *)block->bo;

	if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
		skip_dirty_copy = 1;

	if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
		memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
		bo_fake->card_dirty = 0;
	}

	if (block->on_hardware) {
		block->bo = NULL;
	} else if (block->fenced) {
		block->bo = NULL;
	} else {
		DBG("      - free immediately\n");
		DRMLISTDEL(block);

		mmFreeMem(block->mem);
		free(block);
	}
}
static void
alloc_backing_store(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
	assert(!bo_fake->backing_store);
	assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));

	bo_fake->backing_store = malloc(bo->size);

	DBG("alloc_backing - buf %d %p %d\n", bo_fake->id,
	    bo_fake->backing_store, bo->size);
	assert(bo_fake->backing_store);
}
static void
free_backing_store(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	if (bo_fake->backing_store) {
		assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
		free(bo_fake->backing_store);
		bo_fake->backing_store = NULL;
	}
}
static void
set_dirty(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	if (bo_fake->flags & BM_NO_BACKING_STORE
	    && bo_fake->invalidate_cb != NULL)
		bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);

	assert(!(bo_fake->flags & BM_PINNED));

	DBG("set_dirty - buf %d\n", bo_fake->id);
	bo_fake->dirty = 1;
}
static int
evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
{
	struct block *block, *tmp;

	DBG("%s\n", __FUNCTION__);

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;

		if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
			continue;

		if (block->fence && max_fence &&
		    !FENCE_LTE(block->fence, max_fence))
			return 0;

		set_dirty(&bo_fake->bo);
		bo_fake->block = NULL;

		free_block(bufmgr_fake, block, 0);
		return 1;
	}

	return 0;
}
static int
evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
{
	struct block *block, *tmp;

	DBG("%s\n", __FUNCTION__);

	DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;

		if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
			continue;

		set_dirty(&bo_fake->bo);
		bo_fake->block = NULL;

		free_block(bufmgr_fake, block, 0);
		return 1;
	}

	return 0;
}
/**
 * Removes all objects from the fenced list older than the given fence.
 */
static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
			unsigned int fence_cookie)
{
	struct block *block, *tmp;
	int ret = 0;

	bufmgr_fake->last_fence = fence_cookie;
	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
		assert(block->fenced);

		if (_fence_test(bufmgr_fake, block->fence)) {

			block->fenced = 0;

			if (!block->bo) {
				DBG("delayed free: offset %x sz %x\n",
				    block->mem->ofs, block->mem->size);
				DRMLISTDEL(block);
				mmFreeMem(block->mem);
				free(block);
			} else {
				DBG("return to lru: offset %x sz %x\n",
				    block->mem->ofs, block->mem->size);
				DRMLISTDEL(block);
				DRMLISTADDTAIL(block, &bufmgr_fake->lru);
			}

			ret = 1;
		} else {
			/* Blocks are ordered by fence, so if one fails, all from
			 * here will fail also:
			 */
			DBG("fence not passed: offset %x sz %x %d %d \n",
			    block->mem->ofs, block->mem->size, block->fence,
			    bufmgr_fake->last_fence);
			break;
		}
	}

	DBG("%s: %d\n", __FUNCTION__, ret);
	return ret;
}
static void fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
	struct block *block, *tmp;

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
		DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
		    block, block->mem->size, block->mem->ofs, block->bo, fence);
		block->fence = fence;

		block->on_hardware = 0;
		block->fenced = 1;

		/* Move to tail of pending list here
		 */
		DRMLISTDEL(block);
		DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
	}

	assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
}
static int evict_and_alloc_block(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	assert(bo_fake->block == NULL);

	/* Search for already free memory:
	 */
	if (alloc_block(bo))
		return 1;

	/* If we're not thrashing, allow lru eviction to dig deeper into
	 * recently used textures.  We'll probably be thrashing soon:
	 */
	if (!bufmgr_fake->thrashing) {
		while (evict_lru(bufmgr_fake, 0))
			if (alloc_block(bo))
				return 1;
	}

	/* Keep thrashing counter alive?
	 */
	if (bufmgr_fake->thrashing)
		bufmgr_fake->thrashing = 20;

	/* Wait on any already pending fences - here we are waiting for any
	 * freed memory that has been submitted to hardware and fenced to
	 * become available:
	 */
	while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
		uint32_t fence = bufmgr_fake->fenced.next->fence;
		_fence_wait_internal(bufmgr_fake, fence);

		if (alloc_block(bo))
			return 1;
	}

	if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
		while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
			uint32_t fence = bufmgr_fake->fenced.next->fence;
			_fence_wait_internal(bufmgr_fake, fence);
		}

		if (!bufmgr_fake->thrashing) {
			DBG("thrashing\n");
		}
		bufmgr_fake->thrashing = 20;

		if (alloc_block(bo))
			return 1;
	}

	while (evict_mru(bufmgr_fake))
		if (alloc_block(bo))
			return 1;

	DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);

	return 0;
}
/**********************************************************************/

/**
 * Wait for hardware idle by emitting a fence and waiting for it.
 */
static void
drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
{
	unsigned int cookie;

	cookie = _fence_emit_internal(bufmgr_fake);
	_fence_wait_internal(bufmgr_fake, cookie);
}
/**
 * Wait for rendering to a buffer to complete.
 *
 * It is assumed that the batchbuffer which performed the rendering included
 * the necessary flushing.
 */
static void
drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	if (bo_fake->block == NULL || !bo_fake->block->fenced)
		return;

	_fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
}
static void
drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;

	pthread_mutex_lock(&bufmgr_fake->lock);
	drm_intel_fake_bo_wait_rendering_locked(bo);
	pthread_mutex_unlock(&bufmgr_fake->lock);
}
/* Specifically ignore texture memory sharing.
 *  -- just evict everything
 *  -- and wait for idle
 */
void
drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
	struct block *block, *tmp;

	pthread_mutex_lock(&bufmgr_fake->lock);

	bufmgr_fake->need_fence = 1;
	bufmgr_fake->fail = 0;

	/* Wait for hardware idle.  We don't know where acceleration has been
	 * happening, so we'll need to wait anyway before letting anything get
	 * put on the card again.
	 */
	drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

	/* Check that we hadn't released the lock without having fenced the last
	 * set of buffers.
	 */
	assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
	assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
		assert(_fence_test(bufmgr_fake, block->fence));
		set_dirty(block->bo);
	}

	pthread_mutex_unlock(&bufmgr_fake->lock);
}
static drm_intel_bo *
drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
			unsigned long size, unsigned int alignment)
{
	drm_intel_bufmgr_fake *bufmgr_fake;
	drm_intel_bo_fake *bo_fake;

	bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

	assert(size != 0);

	bo_fake = calloc(1, sizeof(*bo_fake));
	if (!bo_fake)
		return NULL;

	bo_fake->bo.size = size;
	bo_fake->bo.offset = -1;
	bo_fake->bo.virtual = NULL;
	bo_fake->bo.bufmgr = bufmgr;
	bo_fake->refcount = 1;

	/* Alignment must be a power of two */
	assert((alignment & (alignment - 1)) == 0);
	if (alignment == 0)
		alignment = 1;
	bo_fake->alignment = alignment;
	bo_fake->id = ++bufmgr_fake->buf_nr;
	bo_fake->name = name;
	bo_fake->flags = 0;
	bo_fake->is_static = 0;

	DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
	    bo_fake->bo.size / 1024);

	return &bo_fake->bo;
}
drm_intel_bo *
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name,
			       unsigned long offset, unsigned long size,
			       void *virtual)
{
	drm_intel_bufmgr_fake *bufmgr_fake;
	drm_intel_bo_fake *bo_fake;

	bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

	assert(size != 0);

	bo_fake = calloc(1, sizeof(*bo_fake));
	if (!bo_fake)
		return NULL;

	bo_fake->bo.size = size;
	bo_fake->bo.offset = offset;
	bo_fake->bo.virtual = virtual;
	bo_fake->bo.bufmgr = bufmgr;
	bo_fake->refcount = 1;
	bo_fake->id = ++bufmgr_fake->buf_nr;
	bo_fake->name = name;
	bo_fake->flags = BM_PINNED;
	bo_fake->is_static = 1;

	DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id,
	    bo_fake->name, bo_fake->bo.size / 1024);

	return &bo_fake->bo;
}
static void
drm_intel_fake_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	pthread_mutex_lock(&bufmgr_fake->lock);
	bo_fake->refcount++;
	pthread_mutex_unlock(&bufmgr_fake->lock);
}
static void
drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	bo_fake->refcount++;
}
static void
drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
	int i;

	if (--bo_fake->refcount == 0) {
		assert(bo_fake->map_count == 0);
		/* No remaining references, so free it */
		if (bo_fake->block)
			free_block(bufmgr_fake, bo_fake->block, 1);
		free_backing_store(bo);

		for (i = 0; i < bo_fake->nr_relocs; i++)
			drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);

		DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
		    bo_fake->name);

		free(bo_fake->relocs);
		free(bo);
	}
}
static void
drm_intel_fake_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;

	pthread_mutex_lock(&bufmgr_fake->lock);
	drm_intel_fake_bo_unreference_locked(bo);
	pthread_mutex_unlock(&bufmgr_fake->lock);
}
/**
 * Set the buffer as not requiring backing store, and instead get the callback
 * invoked whenever it would be set dirty.
 */
void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
					     void (*invalidate_cb)(drm_intel_bo *bo,
								   void *ptr),
					     void *ptr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	pthread_mutex_lock(&bufmgr_fake->lock);

	if (bo_fake->backing_store)
		free_backing_store(bo);

	bo_fake->flags |= BM_NO_BACKING_STORE;

	DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
	bo_fake->dirty = 1;
	bo_fake->invalidate_cb = invalidate_cb;
	bo_fake->invalidate_ptr = ptr;

	/* Note that it is invalid right from the start.  Also note
	 * invalidate_cb is called with the bufmgr locked, so cannot
	 * itself make bufmgr calls.
	 */
	if (invalidate_cb != NULL)
		invalidate_cb(bo, ptr);

	pthread_mutex_unlock(&bufmgr_fake->lock);
}
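/*
 * Usage sketch (hypothetical caller, not from this file): a driver can keep a
 * buffer's contents entirely in its own structures and simply re-emit them
 * whenever the fake bufmgr has to throw the card memory away.  The
 * scratch_invalidate helper and my_scratch_state type are placeholders.
 *
 *	static void scratch_invalidate(drm_intel_bo *bo, void *ptr)
 *	{
 *		struct my_scratch_state *s = ptr;
 *		s->needs_reupload = 1;		// driver-specific bookkeeping
 *	}
 *
 *	drm_intel_bo_fake_disable_backing_store(scratch_bo,
 *						scratch_invalidate, s);
 *
 * Note that the callback runs with the bufmgr lock held, so it must not call
 * back into the bufmgr.
 */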
/**
 * Map a buffer into bo->virtual, allocating either card memory space (if
 * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
 */
static int
drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	/* Static buffers are always mapped. */
	if (bo_fake->is_static) {
		if (bo_fake->card_dirty) {
			drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
			bo_fake->card_dirty = 0;
		}
		return 0;
	}

	/* Allow recursive mapping.  Mesa may recursively map buffers with
	 * nested display loops, and it is used internally in bufmgr_fake
	 * for relocation.
	 */
	if (bo_fake->map_count++ != 0)
		return 0;

	DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
	    bo_fake->bo.size / 1024);

	if (bo->virtual != NULL) {
		drmMsg("%s: already mapped\n", __FUNCTION__);
		abort();
	} else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {

		if (!bo_fake->block && !evict_and_alloc_block(bo)) {
			DBG("%s: alloc failed\n", __FUNCTION__);
			bufmgr_fake->fail = 1;
			return 1;
		}

		assert(bo_fake->block);

		if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
		    bo_fake->block->fenced) {
			drm_intel_fake_bo_wait_rendering_locked(bo);
		}

		bo->virtual = bo_fake->block->virtual;
	} else {
		if (write_enable)
			set_dirty(bo);

		if (bo_fake->backing_store == 0)
			alloc_backing_store(bo);

		if ((bo_fake->card_dirty == 1) && bo_fake->block) {
			if (bo_fake->block->fenced)
				drm_intel_fake_bo_wait_rendering_locked(bo);

			memcpy(bo_fake->backing_store, bo_fake->block->virtual,
			       bo_fake->block->bo->size);
			bo_fake->card_dirty = 0;
		}

		bo->virtual = bo_fake->backing_store;
	}

	return 0;
}
static int
drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	int ret;

	pthread_mutex_lock(&bufmgr_fake->lock);
	ret = drm_intel_fake_bo_map_locked(bo, write_enable);
	pthread_mutex_unlock(&bufmgr_fake->lock);

	return ret;
}
static int
drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	/* Static buffers are always mapped. */
	if (bo_fake->is_static)
		return 0;

	assert(bo_fake->map_count != 0);
	if (--bo_fake->map_count != 0)
		return 0;

	DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
	    bo_fake->bo.size / 1024);

	bo->virtual = NULL;

	return 0;
}
static int
drm_intel_fake_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	int ret;

	pthread_mutex_lock(&bufmgr_fake->lock);
	ret = drm_intel_fake_bo_unmap_locked(bo);
	pthread_mutex_unlock(&bufmgr_fake->lock);

	return ret;
}
static void
drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
{
	struct block *block, *tmp;

	bufmgr_fake->performed_rendering = 0;
	/* okay, for every BO that is on the HW kick it off.
	   seriously not afraid of the POLICE right now */
	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;

		block->on_hardware = 0;
		free_block(bufmgr_fake, block, 0);
		bo_fake->block = NULL;
		bo_fake->validated = 0;
		if (!(bo_fake->flags & BM_NO_BACKING_STORE))
			bo_fake->dirty = 1;
	}
}
static int
drm_intel_fake_bo_validate(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;

	bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;

	DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id,
	    bo_fake->name, bo_fake->bo.size / 1024);

	/* Sanity check: Buffers should be unmapped before being validated.
	 * This is not so much of a problem for bufmgr_fake, but TTM refuses,
	 * and the problem is harder to debug there.
	 */
	assert(bo_fake->map_count == 0);

	if (bo_fake->is_static) {
		/* Add it to the needs-fence list */
		bufmgr_fake->need_fence = 1;
		return 0;
	}

	/* Allocate the card memory */
	if (!bo_fake->block && !evict_and_alloc_block(bo)) {
		bufmgr_fake->fail = 1;
		DBG("Failed to validate buf %d:%s\n", bo_fake->id,
		    bo_fake->name);
		return -1;
	}

	assert(bo_fake->block);
	assert(bo_fake->block->bo == &bo_fake->bo);

	bo->offset = bo_fake->block->mem->ofs;

	/* Upload the buffer contents if necessary */
	if (bo_fake->dirty) {
		DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
		    bo_fake->name, bo->size, bo_fake->block->mem->ofs);

		assert(!(bo_fake->flags &
			 (BM_NO_BACKING_STORE | BM_PINNED)));

		/* Actually, should be able to just wait for a fence on the memory,
		 * which we would be tracking when we free it.  Waiting for idle is
		 * a sufficiently large hammer for now.
		 */
		drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

		/* we may never have mapped this BO so it might not have any backing
		 * store; if this happens it should be rare, but 0 the card memory
		 * in any case.
		 */
		if (bo_fake->backing_store)
			memcpy(bo_fake->block->virtual, bo_fake->backing_store,
			       bo->size);
		else
			memset(bo_fake->block->virtual, 0, bo->size);

		bo_fake->dirty = 0;
	}

	bo_fake->block->fenced = 0;
	bo_fake->block->on_hardware = 1;
	DRMLISTDEL(bo_fake->block);
	DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);

	bo_fake->validated = 1;
	bufmgr_fake->need_fence = 1;

	return 0;
}
static void
drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
	unsigned int cookie;

	cookie = _fence_emit_internal(bufmgr_fake);
	fence_blocks(bufmgr_fake, cookie);

	DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
}
static void
drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

	pthread_mutex_destroy(&bufmgr_fake->lock);
	mmDestroy(bufmgr_fake->heap);
	free(bufmgr);
}
static int
drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			  drm_intel_bo *target_bo, uint32_t target_offset,
			  uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	struct fake_buffer_reloc *r;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
	drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)target_bo;
	int i;

	pthread_mutex_lock(&bufmgr_fake->lock);

	if (bo_fake->relocs == NULL) {
		bo_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
	}

	r = &bo_fake->relocs[bo_fake->nr_relocs++];

	assert(bo_fake->nr_relocs <= MAX_RELOCS);

	drm_intel_fake_bo_reference_locked(target_bo);

	if (!target_fake->is_static) {
		bo_fake->child_size += ALIGN(target_bo->size, target_fake->alignment);
		bo_fake->child_size += target_fake->child_size;
	}
	r->target_buf = target_bo;
	r->offset = offset;
	r->last_target_offset = target_bo->offset;
	r->delta = target_offset;
	r->read_domains = read_domains;
	r->write_domain = write_domain;

	if (bufmgr_fake->debug) {
		/* Check that a conflicting relocation hasn't already been emitted. */
		for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
			struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];

			assert(r->offset != r2->offset);
		}
	}

	pthread_mutex_unlock(&bufmgr_fake->lock);

	return 0;
}
/**
 * Incorporates the validation flags associated with each relocation into
 * the combined validation flags for the buffer on this batchbuffer submission.
 */
static void
drm_intel_fake_calculate_domains(drm_intel_bo *bo)
{
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
	int i;

	for (i = 0; i < bo_fake->nr_relocs; i++) {
		struct fake_buffer_reloc *r = &bo_fake->relocs[i];
		drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;

		/* Do the same for the tree of buffers we depend on */
		drm_intel_fake_calculate_domains(r->target_buf);

		target_fake->read_domains |= r->read_domains;
		target_fake->write_domain |= r->write_domain;
	}
}
static int
drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
	int i, ret;

	assert(bo_fake->map_count == 0);

	for (i = 0; i < bo_fake->nr_relocs; i++) {
		struct fake_buffer_reloc *r = &bo_fake->relocs[i];
		drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
		uint32_t reloc_data;

		/* Validate the target buffer if that hasn't been done. */
		if (!target_fake->validated) {
			ret = drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
			if (ret != 0) {
				if (bo->virtual != NULL)
					drm_intel_fake_bo_unmap_locked(bo);
				return ret;
			}
		}

		/* Calculate the value of the relocation entry. */
		if (r->target_buf->offset != r->last_target_offset) {
			reloc_data = r->target_buf->offset + r->delta;

			if (bo->virtual == NULL)
				drm_intel_fake_bo_map_locked(bo, 1);

			*(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;

			r->last_target_offset = r->target_buf->offset;
		}
	}

	if (bo->virtual != NULL)
		drm_intel_fake_bo_unmap_locked(bo);

	if (bo_fake->write_domain != 0) {
		if (!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))) {
			if (bo_fake->backing_store == 0)
				alloc_backing_store(bo);
		}
		bo_fake->card_dirty = 1;
		bufmgr_fake->performed_rendering = 1;
	}

	return drm_intel_fake_bo_validate(bo);
}
static void
drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
	int i;

	for (i = 0; i < bo_fake->nr_relocs; i++) {
		struct fake_buffer_reloc *r = &bo_fake->relocs[i];
		drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;

		if (target_fake->validated)
			drm_intel_bo_fake_post_submit(r->target_buf);

		DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
		    bo_fake->name, (uint32_t)bo->offset, r->offset,
		    target_fake->name, (uint32_t)r->target_buf->offset, r->delta);
	}

	assert(bo_fake->map_count == 0);
	bo_fake->validated = 0;
	bo_fake->read_domains = 0;
	bo_fake->write_domain = 0;
}
void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
					     int (*exec)(drm_intel_bo *bo,
							 unsigned int used,
							 void *priv),
					     void *priv)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

	bufmgr_fake->exec = exec;
	bufmgr_fake->exec_priv = priv;
}
static int
drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
	drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *)bo;
	struct drm_i915_batchbuffer batch;
	int ret;
	int retry_count = 0;

	pthread_mutex_lock(&bufmgr_fake->lock);

	bufmgr_fake->performed_rendering = 0;

	drm_intel_fake_calculate_domains(bo);

	batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;

	/* we've run out of RAM so blow the whole lot away and retry */
restart:
	ret = drm_intel_fake_reloc_and_validate_buffer(bo);
	if (bufmgr_fake->fail == 1) {
		if (retry_count == 0) {
			retry_count++;
			drm_intel_fake_kick_all_locked(bufmgr_fake);
			bufmgr_fake->fail = 0;
			goto restart;
		} else /* dump out the memory here */
			mmDumpMemInfo(bufmgr_fake->heap);
	}

	assert(ret == 0);

	if (bufmgr_fake->exec != NULL) {
		int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
		if (ret != 0) {
			pthread_mutex_unlock(&bufmgr_fake->lock);
			return ret;
		}
	} else {
		batch.start = bo->offset;
		batch.used = used;
		batch.cliprects = cliprects;
		batch.num_cliprects = num_cliprects;
		batch.DR1 = 0;
		batch.DR4 = DR4;

		if (drmCommandWrite(bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
				    sizeof(batch))) {
			drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
			pthread_mutex_unlock(&bufmgr_fake->lock);
			return -errno;
		}
	}

	drm_intel_fake_fence_validated(bo->bufmgr);

	drm_intel_bo_fake_post_submit(bo);

	pthread_mutex_unlock(&bufmgr_fake->lock);

	return 0;
}
/**
 * Return an error if the list of BOs will exceed the aperture size.
 *
 * This is a rough guess and likely to fail, as during the validate sequence we
 * may place a buffer in an inopportune spot early on and then fail to fit
 * a set smaller than the aperture.
 */
static int
drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_fake *bufmgr_fake =
		(drm_intel_bufmgr_fake *)bo_array[0]->bufmgr;
	unsigned int sz = 0;
	int i;

	for (i = 0; i < count; i++) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo_array[i];

		if (bo_fake == NULL)
			continue;

		if (!bo_fake->is_static)
			sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
		sz += bo_fake->child_size;
	}

	if (sz > bufmgr_fake->size) {
		DBG("check_space: overflowed bufmgr size, %dkb vs %dkb\n",
		    sz / 1024, bufmgr_fake->size / 1024);
		return -1;
	}

	DBG("drm_check_space: sz %dkb vs bufmgr %dkb\n", sz / 1024,
	    bufmgr_fake->size / 1024);
	return 0;
}
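/*
 * Usage sketch (hypothetical caller, not from this file): before emitting
 * state that references several buffers, a driver can ask whether they are
 * even likely to fit, and flush its batch first if not.  batch_bo and
 * texture_bo are placeholders.
 *
 *	drm_intel_bo *check[2] = { batch_bo, texture_bo };
 *
 *	if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0) {
 *		// flush the current batch and retry with a smaller set
 *	}
 *
 * As the comment above notes, a zero return is only an estimate; validation
 * can still fail, and the caller must be prepared to retry.
 */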
/**
 * Evicts all buffers, waiting for fences to pass and copying contents out
 * as necessary.
 *
 * Used by the X Server on LeaveVT, when the card memory is no longer our
 * own.
 */
void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
	struct block *block, *tmp;

	pthread_mutex_lock(&bufmgr_fake->lock);

	bufmgr_fake->need_fence = 1;
	bufmgr_fake->fail = 0;

	/* Wait for hardware idle.  We don't know where acceleration has been
	 * happening, so we'll need to wait anyway before letting anything get
	 * put on the card again.
	 */
	drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);

	/* Check that we hadn't released the lock without having fenced the last
	 * set of buffers.
	 */
	assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
	assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));

	DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
		drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
		/* Releases the memory, and memcpys dirty contents out if necessary. */
		free_block(bufmgr_fake, block, 0);
		bo_fake->block = NULL;
	}

	pthread_mutex_unlock(&bufmgr_fake->lock);
}
void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
					     volatile unsigned int *last_dispatch)
{
	drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;

	bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}
drm_intel_bufmgr *
drm_intel_bufmgr_fake_init(int fd,
			   unsigned long low_offset, void *low_virtual,
			   unsigned long size,
			   volatile unsigned int *last_dispatch)
{
	drm_intel_bufmgr_fake *bufmgr_fake;

	bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));

	if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
		free(bufmgr_fake);
		return NULL;
	}

	/* Initialize allocator */
	DRMINITLISTHEAD(&bufmgr_fake->fenced);
	DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
	DRMINITLISTHEAD(&bufmgr_fake->lru);

	bufmgr_fake->low_offset = low_offset;
	bufmgr_fake->virtual = low_virtual;
	bufmgr_fake->size = size;
	bufmgr_fake->heap = mmInit(low_offset, size);

	/* Hook in methods */
	bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
	bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
	bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
	bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
	bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
	bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
	bufmgr_fake->bufmgr.bo_wait_rendering = drm_intel_fake_bo_wait_rendering;
	bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
	bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
	bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
	bufmgr_fake->bufmgr.check_aperture_space = drm_intel_fake_check_aperture_space;
	bufmgr_fake->bufmgr.debug = 0;

	bufmgr_fake->fd = fd;
	bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;

	return &bufmgr_fake->bufmgr;
}
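/*
 * Usage sketch (hypothetical driver setup, not from this file): the fake
 * bufmgr manages a fixed, already-mapped range of card memory, so the caller
 * supplies that range plus the sarea word the kernel updates with the last
 * completed IRQ sequence.  aperture_offset, aperture_map, aperture_size and
 * sarea below are placeholders.
 *
 *	drm_intel_bufmgr *bufmgr;
 *
 *	bufmgr = drm_intel_bufmgr_fake_init(fd,
 *					    aperture_offset,	// start of managed range
 *					    aperture_map,	// CPU mapping of that range
 *					    aperture_size,	// bytes under bufmgr control
 *					    &sarea->last_dispatch);
 *	if (bufmgr == NULL)
 *		return -1;
 *
 *	// Optionally route fencing/exec through driver hooks instead of the
 *	// DRM_I915_* ioctls:
 *	// drm_intel_bufmgr_fake_set_fence_callback(bufmgr, emit_cb, wait_cb, ctx);
 *	// drm_intel_bufmgr_fake_set_exec_callback(bufmgr, exec_cb, ctx);
 */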