/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */
#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"
/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)
struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum cpu memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};
/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};
static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   return (struct fenced_manager *)mgr;
}
static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   return (struct fenced_buffer *)buf;
}
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
}
/**
 * Destroy a fenced buffer. The buffer must be on the unfenced list and no
 * longer referenced.
 */
static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}
/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}
/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}
/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

   debug_warning("waiting for GPU");

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.base.reference));

      /*
       * Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has been already carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */
         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}
/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if(wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if(signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}
/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount &&
         !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}
/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}
/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}
/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}
/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}
/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      fenced_manager_dump_locked(fenced_mgr);

      /* Give up. */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}
/**
 * Copy the contents of the CPU storage to the GPU storage.
 */
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
/**
 * Copy the contents of the GPU storage to the CPU storage.
 */
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ, NULL);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags, void *flush_ctx)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
          (flags & PB_USAGE_CPU_WRITE))) {

      /*
       * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
       */
      if((flags & PB_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags, flush_ctx);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
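/*
 * Illustrative sketch (not part of the original file): how client code is
 * expected to drive the map/unmap path above through the generic pb_map()/
 * pb_unmap() wrappers. The buffer is assumed to come from a manager created
 * with fenced_bufmgr_create(); upload_vertices(), "vertices" and "bytes" are
 * hypothetical names used only for this example.
 *
 * \code
 * static boolean
 * upload_vertices(struct pb_buffer *buf, const void *vertices, pb_size bytes)
 * {
 *    void *map;
 *
 *    // Ask for a CPU-write mapping but refuse to block on the GPU. If the
 *    // buffer is still fenced for GPU access, fenced_buffer_map() above
 *    // bails out and returns NULL, so the caller can retry after a flush.
 *    map = pb_map(buf, PB_USAGE_CPU_WRITE | PB_USAGE_DONTBLOCK, NULL);
 *    if(!map)
 *       return FALSE;
 *
 *    memcpy(map, vertices, bytes);
 *
 *    // Every successful map must be balanced by an unmap, which decrements
 *    // mapcount and clears the CPU usage flags when it reaches zero.
 *    pb_unmap(buf);
 *
 *    return TRUE;
 * }
 * \endcode
 */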
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* Invalidate. */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if(fenced_buf->buffer) {
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   }
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};
/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

   /*assert(!fenced_mgr->num_unfenced);*/

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}