/**************************************************************************
 *
 * Copyright 2007-2010 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */
#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "pb_buffer.h"
#include "pb_buffer_fenced.h"
#include "pb_bufmgr.h"


/**
 * Convenience macro (type safe).
 */
#define SUPER(__derived) (&(__derived)->base)
struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * Maximum buffer size that can be safely allocated.
    */
   pb_size max_buffer_size;

   /**
    * Maximum CPU memory we can allocate before we start waiting for the
    * GPU to idle.
    */
   pb_size max_cpu_total_size;

   /**
    * Following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;

   /**
    * How much temporary CPU memory is being used to hold unvalidated buffers.
    */
   pb_size cpu_total_size;
};
/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * Following members are mutable and protected by fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;
   struct pb_desc desc;

   /**
    * Temporary CPU storage data. Used when there isn't enough GPU memory to
    * store the buffer.
    */
   void *data;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

   unsigned mapcount;

   struct pb_validate *vl;
   unsigned validation_flags;

   struct pipe_fence_handle *fence;
};
static INLINE struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static INLINE struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf);

static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait);

static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf);
/**
 * Dump the fenced buffer list.
 *
 * Useful to understand failures to allocate buffers.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %7u %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   fenced_buf->buffer ? "gpu" : (fenced_buf->data ? "cpu" : "none"));
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %7u %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.base.size,
                   p_atomic_read(&fenced_buf->base.base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}
static INLINE void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
   fenced_buffer_destroy_cpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}
/**
 * Add the buffer to the fenced list.
 *
 * Reference count should be incremented before calling this function.
 */
static INLINE void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}
/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static INLINE boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}
/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static INLINE enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.base.reference));

      /*
       * Only proceed if the fence object didn't change in the meanwhile.
       * Otherwise assume the work has already been carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */

         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: remove subsequent buffers with the same fence? */

         assert(!destroyed);

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}
/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just now. Instead preemptively check if the
             * following buffers' fences already expired, without further waits.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}
/**
 * Try to free some GPU memory by backing it up into CPU memory.
 *
 * Returns TRUE if at least one buffer was freed.
 */
static boolean
fenced_manager_free_gpu_storage_locked(struct fenced_manager *fenced_mgr)
{
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      /*
       * We can only move storage if the buffer is not mapped and not
       * validated.
       */
      if(fenced_buf->buffer &&
         !fenced_buf->mapcount &&
         !fenced_buf->vl) {
         enum pipe_error ret;

         ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
         if(ret == PIPE_OK) {
            ret = fenced_buffer_copy_storage_to_cpu_locked(fenced_buf);
            if(ret == PIPE_OK) {
               fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
               return TRUE;
            }
            fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
         }
      }

      curr = next;
      next = curr->next;
   }

   return FALSE;
}
/**
 * Destroy CPU storage for this buffer.
 */
static void
fenced_buffer_destroy_cpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->data) {
      align_free(fenced_buf->data);
      fenced_buf->data = NULL;
      assert(fenced_buf->mgr->cpu_total_size >= fenced_buf->size);
      fenced_buf->mgr->cpu_total_size -= fenced_buf->size;
   }
}
/**
 * Create CPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_cpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf)
{
   assert(!fenced_buf->data);
   if(fenced_buf->data)
      return PIPE_OK;

   if (fenced_mgr->cpu_total_size + fenced_buf->size > fenced_mgr->max_cpu_total_size)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_buf->data = align_malloc(fenced_buf->size, fenced_buf->desc.alignment);
   if(!fenced_buf->data)
      return PIPE_ERROR_OUT_OF_MEMORY;

   fenced_mgr->cpu_total_size += fenced_buf->size;

   return PIPE_OK;
}
/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}
/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static INLINE boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size,
                                                &fenced_buf->desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}
/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE) ||
          fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
      fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE) ||
             fenced_manager_free_gpu_storage_locked(fenced_mgr))) {
         fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf);
      }
   }

   if(!fenced_buf->buffer) {
      if(0)
         fenced_manager_dump_locked(fenced_mgr);

      /* give up */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}
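/**
 * Copy the temporary CPU storage into the GPU storage.
 *
 * Both storages must exist; the manager mutex must be held.
 */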
static enum pipe_error
fenced_buffer_copy_storage_to_gpu_locked(struct fenced_buffer *fenced_buf)
{
   uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_WRITE, NULL);
   if(!map)
      return PIPE_ERROR;

   memcpy(map, fenced_buf->data, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
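/**
 * Copy the GPU storage back into the temporary CPU storage.
 *
 * Both storages must exist; the manager mutex must be held.
 */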
static enum pipe_error
fenced_buffer_copy_storage_to_cpu_locked(struct fenced_buffer *fenced_buf)
{
   const uint8_t *map;

   assert(fenced_buf->data);
   assert(fenced_buf->buffer);

   map = pb_map(fenced_buf->buffer, PB_USAGE_CPU_READ, NULL);
   if(!map)
      return PIPE_ERROR;

   memcpy(fenced_buf->data, map, fenced_buf->size);

   pb_unmap(fenced_buf->buffer);

   return PIPE_OK;
}
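/**
 * pb_buffer::destroy callback.
 *
 * Called once the reference count has dropped to zero.
 */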
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}
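/**
 * pb_buffer::map callback.
 *
 * Serializes CPU access against pending GPU access: waits for the buffer's
 * fence unless PB_USAGE_UNSYNCHRONIZED is given, or bails out early when
 * PB_USAGE_DONTBLOCK is given and the fence has not signalled.  Returns a
 * pointer to either the GPU storage or the temporary CPU storage.
 */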
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags, void *flush_ctx)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
          (flags & PB_USAGE_CPU_WRITE))) {

      /*
       * Don't wait for the GPU to finish accessing it, if blocking is forbidden.
       */
      if((flags & PB_USAGE_DONTBLOCK) &&
         ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   if(fenced_buf->buffer) {
      map = pb_map(fenced_buf->buffer, flags, flush_ctx);
   }
   else {
      assert(fenced_buf->data);
      map = fenced_buf->data;
   }

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}
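/**
 * pb_buffer::unmap callback.
 *
 * Drops the map count and clears the CPU usage flags on the last unmap.
 */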
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
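/**
 * pb_buffer::validate callback.
 *
 * Ensures the buffer has GPU storage (creating it and uploading any temporary
 * CPU copy if necessary) and validates it against the given validation list.
 */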
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* invalidate */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated */
      ret = PIPE_OK;
      goto done;
   }

   /*
    * Create and update GPU storage.
    */
   if(!fenced_buf->buffer) {
      assert(!fenced_buf->mapcount);

      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
      if(ret != PIPE_OK) {
         goto done;
      }

      ret = fenced_buffer_copy_storage_to_gpu_locked(fenced_buf);
      if(ret != PIPE_OK) {
         fenced_buffer_destroy_gpu_storage_locked(fenced_buf);
         goto done;
      }

      if(fenced_buf->mapcount) {
         debug_printf("warning: validating a buffer while it is still mapped\n");
      }
      else {
         fenced_buffer_destroy_cpu_storage_locked(fenced_buf);
      }
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}
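/**
 * pb_buffer::fence callback.
 *
 * Associates a new fence with the buffer and moves it onto the fenced list.
 */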
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
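/**
 * pb_buffer::get_base_buffer callback.
 *
 * Returns the underlying storage buffer and offset.
 */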
static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * This should only be called when the buffer is validated. Typically
    * when processing relocations.
    */
   assert(fenced_buf->vl);
   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
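/**
 * Virtual function table through which generic pb_buffer calls are dispatched
 * to this fenced buffer implementation.
 */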
static const struct pb_vtbl
fenced_buffer_vtbl = {
   fenced_buffer_destroy,
   fenced_buffer_map,
   fenced_buffer_unmap,
   fenced_buffer_validate,
   fenced_buffer_fence,
   fenced_buffer_get_base_buffer
};
/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   /*
    * Don't stall the GPU, waste time evicting buffers, or waste memory
    * trying to create a buffer that will most likely never fit into the
    * graphics aperture.
    */
   if(size > fenced_mgr->max_buffer_size) {
      goto no_buffer;
   }

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.base.reference, 1);
   fenced_buf->base.base.alignment = desc->alignment;
   fenced_buf->base.base.usage = desc->usage;
   fenced_buf->base.base.size = size;
   fenced_buf->size = size;
   fenced_buf->desc = *desc;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage without stalling.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, FALSE);

   /*
    * Attempt to use CPU memory to avoid stalling the GPU.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_cpu_storage_locked(fenced_mgr, fenced_buf);
   }

   /*
    * Create GPU storage, waiting for some to be available.
    */
   if(ret != PIPE_OK) {
      ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf, TRUE);
   }

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer || fenced_buf->data);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}
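/**
 * pb_manager::flush callback.
 *
 * Waits for all outstanding fences to expire, then flushes the provider.
 */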
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}
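/**
 * pb_manager::destroy callback.
 *
 * Waits for all outstanding fences, then destroys the provider and the
 * fence ops.
 */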
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   if(fenced_mgr->provider)
      fenced_mgr->provider->destroy(fenced_mgr->provider);

   fenced_mgr->ops->destroy(fenced_mgr->ops);

   FREE(fenced_mgr);
}
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}
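
/*
 * Example usage (a minimal sketch, not part of the original code): a driver
 * winsys would typically stack this manager on top of its kernel buffer
 * manager.  my_driver_create_provider() and my_driver_create_fence_ops() are
 * hypothetical placeholders for the driver's own pb_manager and pb_fence_ops
 * factories; the size limits are arbitrary.
 */
#if 0
static struct pb_manager *
my_driver_create_fenced_bufmgr(void)
{
   /* Hypothetical driver-specific factories. */
   struct pb_manager *provider = my_driver_create_provider();
   struct pb_fence_ops *ops = my_driver_create_fence_ops();

   /* Cap single allocations at 64 MiB and CPU staging memory at 256 MiB. */
   return fenced_bufmgr_create(provider, ops,
                               64 * 1024 * 1024,
                               256 * 1024 * 1024);
}
#endif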