/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga_cmd.h"   /* SVGA3D_FIFOReserve(), SVGA3D_BufferDMA() */

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_screen_buffer.h"
#include "svga_winsys.h"
#include "svga_debug.h"

/**
 * Vertex and index buffers have to be treated slightly differently from
 * regular guest memory regions because the SVGA device sees them as
 * surfaces, and the state tracker can create/destroy them without the pipe
 * driver; therefore we must do the uploads from the vws.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BUFFER_USAGE_VERTEX |
                   PIPE_BUFFER_USAGE_INDEX);
}

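/**
 * Lazily create the host surface backing this buffer, filling in the
 * surface key (format, dimensions, usage hints) so that a compatible
 * surface may be recycled from the screen's surface cache.
 */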
static INLINE enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   if(!sbuf->handle) {
      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if(sbuf->base.usage & PIPE_BUFFER_USAGE_VERTEX)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if(sbuf->base.usage & PIPE_BUFFER_USAGE_INDEX)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      sbuf->key.size.width = sbuf->base.size;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->base.size);

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if(!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n",
               sbuf->handle, sbuf->base.size);
   }

   return PIPE_OK;
}

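/**
 * Release the host surface backing this buffer, returning it to the
 * screen's surface cache where possible.
 */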
static INLINE void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if(sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
               sbuf->handle, sbuf->base.size);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}

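/**
 * Free the buffer's DMA'ble (GMR) storage. The buffer must not be mapped
 * or have a DMA upload pending when this is called.
 */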
static INLINE void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hwbuf);
   if(sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}

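/**
 * Allocate a winsys buffer, retrying once after flushing all pending DMA
 * uploads, since completing those may free up enough GMR space for the
 * allocation to succeed.
 */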
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_screen *ss,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if(!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing screen to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_screen_flush(ss, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}

/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
static INLINE enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   if(!sbuf->hwbuf) {
      unsigned alignment = sbuf->base.alignment;
      unsigned usage = 0;
      unsigned size = sbuf->base.size;

      sbuf->hwbuf = svga_winsys_buffer_create(ss, alignment, usage, size);
      if(!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}

/**
 * Variant of SVGA3D_BufferDMA which leaves the copy boxes temporarily
 * blank; they are filled in later by svga_buffer_upload_flush().
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_buffer *dummy;

   if(transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_READ;
      surface_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
   }
   else if(transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
      surface_flags = PIPE_BUFFER_USAGE_GPU_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if(!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_buffer_reference(&dummy, &sbuf->base);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd +
                                           numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->base.size;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}

/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;

   assert(sbuf->handle);
   assert(sbuf->hwbuf);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);
   assert(sbuf->dma.boxes);

   /*
    * Patch the DMA command with the final copy box.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->dma.boxes;
   for(i = 0; i < sbuf->map.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

      boxes[i].x = sbuf->map.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->map.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
   sbuf->head.next = sbuf->head.prev = NULL;

   sbuf->dma.pending = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;

   /* Decrement reference count */
   pipe_reference(&(sbuf->base.reference), NULL);
}

/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
static void
svga_buffer_add_range(struct svga_buffer *sbuf,
                      unsigned start,
                      unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    *
    * Note that it is not this function's task to care about overlapping
    * ranges, as the GMR was already given, so it is too late to do anything.
    * Situations where overlapping ranges may pose a problem should be
    * detected via pipe_context::is_buffer_referenced and the context that
    * refers to the buffer should be flushed.
    */

   for(i = 0; i < sbuf->map.num_ranges; ++i) {
      int left_dist;
      int right_dist;
      int dist;

      left_dist = start - sbuf->map.ranges[i].end;
      right_dist = sbuf->map.ranges[i].start - end;
      dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and
          * return.
          */

         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end   = MAX2(sbuf->map.ranges[i].end,   end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */

         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */

   if(sbuf->dma.pending)
      svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */

      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */

      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start =
         MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end =
         MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}

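/**
 * pipe_screen::buffer_map_range callback. Maps the buffer's backing store,
 * lazily creating hardware storage, or falling back to a malloc'ed shadow
 * buffer when a big enough GMR cannot be obtained.
 */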
static void *
svga_buffer_map_range( struct pipe_screen *screen,
                       struct pipe_buffer *buf,
                       unsigned offset, unsigned length,
                       unsigned usage )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   void *map;

   if (!sbuf->swbuf && !sbuf->hwbuf) {
      if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */

         debug_printf("%s: failed to allocate %u KB of DMA, splitting DMA transfers\n",
                      __FUNCTION__,
                      (sbuf->base.size + 1023)/1024);

         sbuf->swbuf = align_malloc(sbuf->base.size, sbuf->base.alignment);
      }
   }

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (sbuf->hwbuf) {
      map = sws->buffer_map(sws, sbuf->hwbuf, usage);
   }
   else {
      map = NULL;
   }

   if (map) {
      pipe_mutex_lock(ss->swc_mutex);

      ++sbuf->map.count;

      if (usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
         assert(sbuf->map.count <= 1);
         sbuf->map.writing = TRUE;
         if (usage & PIPE_BUFFER_USAGE_FLUSH_EXPLICIT)
            sbuf->map.flush_explicit = TRUE;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return map;
}

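/**
 * pipe_screen::buffer_flush_mapped_range callback. Merely notes the written
 * range; the actual DMA upload is emitted later by svga_buffer_handle().
 */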
static void
svga_buffer_flush_mapped_range( struct pipe_screen *screen,
                                struct pipe_buffer *buf,
                                unsigned offset, unsigned length)
{
   struct svga_buffer *sbuf = svga_buffer( buf );
   struct svga_screen *ss = svga_screen(screen);

   pipe_mutex_lock(ss->swc_mutex);
   assert(sbuf->map.writing);
   if(sbuf->map.writing) {
      assert(sbuf->map.flush_explicit);
      svga_buffer_add_range(sbuf, offset, offset + length);
   }
   pipe_mutex_unlock(ss->swc_mutex);
}

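/**
 * pipe_screen::buffer_unmap callback. If the buffer was written to without
 * explicit range flushes, the whole buffer is marked dirty here.
 */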
static void
svga_buffer_unmap( struct pipe_screen *screen,
                   struct pipe_buffer *buf)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if(sbuf->map.count)
      --sbuf->map.count;

   if(sbuf->hwbuf)
      sws->buffer_unmap(sws, sbuf->hwbuf);

   if(sbuf->map.writing) {
      if(!sbuf->map.flush_explicit) {
         /* No mapped range was flushed -- flush the whole buffer */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         svga_buffer_add_range(sbuf, 0, sbuf->base.size);
      }

      sbuf->map.writing = FALSE;
      sbuf->map.flush_explicit = FALSE;
   }

   pipe_mutex_unlock(ss->swc_mutex);
}

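/**
 * pipe_buffer destructor: releases the host surface, the hardware storage
 * and the malloc'ed shadow copy, as applicable.
 */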
static void
svga_buffer_destroy( struct pipe_buffer *buf )
{
   struct svga_screen *ss = svga_screen(buf->screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->dma.pending);

   if(sbuf->handle)
      svga_buffer_destroy_host_surface(ss, sbuf);

   if(sbuf->uploaded.buffer)
      pipe_buffer_reference(&sbuf->uploaded.buffer, NULL);

   if(sbuf->hwbuf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if(sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}

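/**
 * pipe_screen::buffer_create callback. Vertex and index buffers get a host
 * surface; all other buffers start out as plain malloc'ed memory.
 */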
static struct pipe_buffer *
svga_buffer_create(struct pipe_screen *screen,
                   unsigned alignment,
                   unsigned usage,
                   unsigned size)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      goto error1;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = alignment;
   sbuf->base.usage = usage;
   sbuf->base.size = size;

   if(svga_buffer_needs_hw_storage(usage)) {
      if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      if(alignment < sizeof(void*))
         alignment = sizeof(void*);

      usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

      sbuf->swbuf = align_malloc(size, alignment);
      if(!sbuf->swbuf)
         goto error2;
   }

   return &sbuf->base;

error2:
   FREE(sbuf);
error1:
   return NULL;
}

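/**
 * pipe_screen::user_buffer_create callback: wrap application-owned memory
 * in a pipe_buffer without copying. The memory stays owned by the caller.
 */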
static struct pipe_buffer *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      return NULL;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   /* Wrap the user memory directly; it is freed by the caller, not by us */
   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = 1;
   sbuf->base.usage = 0;
   sbuf->base.size = bytes;

   return &sbuf->base;
}

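/**
 * Plug the buffer functions into the screen vtable.
 */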
void
svga_screen_init_buffer_functions(struct pipe_screen *screen)
{
   screen->buffer_create = svga_buffer_create;
   screen->user_buffer_create = svga_user_buffer_create;
   screen->buffer_map_range = svga_buffer_map_range;
   screen->buffer_flush_mapped_range = svga_buffer_flush_mapped_range;
   screen->buffer_unmap = svga_buffer_unmap;
   screen->buffer_destroy = svga_buffer_destroy;
}

/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if(!sbuf->hwbuf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);
      if(!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      assert(ret == PIPE_OK);
      if(ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = ss->sws->buffer_map(ss->sws, sbuf->hwbuf, PIPE_BUFFER_USAGE_CPU_WRITE);
      assert(map);
      if(!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      memcpy(map, sbuf->swbuf, sbuf->base.size);
      ss->sws->buffer_unmap(ss->sws, sbuf->hwbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if(!sbuf->map.count) {
         if(sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}

/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 */
static INLINE enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

         /* Halve the piece size until a small enough GMR can be found */
         hwbuf = svga_winsys_buffer_create(ss, alignment, usage, size);
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = svga_winsys_buffer_create(ss, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_BUFFER_USAGE_CPU_WRITE |
                               PIPE_BUFFER_USAGE_DISCARD);
         assert(map);
         if (map) {
            /* Copy from the matching offset in the malloc buffer, since the
             * DMA below writes to host offset 'offset'.
             */
            memcpy(map, (uint8_t *)sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if(ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}

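/**
 * Get the host surface handle for this buffer, first queuing a DMA upload
 * of any dirty ranges. Called whenever a context refers to the buffer.
 */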
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_buffer *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);

   if(!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /*
          * No pending DMA upload yet, so insert a DMA upload command now.
          */

         /*
          * Migrate the data from swbuf -> hwbuf if necessary.
          */
         ret = svga_buffer_update_hw(ss, sbuf);
         if (ret == PIPE_OK) {
            /*
             * Queue a dma command.
             */

            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it
             * into smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is a pending DMA already. Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(!sbuf->map.num_ranges || sbuf->dma.pending);

   return sbuf->handle;
}

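/**
 * Wrap an externally created winsys surface in a pipe_buffer. The surface
 * is referenced, not copied, so we must never return it to the surface
 * cache.
 */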
struct pipe_buffer *
svga_screen_buffer_wrap_surface(struct pipe_screen *screen,
                                enum SVGA3dSurfaceFormat format,
                                struct svga_winsys_surface *srf)
{
   struct pipe_buffer *buf;
   struct svga_buffer *sbuf;
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);

   buf = svga_buffer_create(screen, 0, SVGA_BUFFER_USAGE_WRAPPED, 0);
   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   /*
    * We are not the creator of this surface and therefore we must not
    * cache it for reuse. Set the cachable flag to zero in the key to
    * prevent this.
    */
   sbuf->key.format = format;
   sbuf->key.cachable = 0;
   sws->surface_reference(sws, &sbuf->handle, srf);

   return buf;
}

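/**
 * Return a new reference to the winsys surface backing this buffer.
 */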
struct svga_winsys_surface *
svga_screen_buffer_get_winsys_surface(struct pipe_buffer *buffer)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(buffer->screen);
   struct svga_winsys_surface *vsurf = NULL;

   assert(svga_buffer(buffer)->key.cachable == 0);
   svga_buffer(buffer)->key.cachable = 0;
   sws->surface_reference(sws, &vsurf, svga_buffer(buffer)->handle);
   return vsurf;
}

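/**
 * Patch up and commit all pending DMA uploads on this context's dirty
 * buffer list; typically invoked when the context's command buffer is
 * flushed.
 */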
void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while(curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->base.reference.count) != 0);
      assert(sbuf->dma.pending);

      /* This unlinks the buffer from the dirty list, which is why the next
       * node is grabbed beforehand.
       */
      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}