/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "pipe/p_inlines.h"
#include "pipe/p_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_screen_buffer.h"
#include "svga_winsys.h"
#include "svga_debug.h"
/**
 * Vertex and index buffers have to be treated slightly differently from
 * regular guest memory regions because the SVGA device sees them as
 * surfaces, and the state tracker can create/destroy them without the
 * pipe driver's involvement, therefore we must do the uploads from the vws.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_INDEX);
}
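
/**
 * Create a host surface to back this buffer, sized to the buffer and
 * flagged with vertex/index hints according to its usage.
 */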
static INLINE enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   sbuf->key.format = SVGA3D_BUFFER;
   if(sbuf->base.usage & PIPE_BUFFER_USAGE_VERTEX)
      sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
   if(sbuf->base.usage & PIPE_BUFFER_USAGE_INDEX)
      sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

   sbuf->key.size.width = sbuf->base.size;
   sbuf->key.size.height = 1;
   sbuf->key.size.depth = 1;

   sbuf->key.numFaces = 1;
   sbuf->key.numMipLevels = 1;

   sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
   if(!sbuf->handle)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Always set the discard flag on the first time the buffer is written
    * as svga_screen_surface_create might have passed a recycled host
    * buffer.
    */
   sbuf->hw.flags.discard = TRUE;

   SVGA_DBG(DEBUG_DMA, " grab sid %p sz %d\n", sbuf->handle, sbuf->base.size);

   return PIPE_OK;
}
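
/**
 * Release the host surface backing this buffer; the surface cache may
 * recycle it for a later svga_screen_surface_create call.
 */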
static INLINE void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->base.size);
   svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
}
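
/**
 * Free the buffer's hardware (GMR) storage and unlink it from the
 * screen's cached buffers list.
 */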
static INLINE void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hw.buf);

   sws->buffer_destroy(sws, sbuf->hw.buf);
   sbuf->hw.buf = NULL;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
   sbuf->head.next = sbuf->head.prev = NULL;
}
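
/**
 * Copy any pending hardware storage contents back into malloc'ed memory,
 * so the GMR space can be freed without losing the buffer's data.
 */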
static INLINE enum pipe_error
svga_buffer_backup(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if (sbuf->hw.buf && sbuf->hw.num_ranges) {
      void *src;

      if (!sbuf->swbuf)
         sbuf->swbuf = align_malloc(sbuf->base.size, sbuf->base.alignment);
      if (!sbuf->swbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      src = ss->sws->buffer_map(ss->sws, sbuf->hw.buf,
                                PIPE_BUFFER_USAGE_CPU_READ);
      if (!src)
         return PIPE_ERROR;

      memcpy(sbuf->swbuf, src, sbuf->base.size);
      ss->sws->buffer_unmap(ss->sws, sbuf->hw.buf);
   }

   return PIPE_OK;
}
/**
 * Try to make GMR space available by freeing the hardware storage of
 * unmapped buffers.
 */
boolean
svga_buffer_free_cached_hw_storage(struct svga_screen *ss)
{
   struct list_head *curr;
   struct svga_buffer *sbuf;
   enum pipe_error ret = PIPE_OK;

   curr = ss->cached_buffers.prev;

   /* free the least recently used buffer's hw storage which is not mapped */
   do {
      if(curr == &ss->cached_buffers)
         return FALSE;

      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      curr = curr->prev;
      if (sbuf->map.count == 0)
         ret = svga_buffer_backup(ss, sbuf);

   } while(sbuf->map.count != 0 || ret != PIPE_OK);

   svga_buffer_destroy_hw_storage(ss, sbuf);

   return TRUE;
}
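
/**
 * Allocate a winsys buffer, applying progressively more aggressive
 * measures if GMR space is exhausted: first flush all pending DMAs,
 * then evict cached buffer storage until the allocation succeeds.
 */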
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_screen *ss,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_winsys_buffer *buf;

   buf = sws->buffer_create(sws, alignment, usage, size);
   if(!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing screen to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_screen_flush(ss, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);

      if(!buf) {
         SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "evicting buffers to find %d bytes GMR\n",
                  size);

         /* Try evicting all buffer storage */
         while(!buf && svga_buffer_free_cached_hw_storage(ss))
            buf = sws->buffer_create(sws, alignment, usage, size);
      }
   }

   return buf;
}
/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
static INLINE enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      unsigned alignment = sbuf->base.alignment;
      unsigned usage = 0;
      unsigned size = sbuf->base.size;

      sbuf->hw.buf = svga_winsys_buffer_create(ss, alignment, usage, size);
      if(!sbuf->hw.buf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->needs_flush);
      assert(!sbuf->head.prev && !sbuf->head.next);
      LIST_ADD(&sbuf->head, &ss->cached_buffers);
   }

   return PIPE_OK;
}
/**
 * Variant of SVGA3D_BufferDMA which leaves the copy box temporarily in blank.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hw.buf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dSurfaceDMAFlags flags = sbuf->hw.flags;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->hw.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_buffer *dummy;

   if(transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_READ;
      surface_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
   }
   else if(transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
      surface_flags = PIPE_BUFFER_USAGE_GPU_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if(!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->hw.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->hw.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_buffer_reference(&dummy, &sbuf->base);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->base.size;
   pSuffix->flags = flags;

   SVGA_FIFOCommitAll(swc);

   return PIPE_OK;
}
/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   struct svga_screen *ss = svga_screen(svga->pipe.screen);
   SVGA3dCopyBox *boxes;
   unsigned i;

   assert(sbuf->handle);
   assert(sbuf->hw.buf);
   assert(sbuf->hw.num_ranges);
   assert(sbuf->hw.svga == svga);
   assert(sbuf->hw.boxes);

   /*
    * Patch the DMA command with the final copy box.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->hw.boxes;
   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
               sbuf->hw.ranges[i].start, sbuf->hw.ranges[i].end);

      boxes[i].x = sbuf->hw.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->hw.ranges[i].end - sbuf->hw.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->hw.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->hw.num_ranges = 0;
   memset(&sbuf->hw.flags, 0, sizeof sbuf->hw.flags);

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
   sbuf->needs_flush = FALSE;
   /* XXX: do we care about cached_buffers any more ?*/
   LIST_ADD(&sbuf->head, &ss->cached_buffers);

   sbuf->hw.svga = NULL;
   sbuf->hw.boxes = NULL;

   /* Decrement reference count */
   pipe_buffer_reference((struct pipe_buffer **)&sbuf, NULL);
}
/**
 * Queue a DMA upload of a range of this buffer to the host.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
static void
svga_buffer_upload_queue(struct svga_buffer *sbuf,
                         unsigned start,
                         unsigned end)
{
   unsigned i;

   assert(sbuf->hw.buf);
   assert(end > start);

   /*
    * Try to grow one of the ranges.
    *
    * Note that it is not this function's task to care about overlapping
    * ranges, as the GMR was already given so it is too late to do anything.
    * Situations where overlapping ranges may pose a problem should be
    * detected via pipe_context::is_buffer_referenced and the context that
    * refers to the buffer should be flushed.
    */

   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      if(start <= sbuf->hw.ranges[i].end && sbuf->hw.ranges[i].start <= end) {
         sbuf->hw.ranges[i].start = MIN2(sbuf->hw.ranges[i].start, start);
         sbuf->hw.ranges[i].end = MAX2(sbuf->hw.ranges[i].end, end);
         return;
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */

   if(sbuf->needs_flush)
      svga_buffer_upload_flush(sbuf->hw.svga, sbuf);

   assert(!sbuf->needs_flush);
   assert(!sbuf->hw.svga);
   assert(!sbuf->hw.boxes);

   /*
    * Add a new range.
    */

   sbuf->hw.ranges[sbuf->hw.num_ranges].start = start;
   sbuf->hw.ranges[sbuf->hw.num_ranges].end = end;
   ++sbuf->hw.num_ranges;
}
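
/**
 * Map a range of the buffer for CPU access. Malloc/user buffers are
 * returned directly; otherwise hardware storage is created on demand
 * and, for reads of a pre-existing host surface, populated by a DMA
 * download first.
 */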
static void *
svga_buffer_map_range( struct pipe_screen *screen,
                       struct pipe_buffer *buf,
                       unsigned offset, unsigned length,
                       unsigned usage )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   void *map;

   if(sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else {
      if(!sbuf->hw.buf) {
         struct svga_winsys_surface *handle = sbuf->handle;

         if(svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK)
            return NULL;

         /* Populate the hardware storage if the host surface pre-existed */
         if((usage & PIPE_BUFFER_USAGE_CPU_READ) && handle) {
            SVGA3dSurfaceDMAFlags flags;
            enum pipe_error ret;
            struct pipe_fence_handle *fence = NULL;

            SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "dma from sid %p, bytes %u - %u\n",
                     sbuf->handle, 0, sbuf->base.size);

            memset(&flags, 0, sizeof flags);

            ret = SVGA3D_BufferDMA(ss->swc,
                                   sbuf->hw.buf,
                                   sbuf->handle,
                                   SVGA3D_READ_HOST_VRAM,
                                   sbuf->base.size,
                                   0,
                                   flags);
            if(ret != PIPE_OK) {
               ss->swc->flush(ss->swc, NULL);

               ret = SVGA3D_BufferDMA(ss->swc,
                                      sbuf->hw.buf,
                                      sbuf->handle,
                                      SVGA3D_READ_HOST_VRAM,
                                      sbuf->base.size,
                                      0,
                                      flags);
               assert(ret == PIPE_OK);
            }

            ss->swc->flush(ss->swc, &fence);
            sws->fence_finish(sws, fence, 0);
            sws->fence_reference(sws, &fence, NULL);
         }
      }
      else {
         if((usage & PIPE_BUFFER_USAGE_CPU_READ) && !sbuf->needs_flush) {
            /* We already had the hardware storage but we would have to issue
             * a download if we hadn't, so move the buffer to the beginning
             * of the LRU list.
             */
            assert(sbuf->head.prev && sbuf->head.next);
            LIST_DEL(&sbuf->head);
            LIST_ADD(&sbuf->head, &ss->cached_buffers);
         }
      }

      map = sws->buffer_map(sws, sbuf->hw.buf, usage);
   }

   if(map) {
      pipe_mutex_lock(ss->swc_mutex);

      ++sbuf->map.count;

      if (usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
         assert(sbuf->map.count <= 1);
         sbuf->map.writing = TRUE;
         if (usage & PIPE_BUFFER_USAGE_FLUSH_EXPLICIT)
            sbuf->map.flush_explicit = TRUE;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return map;
}
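
/**
 * Note an explicitly flushed range of a mapped buffer so it gets queued
 * for DMA upload.
 */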
static void
svga_buffer_flush_mapped_range( struct pipe_screen *screen,
                                struct pipe_buffer *buf,
                                unsigned offset, unsigned length)
{
   struct svga_buffer *sbuf = svga_buffer( buf );
   struct svga_screen *ss = svga_screen(screen);

   pipe_mutex_lock(ss->swc_mutex);
   assert(sbuf->map.writing);
   if(sbuf->map.writing) {
      assert(sbuf->map.flush_explicit);
      if(sbuf->hw.buf)
         svga_buffer_upload_queue(sbuf, offset, offset + length);
   }
   pipe_mutex_unlock(ss->swc_mutex);
}
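
/**
 * Unmap the buffer. If the mapping was written without explicit range
 * flushes, queue the entire buffer for upload.
 */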
static void
svga_buffer_unmap( struct pipe_screen *screen,
                   struct pipe_buffer *buf)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if(sbuf->map.count)
      --sbuf->map.count;

   if(sbuf->hw.buf)
      sws->buffer_unmap(sws, sbuf->hw.buf);

   if(sbuf->map.writing) {
      if(!sbuf->map.flush_explicit) {
         /* No mapped range was flushed -- flush the whole buffer */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         if(sbuf->hw.buf)
            svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);
      }

      sbuf->map.writing = FALSE;
      sbuf->map.flush_explicit = FALSE;
   }

   pipe_mutex_unlock(ss->swc_mutex);
}
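
/**
 * Release the host surface, hardware storage and malloc'ed shadow of a
 * buffer whose last reference is gone.
 */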
static void
svga_buffer_destroy( struct pipe_buffer *buf )
{
   struct svga_screen *ss = svga_screen(buf->screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->needs_flush);

   if(sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, "release sid %p sz %d\n", sbuf->handle, sbuf->base.size);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }

   if(sbuf->hw.buf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if(sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}
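
/**
 * Create a buffer, backed by a host surface for vertex/index usage or by
 * malloc'ed memory otherwise.
 */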
static struct pipe_buffer *
svga_buffer_create(struct pipe_screen *screen,
                   unsigned alignment,
                   unsigned usage,
                   unsigned size)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      goto error1;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = alignment;
   sbuf->base.usage = usage;
   sbuf->base.size = size;

   if(svga_buffer_needs_hw_storage(usage)) {
      if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      if(alignment < sizeof(void*))
         alignment = sizeof(void*);

      usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

      sbuf->swbuf = align_malloc(size, alignment);
      if(!sbuf->swbuf)
         goto error2;
   }

   return &sbuf->base;

error2:
   FREE(sbuf);
error1:
   return NULL;
}
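
/**
 * Wrap application-owned memory as a buffer; the memory is referenced
 * directly rather than copied.
 */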
static struct pipe_buffer *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      return NULL;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = 1;
   sbuf->base.usage = 0;
   sbuf->base.size = bytes;

   return &sbuf->base;
}
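
/**
 * Plug the buffer functions into the screen's vtable.
 */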
void
svga_screen_init_buffer_functions(struct pipe_screen *screen)
{
   screen->buffer_create = svga_buffer_create;
   screen->user_buffer_create = svga_user_buffer_create;
   screen->buffer_map_range = svga_buffer_map_range;
   screen->buffer_flush_mapped_range = svga_buffer_flush_mapped_range;
   screen->buffer_unmap = svga_buffer_unmap;
   screen->buffer_destroy = svga_buffer_destroy;
}
/**
 * Copy the contents of the user buffer / malloc buffer to a hardware buffer.
 */
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);
      if(!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      assert(ret == PIPE_OK);
      if(ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = ss->sws->buffer_map(ss->sws, sbuf->hw.buf, PIPE_BUFFER_USAGE_CPU_WRITE);
      assert(map);
      if(!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      memcpy(map, sbuf->swbuf, sbuf->base.size);
      ss->sws->buffer_unmap(ss->sws, sbuf->hw.buf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if(!sbuf->map.count) {
         if(sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}
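
/**
 * Get (or lazily create) the host surface handle for this buffer, and
 * queue any pending upload so that it lands in this context's command
 * stream.
 */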
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_buffer *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);

   if(!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;

      ret = svga_buffer_update_hw(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;
   }

   if(!sbuf->needs_flush && sbuf->hw.num_ranges) {
      /* Queue the buffer for flushing */
      ret = svga_buffer_upload_command(svga, sbuf);
      if(ret != PIPE_OK)
         /* XXX: Should probably have a richer return value */
         return NULL;

      assert(sbuf->hw.svga == svga);

      sbuf->needs_flush = TRUE;
      assert(sbuf->head.prev && sbuf->head.next);
      LIST_DEL(&sbuf->head);
      LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
   }

   return sbuf->handle;
}
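
/**
 * Wrap an externally created winsys surface in a pipe buffer.
 */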
struct pipe_buffer *
svga_screen_buffer_wrap_surface(struct pipe_screen *screen,
                                enum SVGA3dSurfaceFormat format,
                                struct svga_winsys_surface *srf)
{
   struct pipe_buffer *buf;
   struct svga_buffer *sbuf;
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);

   buf = svga_buffer_create(screen, 0, SVGA_BUFFER_USAGE_WRAPPED, 0);
   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   /*
    * We are not the creator of this surface and therefore we must not
    * cache it for reuse. The caching code only caches SVGA3D_BUFFER surfaces
    * so make sure this isn't one of those.
    */
   assert(format != SVGA3D_BUFFER);
   sbuf->key.format = format;
   sws->surface_reference(sws, &sbuf->handle, srf);

   return buf;
}
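
/**
 * Return a new reference to the winsys surface backing this buffer.
 */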
struct svga_winsys_surface *
svga_screen_buffer_get_winsys_surface(struct pipe_buffer *buffer)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(buffer->screen);
   struct svga_winsys_surface *vsurf = NULL;

   sws->surface_reference(sws, &vsurf, svga_buffer(buffer)->handle);
   return vsurf;
}
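
/**
 * Emit the pending DMA uploads for all of this context's dirty buffers.
 */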
void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while(curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->base.reference.count) != 0);
      assert(sbuf->needs_flush);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}