svga: Remove unused variable.
[mesa.git] src/gallium/drivers/svga/svga_screen_buffer.c
/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga_cmd.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "pipe/p_inlines.h"
#include "pipe/p_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_screen_buffer.h"
#include "svga_winsys.h"
#include "svga_debug.h"


/**
 * Vertex and index buffers have to be treated slightly differently from
 * regular guest memory regions because the SVGA device sees them as
 * surfaces, and the state tracker can create/destroy them without the
 * pipe driver's involvement; therefore the uploads must be done from
 * the vws.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_INDEX);
}


static INLINE enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   if(!sbuf->handle) {
      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if(sbuf->base.usage & PIPE_BUFFER_USAGE_VERTEX)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if(sbuf->base.usage & PIPE_BUFFER_USAGE_INDEX)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      sbuf->key.size.width = sbuf->base.size;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->base.size);

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if(!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->hw.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->base.size);
   }

   return PIPE_OK;
}


static INLINE void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if(sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->base.size);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}


static INLINE void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hw.buf);
   if(sbuf->hw.buf) {
      sws->buffer_destroy(sws, sbuf->hw.buf);
      sbuf->hw.buf = NULL;
   }
}

struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_screen *ss,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if(!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing screen to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_screen_flush(ss, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}
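
/*
 * A minimal caller sketch (illustrative only; it mirrors what
 * svga_buffer_create_hw_storage() below does): allocation can still
 * fail even after the flush retry, so callers must handle NULL.
 *
 *    buf = svga_winsys_buffer_create(ss, alignment, 0, size);
 *    if(!buf)
 *       return PIPE_ERROR_OUT_OF_MEMORY;
 */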


/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
static INLINE enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      unsigned alignment = sbuf->base.alignment;
      unsigned usage = 0;
      unsigned size = sbuf->base.size;

      sbuf->hw.buf = svga_winsys_buffer_create(ss, alignment, usage, size);
      if(!sbuf->hw.buf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->needs_flush);
   }

   return PIPE_OK;
}


/**
 * Variant of SVGA3D_BufferDMA which leaves the copy box temporarily blank,
 * to be patched up with the final ranges by svga_buffer_upload_flush().
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hw.buf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dSurfaceDMAFlags flags = sbuf->hw.flags;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->hw.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_buffer *dummy;

   if(transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_READ;
      surface_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
   }
   else if(transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
      surface_flags = PIPE_BUFFER_USAGE_GPU_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if(!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->hw.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->hw.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_buffer_reference(&dummy, &sbuf->base);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->base.size;
   pSuffix->flags = flags;

   swc->commit(swc);

   return PIPE_OK;
}
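
/*
 * Illustrative sketch of the deferred-DMA flow implemented in this file
 * (the call order only; all of these functions exist below or above):
 *
 *    svga_buffer_upload_queue(sbuf, 0, 64);    // note dirty ranges...
 *    svga_buffer_upload_queue(sbuf, 64, 128);  // ...contiguous ones coalesce
 *    svga_buffer_upload_command(svga, sbuf);   // reserve DMA cmd, boxes blank
 *    svga_buffer_upload_flush(svga, sbuf);     // patch boxes at flush time
 */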


/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;

   assert(sbuf->handle);
   assert(sbuf->hw.buf);
   assert(sbuf->hw.num_ranges);
   assert(sbuf->hw.svga == svga);
   assert(sbuf->hw.boxes);

   /*
    * Patch the DMA command with the final copy box.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->hw.boxes;
   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
               sbuf->hw.ranges[i].start, sbuf->hw.ranges[i].end);

      boxes[i].x = sbuf->hw.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->hw.ranges[i].end - sbuf->hw.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->hw.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->hw.num_ranges = 0;
   memset(&sbuf->hw.flags, 0, sizeof sbuf->hw.flags);

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->needs_flush = FALSE;

   sbuf->hw.svga = NULL;
   sbuf->hw.boxes = NULL;

   sbuf->host_written = TRUE;

   /* Decrement reference count */
   pipe_buffer_reference((struct pipe_buffer **)&sbuf, NULL);
}


/**
 * Queue a DMA upload of a range of this buffer to the host.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
static void
svga_buffer_upload_queue(struct svga_buffer *sbuf,
                         unsigned start,
                         unsigned end)
{
   unsigned i;

   assert(sbuf->hw.buf);
   assert(end > start);

   /*
    * Try to grow one of the ranges.
    *
    * Note that it is not this function's task to care about overlapping
    * ranges, as the GMR was already given out, so it is too late to do
    * anything about it. Situations where overlapping ranges may pose a
    * problem should be detected via pipe_context::is_buffer_referenced
    * and the context that refers to the buffer should be flushed.
    */

   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      if(start <= sbuf->hw.ranges[i].end && sbuf->hw.ranges[i].start <= end) {
         sbuf->hw.ranges[i].start = MIN2(sbuf->hw.ranges[i].start, start);
         sbuf->hw.ranges[i].end = MAX2(sbuf->hw.ranges[i].end, end);
         return;
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch up the
    * pending DMA upload and start clean.
    */

   if(sbuf->needs_flush)
      svga_buffer_upload_flush(sbuf->hw.svga, sbuf);

   assert(!sbuf->needs_flush);
   assert(!sbuf->hw.svga);
   assert(!sbuf->hw.boxes);

   /*
    * Add a new range.
    */

   sbuf->hw.ranges[sbuf->hw.num_ranges].start = start;
   sbuf->hw.ranges[sbuf->hw.num_ranges].end = end;
   ++sbuf->hw.num_ranges;
}
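
/*
 * Worked example of the coalescing above (hypothetical byte values):
 * queueing [0, 16) and then [8, 32) touches an existing range, so the
 * two merge via MIN2/MAX2 into a single [0, 32) range and hence a
 * single copy box, rather than two overlapping DMA transfers.
 */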


static void *
svga_buffer_map_range( struct pipe_screen *screen,
                       struct pipe_buffer *buf,
                       unsigned offset, unsigned length,
                       unsigned usage )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   void *map;

   if(sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else {
      if(!sbuf->hw.buf) {
         if(svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK)
            return NULL;

         /* Populate the hardware storage if the host surface pre-existed */
         if(sbuf->host_written) {
            SVGA3dSurfaceDMAFlags flags;
            enum pipe_error ret;
            struct pipe_fence_handle *fence = NULL;

            assert(sbuf->handle);

            SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "dma from sid %p (buffer), bytes %u - %u\n",
                     sbuf->handle, 0, sbuf->base.size);

            memset(&flags, 0, sizeof flags);

            ret = SVGA3D_BufferDMA(ss->swc,
                                   sbuf->hw.buf,
                                   sbuf->handle,
                                   SVGA3D_READ_HOST_VRAM,
                                   sbuf->base.size,
                                   0,
                                   flags);
            if(ret != PIPE_OK) {
               ss->swc->flush(ss->swc, NULL);

               ret = SVGA3D_BufferDMA(ss->swc,
                                      sbuf->hw.buf,
                                      sbuf->handle,
                                      SVGA3D_READ_HOST_VRAM,
                                      sbuf->base.size,
                                      0,
                                      flags);
               assert(ret == PIPE_OK);
            }

            ss->swc->flush(ss->swc, &fence);
            sws->fence_finish(sws, fence, 0);
            sws->fence_reference(sws, &fence, NULL);
         }
      }

      map = sws->buffer_map(sws, sbuf->hw.buf, usage);
   }

   if(map) {
      pipe_mutex_lock(ss->swc_mutex);

      ++sbuf->map.count;

      if (usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
         assert(sbuf->map.count <= 1);
         sbuf->map.writing = TRUE;
         if (usage & PIPE_BUFFER_USAGE_FLUSH_EXPLICIT)
            sbuf->map.flush_explicit = TRUE;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return map;
}
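
/*
 * A minimal sketch of the mapping protocol these hooks implement, from
 * the state tracker's point of view (illustrative; "data" and "size"
 * are made-up names):
 *
 *    map = screen->buffer_map_range(screen, buf, 0, size,
 *                                   PIPE_BUFFER_USAGE_CPU_WRITE |
 *                                   PIPE_BUFFER_USAGE_FLUSH_EXPLICIT);
 *    if(map) {
 *       memcpy(map, data, size);
 *       screen->buffer_flush_mapped_range(screen, buf, 0, size);
 *       screen->buffer_unmap(screen, buf);
 *    }
 */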

static void
svga_buffer_flush_mapped_range( struct pipe_screen *screen,
                                struct pipe_buffer *buf,
                                unsigned offset, unsigned length)
{
   struct svga_buffer *sbuf = svga_buffer( buf );
   struct svga_screen *ss = svga_screen(screen);

   pipe_mutex_lock(ss->swc_mutex);
   assert(sbuf->map.writing);
   if(sbuf->map.writing) {
      assert(sbuf->map.flush_explicit);
      if(sbuf->hw.buf)
         svga_buffer_upload_queue(sbuf, offset, offset + length);
   }
   pipe_mutex_unlock(ss->swc_mutex);
}

static void
svga_buffer_unmap( struct pipe_screen *screen,
                   struct pipe_buffer *buf)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if(sbuf->map.count)
      --sbuf->map.count;

   if(sbuf->hw.buf)
      sws->buffer_unmap(sws, sbuf->hw.buf);

   if(sbuf->map.writing) {
      if(!sbuf->map.flush_explicit) {
         /* No mapped range was flushed -- flush the whole buffer */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         if(sbuf->hw.buf)
            svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);
      }

      sbuf->map.writing = FALSE;
      sbuf->map.flush_explicit = FALSE;
   }

   pipe_mutex_unlock(ss->swc_mutex);
}

static void
svga_buffer_destroy( struct pipe_buffer *buf )
{
   struct svga_screen *ss = svga_screen(buf->screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->needs_flush);

   if(sbuf->handle)
      svga_buffer_destroy_host_surface(ss, sbuf);

   if(sbuf->hw.buf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if(sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}

static struct pipe_buffer *
svga_buffer_create(struct pipe_screen *screen,
                   unsigned alignment,
                   unsigned usage,
                   unsigned size)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   assert(size);
   assert(alignment);

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      goto error1;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = alignment;
   sbuf->base.usage = usage;
   sbuf->base.size = size;

   if(svga_buffer_needs_hw_storage(usage)) {
      if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      if(alignment < sizeof(void*))
         alignment = sizeof(void*);

      usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

      sbuf->swbuf = align_malloc(size, alignment);
      if(!sbuf->swbuf)
         goto error2;
   }

   return &sbuf->base;

error2:
   FREE(sbuf);
error1:
   return NULL;
}

static struct pipe_buffer *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      goto no_sbuf;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = 1;
   sbuf->base.usage = 0;
   sbuf->base.size = bytes;

   return &sbuf->base;

no_sbuf:
   return NULL;
}


void
svga_screen_init_buffer_functions(struct pipe_screen *screen)
{
   screen->buffer_create = svga_buffer_create;
   screen->user_buffer_create = svga_user_buffer_create;
   screen->buffer_map_range = svga_buffer_map_range;
   screen->buffer_flush_mapped_range = svga_buffer_flush_mapped_range;
   screen->buffer_unmap = svga_buffer_unmap;
   screen->buffer_destroy = svga_buffer_destroy;
}
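
/*
 * Once these hooks are installed, state trackers normally reach them
 * through the pipe/p_inlines.h convenience wrappers rather than the
 * screen vtable directly, e.g. (a sketch, assuming the p_inlines.h API
 * of this gallium version):
 *
 *    map = pipe_buffer_map(screen, buf, PIPE_BUFFER_USAGE_CPU_WRITE);
 *    ...
 *    pipe_buffer_unmap(screen, buf);
 */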


/**
 * Copy the contents of the user buffer / malloc buffer to a hardware buffer.
 */
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);
      if(!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      assert(ret == PIPE_OK);
      if(ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = ss->sws->buffer_map(ss->sws, sbuf->hw.buf, PIPE_BUFFER_USAGE_CPU_WRITE);
      assert(map);
      if(!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      memcpy(map, sbuf->swbuf, sbuf->base.size);
      ss->sws->buffer_unmap(ss->sws, sbuf->hw.buf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if(!sbuf->map.count) {
         if(sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);

      /* The mutex is only taken inside this block, so it must also be
       * released here rather than unconditionally below.
       */
      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}


struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_buffer *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);

   if(!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;

      ret = svga_buffer_update_hw(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;
   }

   if(!sbuf->needs_flush && sbuf->hw.num_ranges) {
      /* Queue the buffer for flushing */
      ret = svga_buffer_upload_command(svga, sbuf);
      if(ret != PIPE_OK)
         /* XXX: Should probably have a richer return value */
         return NULL;

      assert(sbuf->hw.svga == svga);

      sbuf->needs_flush = TRUE;
      assert(!sbuf->head.prev && !sbuf->head.next);
      LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
   }

   return sbuf->handle;
}

struct pipe_buffer *
svga_screen_buffer_wrap_surface(struct pipe_screen *screen,
                                enum SVGA3dSurfaceFormat format,
                                struct svga_winsys_surface *srf)
{
   struct pipe_buffer *buf;
   struct svga_buffer *sbuf;
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);

   buf = svga_buffer_create(screen, 0, SVGA_BUFFER_USAGE_WRAPPED, 0);
   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   /*
    * We are not the creator of this surface and therefore we must not
    * cache it for reuse. Set the cacheable flag to zero in the key to
    * prevent this.
    */
   sbuf->key.format = format;
   sbuf->key.cachable = 0;
   sws->surface_reference(sws, &sbuf->handle, srf);

   return buf;
}


struct svga_winsys_surface *
svga_screen_buffer_get_winsys_surface(struct pipe_buffer *buffer)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(buffer->screen);
   struct svga_winsys_surface *vsurf = NULL;

   assert(svga_buffer(buffer)->key.cachable == 0);
   svga_buffer(buffer)->key.cachable = 0;
   sws->surface_reference(sws, &vsurf, svga_buffer(buffer)->handle);
   return vsurf;
}

void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

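   /* svga_buffer_upload_flush() unlinks the buffer from the dirty list
    * (LIST_DEL above), so the next pointer is saved before each entry
    * is flushed.
    */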
   curr = svga->dirty_buffers.next;
   next = curr->next;
   while(curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->base.reference.count) != 0);
      assert(sbuf->needs_flush);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}
748 }