/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga_cmd.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "pipe/p_inlines.h"
#include "pipe/p_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_screen_buffer.h"
#include "svga_winsys.h"
#include "svga_debug.h"


/**
 * Vertex and index buffers have to be treated slightly differently from
 * regular guest memory regions because the SVGA device sees them as
 * surfaces, and because the state tracker can create/destroy them without
 * the pipe driver's involvement.  Therefore the uploads must be done
 * through the winsys (vws).
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_INDEX);
}


static INLINE enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   if(!sbuf->handle) {
      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if(sbuf->base.usage & PIPE_BUFFER_USAGE_VERTEX)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if(sbuf->base.usage & PIPE_BUFFER_USAGE_INDEX)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      sbuf->key.size.width = sbuf->base.size;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if(!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag the first time the buffer is written,
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->hw.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   grab sid %p sz %d\n", sbuf->handle, sbuf->base.size);
   }

   return PIPE_OK;
}


static INLINE void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if(sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->base.size);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}


static INLINE void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hw.buf);
   if(sbuf->hw.buf) {
      sws->buffer_destroy(sws, sbuf->hw.buf);
      sbuf->hw.buf = NULL;
      assert(sbuf->head.prev && sbuf->head.next);
      LIST_DEL(&sbuf->head);
#ifdef DEBUG
      sbuf->head.next = sbuf->head.prev = NULL;
#endif
   }
}

static INLINE enum pipe_error
svga_buffer_backup(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if (sbuf->hw.buf && sbuf->hw.num_ranges) {
      void *src;

      if (!sbuf->swbuf)
         sbuf->swbuf = align_malloc(sbuf->base.size, sbuf->base.alignment);
      if (!sbuf->swbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      src = ss->sws->buffer_map(ss->sws, sbuf->hw.buf,
                                PIPE_BUFFER_USAGE_CPU_READ);
      if (!src)
         return PIPE_ERROR;

      memcpy(sbuf->swbuf, src, sbuf->base.size);
      ss->sws->buffer_unmap(ss->sws, sbuf->hw.buf);
   }

   return PIPE_OK;
}

/**
 * Try to make GMR space available by freeing the hardware storage of
 * unmapped buffers.
 */
boolean
svga_buffer_free_cached_hw_storage(struct svga_screen *ss)
{
   struct list_head *curr;
   struct svga_buffer *sbuf;
   enum pipe_error ret = PIPE_OK;

   curr = ss->cached_buffers.prev;

   /* free the hw storage of the least recently used buffer that is not mapped */
   do {
      if(curr == &ss->cached_buffers)
         return FALSE;

      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      curr = curr->prev;
      if (sbuf->map.count == 0)
         ret = svga_buffer_backup(ss, sbuf);

   } while(sbuf->map.count != 0 || ret != PIPE_OK);

   svga_buffer_destroy_hw_storage(ss, sbuf);

   return TRUE;
}

struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_screen *ss,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if(!buf) {

      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing screen to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_screen_flush(ss, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);

      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "evicting buffers to find %d bytes GMR\n",
               size);

      /* Try evicting all cached buffer storage */
      while(!buf && svga_buffer_free_cached_hw_storage(ss))
         buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}


/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
static INLINE enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      unsigned alignment = sbuf->base.alignment;
      unsigned usage = 0;
      unsigned size = sbuf->base.size;

      sbuf->hw.buf = svga_winsys_buffer_create(ss, alignment, usage, size);
      if(!sbuf->hw.buf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->needs_flush);
      assert(!sbuf->head.prev && !sbuf->head.next);
      LIST_ADD(&sbuf->head, &ss->cached_buffers);
   }

   return PIPE_OK;
}


/**
 * Variant of SVGA3D_BufferDMA which leaves the copy boxes temporarily
 * blank, to be patched up later by svga_buffer_upload_flush.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hw.buf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dSurfaceDMAFlags flags = sbuf->hw.flags;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->hw.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_buffer *dummy;

   if(transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_READ;
      surface_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
   }
   else if(transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
      surface_flags = PIPE_BUFFER_USAGE_GPU_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if(!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->hw.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->hw.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_buffer_reference(&dummy, &sbuf->base);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->base.size;
   pSuffix->flags = flags;

   swc->commit(swc);

   return PIPE_OK;
}
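
/*
 * For reference, the command reserved above occupies the FIFO as one
 * contiguous block (a sketch of the layout, not an additional structure
 * definition):
 *
 *   +---------------------------+  <- cmd
 *   | SVGA3dCmdSurfaceDMA       |  guest ptr/pitch, host sid/face/mipmap,
 *   |                           |  transfer direction
 *   +---------------------------+  <- sbuf->hw.boxes == (SVGA3dCopyBox *)&cmd[1]
 *   | SVGA3dCopyBox[numBoxes]   |  left blank here; filled in later by
 *   |                           |  svga_buffer_upload_flush()
 *   +---------------------------+  <- pSuffix
 *   | SVGA3dCmdSurfaceDMASuffix |  suffixSize, maximumOffset, flags
 *   +---------------------------+
 */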


/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   struct svga_screen *ss = svga_screen(svga->pipe.screen);
   SVGA3dCopyBox *boxes;
   unsigned i;

   assert(sbuf->handle);
   assert(sbuf->hw.buf);
   assert(sbuf->hw.num_ranges);
   assert(sbuf->hw.svga == svga);
   assert(sbuf->hw.boxes);

   /*
    * Patch the DMA command with the final copy boxes.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->hw.boxes;
   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->hw.ranges[i].start, sbuf->hw.ranges[i].end);

      boxes[i].x = sbuf->hw.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->hw.ranges[i].end - sbuf->hw.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->hw.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->hw.num_ranges = 0;
   memset(&sbuf->hw.flags, 0, sizeof sbuf->hw.flags);

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
   sbuf->needs_flush = FALSE;
   /* XXX: do we care about cached_buffers any more? */
   LIST_ADD(&sbuf->head, &ss->cached_buffers);

   sbuf->hw.svga = NULL;
   sbuf->hw.boxes = NULL;

   /* Decrement reference count */
   pipe_buffer_reference((struct pipe_buffer **)&sbuf, NULL);
}


/**
 * Queue a DMA upload of a range of this buffer to the host.
 *
 * This function only records the range; it doesn't actually emit a DMA
 * upload command.  That only happens when a context tries to refer to this
 * buffer, at which point the DMA upload command is added to that context's
 * command buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
static void
svga_buffer_upload_queue(struct svga_buffer *sbuf,
                         unsigned start,
                         unsigned end)
{
   unsigned i;

   assert(sbuf->hw.buf);
   assert(end > start);

   /*
    * Try to grow one of the existing ranges.
    *
    * Note that it is not this function's task to care about overlapping
    * ranges, as the GMR was already given out, so it is too late to do
    * anything about it.  Situations where overlapping ranges may pose a
    * problem should be detected via pipe_context::is_buffer_referenced,
    * and the context that refers to the buffer should be flushed.
    */

   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      if(start <= sbuf->hw.ranges[i].end && sbuf->hw.ranges[i].start <= end) {
         sbuf->hw.ranges[i].start = MIN2(sbuf->hw.ranges[i].start, start);
         sbuf->hw.ranges[i].end = MAX2(sbuf->hw.ranges[i].end, end);
         return;
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch up the
    * pending DMA upload and start clean.
    */

   if(sbuf->needs_flush)
      svga_buffer_upload_flush(sbuf->hw.svga, sbuf);

   assert(!sbuf->needs_flush);
   assert(!sbuf->hw.svga);
   assert(!sbuf->hw.boxes);

   /*
    * Add a new range.
    */

   sbuf->hw.ranges[sbuf->hw.num_ranges].start = start;
   sbuf->hw.ranges[sbuf->hw.num_ranges].end = end;
   ++sbuf->hw.num_ranges;
}
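
/*
 * A worked example of the coalescing above: queueing [0, 16) and then
 * [12, 32) grows the first range to [0, 32), since the two intervals
 * intersect; queueing [64, 128) afterwards touches no existing range,
 * so it is recorded as a second range (flushing the pending DMA command
 * first if one was already reserved).
 */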


static void *
svga_buffer_map_range( struct pipe_screen *screen,
                       struct pipe_buffer *buf,
                       unsigned offset, unsigned length,
                       unsigned usage )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   void *map;

   if(sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else {
      if(!sbuf->hw.buf) {
         struct svga_winsys_surface *handle = sbuf->handle;

         if(svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK)
            return NULL;

         /* Populate the hardware storage if the host surface pre-existed */
         if((usage & PIPE_BUFFER_USAGE_CPU_READ) && handle) {
            SVGA3dSurfaceDMAFlags flags;
            enum pipe_error ret;
            struct pipe_fence_handle *fence = NULL;

            SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "dma from sid %p, bytes %u - %u\n",
                     sbuf->handle, 0, sbuf->base.size);

            memset(&flags, 0, sizeof flags);

            ret = SVGA3D_BufferDMA(ss->swc,
                                   sbuf->hw.buf,
                                   sbuf->handle,
                                   SVGA3D_READ_HOST_VRAM,
                                   sbuf->base.size,
                                   0,
                                   flags);
            if(ret != PIPE_OK) {
               ss->swc->flush(ss->swc, NULL);

               ret = SVGA3D_BufferDMA(ss->swc,
                                      sbuf->hw.buf,
                                      sbuf->handle,
                                      SVGA3D_READ_HOST_VRAM,
                                      sbuf->base.size,
                                      0,
                                      flags);
               assert(ret == PIPE_OK);
            }

            ss->swc->flush(ss->swc, &fence);
            sws->fence_finish(sws, fence, 0);
            sws->fence_reference(sws, &fence, NULL);
         }
      }
      else {
         if((usage & PIPE_BUFFER_USAGE_CPU_READ) && !sbuf->needs_flush) {
            /* We already had the hardware storage, but we would have had to
             * issue a download if we hadn't, so move the buffer to the
             * beginning of the LRU list.
             */
            assert(sbuf->head.prev && sbuf->head.next);
            LIST_DEL(&sbuf->head);
            LIST_ADD(&sbuf->head, &ss->cached_buffers);
         }
      }

      map = sws->buffer_map(sws, sbuf->hw.buf, usage);
   }

   if(map) {
      pipe_mutex_lock(ss->swc_mutex);

      ++sbuf->map.count;

      if (usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
         assert(sbuf->map.count <= 1);
         sbuf->map.writing = TRUE;
         if (usage & PIPE_BUFFER_USAGE_FLUSH_EXPLICIT)
            sbuf->map.flush_explicit = TRUE;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return map;
}

static void
svga_buffer_flush_mapped_range( struct pipe_screen *screen,
                                struct pipe_buffer *buf,
                                unsigned offset, unsigned length)
{
   struct svga_buffer *sbuf = svga_buffer( buf );
   struct svga_screen *ss = svga_screen(screen);

   pipe_mutex_lock(ss->swc_mutex);
   assert(sbuf->map.writing);
   if(sbuf->map.writing) {
      assert(sbuf->map.flush_explicit);
      if(sbuf->hw.buf)
         svga_buffer_upload_queue(sbuf, offset, offset + length);
   }
   pipe_mutex_unlock(ss->swc_mutex);
}

static void
svga_buffer_unmap( struct pipe_screen *screen,
                   struct pipe_buffer *buf)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if(sbuf->map.count)
      --sbuf->map.count;

   if(sbuf->hw.buf)
      sws->buffer_unmap(sws, sbuf->hw.buf);

   if(sbuf->map.writing) {
      if(!sbuf->map.flush_explicit) {
         /* No mapped range was flushed -- flush the whole buffer */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         if(sbuf->hw.buf)
            svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);
      }

      sbuf->map.writing = FALSE;
      sbuf->map.flush_explicit = FALSE;
   }

   pipe_mutex_unlock(ss->swc_mutex);
}

static void
svga_buffer_destroy( struct pipe_buffer *buf )
{
   struct svga_screen *ss = svga_screen(buf->screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->needs_flush);

   if(sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, "release sid %p sz %d\n", sbuf->handle, sbuf->base.size);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }

   if(sbuf->hw.buf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if(sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}

static struct pipe_buffer *
svga_buffer_create(struct pipe_screen *screen,
                   unsigned alignment,
                   unsigned usage,
                   unsigned size)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      goto error1;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = alignment;
   sbuf->base.usage = usage;
   sbuf->base.size = size;

   if(svga_buffer_needs_hw_storage(usage)) {
      if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      if(alignment < sizeof(void*))
         alignment = sizeof(void*);

      usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

      sbuf->swbuf = align_malloc(size, alignment);
      if(!sbuf->swbuf)
         goto error2;
   }

   return &sbuf->base;

error2:
   FREE(sbuf);
error1:
   return NULL;
}

static struct pipe_buffer *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      goto no_sbuf;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = 1;
   sbuf->base.usage = 0;
   sbuf->base.size = bytes;

   return &sbuf->base;

no_sbuf:
   return NULL;
}


void
svga_screen_init_buffer_functions(struct pipe_screen *screen)
{
   screen->buffer_create = svga_buffer_create;
   screen->user_buffer_create = svga_user_buffer_create;
   screen->buffer_map_range = svga_buffer_map_range;
   screen->buffer_flush_mapped_range = svga_buffer_flush_mapped_range;
   screen->buffer_unmap = svga_buffer_unmap;
   screen->buffer_destroy = svga_buffer_destroy;
}
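
/*
 * Illustrative sketch only (not part of the driver build): how a state
 * tracker typically drives the hooks installed above, using the explicit
 * flush path.  The function name, buffer sizes and alignment here are
 * hypothetical; the hook signatures are the ones implemented in this file.
 */
#if 0
static void
example_buffer_usage(struct pipe_screen *screen)
{
   struct pipe_buffer *buf;
   void *map;

   /* Create a 4 KB vertex buffer; this allocates a host surface */
   buf = screen->buffer_create(screen, 16, PIPE_BUFFER_USAGE_VERTEX, 4096);
   if (!buf)
      return;

   /* Map a sub-range for writing, with explicit flushing */
   map = screen->buffer_map_range(screen, buf, 0, 256,
                                  PIPE_BUFFER_USAGE_CPU_WRITE |
                                  PIPE_BUFFER_USAGE_FLUSH_EXPLICIT);
   if (map) {
      memset(map, 0, 256);

      /* Queue [0, 256) for DMA upload to the host surface */
      screen->buffer_flush_mapped_range(screen, buf, 0, 256);
      screen->buffer_unmap(screen, buf);
   }

   /* Drop the reference; the buffer is destroyed when unreferenced */
   pipe_buffer_reference(&buf, NULL);
}
#endif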


/**
 * Copy the contents of the user buffer / malloc buffer to a hardware buffer.
 */
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);
      if(!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      assert(ret == PIPE_OK);
      if(ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = ss->sws->buffer_map(ss->sws, sbuf->hw.buf, PIPE_BUFFER_USAGE_CPU_WRITE);
      assert(map);
      if(!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      memcpy(map, sbuf->swbuf, sbuf->base.size);
      ss->sws->buffer_unmap(ss->sws, sbuf->hw.buf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if(!sbuf->map.count) {
         if(sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);

      /* The mutex is only taken on this path, so it must be released
       * before leaving the conditional; unlocking unconditionally at the
       * end of the function would release an unheld lock whenever hw.buf
       * already exists.
       */
      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}


struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_buffer *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);

   if(!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;

      ret = svga_buffer_update_hw(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;
   }

   if(!sbuf->needs_flush && sbuf->hw.num_ranges) {
      /* Queue the buffer for flushing */
      ret = svga_buffer_upload_command(svga, sbuf);
      if(ret != PIPE_OK)
         /* XXX: Should probably have a richer return value */
         return NULL;

      assert(sbuf->hw.svga == svga);

      sbuf->needs_flush = TRUE;
      assert(sbuf->head.prev && sbuf->head.next);
      LIST_DEL(&sbuf->head);
      LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
   }

   return sbuf->handle;
}
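
/*
 * Illustrative sketch only (not part of the driver build): how drawing
 * code would typically consume svga_buffer_handle().  The relocation hook
 * is the same one used by svga_buffer_upload_command() above; the function
 * name and the sid location parameter are hypothetical.
 */
#if 0
static enum pipe_error
example_emit_buffer_reloc(struct svga_context *svga,
                          struct pipe_buffer *vbuf,
                          uint32 *sid /* inside a reserved command */)
{
   struct svga_winsys_surface *handle;

   /* This reserves a pending DMA upload and moves the buffer onto
    * svga->dirty_buffers if there are queued ranges.
    */
   handle = svga_buffer_handle(svga, vbuf);
   if (!handle)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Reference the surface from the command buffer */
   svga->swc->surface_relocation(svga->swc, sid, handle,
                                 PIPE_BUFFER_USAGE_GPU_READ);

   /* At context flush time, svga_context_flush_buffers() patches the
    * copy boxes of every pending upload before the FIFO is submitted.
    */
   return PIPE_OK;
}
#endif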

struct pipe_buffer *
svga_screen_buffer_wrap_surface(struct pipe_screen *screen,
                                enum SVGA3dSurfaceFormat format,
                                struct svga_winsys_surface *srf)
{
   struct pipe_buffer *buf;
   struct svga_buffer *sbuf;
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);

   buf = svga_buffer_create(screen, 0, SVGA_BUFFER_USAGE_WRAPPED, 0);
   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   /*
    * We are not the creator of this surface and therefore we must not
    * cache it for reuse.  The caching code only caches SVGA3D_BUFFER
    * surfaces, so make sure this isn't one of those.
    */

   assert(format != SVGA3D_BUFFER);
   sbuf->key.format = format;
   sws->surface_reference(sws, &sbuf->handle, srf);

   return buf;
}


struct svga_winsys_surface *
svga_screen_buffer_get_winsys_surface(struct pipe_buffer *buffer)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(buffer->screen);
   struct svga_winsys_surface *vsurf = NULL;

   sws->surface_reference(sws, &vsurf, svga_buffer(buffer)->handle);
   return vsurf;
}

void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while(curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->base.reference.count) != 0);
      assert(sbuf->needs_flush);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}