/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "pipe/p_inlines.h"
#include "pipe/p_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_screen_buffer.h"
#include "svga_winsys.h"
#include "svga_debug.h"

/**
 * Vertex and index buffers have to be treated slightly differently from
 * regular guest memory regions because the SVGA device sees them as
 * surfaces, and the state tracker can create/destroy them without the pipe
 * driver's involvement, therefore we must do the uploads from the vws.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_INDEX);
}
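
/*
 * Illustrative only (not part of the original file): a vertex buffer
 * qualifies for hardware storage, while e.g. a pixel transfer buffer does
 * not and stays in malloc'ed memory until promoted by svga_buffer_update_hw:
 *
 *    svga_buffer_needs_hw_storage(PIPE_BUFFER_USAGE_VERTEX)  -> nonzero
 *    svga_buffer_needs_hw_storage(PIPE_BUFFER_USAGE_PIXEL)   -> 0
 */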

static INLINE enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   sbuf->key.flags = 0;

   sbuf->key.format = SVGA3D_BUFFER;
   if(sbuf->base.usage & PIPE_BUFFER_USAGE_VERTEX)
      sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
   if(sbuf->base.usage & PIPE_BUFFER_USAGE_INDEX)
      sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

   sbuf->key.size.width = sbuf->base.size;
   sbuf->key.size.height = 1;
   sbuf->key.size.depth = 1;

   sbuf->key.numFaces = 1;
   sbuf->key.numMipLevels = 1;
   sbuf->key.cachable = 1;

   SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->base.size);

   sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
   if(!sbuf->handle)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Always set the discard flag on the first time the buffer is written
    * as svga_screen_surface_create might have passed a recycled host
    * buffer.
    */
   sbuf->hw.flags.discard = TRUE;

   SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->base.size);

   return PIPE_OK;
}
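
/*
 * Sketch of the key produced above for a 64-byte vertex buffer (values are
 * illustrative): format = SVGA3D_BUFFER, flags = SVGA3D_SURFACE_HINT_VERTEXBUFFER,
 * size = 64x1x1, numFaces = 1, numMipLevels = 1, cachable = 1. The host thus
 * sees the buffer as a one-row surface, which is why the DMA copy boxes
 * elsewhere in this file only ever use the x/w coordinates.
 */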

static INLINE void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if(sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->base.size);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}

static INLINE void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hw.buf);
   if(sbuf->hw.buf) {
      sws->buffer_destroy(sws, sbuf->hw.buf);
      sbuf->hw.buf = NULL;

      assert(sbuf->head.prev && sbuf->head.next);
      LIST_DEL(&sbuf->head);
      sbuf->head.next = sbuf->head.prev = NULL;
   }
}

struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_screen *ss,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if(!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing screen to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_screen_flush(ss, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}
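
/*
 * The retry above assumes allocation failures are usually caused by GMR
 * space being held by pending DMA commands, which a flush releases. Callers
 * still need to handle a NULL result; a typical (hypothetical) call:
 *
 *    buf = svga_winsys_buffer_create(ss, 64, 0, 4096);
 *    if(!buf)
 *       return PIPE_ERROR_OUT_OF_MEMORY;
 */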

/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
static INLINE enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      unsigned alignment = sbuf->base.alignment;
      unsigned usage = 0;
      unsigned size = sbuf->base.size;

      sbuf->hw.buf = svga_winsys_buffer_create(ss, alignment, usage, size);
      if(!sbuf->hw.buf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->needs_flush);
      assert(!sbuf->head.prev && !sbuf->head.next);
      LIST_ADD(&sbuf->head, &ss->cached_buffers);
   }

   return PIPE_OK;
}

/**
 * Variant of SVGA3D_BufferDMA which leaves the copy box temporarily blank,
 * to be patched up later by svga_buffer_upload_flush.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hw.buf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dSurfaceDMAFlags flags = sbuf->hw.flags;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->hw.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_buffer *dummy;

   if(transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_READ;
      surface_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
   }
   else if(transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
      surface_flags = PIPE_BUFFER_USAGE_GPU_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if(!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->hw.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->hw.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_buffer_reference(&dummy, &sbuf->base);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->base.size;
   pSuffix->flags = flags;

   swc->commit(swc);

   return PIPE_OK;
}
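
/*
 * Memory layout of the command reserved above, as the pointer arithmetic
 * implies (numBoxes == 2 shown for illustration):
 *
 *    [ SVGA3dCmdSurfaceDMA | SVGA3dCopyBox[0] | SVGA3dCopyBox[1] | SVGA3dCmdSurfaceDMASuffix ]
 *
 * sbuf->hw.boxes points at the first copy box; the boxes stay unfilled until
 * svga_buffer_upload_flush() patches in the final ranges.
 */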

/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   struct svga_screen *ss = svga_screen(svga->pipe.screen);
   SVGA3dCopyBox *boxes;
   unsigned i;

   assert(sbuf->handle);
   assert(sbuf->hw.buf);
   assert(sbuf->hw.num_ranges);
   assert(sbuf->hw.svga == svga);
   assert(sbuf->hw.boxes);

   /*
    * Patch the DMA command with the final copy boxes.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->hw.boxes;
   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->hw.ranges[i].start, sbuf->hw.ranges[i].end);

      boxes[i].x = sbuf->hw.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->hw.ranges[i].end - sbuf->hw.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->hw.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->hw.num_ranges = 0;
   memset(&sbuf->hw.flags, 0, sizeof sbuf->hw.flags);

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
   sbuf->needs_flush = FALSE;
   /* XXX: do we care about cached_buffers any more? */
   LIST_ADD(&sbuf->head, &ss->cached_buffers);

   sbuf->hw.svga = NULL;
   sbuf->hw.boxes = NULL;

   sbuf->host_written = TRUE;

   /* Decrement reference count */
   pipe_buffer_reference((struct pipe_buffer **)&sbuf, NULL);
}
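
/*
 * Since the host surface is one-dimensional (height == depth == 1), a byte
 * range [start, end) degenerates into a copy box with x = srcx = start,
 * w = end - start, and y/z/h/d pinned to 0/0/1/1, exactly as written above.
 */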

/**
 * Queue a DMA upload of a range of this buffer to the host.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
static void
svga_buffer_upload_queue(struct svga_buffer *sbuf,
                         unsigned start,
                         unsigned end)
{
   unsigned i;

   assert(sbuf->hw.buf);
   assert(end > start);

   /*
    * Try to grow one of the existing ranges.
    *
    * Note that it is not this function's task to care about overlapping
    * ranges, as the GMR was already given to the host so it is too late to
    * do anything about it. Situations where overlapping ranges may pose a
    * problem should be detected via pipe_context::is_buffer_referenced and
    * the context that refers to the buffer should be flushed.
    */

   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      if(start <= sbuf->hw.ranges[i].end && sbuf->hw.ranges[i].start <= end) {
         sbuf->hw.ranges[i].start = MIN2(sbuf->hw.ranges[i].start, start);
         sbuf->hw.ranges[i].end = MAX2(sbuf->hw.ranges[i].end, end);
         return;
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch up the
    * pending DMA upload and start clean.
    */

   if(sbuf->needs_flush)
      svga_buffer_upload_flush(sbuf->hw.svga, sbuf);

   assert(!sbuf->needs_flush);
   assert(!sbuf->hw.svga);
   assert(!sbuf->hw.boxes);

   /*
    * Add a new range.
    */

   sbuf->hw.ranges[sbuf->hw.num_ranges].start = start;
   sbuf->hw.ranges[sbuf->hw.num_ranges].end = end;
   ++sbuf->hw.num_ranges;
}
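
/*
 * Coalescing example (illustrative): queueing [0, 16) and then [8, 32)
 * leaves a single range [0, 32), because the two touch and the MIN2/MAX2
 * merge grows the existing entry in place. A disjoint range such as
 * [64, 80) is recorded as a new entry instead; and if a DMA command was
 * already reserved (needs_flush), it is patched up first, since a reserved
 * command's box count can no longer change.
 */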

static void *
svga_buffer_map_range( struct pipe_screen *screen,
                       struct pipe_buffer *buf,
                       unsigned offset, unsigned length,
                       unsigned usage )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   void *map;

   if(sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else {
      if(!sbuf->hw.buf) {
         if(svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK)
            return NULL;

         /* Populate the hardware storage if the host surface pre-existed */
         if(sbuf->host_written) {
            SVGA3dSurfaceDMAFlags flags;
            enum pipe_error ret;
            struct pipe_fence_handle *fence = NULL;

            assert(sbuf->handle);

            SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "dma from sid %p (buffer), bytes %u - %u\n",
                     sbuf->handle, 0, sbuf->base.size);

            memset(&flags, 0, sizeof flags);

            ret = SVGA3D_BufferDMA(ss->swc,
                                   sbuf->hw.buf,
                                   sbuf->handle,
                                   SVGA3D_READ_HOST_VRAM,
                                   sbuf->base.size,
                                   0,
                                   flags);
            if(ret != PIPE_OK) {
               ss->swc->flush(ss->swc, NULL);

               ret = SVGA3D_BufferDMA(ss->swc,
                                      sbuf->hw.buf,
                                      sbuf->handle,
                                      SVGA3D_READ_HOST_VRAM,
                                      sbuf->base.size,
                                      0,
                                      flags);
               assert(ret == PIPE_OK);
            }

            ss->swc->flush(ss->swc, &fence);
            sws->fence_finish(sws, fence, 0);
            sws->fence_reference(sws, &fence, NULL);
         }
      }
      else {
         if(!(usage & PIPE_BUFFER_USAGE_DISCARD) && !sbuf->needs_flush) {
            /* We already had the hardware storage but we would have to issue
             * a download if we hadn't, so move the buffer to the beginning
             * of the LRU list.
             */
            assert(sbuf->head.prev && sbuf->head.next);
            LIST_DEL(&sbuf->head);
            LIST_ADD(&sbuf->head, &ss->cached_buffers);
         }
      }

      map = sws->buffer_map(sws, sbuf->hw.buf, usage);
   }

   if(map) {
      pipe_mutex_lock(ss->swc_mutex);

      ++sbuf->map.count;

      if (usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
         assert(sbuf->map.count <= 1);
         sbuf->map.writing = TRUE;
         if (usage & PIPE_BUFFER_USAGE_FLUSH_EXPLICIT)
            sbuf->map.flush_explicit = TRUE;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return map;
}

static void
svga_buffer_flush_mapped_range( struct pipe_screen *screen,
                                struct pipe_buffer *buf,
                                unsigned offset, unsigned length)
{
   struct svga_buffer *sbuf = svga_buffer( buf );
   struct svga_screen *ss = svga_screen(screen);

   pipe_mutex_lock(ss->swc_mutex);
   assert(sbuf->map.writing);
   if(sbuf->map.writing) {
      assert(sbuf->map.flush_explicit);
      if(sbuf->hw.buf)
         svga_buffer_upload_queue(sbuf, offset, offset + length);
   }
   pipe_mutex_unlock(ss->swc_mutex);
}

static void
svga_buffer_unmap( struct pipe_screen *screen,
                   struct pipe_buffer *buf)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if(sbuf->map.count)
      --sbuf->map.count;

   if(sbuf->hw.buf)
      sws->buffer_unmap(sws, sbuf->hw.buf);

   if(sbuf->map.writing) {
      if(!sbuf->map.flush_explicit) {
         /* No mapped range was flushed -- flush the whole buffer */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         if(sbuf->hw.buf)
            svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);
      }

      sbuf->map.writing = FALSE;
      sbuf->map.flush_explicit = FALSE;
   }

   pipe_mutex_unlock(ss->swc_mutex);
}

static void
svga_buffer_destroy( struct pipe_buffer *buf )
{
   struct svga_screen *ss = svga_screen(buf->screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->needs_flush);

   if(sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, "release sid %p sz %d\n", sbuf->handle, sbuf->base.size);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }

   if(sbuf->hw.buf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if(sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}

static struct pipe_buffer *
svga_buffer_create(struct pipe_screen *screen,
                   unsigned alignment,
                   unsigned usage,
                   unsigned size)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      goto error1;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = alignment;
   sbuf->base.usage = usage;
   sbuf->base.size = size;

   if(svga_buffer_needs_hw_storage(usage)) {
      if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      if(alignment < sizeof(void*))
         alignment = sizeof(void*);

      usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

      sbuf->swbuf = align_malloc(size, alignment);
      if(!sbuf->swbuf)
         goto error2;
   }

   return &sbuf->base;

error2:
   FREE(sbuf);
error1:
   return NULL;
}

static struct pipe_buffer *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      return NULL;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = 1;
   sbuf->base.usage = 0;
   sbuf->base.size = bytes;

   return &sbuf->base;
}

void
svga_screen_init_buffer_functions(struct pipe_screen *screen)
{
   screen->buffer_create = svga_buffer_create;
   screen->user_buffer_create = svga_user_buffer_create;
   screen->buffer_map_range = svga_buffer_map_range;
   screen->buffer_flush_mapped_range = svga_buffer_flush_mapped_range;
   screen->buffer_unmap = svga_buffer_unmap;
   screen->buffer_destroy = svga_buffer_destroy;
}
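
/*
 * A state tracker drives the hooks above through the pipe_screen interface.
 * A minimal write, as a sketch only (error handling omitted, `vertices`
 * hypothetical):
 *
 *    struct pipe_buffer *vb =
 *       screen->buffer_create(screen, 16, PIPE_BUFFER_USAGE_VERTEX, 1024);
 *    void *ptr = screen->buffer_map_range(screen, vb, 0, 1024,
 *                                         PIPE_BUFFER_USAGE_CPU_WRITE);
 *    memcpy(ptr, vertices, 1024);
 *    screen->buffer_unmap(screen, vb);
 */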

/**
 * Copy the contents of the user buffer / malloc buffer to a hardware buffer.
 */
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);
      if(!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      assert(ret == PIPE_OK);
      if(ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = ss->sws->buffer_map(ss->sws, sbuf->hw.buf, PIPE_BUFFER_USAGE_CPU_WRITE);
      assert(map);
      if(!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      memcpy(map, sbuf->swbuf, sbuf->base.size);
      ss->sws->buffer_unmap(ss->sws, sbuf->hw.buf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if(!sbuf->map.count) {
         if(sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}

struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_buffer *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);

   if(!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;

      ret = svga_buffer_update_hw(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;
   }

   if(!sbuf->needs_flush && sbuf->hw.num_ranges) {
      /* Queue the buffer for flushing */
      ret = svga_buffer_upload_command(svga, sbuf);
      if(ret != PIPE_OK)
         /* XXX: Should probably have a richer return value */
         return NULL;

      assert(sbuf->hw.svga == svga);

      sbuf->needs_flush = TRUE;
      assert(sbuf->head.prev && sbuf->head.next);
      LIST_DEL(&sbuf->head);
      LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
   }

   return sbuf->handle;
}
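
/*
 * Typical call site, as a sketch (names illustrative): drawing code fetches
 * the surface id right before emitting state that references the buffer,
 * which is what triggers the deferred DMA upload queued above:
 *
 *    struct svga_winsys_surface *sid =
 *       svga_buffer_handle(svga, vertex_buffer->buffer);
 *    if(!sid)
 *       return PIPE_ERROR_OUT_OF_MEMORY;
 */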

struct pipe_buffer *
svga_screen_buffer_wrap_surface(struct pipe_screen *screen,
                                enum SVGA3dSurfaceFormat format,
                                struct svga_winsys_surface *srf)
{
   struct pipe_buffer *buf;
   struct svga_buffer *sbuf;
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);

   buf = svga_buffer_create(screen, 0, SVGA_BUFFER_USAGE_WRAPPED, 0);
   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   /*
    * We are not the creator of this surface and therefore we must not
    * cache it for reuse. Set the cachable flag to zero in the key to
    * prevent this.
    */
   sbuf->key.format = format;
   sbuf->key.cachable = 0;
   sws->surface_reference(sws, &sbuf->handle, srf);

   return buf;
}

struct svga_winsys_surface *
svga_screen_buffer_get_winsys_surface(struct pipe_buffer *buffer)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(buffer->screen);
   struct svga_winsys_surface *vsurf = NULL;

   assert(svga_buffer(buffer)->key.cachable == 0);
   svga_buffer(buffer)->key.cachable = 0;
   sws->surface_reference(sws, &vsurf, svga_buffer(buffer)->handle);
   return vsurf;
}

void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while(curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->base.reference.count) != 0);
      assert(sbuf->needs_flush);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}