/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
#include "svga_cmd.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_screen_buffer.h"
#include "svga_winsys.h"
#include "svga_debug.h"
/**
 * Vertex and index buffers have to be treated slightly differently from
 * regular guest memory regions because the SVGA device sees them as
 * surfaces, and the state tracker can create/destroy them without the pipe
 * driver, therefore we must do the uploads from the vws.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BUFFER_USAGE_VERTEX | PIPE_BUFFER_USAGE_INDEX);
}
static INLINE enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   sbuf->key.format = SVGA3D_BUFFER;
   if(sbuf->base.usage & PIPE_BUFFER_USAGE_VERTEX)
      sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
   if(sbuf->base.usage & PIPE_BUFFER_USAGE_INDEX)
      sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

   sbuf->key.size.width = sbuf->base.size;
   sbuf->key.size.height = 1;
   sbuf->key.size.depth = 1;

   sbuf->key.numFaces = 1;
   sbuf->key.numMipLevels = 1;
   sbuf->key.cachable = 1;

   SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->base.size);

   sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
   if(!sbuf->handle)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Always set the discard flag on the first time the buffer is written
    * as svga_screen_surface_create might have passed a recycled host
    * buffer.
    */
   sbuf->hw.flags.discard = TRUE;

   SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->base.size);

   return PIPE_OK;
}
static INLINE void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if(sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->base.size);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}
static INLINE void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hw.buf);

   sws->buffer_destroy(sws, sbuf->hw.buf);
   sbuf->hw.buf = NULL;
}
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_screen *ss,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_winsys_buffer *buf;

   buf = sws->buffer_create(sws, alignment, usage, size);
   if(!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing screen to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_screen_flush(ss, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}
/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
static INLINE enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      unsigned alignment = sbuf->base.alignment;
      unsigned usage = 0;
      unsigned size = sbuf->base.size;

      sbuf->hw.buf = svga_winsys_buffer_create(ss, alignment, usage, size);
      if(!sbuf->hw.buf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->needs_flush);
   }

   return PIPE_OK;
}
/**
 * Variant of SVGA3D_BufferDMA which leaves the copy box temporarily blank.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hw.buf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dSurfaceDMAFlags flags = sbuf->hw.flags;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->hw.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_buffer *dummy;

   if(transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_READ;
      surface_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
   }
   else if(transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = PIPE_BUFFER_USAGE_GPU_WRITE;
      surface_flags = PIPE_BUFFER_USAGE_GPU_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if(!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->hw.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->hw.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_buffer_reference(&dummy, &sbuf->base);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->base.size;
   pSuffix->flags = flags;

   SVGA_FIFOCommitAll(swc);

   return PIPE_OK;
}
/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;

   assert(sbuf->handle);
   assert(sbuf->hw.buf);
   assert(sbuf->hw.num_ranges);
   assert(sbuf->hw.svga == svga);
   assert(sbuf->hw.boxes);

   /*
    * Patch the DMA command with the final copy box.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->hw.boxes;
   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->hw.ranges[i].start, sbuf->hw.ranges[i].end);

      boxes[i].x = sbuf->hw.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->hw.ranges[i].end - sbuf->hw.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->hw.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->hw.num_ranges = 0;
   memset(&sbuf->hw.flags, 0, sizeof sbuf->hw.flags);

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
   sbuf->head.next = sbuf->head.prev = NULL;

   sbuf->needs_flush = FALSE;

   sbuf->hw.svga = NULL;
   sbuf->hw.boxes = NULL;

   /* Decrement reference count */
   pipe_reference(&(sbuf->base.reference), NULL);
}
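

/*
 * Note on the DMA life-cycle: svga_buffer_upload_command() reserves the
 * SVGA_3D_CMD_SURFACE_DMA packet (with an empty copy-box array) as soon as a
 * context first refers to a dirty buffer; svga_buffer_upload_queue() merely
 * records dirty byte ranges in sbuf->hw.ranges; and the function above
 * finally writes one SVGA3dCopyBox per range into the already-reserved
 * packet when the context is flushed. Ranges queued in between therefore
 * ride along in the reserved command at no extra cost.
 */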
/**
 * Queue a DMA upload of a range of this buffer to the host.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
static void
svga_buffer_upload_queue(struct svga_buffer *sbuf,
                         unsigned start,
                         unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(sbuf->hw.buf);
   assert(end > start);

   if (sbuf->hw.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->hw.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    *
    * Note that it is not this function's task to care about overlapping
    * ranges, as the GMR was already given so it is too late to do anything.
    * Situations where overlapping ranges may pose a problem should be
    * detected via pipe_context::is_buffer_referenced and the context that
    * refers to the buffer should be flushed.
    */
   for(i = 0; i < sbuf->hw.num_ranges; ++i) {
      int left_dist;
      int right_dist;
      int dist;

      left_dist = start - sbuf->hw.ranges[i].end;
      right_dist = sbuf->hw.ranges[i].start - end;
      dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          */
         sbuf->hw.ranges[i].start = MIN2(sbuf->hw.ranges[i].start, start);
         sbuf->hw.ranges[i].end   = MAX2(sbuf->hw.ranges[i].end,   end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */
         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */
   if(sbuf->needs_flush)
      svga_buffer_upload_flush(sbuf->hw.svga, sbuf);

   assert(!sbuf->needs_flush);
   assert(!sbuf->hw.svga);
   assert(!sbuf->hw.boxes);

   if (sbuf->hw.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */
      sbuf->hw.ranges[sbuf->hw.num_ranges].start = start;
      sbuf->hw.ranges[sbuf->hw.num_ranges].end = end;
      ++sbuf->hw.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */
      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->hw.num_ranges);
      sbuf->hw.ranges[nearest_range].start = MIN2(sbuf->hw.ranges[nearest_range].start, start);
      sbuf->hw.ranges[nearest_range].end   = MAX2(sbuf->hw.ranges[nearest_range].end,   end);
   }
}
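

/*
 * Worked example of the coalescing above (illustration only): queueing
 * [0, 64) and then [64, 128) leaves a single range [0, 128) because the
 * writes are contiguous, while queueing [0, 64) and then [1024, 1088)
 * records two separate ranges and therefore two copy boxes in the eventual
 * DMA command. Only once SVGA_BUFFER_MAX_RANGES distinct ranges exist does
 * a new discontiguous write get folded into the nearest existing range.
 */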
static void *
svga_buffer_map_range( struct pipe_screen *screen,
                       struct pipe_buffer *buf,
                       unsigned offset, unsigned length,
                       unsigned usage )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   void *map;

   if(sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else {
      if(svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK)
         return NULL;

      map = sws->buffer_map(sws, sbuf->hw.buf, usage);
   }

   if(map) {
      pipe_mutex_lock(ss->swc_mutex);

      ++sbuf->map.count;

      if (usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
         assert(sbuf->map.count <= 1);
         sbuf->map.writing = TRUE;
         if (usage & PIPE_BUFFER_USAGE_FLUSH_EXPLICIT)
            sbuf->map.flush_explicit = TRUE;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return map;
}
static void
svga_buffer_flush_mapped_range( struct pipe_screen *screen,
                                struct pipe_buffer *buf,
                                unsigned offset, unsigned length)
{
   struct svga_buffer *sbuf = svga_buffer( buf );
   struct svga_screen *ss = svga_screen(screen);

   pipe_mutex_lock(ss->swc_mutex);
   assert(sbuf->map.writing);
   if(sbuf->map.writing) {
      assert(sbuf->map.flush_explicit);
      /* Software-only buffers have no hardware storage to queue DMA for */
      if(sbuf->hw.buf)
         svga_buffer_upload_queue(sbuf, offset, offset + length);
   }
   pipe_mutex_unlock(ss->swc_mutex);
}
static void
svga_buffer_unmap( struct pipe_screen *screen,
                   struct pipe_buffer *buf)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if(sbuf->map.count)
      --sbuf->map.count;

   if(sbuf->hw.buf)
      sws->buffer_unmap(sws, sbuf->hw.buf);

   if(sbuf->map.writing) {
      if(!sbuf->map.flush_explicit) {
         /* No mapped range was flushed -- flush the whole buffer */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         if(sbuf->hw.buf)
            svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);
      }

      sbuf->map.writing = FALSE;
      sbuf->map.flush_explicit = FALSE;
   }

   pipe_mutex_unlock(ss->swc_mutex);
}
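

/*
 * Sketch of how a state tracker typically drives the three hooks above via
 * the explicit-flush path (illustration only; buf, screen and data are
 * placeholders, not names used by this driver):
 *
 *    uint8_t *map = screen->buffer_map_range(screen, buf, 0, 64,
 *                                            PIPE_BUFFER_USAGE_CPU_WRITE |
 *                                            PIPE_BUFFER_USAGE_FLUSH_EXPLICIT);
 *    if (map) {
 *       memcpy(map, data, 64);
 *       screen->buffer_flush_mapped_range(screen, buf, 0, 64);
 *       screen->buffer_unmap(screen, buf);
 *    }
 *
 * Only the explicitly flushed range is queued for DMA; without
 * PIPE_BUFFER_USAGE_FLUSH_EXPLICIT the whole buffer is queued at unmap time.
 */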
static void
svga_buffer_destroy( struct pipe_buffer *buf )
{
   struct svga_screen *ss = svga_screen(buf->screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->needs_flush);

   if(sbuf->handle)
      svga_buffer_destroy_host_surface(ss, sbuf);

   if(sbuf->hw.buf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if(sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}
static struct pipe_buffer *
svga_buffer_create(struct pipe_screen *screen,
                   unsigned alignment,
                   unsigned usage,
                   unsigned size)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      return NULL;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = alignment;
   sbuf->base.usage = usage;
   sbuf->base.size = size;

   if(svga_buffer_needs_hw_storage(usage)) {
      if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK) {
         FREE(sbuf);
         return NULL;
      }
   }
   else {
      if(alignment < sizeof(void*))
         alignment = sizeof(void*);

      usage |= PIPE_BUFFER_USAGE_CPU_READ_WRITE;

      sbuf->swbuf = align_malloc(size, alignment);
      if(!sbuf->swbuf) {
         FREE(sbuf);
         return NULL;
      }
   }

   return &sbuf->base;
}
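

/*
 * Note the two storage paths above: buffers that may be bound as vertex or
 * index buffers get a host surface up front, while everything else starts
 * out as a plain align_malloc'ed shadow buffer (sbuf->swbuf) and is only
 * promoted to hardware storage later by svga_buffer_update_hw() when a
 * context actually needs it.
 */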
static struct pipe_buffer *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      return NULL;

   sbuf->magic = SVGA_BUFFER_MAGIC;

   /* Wrap the user-provided memory directly; it is never freed by us */
   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   pipe_reference_init(&sbuf->base.reference, 1);
   sbuf->base.screen = screen;
   sbuf->base.alignment = 1;
   sbuf->base.usage = 0;
   sbuf->base.size = bytes;

   return &sbuf->base;
}
void
svga_screen_init_buffer_functions(struct pipe_screen *screen)
{
   screen->buffer_create = svga_buffer_create;
   screen->user_buffer_create = svga_user_buffer_create;
   screen->buffer_map_range = svga_buffer_map_range;
   screen->buffer_flush_mapped_range = svga_buffer_flush_mapped_range;
   screen->buffer_unmap = svga_buffer_unmap;
   screen->buffer_destroy = svga_buffer_destroy;
}
/**
 * Copy the contents of the user buffer / malloc buffer to a hardware buffer.
 */
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if(!sbuf->hw.buf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      assert(ret == PIPE_OK);
      if(ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);

      map = ss->sws->buffer_map(ss->sws, sbuf->hw.buf, PIPE_BUFFER_USAGE_CPU_WRITE);
      assert(map);
      if(!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      memcpy(map, sbuf->swbuf, sbuf->base.size);
      ss->sws->buffer_unmap(ss->sws, sbuf->hw.buf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if(!sbuf->map.count) {
         if(sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      svga_buffer_upload_queue(sbuf, 0, sbuf->base.size);

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_buffer *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);

   if(!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;
   }

   ret = svga_buffer_update_hw(ss, sbuf);
   if(ret != PIPE_OK)
      return NULL;

   if(!sbuf->needs_flush && sbuf->hw.num_ranges) {
      /* Queue the buffer for flushing */
      ret = svga_buffer_upload_command(svga, sbuf);
      if(ret != PIPE_OK)
         /* XXX: Should probably have a richer return value */
         return NULL;

      assert(sbuf->hw.svga == svga);

      sbuf->needs_flush = TRUE;
      assert(!sbuf->head.prev && !sbuf->head.next);
      LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
   }

   return sbuf->handle;
}
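

/*
 * Note: svga_buffer_handle is the point where a buffer actually becomes tied
 * to a context's command stream -- it (re)creates the host surface and
 * hardware storage if necessary, reserves the DMA upload command, and places
 * the buffer on svga->dirty_buffers so that svga_context_flush_buffers() can
 * patch the copy boxes at flush time.
 */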
struct pipe_buffer *
svga_screen_buffer_wrap_surface(struct pipe_screen *screen,
                                enum SVGA3dSurfaceFormat format,
                                struct svga_winsys_surface *srf)
{
   struct pipe_buffer *buf;
   struct svga_buffer *sbuf;
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);

   buf = svga_buffer_create(screen, 0, SVGA_BUFFER_USAGE_WRAPPED, 0);
   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   /*
    * We are not the creator of this surface and therefore we must not
    * cache it for reuse. Set the cachable flag to zero in the key to
    * prevent this.
    */
   sbuf->key.format = format;
   sbuf->key.cachable = 0;
   sws->surface_reference(sws, &sbuf->handle, srf);

   return buf;
}
struct svga_winsys_surface *
svga_screen_buffer_get_winsys_surface(struct pipe_buffer *buffer)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(buffer->screen);
   struct svga_winsys_surface *vsurf = NULL;

   assert(svga_buffer(buffer)->key.cachable == 0);
   svga_buffer(buffer)->key.cachable = 0;
   sws->surface_reference(sws, &vsurf, svga_buffer(buffer)->handle);
   return vsurf;
}
void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;

   while(curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->base.reference.count) != 0);
      assert(sbuf->needs_flush);

      /* This unlinks sbuf from the dirty list, so advance with the saved
       * next pointer.
       */
      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}