/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
28 #include "pipe/p_state.h"
29 #include "pipe/p_defines.h"
30 #include "util/u_inlines.h"
31 #include "os/os_thread.h"
32 #include "util/u_math.h"
33 #include "util/u_memory.h"
35 #include "svga_context.h"
36 #include "svga_screen.h"
37 #include "svga_resource_buffer.h"
38 #include "svga_resource_buffer_upload.h"
39 #include "svga_winsys.h"
40 #include "svga_debug.h"
/**
 * Allocate a winsys_buffer (ie. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create(struct svga_context *svga,
                          unsigned alignment,
                          unsigned usage,
                          unsigned size)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hwbuf);
   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}
/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   if (!sbuf->handle) {
      sbuf->key.flags = 0;
      sbuf->key.format = SVGA3D_BUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_VERTEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_INDEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n",
               sbuf->handle, sbuf->b.b.width0);
   }

   return PIPE_OK;
}
void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if (sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
               sbuf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}
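

/*
 * Note on the upload scheme implemented below: svga_buffer_upload_command
 * reserves an SVGA_3D_CMD_SURFACE_DMA command whose copy boxes are left
 * blank, and only records where they live (sbuf->dma.boxes).  The boxes are
 * filled in later by svga_buffer_upload_flush, once all dirty ranges noted
 * by svga_buffer_add_range are known, so several map/unmap cycles can share
 * a single DMA command.
 */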
/**
 * Variant of SVGA3D_BufferDMA which leaves the copy boxes temporarily blank.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   /* Remember where the copy boxes live; they are patched in at flush time. */
   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t *)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}
/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->hwbuf);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);
   assert(sbuf->dma.boxes);

   /*
    * Patch the DMA command with the final copy box.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->dma.boxes;
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

      boxes[i].x = sbuf->map.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->map.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
   sbuf->head.next = sbuf->head.prev = NULL;

   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}
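

/*
 * For example, two back-to-back writes covering bytes [0, 64) and [64, 128)
 * of the same buffer are merged below into a single [0, 128) range (one copy
 * box), whereas writes that are far apart stay as separate ranges until
 * SVGA_BUFFER_MAX_RANGES of them accumulate, after which the nearest existing
 * range is simply extended.
 */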
/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf,
                      unsigned start,
                      unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   }
   else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      int left_dist;
      int right_dist;
      int dist;

      left_dist = start - sbuf->map.ranges[i].end;
      right_dist = sbuf->map.ranges[i].start - end;
      dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
          */

         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end   = MAX2(sbuf->map.ranges[i].end,   end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */

         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */

   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */

      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   }
   else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */

      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start =
         MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end =
         MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}
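

/*
 * Note: svga_buffer_update_hw below migrates data from the malloc backing
 * store (sbuf->swbuf) into GMR storage (sbuf->hwbuf) under the screen's
 * swc_mutex, so that the buffer can then be used as the guest side of a
 * DMA transfer.
 */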
/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if (!sbuf->hwbuf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);
      if (!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      if (ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = ss->sws->buffer_map(ss->sws, sbuf->hwbuf, PIPE_TRANSFER_WRITE);
      assert(map);
      if (!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      memcpy(map, sbuf->swbuf, sbuf->b.b.width0);
      ss->sws->buffer_unmap(ss->sws, sbuf->hwbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if (!sbuf->map.count) {
         align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}
/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 */
static INLINE enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         void *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

         /* Allocate a staging GMR buffer, halving the size until it fits. */
         hwbuf = sws->buffer_create(sws, alignment, usage, size);
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_DISCARD_RANGE);
         assert(map);
         if (map) {
            /* Copy this piece from the matching offset within the malloc copy. */
            memcpy(map, (const uint8_t *)sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}
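

/*
 * svga_buffer_handle below ties the pieces together: it creates the host
 * surface on first use, and if the buffer has dirty ranges it either queues
 * a single deferred DMA command (svga_buffer_upload_command) and puts the
 * buffer on the context's dirty_buffers list, or, when the buffer does not
 * fit in the GMR aperture, falls back to svga_buffer_upload_piecewise.
 */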
/* Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_resource *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);

   if (!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /*
          * No pending DMA upload yet, so insert a DMA upload command now.
          */

         /*
          * Migrate the data from swbuf -> hwbuf if necessary.
          */
         ret = svga_buffer_update_hw(ss, sbuf);
         if (ret == PIPE_OK) {
            /*
             * Queue a dma command.
             */

            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it in
             * smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is already a pending DMA. Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(!sbuf->map.num_ranges || sbuf->dma.pending);

   return sbuf->handle;
}
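

/*
 * Called when the context is flushed: every buffer still on
 * svga->dirty_buffers has a pending DMA command, which gets its copy boxes
 * patched in here before the command buffer is submitted.
 */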
void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}
void
svga_redefine_user_buffer(struct pipe_context *pipe,
                          struct pipe_resource *resource,
                          unsigned offset,
                          unsigned size)
{
   struct svga_buffer *sbuf = svga_buffer(resource);

   assert(!sbuf->dma.pending);
   assert(!sbuf->handle);
   assert(!sbuf->hwbuf);

   /* use the default action of simply resizing the user buffer's size */
   u_default_redefine_user_buffer(pipe, resource, offset, size);
}