/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_winsys.h"
#include "svga_debug.h"

#define MAX_DMA_SIZE (4 * 1024 * 1024)

/**
 * Allocate a winsys_buffer (ie. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing
 * to avoid recursion problems.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* XXX this shouldn't be a hard-coded number; it should be queried
    * somehow.
    */
   if (size > MAX_DMA_SIZE) {
      return NULL;
   }

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing screen to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}
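
/*
 * Usage sketch (illustrative only, not part of this file's call graph): a
 * caller that needs GMR-backed staging memory would typically do
 *
 *    struct svga_winsys_buffer *staging;
 *    staging = svga_winsys_buffer_create(svga, 16, 0, bytes);
 *    if (!staging)
 *       return PIPE_ERROR_OUT_OF_MEMORY;   (aperture exhausted even after a flush)
 *
 * where the alignment (16) matches svga_buffer_create_hw_storage() below,
 * the usage (0) matches svga_buffer_upload_piecewise(), and `bytes` is a
 * hypothetical caller-chosen allocation size.
 */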

void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);

   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}

/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}

enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   if (!sbuf->handle) {
      sbuf->key.flags = 0;
      sbuf->key.format = SVGA3D_BUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_VERTEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_INDEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n",
               sbuf->handle, sbuf->b.b.width0);
   }

   return PIPE_OK;
}

void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if (sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
               sbuf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}

/**
 * Variant of SVGA3D_BufferDMA which leaves the copy boxes temporarily blank.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t *)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}
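
/*
 * Layout of the command reserved above (a sketch for reference; the sizes
 * follow directly from the SVGA3D_FIFOReserve() call and the pSuffix pointer
 * arithmetic in svga_buffer_upload_command()):
 *
 *    +---------------------------+  <- cmd
 *    | SVGA3dCmdSurfaceDMA       |
 *    +---------------------------+  <- (SVGA3dCopyBox *)&cmd[1]
 *    | SVGA3dCopyBox [numBoxes]  |     (left blank here, patched up later by
 *    +---------------------------+      svga_buffer_upload_flush())
 *    | SVGA3dCmdSurfaceDMASuffix |  <- pSuffix
 *    +---------------------------+
 */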

/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
static void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);
   assert(sbuf->dma.boxes);

   /*
    * Patch the DMA command with the final copy box.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->dma.boxes;
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

      boxes[i].x = sbuf->map.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->map.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
   sbuf->head.next = sbuf->head.prev = NULL;

   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}
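
/*
 * Note on the copy boxes above: buffer surfaces are created as 1-D
 * SVGA3D_BUFFER surfaces (width = width0, height = depth = 1 in
 * svga_buffer_create_host_surface), so x/w/srcx are byte offsets and byte
 * counts, and the remaining box fields stay at their trivial values.
 */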

/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf,
                      unsigned start,
                      unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      int left_dist;
      int right_dist;
      int dist;

      left_dist = start - sbuf->map.ranges[i].end;
      right_dist = sbuf->map.ranges[i].start - end;
      dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
          */
         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end   = MAX2(sbuf->map.ranges[i].end,   end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */
         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */
   if (sbuf->dma.pending)
      svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */
      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */
      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start =
         MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end =
         MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}
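
/*
 * Worked example of the coalescing logic above (illustrative values only):
 * with an existing range [64, 128), svga_buffer_add_range(sbuf, 128, 192)
 * gives left_dist = 128 - 128 = 0, so dist <= 0 and the range is simply
 * extended to [64, 192) without consuming a new slot.  A far-away write such
 * as svga_buffer_add_range(sbuf, 1024, 1056) has dist > 0, so it either
 * occupies a new slot or, once all SVGA_BUFFER_MAX_RANGES slots are in use,
 * stretches the nearest existing range instead.
 */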

/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   if (!sbuf->hwbuf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      if (ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = ss->sws->buffer_map(ss->sws, sbuf->hwbuf, PIPE_TRANSFER_WRITE);
      assert(map);
      if (!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      memcpy(map, sbuf->swbuf, sbuf->b.b.width0);
      ss->sws->buffer_unmap(ss->sws, sbuf->hwbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if (!sbuf->map.count) {
         if (sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}

/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 */
static INLINE enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

         hwbuf = sws->buffer_create(sws, alignment, usage, size);
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_DISCARD);
         assert(map);
         if (map) {
            /* Copy from the matching offset within the malloc'ed copy */
            memcpy(map, (uint8_t *)sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}

/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_resource *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);

   if (!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /*
          * No pending DMA upload yet, so insert a DMA upload command now.
          */

         /*
          * Migrate the data from swbuf -> hwbuf if necessary.
          */
         ret = svga_buffer_update_hw(ss, sbuf);
         if (ret == PIPE_OK) {
            /*
             * Queue a dma command.
             */
            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it in
             * smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is a pending DMA already. Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(!sbuf->map.num_ranges || sbuf->dma.pending);

   return sbuf->handle;
}
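
/*
 * Summary of the upload flow driven by svga_buffer_handle(): dirty ranges
 * noted by svga_buffer_add_range() are turned into a single SURFACE_DMA
 * command by svga_buffer_upload_command(), the buffer is queued on
 * svga->dirty_buffers, and the copy boxes are finally filled in by
 * svga_buffer_upload_flush() -- either from svga_context_flush_buffers()
 * below or when a new, non-mergeable range forces an early patch-up.
 */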

void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}

void
svga_redefine_user_buffer(struct pipe_context *pipe,
                          struct pipe_resource *resource,
                          unsigned offset,
                          unsigned size)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_context *svga = svga_context(pipe);
   struct svga_buffer *sbuf = svga_buffer(resource);

   assert(!sbuf->dma.pending);
   assert(!sbuf->handle);
   assert(!sbuf->hwbuf);

   /*
    * Release any uploaded user buffer.
    *
    * TODO: As an optimization, we could try to update the uploaded buffer
    * instead.
    */
   pipe_resource_reference(&sbuf->uploaded.buffer, NULL);

   pipe_mutex_lock(ss->swc_mutex);

   sbuf->key.size.width = sbuf->b.b.width0 = offset + size;

   pipe_mutex_unlock(ss->swc_mutex);

   svga->curr.any_user_vertex_buffers = TRUE;
   svga->dirty |= SVGA_NEW_VBUFFER | SVGA_NEW_VELEMENT;
}