/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_cmd.h"
#include "svga_context.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_screen.h"
#include "svga_winsys.h"

/**
 * Describes a complete SVGA_3D_CMD_UPDATE_GB_IMAGE command
 */
struct svga_3d_update_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdUpdateGBImage body;
};

struct svga_3d_invalidate_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdInvalidateGBImage body;
};
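
/*
 * Layout sketch of what svga_buffer_upload_gb_command() below reserves in
 * the command buffer when the discard flag is set (illustrative only; the
 * exact packing is defined by the SVGA3D command headers):
 *
 *    SVGA3dCmdHeader + SVGA3dCmdInvalidateGBImage
 *    SVGA3dCmdHeader + SVGA3dCmdUpdateGBImage    <- box for range 0
 *    ...
 *    SVGA3dCmdHeader + SVGA3dCmdUpdateGBImage    <- box for range numBoxes-1
 *
 * The wrapper structs above let the code step through the header+body pairs
 * as a contiguous array and recover a pair from a body pointer via
 * container_of().
 */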

/**
 * Allocate a winsys_buffer (i.e. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing
 * to avoid recursion problems.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}
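
/*
 * Typical use (an illustrative sketch, not a call site in this file; the
 * alignment and usage values mirror svga_buffer_create_hw_storage() below):
 *
 *    struct svga_winsys_buffer *hwbuf =
 *       svga_winsys_buffer_create(svga, 16, 0, sbuf->b.b.width0);
 *    if (!hwbuf)
 *       return PIPE_ERROR_OUT_OF_MEMORY;
 */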

/**
 * Destroy HW storage if separate from the host surface.
 * In the GB case, the HW storage is associated with the host surface
 * and is therefore a no-op.
 */
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(sbuf->map.count == 0);

   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}

/**
 * Allocate DMA'ble or Updatable storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf,
                              unsigned bind_flags)
{
   if (ss->sws->have_gb_objects) {
      assert(sbuf->handle || !sbuf->dma.pending);
      return svga_buffer_create_host_surface(ss, sbuf, bind_flags);
   }

   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}

/**
 * Allocate graphics memory for vertex/index/constant/etc buffer (not
 * textures).
 */
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf,
                                unsigned bind_flags)
{
   if (!sbuf->handle) {
      boolean validated;

      sbuf->key.flags = 0;
      sbuf->key.format = SVGA3D_BUFFER;

      if (bind_flags & PIPE_BIND_VERTEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_VERTEX_BUFFER;
      }

      if (bind_flags & PIPE_BIND_INDEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_INDEX_BUFFER;
      }

      if (bind_flags & PIPE_BIND_CONSTANT_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_CONSTANT_BUFFER;

      if (bind_flags & PIPE_BIND_STREAM_OUTPUT)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_STREAM_OUTPUT;

      if (bind_flags & PIPE_BIND_SAMPLER_VIEW)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (!bind_flags && sbuf->b.b.usage == PIPE_USAGE_STAGING) {
         /* This surface is to be used with the
          * SVGA3D_CMD_DX_TRANSFER_FROM_BUFFER command, and no other
          * bind flags are allowed to be set for this surface.
          */
         sbuf->key.flags = SVGA3D_SURFACE_TRANSFER_FROM_BUFFER;
      }

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;
      sbuf->key.arraySize = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n",
               sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, bind_flags,
                                                sbuf->b.b.usage,
                                                &validated, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n",
               sbuf->handle, sbuf->b.b.width0);
   }

   return PIPE_OK;
}

void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if (sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
               sbuf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}

/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush.
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *update_cmd;
   struct svga_3d_update_gb_image *whole_update_cmd = NULL;
   const uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned i;

   assert(svga_have_gb_objects(svga));
   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *invalidate_cmd;
      const unsigned total_commands_size =
         sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
      invalidate_cmd = SVGA3D_FIFOReserve(swc,
                                          SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                          total_commands_size, 1 + numBoxes);
      if (!invalidate_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      cicmd = container_of(invalidate_cmd, cicmd, body);
      cicmd->header.size = sizeof(*invalidate_cmd);
      swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL, sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      invalidate_cmd->image.face = 0;
      invalidate_cmd->image.mipmap = 0;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
      /* initialize the first UPDATE_GB_IMAGE command */
      whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      update_cmd = &whole_update_cmd->body;
   }
   else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      const unsigned total_commands_size =
         sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);

      update_cmd = SVGA3D_FIFOReserve(swc,
                                      SVGA_3D_CMD_UPDATE_GB_IMAGE,
                                      total_commands_size, numBoxes);
      if (!update_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   whole_update_cmd->header.size = sizeof(*update_cmd);
   swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
                           SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   update_cmd->image.face = 0;
   update_cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = whole_update_cmd;

   /*
    * Copy the face, mipmap, etc. info to all subsequent commands.
    * Also do the surface relocation for each subsequent command.
    */
   for (i = 1; i < numBoxes; ++i) {
      whole_update_cmd++;
      memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));

      swc->surface_relocation(swc, &whole_update_cmd->body.image.sid, NULL,
                              sbuf->handle,
                              SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   }

   /* Increment reference count */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_resource_updates++;

   return PIPE_OK;
}

/**
 * Issue DMA commands to transfer guest memory to the host.
 * Note that the memory segments (offset, size) will be patched in
 * later in the svga_buffer_upload_flush() function.
 */
static enum pipe_error
svga_buffer_upload_hb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   const SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   const uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   assert(!svga_have_gb_objects(svga));

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_buffer_uploads++;

   return PIPE_OK;
}
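
/*
 * Rough layout of the command reserved above (illustrative; the sizes follow
 * directly from the sizeof arithmetic in svga_buffer_upload_hb_command()):
 *
 *    SVGA3dCmdSurfaceDMA          cmd
 *    SVGA3dCopyBox                boxes[numBoxes]   <- patched at flush time
 *    SVGA3dCmdSurfaceDMASuffix    pSuffix
 */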

/**
 * Issue commands to transfer guest memory to the host.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga, struct svga_buffer *sbuf)
{
   if (svga_have_gb_objects(svga)) {
      return svga_buffer_upload_gb_command(svga, sbuf);
   } else {
      return svga_buffer_upload_hb_command(svga, sbuf);
   }
}

/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      //debug_printf("no dma pending on buffer\n");
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);

   /*
    * Patch the DMA/update command with the final copy box.
    */
   if (svga_have_gb_objects(svga)) {
      struct svga_3d_update_gb_image *update = sbuf->dma.updates;

      assert(update);

      for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
         SVGA3dBox *box = &update->body.box;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }
   else {
      assert(sbuf->dma.boxes);
      SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

      for (i = 0; i < sbuf->map.num_ranges; ++i) {
         SVGA3dCopyBox *box = sbuf->dma.boxes + i;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;
         box->srcx = sbuf->map.ranges[i].start;
         box->srcy = 0;
         box->srcz = 0;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }

   /* Reset sbuf for next use/upload */

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);  /* remove from svga->dirty_buffers list */
   sbuf->head.next = sbuf->head.prev = NULL;

   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;
   sbuf->dma.updates = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}

/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf, unsigned start, unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      const int left_dist = start - sbuf->map.ranges[i].end;
      const int right_dist = sbuf->map.ranges[i].start - end;
      const int dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
          */
         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */
         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */
   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */
      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   }
   else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */
      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start =
         MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end =
         MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}

/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static enum pipe_error
svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf,
                      unsigned bind_flags)
{
   if (!svga_buffer_has_hw_storage(sbuf)) {
      struct svga_screen *ss = svga_screen(sbuf->b.b.screen);
      enum pipe_error ret;
      boolean retry;
      void *map;
      unsigned i;

      assert(sbuf->swbuf);

      ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen), sbuf,
                                          bind_flags);
      if (ret != PIPE_OK)
         return ret;

      mtx_lock(&ss->swc_mutex);
      map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
      assert(map);
      assert(!retry);
      if (!map) {
         mtx_unlock(&ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      /* Copy data from malloc'd swbuf to the new hardware buffer */
      for (i = 0; i < sbuf->map.num_ranges; i++) {
         unsigned start = sbuf->map.ranges[i].start;
         unsigned len = sbuf->map.ranges[i].end - start;
         memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
      }

      svga_buffer_hw_storage_unmap(svga, sbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(sbuf->map.count == 0);
      if (sbuf->map.count == 0) {
         align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      mtx_unlock(&ss->swc_mutex);
   }

   return PIPE_OK;
}

/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 * This function should never get called in the guest-backed case
 * since we always have a full-sized hardware storage backing the
 * host surface.
 */
static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);
   assert(!svga_have_gb_objects(svga));

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      const struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         void *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

         hwbuf = sws->buffer_create(sws, alignment, usage, size);
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_DISCARD_RANGE);
         assert(map);
         if (map) {
            memcpy(map, (const char *) sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}
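
/*
 * Example of the back-off above (illustrative numbers): if a 64 MB dirty
 * range cannot be placed in the GMR aperture, buffer_create() is retried
 * with 32 MB, 16 MB, ... pieces, and each successful piece is DMA'd with
 * its own guest buffer before moving on to the next offset; the function
 * only fails once the piece size reaches zero.
 */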

/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 * This function will create the host surface, and in the GB case also the
 * hardware storage. In the non-GB case, the hardware storage will be created
 * if there are mapped ranges and the data is currently in a malloc'ed buffer.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   if (!sbuf->handle) {
      /* This call will set sbuf->handle */
      if (svga_have_gb_objects(svga)) {
         ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
      } else {
         ret = svga_buffer_create_host_surface(ss, sbuf, sbuf->bind_flags);
      }
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /* No pending DMA/update commands yet. */

         /* Migrate the data from swbuf -> hwbuf if necessary */
         ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
         if (ret == PIPE_OK) {
            /* Emit DMA or UpdateGBImage commands */
            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it in
             * smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is already a pending DMA. Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(sbuf->map.num_ranges == 0 || sbuf->dma.pending);

   return sbuf->handle;
}
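
/*
 * Illustrative call pattern (a sketch, not an actual call site in this
 * file): command-emission code obtains the handle and then emits a surface
 * relocation against it, e.g.
 *
 *    struct svga_winsys_surface *handle = svga_buffer_handle(svga, resource);
 *    if (handle)
 *       swc->surface_relocation(swc, &cmd->sid, NULL, handle, SVGA_RELOC_READ);
 */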

void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERSFLUSH);

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      struct svga_buffer *sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      /* The flush removed sbuf from the dirty list, so advance with the
       * pointer saved before the flush.
       */
      curr = next;
      next = curr->next;
   }

   SVGA_STATS_TIME_POP(svga_sws(svga));
}