/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_screen.h"
#include "svga_winsys.h"
/**
 * Describes a complete SVGA_3D_CMD_UPDATE_GB_IMAGE command
 */
struct svga_3d_update_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdUpdateGBImage body;
};

/**
 * Describes a complete SVGA_3D_CMD_INVALIDATE_GB_IMAGE command
 */
struct svga_3d_invalidate_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdInvalidateGBImage body;
};
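/*
 * Both structs mirror the exact wire layout of a command header followed by
 * its body, so sizeof(struct svga_3d_update_gb_image) is the per-box command
 * size used when computing FIFO reservations in svga_buffer_upload_gb_command()
 * below.
 */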
/**
 * Allocate a winsys_buffer (ie. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}
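/*
 * Illustrative use (hypothetical caller, not from this file): code that
 * needs a staging GMR can rely on the flush-and-retry above and only handle
 * a hard failure.  The alignment/usage values here are made up:
 *
 *    struct svga_winsys_buffer *hwbuf =
 *       svga_winsys_buffer_create(svga, 16, 0, size);
 *    if (!hwbuf)
 *       return PIPE_ERROR_OUT_OF_MEMORY;
 */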
/**
 * Destroy HW storage if separate from the host surface.
 * In the GB case, the HW storage is associated with the host surface
 * and is therefore a No-op.
 */
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(sbuf->map.count == 0);
   assert(sbuf->hwbuf);
   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}
/**
 * Allocate DMA'ble or Updatable storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if (ss->sws->have_gb_objects) {
      assert(sbuf->handle || !sbuf->dma.pending);
      return svga_buffer_create_host_surface(ss, sbuf);
   }
   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if (!sbuf->handle) {
      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if (sbuf->bind_flags & PIPE_BIND_VERTEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_VERTEX_BUFFER;
      }
      if (sbuf->bind_flags & PIPE_BIND_INDEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_INDEX_BUFFER;
      }
      if (sbuf->bind_flags & PIPE_BIND_CONSTANT_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_CONSTANT_BUFFER;

      if (sbuf->bind_flags & PIPE_BIND_STREAM_OUTPUT)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_STREAM_OUTPUT;

      if (sbuf->bind_flags & PIPE_BIND_SAMPLER_VIEW)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (!sbuf->bind_flags && sbuf->b.b.usage == PIPE_USAGE_STAGING) {
         /* This surface is to be used with the
          * SVGA3D_CMD_DX_TRANSFER_FROM_BUFFER command, and no other
          * bind flags are allowed to be set for this surface.
          */
         sbuf->key.flags = SVGA3D_SURFACE_TRANSFER_FROM_BUFFER;
      }

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;
      sbuf->key.arraySize = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n",
               sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, sbuf->b.b.bind,
                                                sbuf->b.b.usage, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n",
               sbuf->handle, sbuf->b.b.width0);
   }

   return PIPE_OK;
}
void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if (sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
               sbuf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}
/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush.
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *update_cmd;
   struct svga_3d_update_gb_image *whole_update_cmd = NULL;
   const uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned i;

   assert(svga_have_gb_objects(svga));
   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *invalidate_cmd;
      const unsigned total_commands_size =
         sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
      invalidate_cmd = SVGA3D_FIFOReserve(swc,
                                          SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                          total_commands_size, 1 + numBoxes);
      if (!invalidate_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      cicmd = container_of(invalidate_cmd, cicmd, body);
      cicmd->header.size = sizeof(*invalidate_cmd);
      swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL, sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      invalidate_cmd->image.face = 0;
      invalidate_cmd->image.mipmap = 0;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
      /* initialize the first UPDATE_GB_IMAGE command */
      whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      update_cmd = &whole_update_cmd->body;
   }
   else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      const unsigned total_commands_size =
         sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);

      update_cmd = SVGA3D_FIFOReserve(swc,
                                      SVGA_3D_CMD_UPDATE_GB_IMAGE,
                                      total_commands_size, numBoxes);
      if (!update_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   whole_update_cmd->header.size = sizeof(*update_cmd);
   swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
                           SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   update_cmd->image.face = 0;
   update_cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = whole_update_cmd;

   /*
    * Copy the face, mipmap, etc. info to all subsequent commands.
    * Also do the surface relocation for each subsequent command.
    */
   for (i = 1; i < numBoxes; ++i) {
      whole_update_cmd++;
      memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));

      swc->surface_relocation(swc, &whole_update_cmd->body.image.sid, NULL,
                              sbuf->handle,
                              SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   }

   /* Increment reference count */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_resource_updates++;

   return PIPE_OK;
}
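/*
 * For reference, the FIFO layout reserved above on the discard path is:
 *
 *    [INVALIDATE_GB_IMAGE header+body][UPDATE_GB_IMAGE header+body] * numBoxes
 *
 * hence total_commands_size adds numBoxes * sizeof(*whole_update_cmd) and the
 * command count passed to SVGA3D_FIFOReserve is 1 + numBoxes.  The box fields
 * of each UPDATE_GB_IMAGE are left unset here and patched later in
 * svga_buffer_upload_flush().
 */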
/**
 * Issue DMA commands to transfer guest memory to the host.
 * Note that the memory segments (offset, size) will be patched in
 * later in the svga_buffer_upload_flush() function.
 */
static enum pipe_error
svga_buffer_upload_hb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   const SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   const uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   assert(!svga_have_gb_objects(svga));

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_buffer_uploads++;

   return PIPE_OK;
}
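/*
 * For reference, the FIFO space reserved above is laid out as:
 *
 *    SVGA3dCmdSurfaceDMA | SVGA3dCopyBox * numBoxes | SVGA3dCmdSurfaceDMASuffix
 *
 * sbuf->dma.boxes points at the box array so svga_buffer_upload_flush() can
 * patch in the final ranges, and pSuffix is located with the same layout
 * arithmetic.
 */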
/**
 * Issue commands to transfer guest memory to the host.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga, struct svga_buffer *sbuf)
{
   if (svga_have_gb_objects(svga)) {
      return svga_buffer_upload_gb_command(svga, sbuf);
   }
   return svga_buffer_upload_hb_command(svga, sbuf);
}
/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      //debug_printf("no dma pending on buffer\n");
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);

   /*
    * Patch the DMA/update command with the final copy box.
    */
   if (svga_have_gb_objects(svga)) {
      struct svga_3d_update_gb_image *update = sbuf->dma.updates;
      assert(update);

      for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
         SVGA3dBox *box = &update->body.box;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }
   else {
      assert(sbuf->hwbuf);
      assert(sbuf->dma.boxes);
      SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

      for (i = 0; i < sbuf->map.num_ranges; ++i) {
         SVGA3dCopyBox *box = sbuf->dma.boxes + i;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;
         box->srcx = sbuf->map.ranges[i].start;
         box->srcy = 0;
         box->srcz = 0;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }

   /* Reset sbuf for next use/upload */

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);  /* remove from svga->dirty_buffers list */
   sbuf->head.next = sbuf->head.prev = NULL;

   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;
   sbuf->dma.updates = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}
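/*
 * Note: the boxes patched above only ever vary in x/w (y/z fixed at 0,
 * h/d fixed at 1) because buffer surfaces are created one-dimensional in
 * svga_buffer_create_host_surface() (height = depth = 1).
 */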
/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf, unsigned start, unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   }
   else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      const int left_dist = start - sbuf->map.ranges[i].end;
      const int right_dist = sbuf->map.ranges[i].start - end;
      const int dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
          */
         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */
         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */
   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */
      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   }
   else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */
      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start =
         MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end =
         MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}
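/*
 * Worked example (values illustrative): with ranges {[0,16)} already noted,
 * add_range(16, 32) gives left_dist = 16 - 16 = 0, so dist <= 0 and the
 * existing range is extended in place to [0,32); add_range(64, 80) is
 * discontiguous (dist > 0) and lands in a new slot instead.
 */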
/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static enum pipe_error
svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
{
   assert(!sbuf->user);
   if (!svga_buffer_has_hw_storage(sbuf)) {
      struct svga_screen *ss = svga_screen(sbuf->b.b.screen);
      enum pipe_error ret;
      boolean retry;
      void *map;
      unsigned i;

      assert(sbuf->swbuf);
      if (!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen), sbuf);
      if (ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
      assert(map);
      assert(!retry);
      if (!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      /* Copy data from malloc'd swbuf to the new hardware buffer */
      for (i = 0; i < sbuf->map.num_ranges; i++) {
         unsigned start = sbuf->map.ranges[i].start;
         unsigned len = sbuf->map.ranges[i].end - start;
         memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
      }

      svga_buffer_hw_storage_unmap(svga, sbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(sbuf->map.count == 0);
      if (sbuf->map.count == 0) {
         if (sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}
/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 * This function should never get called in the guest-backed case
 * since we always have a full-sized hardware storage backing the
 * host surface.
 */
static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);
   assert(!svga_have_gb_objects(svga));

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      const struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

         hwbuf = sws->buffer_create(sws, alignment, usage, size);
         while (!hwbuf) {
            /* Halve the chunk size until the allocation succeeds */
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_DISCARD_RANGE);
         assert(map);
         if (map) {
            memcpy(map, (const char *) sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}
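/*
 * Note on the fallback above: when a GMR chunk of the requested size cannot
 * be allocated, the chunk size is halved until allocation succeeds or drops
 * to zero, so one large dirty range may be streamed through several small
 * staging buffers.
 */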
/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 * This function will create the host surface, and in the GB case also the
 * hardware storage. In the non-GB case, the hardware storage will be created
 * if there are mapped ranges and the data is currently in a malloc'ed buffer.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->user);

   if (!sbuf->handle) {
      /* This call will set sbuf->handle */
      if (svga_have_gb_objects(svga)) {
         ret = svga_buffer_update_hw(svga, sbuf);
      }
      else {
         ret = svga_buffer_create_host_surface(ss, sbuf);
      }
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /* No pending DMA/update commands yet. */

         /* Migrate the data from swbuf -> hwbuf if necessary */
         ret = svga_buffer_update_hw(svga, sbuf);
         if (ret == PIPE_OK) {
            /* Emit DMA or UpdateGBImage commands */
            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it in
             * smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is a pending DMA already. Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(sbuf->map.num_ranges == 0 || sbuf->dma.pending);

   return sbuf->handle;
}
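/*
 * Illustrative use (hypothetical caller, not from this file): a command
 * emitter fetches the sid right before referencing the buffer in the FIFO,
 * which uploads any dirty ranges recorded by svga_buffer_add_range():
 *
 *    struct svga_winsys_surface *sid = svga_buffer_handle(svga, &sbuf->b.b);
 *    if (!sid)
 *       return PIPE_ERROR_OUT_OF_MEMORY;
 */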
void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERSFLUSH);

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      struct svga_buffer *sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }

   SVGA_STATS_TIME_POP(svga_sws(svga));
}