{
struct svga_winsys_screen *sws = ss->sws;
- assert(!sbuf->map.count);
+ assert(sbuf->map.count == 0);
assert(sbuf->hwbuf);
if (sbuf->hwbuf) {
sws->buffer_destroy(sws, sbuf->hwbuf);
assert(!sbuf->user);
if (!sbuf->handle) {
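+         /* receives the 'validated' flag written by svga_screen_surface_create() */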
+ boolean validated;
+
sbuf->key.flags = 0;
sbuf->key.format = SVGA3D_BUFFER;
if (sbuf->bind_flags & PIPE_BIND_SAMPLER_VIEW)
sbuf->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
+ if (!sbuf->bind_flags && sbuf->b.b.usage == PIPE_USAGE_STAGING) {
+ /* This surface is to be used with the
+ * SVGA3D_CMD_DX_TRANSFER_FROM_BUFFER command, and no other
+ * bind flags are allowed to be set for this surface.
+ */
+ sbuf->key.flags = SVGA3D_SURFACE_TRANSFER_FROM_BUFFER;
+ }
+
sbuf->key.size.width = sbuf->b.b.width0;
sbuf->key.size.height = 1;
sbuf->key.size.depth = 1;
sbuf->key.cachable = 1;
sbuf->key.arraySize = 1;
- SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->b.b.width0);
+ SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n",
+ sbuf->b.b.width0);
sbuf->handle = svga_screen_surface_create(ss, sbuf->b.b.bind,
- sbuf->b.b.usage, &sbuf->key);
+ sbuf->b.b.usage,
+ &validated, &sbuf->key);
if (!sbuf->handle)
return PIPE_ERROR_OUT_OF_MEMORY;
sbuf->dma.flags.discard = TRUE;
- SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->b.b.width0);
+ SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n",
+ sbuf->handle, sbuf->b.b.width0);
}
return PIPE_OK;
struct svga_buffer *sbuf)
{
if (sbuf->handle) {
- SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->b.b.width0);
+ SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
+ sbuf->handle, sbuf->b.b.width0);
svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
}
}
struct svga_winsys_context *swc = svga->swc;
SVGA3dCmdUpdateGBImage *update_cmd;
struct svga_3d_update_gb_image *whole_update_cmd = NULL;
- uint32 numBoxes = sbuf->map.num_ranges;
+ const uint32 numBoxes = sbuf->map.num_ranges;
struct pipe_resource *dummy;
unsigned i;
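+   /* This function is only used with guest-backed objects; the
+    * host-backed equivalent is svga_buffer_upload_hb_command() below.
+    */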
+ assert(svga_have_gb_objects(svga));
assert(numBoxes);
assert(sbuf->dma.updates == NULL);
swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
sbuf->dma.flags.discard = FALSE;
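+   /* count this resource update for the HUD */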
+ svga->hud.num_resource_updates++;
+
return PIPE_OK;
}
/**
- * Variant of SVGA3D_BufferDMA which leaves the copy box temporarily in blank.
+ * Issue DMA commands to transfer guest memory to the host.
+ * Note that the memory segments (offset, size) will be patched into
+ * the command later, in svga_buffer_upload_flush().
*/
static enum pipe_error
-svga_buffer_upload_command(struct svga_context *svga,
- struct svga_buffer *sbuf)
+svga_buffer_upload_hb_command(struct svga_context *svga,
+ struct svga_buffer *sbuf)
{
struct svga_winsys_context *swc = svga->swc;
struct svga_winsys_buffer *guest = sbuf->hwbuf;
struct svga_winsys_surface *host = sbuf->handle;
- SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
+ const SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
SVGA3dCmdSurfaceDMA *cmd;
- uint32 numBoxes = sbuf->map.num_ranges;
+ const uint32 numBoxes = sbuf->map.num_ranges;
SVGA3dCopyBox *boxes;
SVGA3dCmdSurfaceDMASuffix *pSuffix;
unsigned region_flags;
unsigned surface_flags;
struct pipe_resource *dummy;
- if (svga_have_gb_objects(svga))
- return svga_buffer_upload_gb_command(svga, sbuf);
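+   /* guest-backed objects are handled by svga_buffer_upload_gb_command() */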
+ assert(!svga_have_gb_objects(svga));
if (transfer == SVGA3D_WRITE_HOST_VRAM) {
region_flags = SVGA_RELOC_READ;
swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
sbuf->dma.flags.discard = FALSE;
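+   /* count this buffer upload for the HUD */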
+ svga->hud.num_buffer_uploads++;
+
return PIPE_OK;
}
+/**
+ * Issue commands to transfer guest memory to the host, using the
+ * guest-backed (UpdateGBImage) path when available and the host-backed
+ * (SurfaceDMA) path otherwise.
+ */
+static enum pipe_error
+svga_buffer_upload_command(struct svga_context *svga, struct svga_buffer *sbuf)
+{
+ if (svga_have_gb_objects(svga)) {
+ return svga_buffer_upload_gb_command(svga, sbuf);
+ } else {
+ return svga_buffer_upload_hb_command(svga, sbuf);
+ }
+}
+
+
/**
* Patch up the upload DMA command reserved by svga_buffer_upload_command
* with the final ranges.
assert(box->x + box->w <= sbuf->b.b.width0);
svga->hud.num_bytes_uploaded += box->w;
+ svga->hud.num_buffer_uploads++;
}
}
else {
assert(box->x + box->w <= sbuf->b.b.width0);
svga->hud.num_bytes_uploaded += box->w;
+ svga->hud.num_buffer_uploads++;
}
}
* We try to lump as many contiguous DMA transfers together as possible.
*/
void
-svga_buffer_add_range(struct svga_buffer *sbuf,
- unsigned start,
- unsigned end)
+svga_buffer_add_range(struct svga_buffer *sbuf, unsigned start, unsigned end)
{
unsigned i;
unsigned nearest_range;
/*
* Try to grow one of the ranges.
*/
-
for (i = 0; i < sbuf->map.num_ranges; ++i) {
- int left_dist;
- int right_dist;
- int dist;
-
- left_dist = start - sbuf->map.ranges[i].end;
- right_dist = sbuf->map.ranges[i].start - end;
- dist = MAX2(left_dist, right_dist);
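+      /* A non-positive dist means the new [start, end) range overlaps
+       * or is contiguous with range i.
+       */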
+ const int left_dist = start - sbuf->map.ranges[i].end;
+ const int right_dist = sbuf->map.ranges[i].start - end;
+ const int dist = MAX2(left_dist, right_dist);
if (dist <= 0) {
/*
* anything. If the ranges overlap here it must surely be because
* PIPE_TRANSFER_UNSYNCHRONIZED was set.
*/
-
sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
return;
/*
* Discontiguous ranges -- keep track of the nearest range.
*/
-
if (dist < nearest_dist) {
nearest_range = i;
nearest_dist = dist;
assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
assert(nearest_range < sbuf->map.num_ranges);
- sbuf->map.ranges[nearest_range].start = MIN2(sbuf->map.ranges[nearest_range].start, start);
- sbuf->map.ranges[nearest_range].end = MAX2(sbuf->map.ranges[nearest_range].end, end);
+ sbuf->map.ranges[nearest_range].start =
+ MIN2(sbuf->map.ranges[nearest_range].start, start);
+ sbuf->map.ranges[nearest_range].end =
+ MAX2(sbuf->map.ranges[nearest_range].end, end);
}
}
enum pipe_error ret;
boolean retry;
void *map;
+ unsigned i;
assert(sbuf->swbuf);
if (!sbuf->swbuf)
return PIPE_ERROR;
- ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen),
- sbuf);
+ ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen), sbuf);
if (ret != PIPE_OK)
return ret;
return PIPE_ERROR;
}
- memcpy(map, sbuf->swbuf, sbuf->b.b.width0);
+   /* Copy only the dirty ranges from the malloc'd swbuf to the new
+    * hardware buffer.
+    */
+ for (i = 0; i < sbuf->map.num_ranges; i++) {
+ unsigned start = sbuf->map.ranges[i].start;
+ unsigned len = sbuf->map.ranges[i].end - start;
+ memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
+ }
+
svga_buffer_hw_storage_unmap(svga, sbuf);
/* This user/malloc buffer is now indistinguishable from a gpu buffer */
- assert(!sbuf->map.count);
- if (!sbuf->map.count) {
+ assert(sbuf->map.count == 0);
+ if (sbuf->map.count == 0) {
if (sbuf->user)
sbuf->user = FALSE;
else
SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);
for (i = 0; i < sbuf->map.num_ranges; ++i) {
- struct svga_buffer_range *range = &sbuf->map.ranges[i];
+ const struct svga_buffer_range *range = &sbuf->map.ranges[i];
unsigned offset = range->start;
unsigned size = range->end - range->start;
* if there are mapped ranges and the data is currently in a malloc'ed buffer.
*/
struct svga_winsys_surface *
-svga_buffer_handle(struct svga_context *svga,
- struct pipe_resource *buf)
+svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf)
{
struct pipe_screen *screen = svga->pipe.screen;
struct svga_screen *ss = svga_screen(screen);
if (sbuf->map.num_ranges) {
if (!sbuf->dma.pending) {
- /*
- * No pending DMA upload yet, so insert a DMA upload command now.
- */
+ /* No pending DMA/update commands yet. */
- /*
- * Migrate the data from swbuf -> hwbuf if necessary.
- */
+ /* Migrate the data from swbuf -> hwbuf if necessary */
ret = svga_buffer_update_hw(svga, sbuf);
if (ret == PIPE_OK) {
- /*
- * Queue a dma command.
- */
-
+ /* Emit DMA or UpdateGBImage commands */
ret = svga_buffer_upload_command(svga, sbuf);
if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
svga_context_flush(svga, NULL);
}
}
- assert(!sbuf->map.num_ranges || sbuf->dma.pending);
+ assert(sbuf->map.num_ranges == 0 || sbuf->dma.pending);
return sbuf->handle;
}
svga_context_flush_buffers(struct svga_context *svga)
{
struct list_head *curr, *next;
- struct svga_buffer *sbuf;
+
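+   /* profile this function; matched by the SVGA_STATS_TIME_POP below */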
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERSFLUSH);
curr = svga->dirty_buffers.next;
next = curr->next;
- while(curr != &svga->dirty_buffers) {
- sbuf = LIST_ENTRY(struct svga_buffer, curr, head);
+ while (curr != &svga->dirty_buffers) {
+ struct svga_buffer *sbuf = LIST_ENTRY(struct svga_buffer, curr, head);
assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
assert(sbuf->dma.pending);
curr = next;
next = curr->next;
}
+
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}