*
**********************************************************/
-#include "svga_cmd.h"
+#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
-#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"
+#include "svga_cmd.h"
#include "svga_context.h"
-#include "svga_screen.h"
+#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
+#include "svga_screen.h"
#include "svga_winsys.h"
-#include "svga_debug.h"
+/**
+ * Describes a complete SVGA_3D_CMD_UPDATE_GB_IMAGE command
+ */
+struct svga_3d_update_gb_image {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+};
-#define MAX_DMA_SIZE (4 * 1024 * 1024)
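+/**
+ * Describes a complete SVGA_3D_CMD_INVALIDATE_GB_IMAGE command
+ */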
+struct svga_3d_invalidate_gb_image {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdInvalidateGBImage body;
+};
/**
 * Allocate a winsys_buffer (ie. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
- unsigned alignment,
+ unsigned alignment,
unsigned usage,
unsigned size )
{
struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
struct svga_winsys_screen *sws = svgascreen->sws;
struct svga_winsys_buffer *buf;
-
- /* XXX this shouldn't be a hard-coded number; it should be queried
- * somehow.
- */
- if (size > MAX_DMA_SIZE) {
- return NULL;
- }
/* Just try */
buf = sws->buffer_create(sws, alignment, usage, size);
- if(!buf) {
+ if (!buf) {
-      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing screen to find %d bytes GMR\n",
-              size);
-
+      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
+               size);
/* Try flushing all pending DMAs */
svga_context_flush(svga, NULL);
buf = sws->buffer_create(sws, alignment, usage, size);
}
-
+
return buf;
}
+/**
+ * Destroy HW storage if separate from the host surface.
+ * In the GB case, the HW storage is associated with the host surface,
+ * so this function is a no-op there.
+ */
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
struct svga_winsys_screen *sws = ss->sws;
- assert(!sbuf->map.count);
+ assert(sbuf->map.count == 0);
assert(sbuf->hwbuf);
- if(sbuf->hwbuf) {
+ if (sbuf->hwbuf) {
sws->buffer_destroy(sws, sbuf->hwbuf);
sbuf->hwbuf = NULL;
}
/**
- * Allocate DMA'ble storage for the buffer.
- *
+ * Allocate DMA'ble or updatable storage for the buffer.
+ *
* Called before mapping a buffer.
*/
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
- struct svga_buffer *sbuf)
+ struct svga_buffer *sbuf,
+ unsigned bind_flags)
{
assert(!sbuf->user);
- if(!sbuf->hwbuf) {
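+   /* In the guest-backed case the host surface itself provides the
+    * hardware storage, so create (or reuse) the host surface instead
+    * of a separate DMA buffer.
+    */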
+ if (ss->sws->have_gb_objects) {
+ assert(sbuf->handle || !sbuf->dma.pending);
+ return svga_buffer_create_host_surface(ss, sbuf, bind_flags);
+ }
+ if (!sbuf->hwbuf) {
struct svga_winsys_screen *sws = ss->sws;
unsigned alignment = 16;
unsigned usage = 0;
unsigned size = sbuf->b.b.width0;
-
+
sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
- if(!sbuf->hwbuf)
+ if (!sbuf->hwbuf)
return PIPE_ERROR_OUT_OF_MEMORY;
-
+
assert(!sbuf->dma.pending);
}
-
+
return PIPE_OK;
}
-
+/**
+ * Allocate graphics memory for a vertex/index/constant/etc. buffer (not
+ * a texture).
+ */
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
- struct svga_buffer *sbuf)
+ struct svga_buffer *sbuf,
+ unsigned bind_flags)
{
- if(!sbuf->handle) {
+ enum pipe_error ret = PIPE_OK;
+
+ assert(!sbuf->user);
+
+ if (!sbuf->handle) {
+ boolean validated;
+
sbuf->key.flags = 0;
-
+
sbuf->key.format = SVGA3D_BUFFER;
- if(sbuf->b.b.bind & PIPE_BIND_VERTEX_BUFFER)
+ if (bind_flags & PIPE_BIND_VERTEX_BUFFER) {
sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
- if(sbuf->b.b.bind & PIPE_BIND_INDEX_BUFFER)
+ sbuf->key.flags |= SVGA3D_SURFACE_BIND_VERTEX_BUFFER;
+ }
+ if (bind_flags & PIPE_BIND_INDEX_BUFFER) {
sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;
-
+ sbuf->key.flags |= SVGA3D_SURFACE_BIND_INDEX_BUFFER;
+ }
+ if (bind_flags & PIPE_BIND_CONSTANT_BUFFER)
+ sbuf->key.flags |= SVGA3D_SURFACE_BIND_CONSTANT_BUFFER;
+
+ if (bind_flags & PIPE_BIND_STREAM_OUTPUT)
+ sbuf->key.flags |= SVGA3D_SURFACE_BIND_STREAM_OUTPUT;
+
+ if (bind_flags & PIPE_BIND_SAMPLER_VIEW)
+ sbuf->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
+
+ if (!bind_flags && sbuf->b.b.usage == PIPE_USAGE_STAGING) {
+ /* This surface is to be used with the
+ * SVGA3D_CMD_DX_TRANSFER_FROM_BUFFER command, and no other
+ * bind flags are allowed to be set for this surface.
+ */
+ sbuf->key.flags = SVGA3D_SURFACE_TRANSFER_FROM_BUFFER;
+ }
+
sbuf->key.size.width = sbuf->b.b.width0;
sbuf->key.size.height = 1;
sbuf->key.size.depth = 1;
-
+
sbuf->key.numFaces = 1;
sbuf->key.numMipLevels = 1;
sbuf->key.cachable = 1;
-
- SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->b.b.width0);
+ sbuf->key.arraySize = 1;
- sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
- if(!sbuf->handle)
+ SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n",
+ sbuf->b.b.width0);
+
+ sbuf->handle = svga_screen_surface_create(ss, bind_flags,
+ sbuf->b.b.usage,
+ &validated, &sbuf->key);
+ if (!sbuf->handle)
return PIPE_ERROR_OUT_OF_MEMORY;
-
+
/* Always set the discard flag on the first time the buffer is written
* as svga_screen_surface_create might have passed a recycled host
* buffer.
*/
sbuf->dma.flags.discard = TRUE;
- SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->b.b.width0);
+ SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n",
+ sbuf->handle, sbuf->b.b.width0);
+
+ /* Add the new surface to the buffer surface list */
+ ret = svga_buffer_add_host_surface(sbuf, sbuf->handle, &sbuf->key,
+ bind_flags);
+ }
+
+ return ret;
+}
+
+
+/**
+ * Recreates a host surface with the new bind flags.
+ */
+enum pipe_error
+svga_buffer_recreate_host_surface(struct svga_context *svga,
+ struct svga_buffer *sbuf,
+ unsigned bind_flags)
+{
+ enum pipe_error ret = PIPE_OK;
+ struct svga_winsys_surface *old_handle = sbuf->handle;
+
+ assert(sbuf->bind_flags != bind_flags);
+ assert(old_handle);
+
+ sbuf->handle = NULL;
+
+ /* Create a new resource with the requested bind_flags */
+ ret = svga_buffer_create_host_surface(svga_screen(svga->pipe.screen),
+ sbuf, bind_flags);
+ if (ret == PIPE_OK) {
+ /* Copy the surface data */
+ assert(sbuf->handle);
+ ret = SVGA3D_vgpu10_BufferCopy(svga->swc, old_handle, sbuf->handle,
+ 0, 0, sbuf->b.b.width0);
+ if (ret != PIPE_OK) {
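+         /* The copy can fail if the command buffer is full; flush to
+          * make room and retry the copy once.
+          */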
+ svga_context_flush(svga, NULL);
+ ret = SVGA3D_vgpu10_BufferCopy(svga->swc, old_handle, sbuf->handle,
+ 0, 0, sbuf->b.b.width0);
+ assert(ret == PIPE_OK);
+ }
+ }
+
+ /* Set the new bind flags for this buffer resource */
+ sbuf->bind_flags = bind_flags;
+
+ return ret;
+}
+
+
+/**
+ * Returns TRUE if the existing surface bind flags are compatible with the
+ * requested bind flags.
+ */
+static boolean
+compatible_bind_flags(unsigned bind_flags,
+ unsigned tobind_flags)
+{
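+   /* A constant buffer surface cannot carry any other bind flags, so a
+    * combination involving PIPE_BIND_CONSTANT_BUFFER is only compatible
+    * when the existing flags already include everything requested.
+    */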
+ if ((bind_flags & tobind_flags) == tobind_flags)
+ return TRUE;
+ else if ((bind_flags|tobind_flags) & PIPE_BIND_CONSTANT_BUFFER)
+ return FALSE;
+ else
+ return TRUE;
+}
+
+
+/**
+ * Returns a buffer surface from the surface list that has the requested
+ * bind flags, or whose existing bind flags can be promoted to include
+ * the new bind flags.
+ */
+static struct svga_buffer_surface *
+svga_buffer_get_host_surface(struct svga_buffer *sbuf,
+ unsigned bind_flags)
+{
+ struct svga_buffer_surface *bufsurf;
+
+ LIST_FOR_EACH_ENTRY(bufsurf, &sbuf->surfaces, list) {
+ if (compatible_bind_flags(bufsurf->bind_flags, bind_flags))
+ return bufsurf;
}
-
+ return NULL;
+}
+
+
+/**
+ * Adds the host surface to the buffer surface list.
+ */
+enum pipe_error
+svga_buffer_add_host_surface(struct svga_buffer *sbuf,
+ struct svga_winsys_surface *handle,
+ struct svga_host_surface_cache_key *key,
+ unsigned bind_flags)
+{
+ struct svga_buffer_surface *bufsurf;
+
+ bufsurf = CALLOC_STRUCT(svga_buffer_surface);
+ if (!bufsurf)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ bufsurf->bind_flags = bind_flags;
+ bufsurf->handle = handle;
+ bufsurf->key = *key;
+
+ /* add the surface to the surface list */
+ LIST_ADD(&bufsurf->list, &sbuf->surfaces);
+
return PIPE_OK;
-}
+}
+
+
+/**
+ * Start using the specified surface for this buffer resource.
+ */
+void
+svga_buffer_bind_host_surface(struct svga_context *svga,
+ struct svga_buffer *sbuf,
+ struct svga_buffer_surface *bufsurf)
+{
+ enum pipe_error ret;
+
+ /* Update the to-bind surface */
+ assert(bufsurf->handle);
+ assert(sbuf->handle);
+
+ /* If we are switching from stream output to other buffer,
+ * make sure to copy the buffer content.
+ */
+ if (sbuf->bind_flags & PIPE_BIND_STREAM_OUTPUT) {
+ ret = SVGA3D_vgpu10_BufferCopy(svga->swc, sbuf->handle, bufsurf->handle,
+ 0, 0, sbuf->b.b.width0);
+ if (ret != PIPE_OK) {
+ svga_context_flush(svga, NULL);
+ ret = SVGA3D_vgpu10_BufferCopy(svga->swc, sbuf->handle, bufsurf->handle,
+ 0, 0, sbuf->b.b.width0);
+ assert(ret == PIPE_OK);
+ }
+ }
+
+ /* Set this surface as the current one */
+ sbuf->handle = bufsurf->handle;
+ sbuf->key = bufsurf->key;
+ sbuf->bind_flags = bufsurf->bind_flags;
+}
+
+
+/**
+ * Prepare a host surface that can be used as indicated in tobind_flags.
+ * If the existing host surface was not created with the necessary bind
+ * flags and the new bind flags can be combined with the existing ones,
+ * recreate the surface with the combined bind flags. Otherwise, create
+ * a separate surface for the incompatible bind flags.
+ * For example, if a stream output buffer is reused as a constant buffer,
+ * two surfaces will be created, one for stream output and one for the
+ * constant buffer, since a constant buffer surface cannot also be bound
+ * as a stream output surface.
+ */
+enum pipe_error
+svga_buffer_validate_host_surface(struct svga_context *svga,
+ struct svga_buffer *sbuf,
+ unsigned tobind_flags)
+{
+ struct svga_buffer_surface *bufsurf;
+ enum pipe_error ret = PIPE_OK;
+
+ /* Flush any pending upload first */
+ svga_buffer_upload_flush(svga, sbuf);
+
+ /* First check from the cached buffer surface list to see if there is
+ * already a buffer surface that has the requested bind flags, or
+ * surface with compatible bind flags that can be promoted.
+ */
+ bufsurf = svga_buffer_get_host_surface(sbuf, tobind_flags);
+
+ if (bufsurf) {
+ if ((bufsurf->bind_flags & tobind_flags) == tobind_flags) {
+ /* there is a surface with the requested bind flags */
+ svga_buffer_bind_host_surface(svga, sbuf, bufsurf);
+ } else {
+ /* Recreate a host surface with the combined bind flags */
+ ret = svga_buffer_recreate_host_surface(svga, sbuf,
+ bufsurf->bind_flags |
+ tobind_flags);
+
+ /* Destroy the old surface */
+ svga_screen_surface_destroy(svga_screen(sbuf->b.b.screen),
+ &bufsurf->key, &bufsurf->handle);
+
+ LIST_DEL(&bufsurf->list);
+ FREE(bufsurf);
+ }
+ } else {
+ /* Need to create a new surface if the bind flags are incompatible,
+ * such as constant buffer surface & stream output surface.
+ */
+ ret = svga_buffer_recreate_host_surface(svga, sbuf,
+ tobind_flags);
+ }
+ return ret;
+}
void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
struct svga_buffer *sbuf)
{
- if(sbuf->handle) {
- SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->b.b.width0);
- svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
+ struct svga_buffer_surface *bufsurf, *next;
+
+ LIST_FOR_EACH_ENTRY_SAFE(bufsurf, next, &sbuf->surfaces, list) {
+ SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
+ bufsurf->handle, sbuf->b.b.width0);
+ svga_screen_surface_destroy(ss, &bufsurf->key, &bufsurf->handle);
+ FREE(bufsurf);
+ }
+}
+
+
+/**
+ * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
+ * command buffer, equal to the current number of mapped ranges.
+ * The UPDATE_GB_IMAGE commands will be patched with the
+ * actual ranges just before flush.
+ */
+static enum pipe_error
+svga_buffer_upload_gb_command(struct svga_context *svga,
+ struct svga_buffer *sbuf)
+{
+ struct svga_winsys_context *swc = svga->swc;
+ SVGA3dCmdUpdateGBImage *update_cmd;
+ struct svga_3d_update_gb_image *whole_update_cmd = NULL;
+ const uint32 numBoxes = sbuf->map.num_ranges;
+ struct pipe_resource *dummy;
+ unsigned i;
+
+ assert(svga_have_gb_objects(svga));
+ assert(numBoxes);
+ assert(sbuf->dma.updates == NULL);
+
+ if (sbuf->dma.flags.discard) {
+ struct svga_3d_invalidate_gb_image *cicmd = NULL;
+ SVGA3dCmdInvalidateGBImage *invalidate_cmd;
+ const unsigned total_commands_size =
+ sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);
+
+ /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
+ * 'numBoxes' UPDATE_GB_IMAGE commands. Allocate all at once rather
+ * than with separate commands because we need to properly deal with
+ * filling the command buffer.
+ */
+ invalidate_cmd = SVGA3D_FIFOReserve(swc,
+ SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
+ total_commands_size, 1 + numBoxes);
+ if (!invalidate_cmd)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ cicmd = container_of(invalidate_cmd, cicmd, body);
+ cicmd->header.size = sizeof(*invalidate_cmd);
+ swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL,
+ sbuf->handle,
+ (SVGA_RELOC_WRITE |
+ SVGA_RELOC_INTERNAL |
+ SVGA_RELOC_DMA));
+ invalidate_cmd->image.face = 0;
+ invalidate_cmd->image.mipmap = 0;
+
+      /* The whole_update_cmd is a SVGA3dCmdHeader plus the
+ * SVGA3dCmdUpdateGBImage command.
+ */
+ whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
+ /* initialize the first UPDATE_GB_IMAGE command */
+ whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ update_cmd = &whole_update_cmd->body;
+
+ } else {
+ /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
+ const unsigned total_commands_size =
+ sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);
+
+ update_cmd = SVGA3D_FIFOReserve(swc,
+ SVGA_3D_CMD_UPDATE_GB_IMAGE,
+ total_commands_size, numBoxes);
+ if (!update_cmd)
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+      /* The whole_update_cmd is a SVGA3dCmdHeader plus the
+ * SVGA3dCmdUpdateGBImage command.
+ */
+ whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
+ }
+
+ /* Init the first UPDATE_GB_IMAGE command */
+ whole_update_cmd->header.size = sizeof(*update_cmd);
+ swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
+ SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
+ update_cmd->image.face = 0;
+ update_cmd->image.mipmap = 0;
+
+ /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
+ * fill in the box info below.
+ */
+ sbuf->dma.updates = whole_update_cmd;
+
+ /*
+ * Copy the face, mipmap, etc. info to all subsequent commands.
+ * Also do the surface relocation for each subsequent command.
+ */
+ for (i = 1; i < numBoxes; ++i) {
+ whole_update_cmd++;
+ memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));
+
+ swc->surface_relocation(swc, &whole_update_cmd->body.image.sid, NULL,
+ sbuf->handle,
+ SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
}
-}
+
+ /* Increment reference count */
+ sbuf->dma.svga = svga;
+ dummy = NULL;
+ pipe_resource_reference(&dummy, &sbuf->b.b);
+ SVGA_FIFOCommitAll(swc);
+
+ swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
+ sbuf->dma.flags.discard = FALSE;
+
+ svga->hud.num_resource_updates++;
+
+ return PIPE_OK;
+}
/**
- * Variant of SVGA3D_BufferDMA which leaves the copy box temporarily in blank.
+ * Issue DMA commands to transfer guest memory to the host.
+ * Note that the memory segments (offset, size) will be patched in
+ * later in the svga_buffer_upload_flush() function.
*/
static enum pipe_error
-svga_buffer_upload_command(struct svga_context *svga,
- struct svga_buffer *sbuf)
+svga_buffer_upload_hb_command(struct svga_context *svga,
+ struct svga_buffer *sbuf)
{
struct svga_winsys_context *swc = svga->swc;
struct svga_winsys_buffer *guest = sbuf->hwbuf;
struct svga_winsys_surface *host = sbuf->handle;
- SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
+ const SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
SVGA3dCmdSurfaceDMA *cmd;
- uint32 numBoxes = sbuf->map.num_ranges;
+ const uint32 numBoxes = sbuf->map.num_ranges;
SVGA3dCopyBox *boxes;
SVGA3dCmdSurfaceDMASuffix *pSuffix;
unsigned region_flags;
unsigned surface_flags;
struct pipe_resource *dummy;
- if(transfer == SVGA3D_WRITE_HOST_VRAM) {
+ assert(!svga_have_gb_objects(svga));
+
+ if (transfer == SVGA3D_WRITE_HOST_VRAM) {
region_flags = SVGA_RELOC_READ;
surface_flags = SVGA_RELOC_WRITE;
}
- else if(transfer == SVGA3D_READ_HOST_VRAM) {
+ else if (transfer == SVGA3D_READ_HOST_VRAM) {
region_flags = SVGA_RELOC_WRITE;
surface_flags = SVGA_RELOC_READ;
}
SVGA_3D_CMD_SURFACE_DMA,
sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
2);
- if(!cmd)
+ if (!cmd)
return PIPE_ERROR_OUT_OF_MEMORY;
swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
cmd->guest.pitch = 0;
- swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
+ swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
cmd->host.face = 0;
cmd->host.mipmap = 0;
SVGA_FIFOCommitAll(swc);
+ swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
sbuf->dma.flags.discard = FALSE;
+ svga->hud.num_buffer_uploads++;
+
return PIPE_OK;
}
+/**
+ * Issue commands to transfer guest memory to the host.
+ */
+static enum pipe_error
+svga_buffer_upload_command(struct svga_context *svga, struct svga_buffer *sbuf)
+{
+ if (svga_have_gb_objects(svga)) {
+ return svga_buffer_upload_gb_command(svga, sbuf);
+ } else {
+ return svga_buffer_upload_hb_command(svga, sbuf);
+ }
+}
+
+
/**
* Patch up the upload DMA command reserved by svga_buffer_upload_command
* with the final ranges.
*/
-static void
-svga_buffer_upload_flush(struct svga_context *svga,
- struct svga_buffer *sbuf)
+void
+svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf)
{
- SVGA3dCopyBox *boxes;
unsigned i;
struct pipe_resource *dummy;
- assert(sbuf->handle);
- assert(sbuf->hwbuf);
+ if (!sbuf->dma.pending) {
+ //debug_printf("no dma pending on buffer\n");
+ return;
+ }
+
+ assert(sbuf->handle);
assert(sbuf->map.num_ranges);
assert(sbuf->dma.svga == svga);
- assert(sbuf->dma.boxes);
-
+
/*
- * Patch the DMA command with the final copy box.
+ * Patch the DMA/update command with the final copy box.
*/
+ if (svga_have_gb_objects(svga)) {
+ struct svga_3d_update_gb_image *update = sbuf->dma.updates;
+ assert(update);
- SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);
+ for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
+ SVGA3dBox *box = &update->body.box;
+
+ SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
+ sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);
+
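+         /* For a buffer the update box is one-dimensional: x is the byte
+          * offset and w is the byte count; y/z are 0 and h/d are 1.
+          */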
+ box->x = sbuf->map.ranges[i].start;
+ box->y = 0;
+ box->z = 0;
+ box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
+ box->h = 1;
+ box->d = 1;
+
+ assert(box->x <= sbuf->b.b.width0);
+ assert(box->x + box->w <= sbuf->b.b.width0);
+
+ svga->hud.num_bytes_uploaded += box->w;
+ svga->hud.num_buffer_uploads++;
+ }
+ }
+ else {
+ assert(sbuf->hwbuf);
+ assert(sbuf->dma.boxes);
+ SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);
+
+ for (i = 0; i < sbuf->map.num_ranges; ++i) {
+ SVGA3dCopyBox *box = sbuf->dma.boxes + i;
- boxes = sbuf->dma.boxes;
- for(i = 0; i < sbuf->map.num_ranges; ++i) {
- SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
+ SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);
- boxes[i].x = sbuf->map.ranges[i].start;
- boxes[i].y = 0;
- boxes[i].z = 0;
- boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
- boxes[i].h = 1;
- boxes[i].d = 1;
- boxes[i].srcx = sbuf->map.ranges[i].start;
- boxes[i].srcy = 0;
- boxes[i].srcz = 0;
+ box->x = sbuf->map.ranges[i].start;
+ box->y = 0;
+ box->z = 0;
+ box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
+ box->h = 1;
+ box->d = 1;
+ box->srcx = sbuf->map.ranges[i].start;
+ box->srcy = 0;
+ box->srcz = 0;
+
+ assert(box->x <= sbuf->b.b.width0);
+ assert(box->x + box->w <= sbuf->b.b.width0);
+
+ svga->hud.num_bytes_uploaded += box->w;
+ svga->hud.num_buffer_uploads++;
+ }
}
+
+   /* Reset sbuf for next use/upload */
sbuf->map.num_ranges = 0;
assert(sbuf->head.prev && sbuf->head.next);
- LIST_DEL(&sbuf->head);
+ LIST_DEL(&sbuf->head); /* remove from svga->dirty_buffers list */
#ifdef DEBUG
- sbuf->head.next = sbuf->head.prev = NULL;
+ sbuf->head.next = sbuf->head.prev = NULL;
#endif
sbuf->dma.pending = FALSE;
+ sbuf->dma.flags.discard = FALSE;
+ sbuf->dma.flags.unsynchronized = FALSE;
sbuf->dma.svga = NULL;
sbuf->dma.boxes = NULL;
+ sbuf->dma.updates = NULL;
/* Decrement reference count (and potentially destroy) */
dummy = &sbuf->b.b;
}
-
/**
* Note a dirty range.
*
* This function only notes the range down. It doesn't actually emit a DMA
* upload command. That only happens when a context tries to refer to this
- * buffer, and the DMA upload command is added to that context's command buffer.
- *
+ * buffer, and the DMA upload command is added to that context's command
+ * buffer.
+ *
* We try to lump as many contiguous DMA transfers together as possible.
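+ *
+ * For example, noting the range [0, 16) and then [8, 32) results in the
+ * single range [0, 32), while two widely separated writes are kept as
+ * distinct ranges.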
*/
void
-svga_buffer_add_range(struct svga_buffer *sbuf,
- unsigned start,
- unsigned end)
+svga_buffer_add_range(struct svga_buffer *sbuf, unsigned start, unsigned end)
{
unsigned i;
unsigned nearest_range;
unsigned nearest_dist;
assert(end > start);
-
+
if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
nearest_range = sbuf->map.num_ranges;
nearest_dist = ~0;
/*
* Try to grow one of the ranges.
- *
- * Note that it is not this function task to care about overlapping ranges,
- * as the GMR was already given so it is too late to do anything. Situations
- * where overlapping ranges may pose a problem should be detected via
- * pipe_context::is_resource_referenced and the context that refers to the
- * buffer should be flushed.
*/
-
- for(i = 0; i < sbuf->map.num_ranges; ++i) {
- int left_dist;
- int right_dist;
- int dist;
-
- left_dist = start - sbuf->map.ranges[i].end;
- right_dist = sbuf->map.ranges[i].start - end;
- dist = MAX2(left_dist, right_dist);
+ for (i = 0; i < sbuf->map.num_ranges; ++i) {
+ const int left_dist = start - sbuf->map.ranges[i].end;
+ const int right_dist = sbuf->map.ranges[i].start - end;
+ const int dist = MAX2(left_dist, right_dist);
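+
+         /* A non-positive dist means the new range touches or overlaps
+          * range i; a positive dist is the size of the gap between them.
+          */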
if (dist <= 0) {
/*
* Ranges are contiguous or overlapping -- extend this one and return.
+ *
+ * Note that it is not this function's task to prevent overlapping
+ * ranges, as the GMR was already given so it is too late to do
+ * anything. If the ranges overlap here it must surely be because
+ * PIPE_TRANSFER_UNSYNCHRONIZED was set.
*/
-
sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
return;
/*
* Discontiguous ranges -- keep track of the nearest range.
*/
-
if (dist < nearest_dist) {
nearest_range = i;
nearest_dist = dist;
* pending DMA upload and start clean.
*/
- if(sbuf->dma.pending)
- svga_buffer_upload_flush(sbuf->dma.svga, sbuf);
+ svga_buffer_upload_flush(sbuf->dma.svga, sbuf);
assert(!sbuf->dma.pending);
assert(!sbuf->dma.svga);
assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
assert(nearest_range < sbuf->map.num_ranges);
- sbuf->map.ranges[nearest_range].start = MIN2(sbuf->map.ranges[nearest_range].start, start);
- sbuf->map.ranges[nearest_range].end = MAX2(sbuf->map.ranges[nearest_range].end, end);
+ sbuf->map.ranges[nearest_range].start =
+ MIN2(sbuf->map.ranges[nearest_range].start, start);
+ sbuf->map.ranges[nearest_range].end =
+ MAX2(sbuf->map.ranges[nearest_range].end, end);
}
}
/**
* Copy the contents of the malloc buffer to a hardware buffer.
*/
-static INLINE enum pipe_error
-svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
+static enum pipe_error
+svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf,
+ unsigned bind_flags)
{
assert(!sbuf->user);
- if(!sbuf->hwbuf) {
+ if (!svga_buffer_has_hw_storage(sbuf)) {
+ struct svga_screen *ss = svga_screen(sbuf->b.b.screen);
enum pipe_error ret;
+ boolean retry;
void *map;
-
+ unsigned i;
+
assert(sbuf->swbuf);
- if(!sbuf->swbuf)
+ if (!sbuf->swbuf)
return PIPE_ERROR;
-
- ret = svga_buffer_create_hw_storage(ss, sbuf);
- if(ret != PIPE_OK)
+
+      ret = svga_buffer_create_hw_storage(ss, sbuf, bind_flags);
+ if (ret != PIPE_OK)
return ret;
- pipe_mutex_lock(ss->swc_mutex);
- map = ss->sws->buffer_map(ss->sws, sbuf->hwbuf, PIPE_TRANSFER_WRITE);
+ mtx_lock(&ss->swc_mutex);
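+      /* The HW storage was just created above, so mapping it is not
+       * expected to require a flush and retry (hence the asserts below).
+       */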
+ map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
assert(map);
- if(!map) {
- pipe_mutex_unlock(ss->swc_mutex);
+ assert(!retry);
+ if (!map) {
+ mtx_unlock(&ss->swc_mutex);
svga_buffer_destroy_hw_storage(ss, sbuf);
return PIPE_ERROR;
}
- memcpy(map, sbuf->swbuf, sbuf->b.b.width0);
- ss->sws->buffer_unmap(ss->sws, sbuf->hwbuf);
+ /* Copy data from malloc'd swbuf to the new hardware buffer */
+ for (i = 0; i < sbuf->map.num_ranges; i++) {
+ unsigned start = sbuf->map.ranges[i].start;
+ unsigned len = sbuf->map.ranges[i].end - start;
+ memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
+ }
+
+ svga_buffer_hw_storage_unmap(svga, sbuf);
/* This user/malloc buffer is now indistinguishable from a gpu buffer */
- assert(!sbuf->map.count);
- if(!sbuf->map.count) {
- if(sbuf->user)
+ assert(sbuf->map.count == 0);
+ if (sbuf->map.count == 0) {
+ if (sbuf->user)
sbuf->user = FALSE;
else
align_free(sbuf->swbuf);
sbuf->swbuf = NULL;
}
-
- pipe_mutex_unlock(ss->swc_mutex);
+
+ mtx_unlock(&ss->swc_mutex);
}
-
+
return PIPE_OK;
}
* Upload the buffer to the host in a piecewise fashion.
*
* Used when the buffer is too big to fit in the GMR aperture.
+ * This function should never get called in the guest-backed case
+ * since we always have a full-sized hardware storage backing the
+ * host surface.
*/
-static INLINE enum pipe_error
+static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
struct svga_context *svga,
struct svga_buffer *sbuf)
assert(sbuf->map.num_ranges);
assert(!sbuf->dma.pending);
+ assert(!svga_have_gb_objects(svga));
SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);
for (i = 0; i < sbuf->map.num_ranges; ++i) {
- struct svga_buffer_range *range = &sbuf->map.ranges[i];
+ const struct svga_buffer_range *range = &sbuf->map.ranges[i];
unsigned offset = range->start;
unsigned size = range->end - range->start;
map = sws->buffer_map(sws, hwbuf,
PIPE_TRANSFER_WRITE |
- PIPE_TRANSFER_DISCARD);
+ PIPE_TRANSFER_DISCARD_RANGE);
assert(map);
if (map) {
- memcpy(map, sbuf->swbuf, size);
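+         /* Copy from the matching offset within the malloc'd buffer;
+          * this DMA piece covers bytes [offset, offset + size).
+          */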
+ memcpy(map, (const char *) sbuf->swbuf + offset, size);
sws->buffer_unmap(sws, hwbuf);
}
hwbuf, sbuf->handle,
SVGA3D_WRITE_HOST_VRAM,
size, 0, offset, sbuf->dma.flags);
- if(ret != PIPE_OK) {
+ if (ret != PIPE_OK) {
svga_context_flush(svga, NULL);
ret = SVGA3D_BufferDMA(svga->swc,
hwbuf, sbuf->handle,
}
-
-
-/* Get (or create/upload) the winsys surface handle so that we can
+/**
+ * Get (or create/upload) the winsys surface handle so that we can
* refer to this buffer in fifo commands.
+ * This function will create the host surface, and in the GB case also the
+ * hardware storage. In the non-GB case, the hardware storage will be created
+ * if there are mapped ranges and the data is currently in a malloc'ed buffer.
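+ *
+ * A typical caller passes the bind flag it is about to use the buffer
+ * with, for example (hypothetical usage sketch):
+ *
+ *    handle = svga_buffer_handle(svga, buf, PIPE_BIND_VERTEX_BUFFER);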
*/
struct svga_winsys_surface *
-svga_buffer_handle(struct svga_context *svga,
- struct pipe_resource *buf)
+svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf,
+ unsigned tobind_flags)
{
struct pipe_screen *screen = svga->pipe.screen;
struct svga_screen *ss = svga_screen(screen);
struct svga_buffer *sbuf;
enum pipe_error ret;
- if(!buf)
+ if (!buf)
return NULL;
sbuf = svga_buffer(buf);
-
- assert(!sbuf->map.count);
+
assert(!sbuf->user);
-
- if(!sbuf->handle) {
- ret = svga_buffer_create_host_surface(ss, sbuf);
- if(ret != PIPE_OK)
- return NULL;
+
+ if (sbuf->handle) {
+ if ((sbuf->bind_flags & tobind_flags) != tobind_flags) {
+ /* If the allocated resource's bind flags do not include the
+ * requested bind flags, validate the host surface.
+ */
+ ret = svga_buffer_validate_host_surface(svga, sbuf, tobind_flags);
+ if (ret != PIPE_OK)
+ return NULL;
+ }
+ } else {
+ if (!sbuf->bind_flags) {
+ sbuf->bind_flags = tobind_flags;
+ }
+
+ assert((sbuf->bind_flags & tobind_flags) == tobind_flags);
+
+ /* This call will set sbuf->handle */
+ if (svga_have_gb_objects(svga)) {
+ ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
+ } else {
+ ret = svga_buffer_create_host_surface(ss, sbuf, sbuf->bind_flags);
+ }
+ if (ret != PIPE_OK)
+ return NULL;
}
assert(sbuf->handle);
if (sbuf->map.num_ranges) {
if (!sbuf->dma.pending) {
- /*
- * No pending DMA upload yet, so insert a DMA upload command now.
- */
+ /* No pending DMA/update commands yet. */
- /*
- * Migrate the data from swbuf -> hwbuf if necessary.
- */
- ret = svga_buffer_update_hw(ss, sbuf);
+ /* Migrate the data from swbuf -> hwbuf if necessary */
+ ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
if (ret == PIPE_OK) {
- /*
- * Queue a dma command.
- */
-
+ /* Emit DMA or UpdateGBImage commands */
ret = svga_buffer_upload_command(svga, sbuf);
if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
svga_context_flush(svga, NULL);
}
}
- assert(!sbuf->map.num_ranges || sbuf->dma.pending);
+ assert(sbuf->map.num_ranges == 0 || sbuf->dma.pending);
return sbuf->handle;
}
-
void
svga_context_flush_buffers(struct svga_context *svga)
{
struct list_head *curr, *next;
- struct svga_buffer *sbuf;
+
+ SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERSFLUSH);
curr = svga->dirty_buffers.next;
next = curr->next;
- while(curr != &svga->dirty_buffers) {
- sbuf = LIST_ENTRY(struct svga_buffer, curr, head);
+ while (curr != &svga->dirty_buffers) {
+ struct svga_buffer *sbuf = LIST_ENTRY(struct svga_buffer, curr, head);
assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
assert(sbuf->dma.pending);
-
+
svga_buffer_upload_flush(svga, sbuf);
- curr = next;
+ curr = next;
next = curr->next;
}
-}
-
-
-void
-svga_redefine_user_buffer(struct pipe_context *pipe,
- struct pipe_resource *resource,
- unsigned offset,
- unsigned size)
-{
- struct svga_screen *ss = svga_screen(pipe->screen);
- struct svga_context *svga = svga_context(pipe);
- struct svga_buffer *sbuf = svga_buffer(resource);
-
- assert(sbuf->user);
-
- /*
- * Release any uploaded user buffer.
- *
- * TODO: As an optimization, we could try to update the uploaded buffer
- * instead.
- */
-
- pipe_resource_reference(&sbuf->uploaded.buffer, NULL);
-
- pipe_mutex_lock(ss->swc_mutex);
-
- if (offset + size > resource->width0) {
- /*
- * User buffers shouldn't have DMA directly, unless
- * SVGA_COMBINE_USERBUFFERS is not set.
- */
-
- if (sbuf->dma.pending) {
- svga_buffer_upload_flush(svga, sbuf);
- }
-
- if (sbuf->handle) {
- svga_buffer_destroy_host_surface(ss, sbuf);
- }
-
- if (sbuf->hwbuf) {
- svga_buffer_destroy_hw_storage(ss, sbuf);
- }
-
- sbuf->key.size.width = sbuf->b.b.width0 = offset + size;
- }
-
- pipe_mutex_unlock(ss->swc_mutex);
- svga->curr.any_user_vertex_buffers = TRUE;
- svga->dirty |= SVGA_NEW_VBUFFER | SVGA_NEW_VELEMENT;
+ SVGA_STATS_TIME_POP(svga_sws(svga));
}