This involves a few driver modifications to keep things building.
The driver may not actually run properly at this point.
Signed-off-by: Brian Paul <brianp@vmware.com>
if (!cmd)
return PIPE_ERROR_OUT_OF_MEMORY;
- swc->shader_relocation(swc, &cmd->shid, NULL, NULL, gbshader);
+ swc->shader_relocation(swc, &cmd->shid, NULL, NULL, gbshader, 0);
cmd->type = type;
cmd->sizeInBytes = sizeInBytes;
return PIPE_ERROR_OUT_OF_MEMORY;
swc->shader_relocation(swc, &cmd->shid, &cmd->mobid,
- &cmd->offsetInBytes, gbshader);
+ &cmd->offsetInBytes, gbshader, 0);
swc->commit(swc);
swc->context_relocation(swc, &cmd->cid);
cmd->type = type;
if (gbshader)
- swc->shader_relocation(swc, &cmd->shid, NULL, NULL, gbshader);
+ swc->shader_relocation(swc, &cmd->shid, NULL, NULL, gbshader, 0);
else
cmd->shid = SVGA_ID_INVALID;
swc->commit(swc);
if (!cmd)
return PIPE_ERROR_OUT_OF_MEMORY;
- swc->shader_relocation(swc, &cmd->shid, NULL, NULL, gbshader);
+ swc->shader_relocation(swc, &cmd->shid, NULL, NULL, gbshader, 0);
swc->commit(swc);
0 : SVGA_SURFACE_USAGE_SHARED,
key->size,
key->numFaces,
- key->numMipLevels);
+ key->numMipLevels,
+ 0);
if (handle)
SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
" CREATE sid %p sz %dx%dx%d\n",
#define SVGA_FENCE_FLAG_EXEC (1 << 0)
#define SVGA_FENCE_FLAG_QUERY (1 << 1)
-#define SVGA_SURFACE_USAGE_SHARED (1 << 0)
+#define SVGA_SURFACE_USAGE_SHARED (1 << 0)
+#define SVGA_SURFACE_USAGE_SCANOUT (1 << 1)
+
+#define SVGA_QUERY_FLAG_SET (1 << 0)
+#define SVGA_QUERY_FLAG_REF (1 << 1)
/** Opaque surface handle */
struct svga_winsys_surface;
-
/** Opaque guest-backed objects */
struct svga_winsys_gb_shader;
-
+struct svga_winsys_gb_query;
/**
uint32 *shid,
uint32 *mobid,
uint32 *offset,
- struct svga_winsys_gb_shader *shader);
+ struct svga_winsys_gb_shader *shader,
+ unsigned flags);
/**
* Emit a relocation for a guest-backed context.
uint32 offset,
unsigned flags);
+ /**
+ * Emit a relocation for a guest-backed query object.
+ *
+ * NOTE: Order of this call does matter. It should be the same order
+ * as relocations appear in the command buffer.
+ */
+ void
+ (*query_relocation)(struct svga_winsys_context *swc,
+ SVGAMobId *id,
+ struct svga_winsys_gb_query *query);
+
+ /**
+ * Bind queries to context.
+ * \param flags exactly one of SVGA_QUERY_FLAG_SET/REF
+ */
+ enum pipe_error
+ (*query_bind)(struct svga_winsys_context *sws,
+ struct svga_winsys_gb_query *query,
+ unsigned flags);
+
void
(*commit)(struct svga_winsys_context *swc);
struct svga_winsys_surface *surface,
boolean *rebind);
+ /**
+ * Create and define a DX GB shader that resides in the device COTable.
+ * Caller of this function will issue the DXDefineShader command.
+ */
+ struct svga_winsys_gb_shader *
+ (*shader_create)(struct svga_winsys_context *swc,
+ uint32 shaderId,
+ SVGA3dShaderType shaderType,
+ const uint32 *bytecode,
+ uint32 bytecodeLen);
+
+ /**
+ * Destroy a DX GB shader.
+ * This function will issue the DXDestroyShader command.
+ */
+ void
+ (*shader_destroy)(struct svga_winsys_context *swc,
+ struct svga_winsys_gb_shader *shader);
+
+ /**
+ * Rebind a DX GB resource to a context.
+ * This is called to reference a DX GB resource in the command stream in
+ * order to page in the associated resource in case the memory has been
+ * paged out, and to fence it if necessary after command submission.
+ */
+ enum pipe_error
+ (*resource_rebind)(struct svga_winsys_context *swc,
+ struct svga_winsys_surface *surface,
+ struct svga_winsys_gb_shader *shader,
+ unsigned flags);
};
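For illustration only (not part of the patch): a minimal sketch of how a driver-side helper could drive the new query callbacks, assuming the existing reserve()/commit() winsys calls. In real driver code the MobId pointer would live inside a reserved SVGA command; reserving a bare SVGAMobId here is a simplification.

static enum pipe_error
bind_gb_query_sketch(struct svga_winsys_context *swc,
                     struct svga_winsys_gb_query *query)
{
   /* Reserve space for the MOB id plus one relocation slot. */
   SVGAMobId *id = swc->reserve(swc, sizeof *id, 1);
   if (!id)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* Query relocations must be emitted in the same order as they
    * appear in the command buffer. */
   swc->query_relocation(swc, id, query);
   swc->commit(swc);

   /* flags must be exactly one of SVGA_QUERY_FLAG_SET / SVGA_QUERY_FLAG_REF. */
   return swc->query_bind(swc, query, SVGA_QUERY_FLAG_REF);
}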
* \param format Device surface format
* \param usage Winsys usage: bitmask of SVGA_SURFACE_USAGE_x flags
* \param size Surface size given in device format
- * \param numFaces Number of faces of the surface (1 or 6)
+ * \param numLayers Number of layers of the surface (or cube faces)
* \param numMipLevels Number of mipmap levels for each face
*
* Returns the surface ID (sid). Surfaces are generic
* - Each face has a list of mipmap levels
*
* - Each mipmap image may have multiple volume
- * slices, if the image is three dimensional.
+ * slices for a 3D image, or multiple 2D slices for a texture array.
*
* - Each slice is a 2D array of 'blocks'
*
SVGA3dSurfaceFormat format,
unsigned usage,
SVGA3dSize size,
- uint32 numFaces,
- uint32 numMipLevels);
+ uint32 numLayers,
+ uint32 numMipLevels,
+ unsigned sampleCount);
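Purely illustrative (not part of the patch): creating a cube-map surface with the extended surface_create signature. The leading SVGA3dSurfaceFlags argument, the format, and the sizes are assumptions for this sketch.

static struct svga_winsys_surface *
create_cube_surface_sketch(struct svga_winsys_screen *sws)
{
   SVGA3dSize size;

   size.width = 256;
   size.height = 256;
   size.depth = 1;

   /* 6 layers (cube faces), 1 mip level, no multisampling. */
   return sws->surface_create(sws, SVGA3D_SURFACE_CUBEMAP,
                              SVGA3D_A8R8G8B8, 0 /* usage */,
                              size, 6, 1, 0 /* sampleCount */);
}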
/**
* Creates a surface from a winsys handle.
(*surface_can_create)(struct svga_winsys_screen *sws,
SVGA3dSurfaceFormat format,
SVGA3dSize size,
- uint32 numFaces,
+ uint32 numLayers,
uint32 numMipLevels);
/**
*/
struct svga_winsys_gb_shader *
(*shader_create)(struct svga_winsys_screen *sws,
- SVGA3dShaderType type,
+ SVGA3dShaderType shaderType,
const uint32 *bytecode,
uint32 bytecodeLen);
(*shader_destroy)(struct svga_winsys_screen *sws,
struct svga_winsys_gb_shader *shader);
+ /**
+ * Create and define a GB query.
+ */
+ struct svga_winsys_gb_query *
+ (*query_create)(struct svga_winsys_screen *sws, uint32 len);
+
+ /**
+ * Destroy a GB query.
+ */
+ void
+ (*query_destroy)(struct svga_winsys_screen *sws,
+ struct svga_winsys_gb_query *query);
+
+ /**
+ * Initialize the query state of the query that resides in the slot
+ * specified by offset.
+ * \return zero on success.
+ */
+ int
+ (*query_init)(struct svga_winsys_screen *sws,
+ struct svga_winsys_gb_query *query,
+ unsigned offset,
+ SVGA3dQueryState queryState);
+
+ /**
+ * Retrieve the query state and result of the query that resides
+ * in the slot specified by offset.
+ */
+ void
+ (*query_get_result)(struct svga_winsys_screen *sws,
+ struct svga_winsys_gb_query *query,
+ unsigned offset,
+ SVGA3dQueryState *queryState,
+ void *result, uint32 resultLen);
+
+ /** Have VGPU v10 hardware? */
+ boolean have_vgpu10;
+
+ /** To rebind resources at the beginning of a new command buffer */
+ boolean need_to_rebind_resources;
};
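For illustration only (not part of the patch): a sketch of the new screen-level query callbacks, creating a query MOB, initializing slot 0 and polling it. The buffer length, slot offset, and result size are placeholder values.

static boolean
gb_query_sketch(struct svga_winsys_screen *sws)
{
   struct svga_winsys_gb_query *query;
   SVGA3dQueryState state;
   uint64_t result = 0;

   query = sws->query_create(sws, 4096);
   if (!query)
      return FALSE;

   if (sws->query_init(sws, query, 0, SVGA3D_QUERYSTATE_PENDING) != 0) {
      sws->query_destroy(sws, query);
      return FALSE;
   }

   /* ... emit begin/end query commands referencing the query MOB,
    * flush, and wait for the host ... */

   sws->query_get_result(sws, query, 0, &state, &result, sizeof(result));
   sws->query_destroy(sws, query);
   return state == SVGA3D_QUERYSTATE_SUCCEEDED;
}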
/**************************************************************************
*
- * Copyright 2007-2010 VMware, Inc.
+ * Copyright 2007-2015 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
/* TODO: remove consequents buffers with the same fence? */
assert(!destroyed);
+ (void) destroyed;
fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;
boolean destroyed;
destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
assert(!destroyed);
+ (void) destroyed;
}
if (fence) {
ops->fence_reference(ops, &fenced_buf->fence, fence);
/**********************************************************
- * Copyright 2010 VMware, Inc. All rights reserved.
+ * Copyright 2010-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
#include "vmw_surface.h"
#include "vmw_fence.h"
#include "vmw_shader.h"
+#include "vmw_query.h"
#define VMW_COMMAND_SIZE (64*1024)
#define VMW_SURFACE_RELOCS (1024)
{
struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
struct vmw_buffer_relocation *reloc;
+ struct pb_buffer *pb_buffer = vmw_pb_buffer(buffer);
- assert(vswc->region.staged < vswc->region.reserved);
+ if (id) {
+ assert(vswc->region.staged < vswc->region.reserved);
- reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
- reloc->mob.id = id;
- reloc->mob.offset_into_mob = offset_into_mob;
+ reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
+ reloc->mob.id = id;
+ reloc->mob.offset_into_mob = offset_into_mob;
- /*
- * pb_validate holds a refcount to the buffer, so no need to
- * refcount it again in the relocation.
- */
- reloc->buffer = vmw_pb_buffer(buffer);
- reloc->offset = offset;
- reloc->is_mob = TRUE;
- ++vswc->region.staged;
+ /*
+ * pb_validate holds a refcount to the buffer, so no need to
+ * refcount it again in the relocation.
+ */
+ reloc->buffer = pb_buffer;
+ reloc->offset = offset;
+ reloc->is_mob = TRUE;
+ ++vswc->region.staged;
+ }
- if (vmw_swc_add_validate_buffer(vswc, reloc->buffer, flags)) {
- vswc->seen_mobs += reloc->buffer->size;
+ if (vmw_swc_add_validate_buffer(vswc, pb_buffer, flags)) {
+ vswc->seen_mobs += pb_buffer->size;
/* divide by 5, tested for best performance */
if (vswc->seen_mobs >= vswc->vws->ioctl.max_mob_memory / VMW_MAX_MOB_MEM_FACTOR)
vswc->preemptive_flush = TRUE;
p_atomic_inc(&vsurf->validated);
}
- *where = vsurf->sid;
+ if (where)
+ *where = vsurf->sid;
}
static void
assert(swc->have_gb_objects || mobid == NULL);
- if(!surface) {
+ if (!surface) {
*where = SVGA3D_INVALID_ID;
if (mobid)
*mobid = SVGA3D_INVALID_ID;
uint32 *shid,
uint32 *mobid,
uint32 *offset,
- struct svga_winsys_gb_shader *shader)
+ struct svga_winsys_gb_shader *shader,
+ unsigned flags)
{
struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+ struct vmw_winsys_screen *vws = vswc->vws;
struct vmw_svga_winsys_shader *vshader;
struct vmw_ctx_validate_item *ishader;
+
if(!shader) {
*shid = SVGA3D_INVALID_ID;
return;
}
- assert(vswc->shader.staged < vswc->shader.reserved);
vshader = vmw_svga_winsys_shader(shader);
- ishader = util_hash_table_get(vswc->hash, vshader);
- if (ishader == NULL) {
- ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
- vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
- ishader->referenced = FALSE;
- /*
- * Note that a failure here may just fall back to unhashed behavior
- * and potentially cause unnecessary flushing, so ignore the
- * return code.
- */
- (void) util_hash_table_set(vswc->hash, vshader, ishader);
- ++vswc->shader.staged;
- }
+ if (!vws->base.have_vgpu10) {
+ assert(vswc->shader.staged < vswc->shader.reserved);
+ ishader = util_hash_table_get(vswc->hash, vshader);
+
+ if (ishader == NULL) {
+ ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
+ vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
+ ishader->referenced = FALSE;
+ /*
+ * Note that a failure here may just fall back to unhashed behavior
+ * and potentially cause unnecessary flushing, so ignore the
+ * return code.
+ */
+ (void) util_hash_table_set(vswc->hash, vshader, ishader);
+ ++vswc->shader.staged;
+ }
- if (!ishader->referenced) {
- ishader->referenced = TRUE;
- p_atomic_inc(&vshader->validated);
+ if (!ishader->referenced) {
+ ishader->referenced = TRUE;
+ p_atomic_inc(&vshader->validated);
+ }
}
- *shid = vshader->shid;
+ if (shid)
+ *shid = vshader->shid;
- if (mobid != NULL && vshader->buf)
+ if (vshader->buf)
vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
0, SVGA_RELOC_READ);
}
+static void
+vmw_swc_query_relocation(struct svga_winsys_context *swc,
+ SVGAMobId *id,
+ struct svga_winsys_gb_query *query)
+{
+ /* Queries are backed by one big MOB */
+ vmw_swc_mob_relocation(swc, id, NULL, query->buf, 0,
+ SVGA_RELOC_READ | SVGA_RELOC_WRITE);
+}
+
static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
- assert(vswc->command.reserved);
+ assert(vswc->command.reserved >= 0);
assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
vswc->command.used += vswc->command.reserved;
vswc->command.reserved = 0;
return (key1 == key2) ? 0 : 1;
}
+
+/**
+ * vmw_svga_winsys_vgpu10_shader_create - The winsys shader_create callback
+ *
+ * @swc: The winsys context.
+ * @shaderId: Previously allocated shader id.
+ * @shaderType: The shader type.
+ * @bytecode: The shader bytecode
+ * @bytecodeLen: The length of the bytecode.
+ *
+ * Creates an svga_winsys_gb_shader structure, allocates a buffer for the
+ * shader code and copies the bytecode into it. Shader resource creation
+ * is not done.
+ */
+static struct svga_winsys_gb_shader *
+vmw_svga_winsys_vgpu10_shader_create(struct svga_winsys_context *swc,
+ uint32 shaderId,
+ SVGA3dShaderType shaderType,
+ const uint32 *bytecode,
+ uint32 bytecodeLen)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+ struct vmw_svga_winsys_shader *shader;
+ struct svga_winsys_gb_shader *gb_shader =
+ vmw_svga_winsys_shader_create(&vswc->vws->base, shaderType, bytecode,
+ bytecodeLen);
+ if (!gb_shader)
+ return NULL;
+
+ shader = vmw_svga_winsys_shader(gb_shader);
+ shader->shid = shaderId;
+
+ return gb_shader;
+}
+
+/**
+ * vmw_svga_winsys_vgpu10_shader_destroy - The winsys shader_destroy callback.
+ *
+ * @swc: The winsys context.
+ * @shader: A shader structure previously allocated by shader_create.
+ *
+ * Frees the shader structure and the buffer holding the shader code.
+ */
+static void
+vmw_svga_winsys_vgpu10_shader_destroy(struct svga_winsys_context *swc,
+ struct svga_winsys_gb_shader *shader)
+{
+ struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
+
+ vmw_svga_winsys_shader_destroy(&vswc->vws->base, shader);
+}
+
+/**
+ * vmw_svga_winsys_resource_rebind - The winsys resource_rebind callback
+ *
+ * @swc: The winsys context.
+ * @surface: The surface to be referenced.
+ * @shader: The shader to be referenced.
+ * @flags: Relocation flags.
+ *
+ * This callback is needed because shader backing buffers are sub-allocated, and
+ * hence the kernel fencing is not sufficient. The buffers need to be put on
+ * the context's validation list and fenced after command submission to avoid
+ * reuse of busy shader buffers. In addition, surfaces need to be put on the
+ * validation list in order for the driver to regard them as referenced
+ * by the command stream.
+ */
+static enum pipe_error
+vmw_svga_winsys_resource_rebind(struct svga_winsys_context *swc,
+ struct svga_winsys_surface *surface,
+ struct svga_winsys_gb_shader *shader,
+ unsigned flags)
+{
+ /**
+ * Need to reserve one validation item for either the surface or
+ * the shader.
+ */
+ if (!vmw_swc_reserve(swc, 0, 1))
+ return PIPE_ERROR_OUT_OF_MEMORY;
+
+ if (surface)
+ vmw_swc_surface_relocation(swc, NULL, NULL, surface, flags);
+ else if (shader)
+ vmw_swc_shader_relocation(swc, NULL, NULL, NULL, shader, flags);
+
+ vmw_swc_commit(swc);
+
+ return PIPE_OK;
+}
+
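For illustration only (not part of the patch): how the driver side might use the new resource_rebind callback when the winsys raises need_to_rebind_resources at the start of a command buffer. The surface and shader arguments are placeholders.

static enum pipe_error
rebind_resources_sketch(struct svga_winsys_context *swc,
                        struct svga_winsys_surface *surf,
                        struct svga_winsys_gb_shader *shader)
{
   enum pipe_error ret;

   /* Re-reference the surface so it is validated and fenced again. */
   ret = swc->resource_rebind(swc, surf, NULL, SVGA_RELOC_READ);
   if (ret != PIPE_OK)
      return ret;

   /* Likewise for the shader's sub-allocated backing buffer. */
   return swc->resource_rebind(swc, NULL, shader, SVGA_RELOC_READ);
}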
struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
vswc->base.surface_relocation = vmw_swc_surface_relocation;
vswc->base.region_relocation = vmw_swc_region_relocation;
vswc->base.mob_relocation = vmw_swc_mob_relocation;
+ vswc->base.query_relocation = vmw_swc_query_relocation;
+ vswc->base.query_bind = vmw_swc_query_bind;
vswc->base.context_relocation = vmw_swc_context_relocation;
vswc->base.shader_relocation = vmw_swc_shader_relocation;
vswc->base.commit = vmw_swc_commit;
vswc->base.surface_map = vmw_svga_winsys_surface_map;
vswc->base.surface_unmap = vmw_svga_winsys_surface_unmap;
- vswc->base.cid = vmw_ioctl_context_create(vws);
+ vswc->base.shader_create = vmw_svga_winsys_vgpu10_shader_create;
+ vswc->base.shader_destroy = vmw_svga_winsys_vgpu10_shader_destroy;
+
+ vswc->base.resource_rebind = vmw_svga_winsys_resource_rebind;
+
+ if (sws->have_vgpu10)
+ vswc->base.cid = vmw_ioctl_extended_context_create(vws, sws->have_vgpu10);
+ else
+ vswc->base.cid = vmw_ioctl_context_create(vws);
+
+ if (vswc->base.cid == -1)
+ goto out_no_context;
+
vswc->base.have_gb_objects = sws->have_gb_objects;
vswc->vws = vws;
out_no_hash:
pb_validate_destroy(vswc->validate);
out_no_validate:
+ vmw_ioctl_context_destroy(vws, vswc->base.cid);
+out_no_context:
FREE(vswc);
return NULL;
}
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**********************************************************
- * Copyright 2009-2011 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
*/
struct vmw_winsys_screen *
-vmw_winsys_create( int fd, boolean use_old_scanout_flag )
+vmw_winsys_create( int fd )
{
struct vmw_winsys_screen *vws;
struct stat stat_buf;
vws->device = stat_buf.st_rdev;
vws->open_count = 1;
vws->ioctl.drm_fd = dup(fd);
- vws->use_old_scanout_flag = use_old_scanout_flag;
vws->base.have_gb_dma = TRUE;
+ vws->base.need_to_rebind_resources = FALSE;
if (!vmw_ioctl_init(vws))
goto out_no_ioctl;
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
{
struct svga_winsys_screen base;
- boolean use_old_scanout_flag;
-
struct {
int drm_fd;
uint32_t hwversion;
uint64_t max_surface_memory;
uint64_t max_texture_size;
boolean have_drm_2_6;
+ boolean have_drm_2_9;
+ uint32_t drm_execbuf_version;
} ioctl;
struct {
uint32
vmw_ioctl_context_create(struct vmw_winsys_screen *vws);
+uint32
+vmw_ioctl_extended_context_create(struct vmw_winsys_screen *vws,
+ boolean vgpu10);
+
void
vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws,
uint32 cid);
unsigned usage,
SVGA3dSize size,
uint32 numFaces,
- uint32 numMipLevels);
+ uint32 numMipLevels,
+ unsigned sampleCount);
uint32
vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
SVGA3dSurfaceFlags flags,
SVGA3dSize size,
uint32 numFaces,
uint32 numMipLevels,
+ unsigned sampleCount,
uint32 buffer_handle,
struct vmw_region **p_region);
void vmw_ioctl_cleanup(struct vmw_winsys_screen *vws);
void vmw_pools_cleanup(struct vmw_winsys_screen *vws);
-struct vmw_winsys_screen *vmw_winsys_create(int fd, boolean use_old_scanout_flag);
+struct vmw_winsys_screen *vmw_winsys_create(int fd);
void vmw_winsys_destroy(struct vmw_winsys_screen *sws);
void vmw_winsys_screen_set_throttling(struct pipe_screen *screen,
uint32_t throttle_us);
uint32_t emitted,
boolean has_emitted);
+struct svga_winsys_gb_shader *
+vmw_svga_winsys_shader_create(struct svga_winsys_screen *sws,
+ SVGA3dShaderType type,
+ const uint32 *bytecode,
+ uint32 bytecodeLen);
+void
+vmw_svga_winsys_shader_destroy(struct svga_winsys_screen *sws,
+ struct svga_winsys_gb_shader *shader);
+
#endif /* VMW_SCREEN_H_ */
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
&drm_compat, "vmwgfx drm driver"))
return NULL;
- vws = vmw_winsys_create( fd, FALSE );
+ vws = vmw_winsys_create(fd);
if (!vws)
goto out_no_vws;
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
uint32_t size;
};
-/* XXX: This isn't a real hardware flag, but just a hack for kernel to
- * know about primary surfaces. In newer versions of the kernel
- * interface the driver uses a special field.
- */
-#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
-
-
uint32_t
vmw_region_size(struct vmw_region *region)
{
return -1;
vmw_printf("Context id is %d\n", c_arg.cid);
-
return c_arg.cid;
}
+uint32
+vmw_ioctl_extended_context_create(struct vmw_winsys_screen *vws,
+ boolean vgpu10)
+{
+ union drm_vmw_extended_context_arg c_arg;
+ int ret;
+
+ VMW_FUNC;
+ memset(&c_arg, 0, sizeof(c_arg));
+ c_arg.req = (vgpu10 ? drm_vmw_context_vgpu10 : drm_vmw_context_legacy);
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd,
+ DRM_VMW_CREATE_EXTENDED_CONTEXT,
+ &c_arg, sizeof(c_arg));
+
+ if (ret)
+ return -1;
+
+ vmw_printf("Context id is %d\n", c_arg.cid);
+ return c_arg.rep.cid;
+}
+
void
vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws, uint32 cid)
{
SVGA3dSurfaceFormat format,
unsigned usage,
SVGA3dSize size,
- uint32_t numFaces, uint32_t numMipLevels)
+ uint32_t numFaces, uint32_t numMipLevels,
+ unsigned sampleCount)
{
union drm_vmw_surface_create_arg s_arg;
struct drm_vmw_surface_create_req *req = &s_arg.req;
vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);
memset(&s_arg, 0, sizeof(s_arg));
- if (vws->use_old_scanout_flag &&
- (flags & SVGA3D_SURFACE_HINT_SCANOUT)) {
- req->flags = (uint32_t) flags;
- req->scanout = false;
- } else if (flags & SVGA3D_SURFACE_HINT_SCANOUT) {
- req->flags = (uint32_t) (flags & ~SVGA3D_SURFACE_HINT_SCANOUT);
- req->scanout = true;
- } else {
- req->flags = (uint32_t) flags;
- req->scanout = false;
- }
+ req->flags = (uint32_t) flags;
+ req->scanout = !!(usage & SVGA_SURFACE_USAGE_SCANOUT);
req->format = (uint32_t) format;
req->shareable = !!(usage & SVGA_SURFACE_USAGE_SHARED);
SVGA3dSize size,
uint32_t numFaces,
uint32_t numMipLevels,
+ unsigned sampleCount,
uint32_t buffer_handle,
struct vmw_region **p_region)
{
}
memset(&s_arg, 0, sizeof(s_arg));
- if (flags & SVGA3D_SURFACE_HINT_SCANOUT) {
- req->svga3d_flags = (uint32_t) (flags & ~SVGA3D_SURFACE_HINT_SCANOUT);
- req->drm_surface_flags = drm_vmw_surface_flag_scanout;
- } else {
- req->svga3d_flags = (uint32_t) flags;
- }
+ req->svga3d_flags = (uint32_t) flags;
+ if (usage & SVGA_SURFACE_USAGE_SCANOUT)
+ req->drm_surface_flags |= drm_vmw_surface_flag_scanout;
req->format = (uint32_t) format;
if (usage & SVGA_SURFACE_USAGE_SHARED)
req->drm_surface_flags |= drm_vmw_surface_flag_shareable;
req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
-
- assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
- DRM_VMW_MAX_MIP_LEVELS);
req->base_size.width = size.width;
req->base_size.height = size.height;
req->base_size.depth = size.depth;
req->mip_levels = numMipLevels;
req->multisample_count = 0;
req->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+
+ if (vws->base.have_vgpu10) {
+ req->array_size = numFaces;
+ req->multisample_count = sampleCount;
+ } else {
+ assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
+ DRM_VMW_MAX_MIP_LEVELS);
+ req->array_size = 0;
+ }
+
if (buffer_handle)
req->buffer_handle = buffer_handle;
else
struct drm_vmw_execbuf_arg arg;
struct drm_vmw_fence_rep rep;
int ret;
+ int argsize;
#ifdef DEBUG
{
arg.commands = (unsigned long)commands;
arg.command_size = size;
arg.throttle_us = throttle_us;
- arg.version = DRM_VMW_EXECBUF_VERSION;
-
+ arg.version = vws->ioctl.drm_execbuf_version;
+ arg.context_handle = (vws->base.have_vgpu10 ? cid : SVGA3D_INVALID_ID);
+
+ /* In DRM_VMW_EXECBUF_VERSION 1, the drm_vmw_execbuf_arg structure ends with
+ * the flags field. The structure size sent to drmCommandWrite must match
+ * the drm_execbuf_version; otherwise the kernel rejects the argument as invalid.
+ */
+ argsize = vws->ioctl.drm_execbuf_version > 1 ? sizeof(arg) :
+ offsetof(struct drm_vmw_execbuf_arg, context_handle);
do {
- ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
+ ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, argsize);
} while(ret == -ERESTART);
if (ret) {
vmw_error("%s error %s.\n", __FUNCTION__, strerror(-ret));
+ abort();
}
if (rep.error) {
int ret;
uint32_t *cap_buffer;
drmVersionPtr version;
+ boolean drm_gb_capable;
boolean have_drm_2_5;
VMW_FUNC;
(version->version_major == 2 && version->version_minor > 4);
vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
(version->version_major == 2 && version->version_minor > 5);
+ vws->ioctl.have_drm_2_9 = version->version_major > 2 ||
+ (version->version_major == 2 && version->version_minor > 8);
+
+ vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;
+
+ drm_gb_capable = have_drm_2_5;
memset(&gp_arg, 0, sizeof(gp_arg));
gp_arg.param = DRM_VMW_PARAM_3D;
vws->base.have_gb_objects =
!!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);
- if (vws->base.have_gb_objects && !have_drm_2_5)
+ if (vws->base.have_gb_objects && !drm_gb_capable)
goto out_no_3d;
+ vws->base.have_vgpu10 = FALSE;
if (vws->base.have_gb_objects) {
memset(&gp_arg, 0, sizeof(gp_arg));
gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
/* Never early flush surfaces, mobs do accounting. */
vws->ioctl.max_surface_memory = -1;
+
+ if (vws->ioctl.have_drm_2_9) {
+
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_VGPU10;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+ if (ret == 0 && gp_arg.value != 0) {
+ const char *vgpu10_val;
+
+ debug_printf("Have VGPU10 interface and hardware.\n");
+ vws->base.have_vgpu10 = TRUE;
+ vgpu10_val = getenv("SVGA_VGPU10");
+ if (vgpu10_val && strcmp(vgpu10_val, "0") == 0) {
+ debug_printf("Disabling VGPU10 interface.\n");
+ vws->base.have_vgpu10 = FALSE;
+ } else {
+ debug_printf("Enabling VGPU10 interface.\n");
+ }
+ }
+ }
} else {
vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;
size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
}
+ debug_printf("VGPU10 interface is %s.\n",
+ vws->base.have_vgpu10 ? "on" : "off");
+
cap_buffer = calloc(1, size);
if (!cap_buffer) {
debug_printf("Failed alloc fifo 3D caps buffer.\n");
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
#include "vmw_buffer.h"
#include "vmw_fence.h"
#include "vmw_shader.h"
+#include "vmw_query.h"
#include "svga3d_surfacedefs.h"
/**
SVGA3dSurfaceFormat format,
unsigned usage,
SVGA3dSize size,
- uint32 numFaces,
- uint32 numMipLevels)
+ uint32 numLayers,
+ uint32 numMipLevels,
+ unsigned sampleCount)
{
struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
struct vmw_svga_winsys_surface *surface;
struct pb_manager *provider;
uint32_t buffer_size;
-
memset(&desc, 0, sizeof(desc));
surface = CALLOC_STRUCT(vmw_svga_winsys_surface);
if(!surface)
* Used for the backing buffer GB surfaces, and to approximate
* when to flush on non-GB hosts.
*/
- buffer_size = svga3dsurface_get_serialized_size(format, size, numMipLevels, (numFaces == 6));
+ buffer_size = svga3dsurface_get_serialized_size(format, size, numMipLevels,
+ numLayers);
+ if (flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
+ buffer_size += sizeof(SVGA3dDXSOState);
+
if (buffer_size > vws->ioctl.max_texture_size) {
goto no_sid;
}
}
surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
- size, numFaces,
- numMipLevels, ptr.gmrId,
+ size, numLayers,
+ numMipLevels, sampleCount,
+ ptr.gmrId,
surface->buf ? NULL :
&desc.region);
vmw_svga_winsys_buffer_destroy(sws, surface->buf);
surface->buf = NULL;
surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
- size, numFaces,
- numMipLevels, 0,
- &desc.region);
+ size, numLayers,
+ numMipLevels, sampleCount,
+ 0, &desc.region);
if (surface->sid == SVGA3D_INVALID_ID)
goto no_sid;
}
}
} else {
surface->sid = vmw_ioctl_surface_create(vws, flags, format, usage,
- size, numFaces, numMipLevels);
+ size, numLayers, numMipLevels,
+ sampleCount);
if(surface->sid == SVGA3D_INVALID_ID)
goto no_sid;
vmw_svga_winsys_surface_can_create(struct svga_winsys_screen *sws,
SVGA3dSurfaceFormat format,
SVGA3dSize size,
- uint32 numFaces,
+ uint32 numLayers,
uint32 numMipLevels)
{
struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
buffer_size = svga3dsurface_get_serialized_size(format, size,
numMipLevels,
- (numFaces == 6));
+ numLayers);
if (buffer_size > vws->ioctl.max_texture_size) {
return FALSE;
}
{
struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
- if (index > vws->ioctl.num_cap_3d || !vws->ioctl.cap_3d[index].has_cap)
+ if (index > vws->ioctl.num_cap_3d ||
+ index >= SVGA3D_DEVCAP_MAX ||
+ !vws->ioctl.cap_3d[index].has_cap)
return FALSE;
*result = vws->ioctl.cap_3d[index].result;
return TRUE;
}
-static struct svga_winsys_gb_shader *
+struct svga_winsys_gb_shader *
vmw_svga_winsys_shader_create(struct svga_winsys_screen *sws,
SVGA3dShaderType type,
const uint32 *bytecode,
memcpy(code, bytecode, bytecodeLen);
vmw_svga_winsys_buffer_unmap(sws, shader->buf);
- shader->shid = vmw_ioctl_shader_create(vws, type, bytecodeLen);
- if(shader->shid == SVGA3D_INVALID_ID)
- goto out_no_shid;
+ if (!sws->have_vgpu10) {
+ shader->shid = vmw_ioctl_shader_create(vws, type, bytecodeLen);
+ if (shader->shid == SVGA3D_INVALID_ID)
+ goto out_no_shid;
+ }
return svga_winsys_shader(shader);
return NULL;
}
-static void
+void
vmw_svga_winsys_shader_destroy(struct svga_winsys_screen *sws,
struct svga_winsys_gb_shader *shader)
{
vws->base.shader_destroy = vmw_svga_winsys_shader_destroy;
vws->base.fence_finish = vmw_svga_winsys_fence_finish;
+ vws->base.query_create = vmw_svga_winsys_query_create;
+ vws->base.query_init = vmw_svga_winsys_query_init;
+ vws->base.query_destroy = vmw_svga_winsys_query_destroy;
+ vws->base.query_get_result = vmw_svga_winsys_query_get_result;
+
return TRUE;
}
/**********************************************************
- * Copyright 2009-2012 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
if (pipe_reference(dst_ref, src_ref)) {
struct svga_winsys_screen *sws = &dst->screen->base;
- vmw_ioctl_shader_destroy(dst->screen, dst->shid);
+ if (!sws->have_vgpu10)
+ vmw_ioctl_shader_destroy(dst->screen, dst->shid);
#ifdef DEBUG
/* to detect dangling pointers */
assert(p_atomic_read(&dst->validated) == 0);
/**********************************************************
- * Copyright 2009-2012 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**********************************************************
- * Copyright 2009 VMware, Inc. All rights reserved.
+ * Copyright 2009-2015 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
/*************************************************************************/
/**
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
+#define DRM_VMW_PARAM_SCREEN_TARGET 11
+#define DRM_VMW_PARAM_VGPU10 12
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
struct drm_vmw_execbuf_arg {
uint64_t commands;
uint64_t fence_rep;
uint32_t version;
uint32_t flags;
+ uint32_t context_handle;
+ uint32_t pad64;
};
/**
enum drm_vmw_shader_type {
drm_vmw_shader_type_vs = 0,
drm_vmw_shader_type_ps,
- drm_vmw_shader_type_gs
};
* @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID
* if none.
* @base_size Size of the base mip level for all faces.
+ * @array_size Must be zero for non-vgpu10 hardware; if non-zero,
+ * svga3d_flags must have the proper bind flags set up.
*
* Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
* Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
uint32_t multisample_count;
uint32_t autogen_filter;
uint32_t buffer_handle;
- uint32_t pad64;
+ uint32_t array_size;
struct drm_vmw_size base_size;
};
uint32_t pad64;
};
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+ drm_vmw_context_legacy,
+ drm_vmw_context_vgpu10
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+ enum drm_vmw_extended_context req;
+ struct drm_vmw_context_arg rep;
+};
#endif