#define VMW_MAX_DEFAULT_TEXTURE_SIZE (128 * 1024 * 1024)
#define VMW_FENCE_TIMEOUT_SECONDS 60
+#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)(upper32) << 32) | (lower32))
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+ ((svga3d_flags) & ((uint64_t)UINT32_MAX))
+
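For illustration only (not part of the patch), a minimal sketch of how these helpers are expected to split and recombine a 64-bit flag word; the flag value used here is a placeholder, not a real SVGA3D flag bit:

   #include <assert.h>
   #include <stdint.h>

   #define SVGA3D_FLAGS_64(upper32, lower32) \
      (((uint64_t)(upper32) << 32) | (lower32))
   #define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
   #define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
      ((svga3d_flags) & ((uint64_t)UINT32_MAX))

   int main(void)
   {
      /* Placeholder flag word with bits set in both 32-bit halves. */
      uint64_t flags = SVGA3D_FLAGS_64(0x1u, 0x20u);

      /* Splitting and recombining must be lossless. */
      uint32_t upper = SVGA3D_FLAGS_UPPER_32(flags);
      uint32_t lower = SVGA3D_FLAGS_LOWER_32(flags);
      assert(SVGA3D_FLAGS_64(upper, lower) == flags);
      return 0;
   }
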
struct vmw_region
{
uint32_t handle;
uint32
vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
- SVGA3dSurface1Flags flags,
- SVGA3dSurfaceFormat format,
+ SVGA3dSurfaceAllFlags flags,
+ SVGA3dSurfaceFormat format,
unsigned usage,
- SVGA3dSize size,
- uint32_t numFaces,
- uint32_t numMipLevels,
+ SVGA3dSize size,
+ uint32_t numFaces,
+ uint32_t numMipLevels,
unsigned sampleCount,
uint32_t buffer_handle,
- struct vmw_region **p_region)
+ SVGA3dMSPattern multisamplePattern,
+ struct vmw_region **p_region)
{
- union drm_vmw_gb_surface_create_arg s_arg;
- struct drm_vmw_gb_surface_create_req *req = &s_arg.req;
- struct drm_vmw_gb_surface_create_rep *rep = &s_arg.rep;
+ struct drm_vmw_gb_surface_create_rep *rep;
struct vmw_region *region = NULL;
int ret;
return SVGA3D_INVALID_ID;
}
- memset(&s_arg, 0, sizeof(s_arg));
- req->svga3d_flags = (uint32_t) flags;
- if (usage & SVGA_SURFACE_USAGE_SCANOUT)
- req->drm_surface_flags |= drm_vmw_surface_flag_scanout;
- req->format = (uint32_t) format;
- if (usage & SVGA_SURFACE_USAGE_SHARED)
- req->drm_surface_flags |= drm_vmw_surface_flag_shareable;
- req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
- req->base_size.width = size.width;
- req->base_size.height = size.height;
- req->base_size.depth = size.depth;
- req->mip_levels = numMipLevels;
- req->multisample_count = 0;
- req->autogen_filter = SVGA3D_TEX_FILTER_NONE;
-
- if (vws->base.have_vgpu10) {
- req->array_size = numFaces;
- req->multisample_count = sampleCount;
+ if (vws->ioctl.have_drm_2_15) {
+ union drm_vmw_gb_surface_create_ext_arg s_arg;
+ struct drm_vmw_gb_surface_create_ext_req *req = &s_arg.req;
+ rep = &s_arg.rep;
+
+ memset(&s_arg, 0, sizeof(s_arg));
+
+ req->version = drm_vmw_gb_surface_v1;
+ req->multisample_pattern = multisamplePattern;
+ req->must_be_zero = 0;
+ req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
+ req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
+ req->base.format = (uint32_t) format;
+
+ if (usage & SVGA_SURFACE_USAGE_SCANOUT)
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_scanout;
+
+ if (usage & SVGA_SURFACE_USAGE_SHARED)
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
+
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
+ req->base.base_size.width = size.width;
+ req->base.base_size.height = size.height;
+ req->base.base_size.depth = size.depth;
+ req->base.mip_levels = numMipLevels;
+ req->base.multisample_count = 0;
+ req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
+
+ if (vws->base.have_vgpu10) {
+ req->base.array_size = numFaces;
+ req->base.multisample_count = sampleCount;
+ } else {
+ assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS);
+ req->base.array_size = 0;
+ }
+
+ req->base.buffer_handle = buffer_handle ?
+ buffer_handle : SVGA3D_INVALID_ID;
+
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd,
+ DRM_VMW_GB_SURFACE_CREATE_EXT, &s_arg,
+ sizeof(s_arg));
+
+ if (ret)
+ goto out_fail_create;
} else {
- assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
- DRM_VMW_MAX_MIP_LEVELS);
- req->array_size = 0;
- }
+ union drm_vmw_gb_surface_create_arg s_arg;
+ struct drm_vmw_gb_surface_create_req *req = &s_arg.req;
+ rep = &s_arg.rep;
+
+ memset(&s_arg, 0, sizeof(s_arg));
+ req->svga3d_flags = (uint32_t) flags;
+ req->format = (uint32_t) format;
+
+ if (usage & SVGA_SURFACE_USAGE_SCANOUT)
+ req->drm_surface_flags |= drm_vmw_surface_flag_scanout;
+
+ if (usage & SVGA_SURFACE_USAGE_SHARED)
+ req->drm_surface_flags |= drm_vmw_surface_flag_shareable;
+
+ req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
+ req->base_size.width = size.width;
+ req->base_size.height = size.height;
+ req->base_size.depth = size.depth;
+ req->mip_levels = numMipLevels;
+ req->multisample_count = 0;
+ req->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+
+ if (vws->base.have_vgpu10) {
+ req->array_size = numFaces;
+ req->multisample_count = sampleCount;
+ } else {
+ assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS);
+ req->array_size = 0;
+ }
- if (buffer_handle)
- req->buffer_handle = buffer_handle;
- else
- req->buffer_handle = SVGA3D_INVALID_ID;
+ req->buffer_handle = buffer_handle ?
+ buffer_handle : SVGA3D_INVALID_ID;
- ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
- &s_arg, sizeof(s_arg));
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
+ &s_arg, sizeof(s_arg));
- if (ret)
- goto out_fail_create;
+ if (ret)
+ goto out_fail_create;
+ }
if (p_region) {
region->handle = rep->buffer_handle;
int
vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
const struct winsys_handle *whandle,
- SVGA3dSurface1Flags *flags,
+ SVGA3dSurfaceAllFlags *flags,
SVGA3dSurfaceFormat *format,
uint32_t *numMipLevels,
uint32_t *handle,
struct vmw_region **p_region)
{
- union drm_vmw_gb_surface_reference_arg s_arg;
- struct drm_vmw_surface_arg *req = &s_arg.req;
- struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;
struct vmw_region *region = NULL;
boolean needs_unref = FALSE;
int ret;
- vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);
-
assert(p_region != NULL);
region = CALLOC_STRUCT(vmw_region);
if (!region)
return -ENOMEM;
- memset(&s_arg, 0, sizeof(s_arg));
- ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
- if (ret)
- goto out_fail_req;
+ if (vws->ioctl.have_drm_2_15) {
+ union drm_vmw_gb_surface_reference_ext_arg s_arg;
+ struct drm_vmw_surface_arg *req = &s_arg.req;
+ struct drm_vmw_gb_surface_ref_ext_rep *rep = &s_arg.rep;
- *handle = req->sid;
- ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
- &s_arg, sizeof(s_arg));
+ memset(&s_arg, 0, sizeof(s_arg));
+ ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
+ if (ret)
+ goto out_fail_req;
- if (ret)
- goto out_fail_ref;
+ *handle = req->sid;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF_EXT,
+ &s_arg, sizeof(s_arg));
- region->handle = rep->crep.buffer_handle;
- region->map_handle = rep->crep.buffer_map_handle;
- region->drm_fd = vws->ioctl.drm_fd;
- region->size = rep->crep.backup_size;
- *p_region = region;
+ if (ret)
+ goto out_fail_ref;
+
+ region->handle = rep->crep.buffer_handle;
+ region->map_handle = rep->crep.buffer_map_handle;
+ region->drm_fd = vws->ioctl.drm_fd;
+ region->size = rep->crep.backup_size;
+ *p_region = region;
+
+ *handle = rep->crep.handle;
+ *flags = SVGA3D_FLAGS_64(rep->creq.svga3d_flags_upper_32_bits,
+ rep->creq.base.svga3d_flags);
+ *format = rep->creq.base.format;
+ *numMipLevels = rep->creq.base.mip_levels;
+ } else {
+ union drm_vmw_gb_surface_reference_arg s_arg;
+ struct drm_vmw_surface_arg *req = &s_arg.req;
+ struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;
+
+ memset(&s_arg, 0, sizeof(s_arg));
+ ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
+ if (ret)
+ goto out_fail_req;
+
+ *handle = req->sid;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
+ &s_arg, sizeof(s_arg));
+
+ if (ret)
+ goto out_fail_ref;
+
+ region->handle = rep->crep.buffer_handle;
+ region->map_handle = rep->crep.buffer_map_handle;
+ region->drm_fd = vws->ioctl.drm_fd;
+ region->size = rep->crep.backup_size;
+ *p_region = region;
+
+ *handle = rep->crep.handle;
+ *flags = rep->creq.svga3d_flags;
+ *format = rep->creq.format;
+ *numMipLevels = rep->creq.mip_levels;
+ }
- *handle = rep->crep.handle;
- *flags = rep->creq.svga3d_flags;
- *format = rep->creq.format;
- *numMipLevels = rep->creq.mip_levels;
+ vmw_printf("%s flags %llx format %d\n", __FUNCTION__, (unsigned long long) *flags, *format);
if (needs_unref)
vmw_ioctl_surface_destroy(vws, *handle);
(version->version_major == 2 && version->version_minor > 5);
vws->ioctl.have_drm_2_9 = version->version_major > 2 ||
(version->version_major == 2 && version->version_minor > 8);
+ vws->ioctl.have_drm_2_15 = version->version_major > 2 ||
+ (version->version_major == 2 && version->version_minor > 14);
vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;
vws->base.have_intra_surface_copy = FALSE;
vws->base.have_vgpu10 = FALSE;
- if (vws->base.have_gb_objects) {
- memset(&gp_arg, 0, sizeof(gp_arg));
- gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
- ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
- &gp_arg, sizeof(gp_arg));
- if (ret)
- size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
- else
- size = gp_arg.value;
-
- if (vws->base.have_gb_objects)
- vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
- else
- vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;
-
+ vws->base.have_sm4_1 = FALSE;
+ if (vws->base.have_gb_objects) {
memset(&gp_arg, 0, sizeof(gp_arg));
gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
vws->ioctl.max_surface_memory = -1;
if (vws->ioctl.have_drm_2_9) {
-
memset(&gp_arg, 0, sizeof(gp_arg));
gp_arg.param = DRM_VMW_PARAM_VGPU10;
ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
}
}
}
+
+ if (vws->ioctl.have_drm_2_15) {
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_SM4_1;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+ if (ret == 0 && gp_arg.value != 0) {
+ vws->base.have_sm4_1 = TRUE;
+ }
+ }
+
+ memset(&gp_arg, 0, sizeof(gp_arg));
+ gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
+ ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
+ &gp_arg, sizeof(gp_arg));
+ if (ret)
+ size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
+ else
+ size = gp_arg.value;
+
+ if (vws->base.have_gb_objects)
+ vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
+ else
+ vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;
} else {
vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;
debug_printf("Failed alloc fifo 3D caps buffer.\n");
goto out_no_caparray;
}
-
+
memset(&cap_arg, 0, sizeof(cap_arg));
cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
cap_arg.max_size = size;
+ /*
+ * This call must come after the DRM_VMW_PARAM_MAX_MOB_MEMORY and
+ * DRM_VMW_PARAM_SM4_1 queries, because the kernel driver reports the
+ * supported caps based on those queries.
+ */
ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
&cap_arg, sizeof(cap_arg));
static struct svga_winsys_surface *
vmw_svga_winsys_surface_create(struct svga_winsys_screen *sws,
- SVGA3dSurfaceAllFlags allflags,
+ SVGA3dSurfaceAllFlags flags,
SVGA3dSurfaceFormat format,
unsigned usage,
SVGA3dSize size,
struct vmw_buffer_desc desc;
struct pb_manager *provider;
uint32_t buffer_size;
-
- /* Until the kernel supports 64 bits surface flag, the linux driver
- * only honors the lower 32 bits of the surface flag.
- */
- SVGA3dSurface1Flags flags = (SVGA3dSurface1Flags)allflags;
+ uint32_t num_samples = 1;
+ SVGA3dMSPattern multisample_pattern = SVGA3D_MS_PATTERN_NONE;
memset(&desc, 0, sizeof(desc));
surface = CALLOC_STRUCT(vmw_svga_winsys_surface);
surface->shared = !!(usage & SVGA_SURFACE_USAGE_SHARED);
provider = (surface->shared) ? vws->pools.gmr : vws->pools.mob_fenced;
+ /*
+ * When multisampling is not supported, the sample count received is 0;
+ * otherwise it must be a valid sample count.
+ */
+ if ((flags & SVGA3D_SURFACE_MULTISAMPLE) != 0) {
+ if (sampleCount == 0)
+ goto no_sid;
+ num_samples = sampleCount;
+ multisample_pattern = SVGA3D_MS_PATTERN_STANDARD;
+ }
+
/*
* Used for the backing buffer GB surfaces, and to approximate
* when to flush on non-GB hosts.
*/
- buffer_size = svga3dsurface_get_serialized_size(format, size, numMipLevels,
- numLayers);
+ buffer_size = svga3dsurface_get_serialized_size_extended(format, size,
+ numMipLevels,
+ numLayers,
+ num_samples);
if (flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
buffer_size += sizeof(SVGA3dDXSOState);
size, numLayers,
numMipLevels, sampleCount,
ptr.gmrId,
+ multisample_pattern,
surface->buf ? NULL :
&desc.region);
surface->sid = vmw_ioctl_gb_surface_create(vws, flags, format, usage,
size, numLayers,
numMipLevels, sampleCount,
- 0, &desc.region);
+ 0, multisample_pattern,
+ &desc.region);
if (surface->sid == SVGA3D_INVALID_ID)
goto no_sid;
}
}
}
} else {
- surface->sid = vmw_ioctl_surface_create(vws, flags, format, usage,
- size, numLayers, numMipLevels,
- sampleCount);
+ /* Legacy surfaces only support 32-bit svga3d flags */
+ surface->sid = vmw_ioctl_surface_create(vws, (SVGA3dSurface1Flags)flags,
+ format, usage, size, numLayers,
+ numMipLevels, sampleCount);
if(surface->sid == SVGA3D_INVALID_ID)
goto no_sid;
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
+#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
+#define DRM_VMW_GB_SURFACE_REF_EXT 28
/*************************************************************************/
/**
*
* DRM_VMW_PARAM_OVERLAY_IOCTL:
* Does the driver support the overlay ioctl.
+ *
+ * DRM_VMW_PARAM_SM4_1:
+ * Whether SM4_1 support is enabled.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_VGPU10 12
#define DRM_VMW_PARAM_HW_CAPS2 13
+#define DRM_VMW_PARAM_SM4_1 14
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
enum drm_vmw_extended_context req;
struct drm_vmw_context_arg rep;
};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ *
+ * This new command extends DRM_VMW_GB_SURFACE_CREATE with a version
+ * parameter and 64-bit svga3d flags.
+ */
+
+/**
+ * enum drm_vmw_surface_version
+ *
+ * @drm_vmw_gb_surface_v1: Corresponds to the current gb surface format,
+ * with the svga3d surface flags split into upper and lower 32-bit halves.
+ */
+enum drm_vmw_surface_version {
+ drm_vmw_gb_surface_v1
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_ext_req
+ *
+ * @base: Surface create parameters.
+ * @version: Version of surface create ioctl.
+ * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
+ * @multisample_pattern: Multisampling pattern when MSAA is supported.
+ * @pad64: Padding to align the following 64-bit field.
+ * @must_be_zero: Reserved for future usage.
+ *
+ * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
+ */
+struct drm_vmw_gb_surface_create_ext_req {
+ struct drm_vmw_gb_surface_create_req base;
+ enum drm_vmw_surface_version version;
+ uint32_t svga3d_flags_upper_32_bits;
+ SVGA3dMSPattern multisample_pattern;
+ uint32_t pad64;
+ uint64_t must_be_zero;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_ext_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
+union drm_vmw_gb_surface_create_ext_arg {
+ struct drm_vmw_gb_surface_create_rep rep;
+ struct drm_vmw_gb_surface_create_ext_req req;
+};
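For illustration only (not part of the patch), a hedged sketch of how a userspace client might fill this extended request and issue the ioctl, mirroring the vmw_ioctl_gb_surface_create() path above. The helper name, dimensions, and format argument are placeholders, and the SVGA3D_* constants are assumed to come from the SVGA3D headers already used by the winsys:

   #include <string.h>
   #include <xf86drm.h>
   #include "vmwgfx_drm.h"

   /* Hypothetical helper: create a small guest-backed surface and let the
    * kernel allocate the backing buffer.
    */
   static int
   create_gb_surface_ext(int drm_fd, uint64_t svga3d_flags, uint32_t format,
                         struct drm_vmw_gb_surface_create_rep *out_rep)
   {
      union drm_vmw_gb_surface_create_ext_arg arg;
      struct drm_vmw_gb_surface_create_ext_req *req = &arg.req;
      int ret;

      memset(&arg, 0, sizeof(arg));
      req->version = drm_vmw_gb_surface_v1;
      req->svga3d_flags_upper_32_bits = (uint32_t)(svga3d_flags >> 32);
      req->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
      req->base.svga3d_flags = (uint32_t)svga3d_flags;
      req->base.format = format;
      req->base.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
      req->base.base_size.width = 64;
      req->base.base_size.height = 64;
      req->base.base_size.depth = 1;
      req->base.mip_levels = 1;
      req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
      req->base.buffer_handle = SVGA3D_INVALID_ID; /* kernel allocates */

      ret = drmCommandWriteRead(drm_fd, DRM_VMW_GB_SURFACE_CREATE_EXT,
                                &arg, sizeof(arg));
      if (ret == 0)
         *out_rep = arg.rep;
      return ret;
   }
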
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
+
+/**
+ * struct drm_vmw_gb_surface_ref_ext_rep
+ *
+ * @creq: The data used as input when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_ext_req"
+ * @crep: Additional data output when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
+ */
+struct drm_vmw_gb_surface_ref_ext_rep {
+ struct drm_vmw_gb_surface_create_ext_req creq;
+ struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_ext_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at
+ * "struct drm_vmw_gb_surface_ref_ext_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
+ */
+union drm_vmw_gb_surface_reference_ext_arg {
+ struct drm_vmw_gb_surface_ref_ext_rep rep;
+ struct drm_vmw_surface_arg req;
+};
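For illustration only (not part of the patch), a hedged sketch of the matching lookup side, recombining the split flag halves the same way the SVGA3D_FLAGS_64 helper at the top of this patch does. The helper name is hypothetical; the handle-type/prime handling performed by vmw_ioctl_surface_req() above is omitted, and the zero-initialized handle_type is assumed to denote a legacy handle:

   #include <string.h>
   #include <xf86drm.h>
   #include "vmwgfx_drm.h"

   /* Hypothetical helper: reference an existing surface by id and recover
    * its 64-bit svga3d flags, format and mip count.
    */
   static int
   ref_gb_surface_ext(int drm_fd, uint32_t sid, uint64_t *out_flags,
                      uint32_t *out_format, uint32_t *out_mip_levels)
   {
      union drm_vmw_gb_surface_reference_ext_arg arg;
      int ret;

      memset(&arg, 0, sizeof(arg));
      arg.req.sid = sid; /* handle_type left zeroed (assumed legacy handle) */

      ret = drmCommandWriteRead(drm_fd, DRM_VMW_GB_SURFACE_REF_EXT,
                                &arg, sizeof(arg));
      if (ret)
         return ret;

      /* The 64-bit flags come back split across two 32-bit fields;
       * this recombination matches SVGA3D_FLAGS_64().
       */
      *out_flags = ((uint64_t)arg.rep.creq.svga3d_flags_upper_32_bits << 32) |
                   arg.rep.creq.base.svga3d_flags;
      *out_format = arg.rep.creq.base.format;
      *out_mip_levels = arg.rep.creq.base.mip_levels;
      return 0;
   }
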
+
#endif