-/**************************************************************************
- *
+/*
* Copyright 2003 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
+ * distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
+ */
-#include "main/glheader.h"
+#include "main/arrayobj.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/glformats.h"
+#include "nir.h"
#include "brw_draw.h"
#include "brw_defines.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
-static const GLuint double_types[5] = {
+static const GLuint double_types_float[5] = {
0,
- BRW_SURFACEFORMAT_R64_FLOAT,
- BRW_SURFACEFORMAT_R64G64_FLOAT,
- BRW_SURFACEFORMAT_R64G64B64_FLOAT,
- BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
+ ISL_FORMAT_R64_FLOAT,
+ ISL_FORMAT_R64G64_FLOAT,
+ ISL_FORMAT_R64G64B64_FLOAT,
+ ISL_FORMAT_R64G64B64A64_FLOAT
+};
+
+static const GLuint double_types_passthru[5] = {
+ 0,
+ ISL_FORMAT_R64_PASSTHRU,
+ ISL_FORMAT_R64G64_PASSTHRU,
+ ISL_FORMAT_R64G64B64_PASSTHRU,
+ ISL_FORMAT_R64G64B64A64_PASSTHRU
};
static const GLuint float_types[5] = {
0,
- BRW_SURFACEFORMAT_R32_FLOAT,
- BRW_SURFACEFORMAT_R32G32_FLOAT,
- BRW_SURFACEFORMAT_R32G32B32_FLOAT,
- BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
+ ISL_FORMAT_R32_FLOAT,
+ ISL_FORMAT_R32G32_FLOAT,
+ ISL_FORMAT_R32G32B32_FLOAT,
+ ISL_FORMAT_R32G32B32A32_FLOAT
};
static const GLuint half_float_types[5] = {
0,
- BRW_SURFACEFORMAT_R16_FLOAT,
- BRW_SURFACEFORMAT_R16G16_FLOAT,
- BRW_SURFACEFORMAT_R16G16B16A16_FLOAT,
- BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
+ ISL_FORMAT_R16_FLOAT,
+ ISL_FORMAT_R16G16_FLOAT,
+ ISL_FORMAT_R16G16B16_FLOAT,
+ ISL_FORMAT_R16G16B16A16_FLOAT
};
static const GLuint fixed_point_types[5] = {
0,
- BRW_SURFACEFORMAT_R32_SFIXED,
- BRW_SURFACEFORMAT_R32G32_SFIXED,
- BRW_SURFACEFORMAT_R32G32B32_SFIXED,
- BRW_SURFACEFORMAT_R32G32B32A32_SFIXED,
+ ISL_FORMAT_R32_SFIXED,
+ ISL_FORMAT_R32G32_SFIXED,
+ ISL_FORMAT_R32G32B32_SFIXED,
+ ISL_FORMAT_R32G32B32A32_SFIXED,
};
static const GLuint uint_types_direct[5] = {
0,
- BRW_SURFACEFORMAT_R32_UINT,
- BRW_SURFACEFORMAT_R32G32_UINT,
- BRW_SURFACEFORMAT_R32G32B32_UINT,
- BRW_SURFACEFORMAT_R32G32B32A32_UINT
+ ISL_FORMAT_R32_UINT,
+ ISL_FORMAT_R32G32_UINT,
+ ISL_FORMAT_R32G32B32_UINT,
+ ISL_FORMAT_R32G32B32A32_UINT
};
static const GLuint uint_types_norm[5] = {
0,
- BRW_SURFACEFORMAT_R32_UNORM,
- BRW_SURFACEFORMAT_R32G32_UNORM,
- BRW_SURFACEFORMAT_R32G32B32_UNORM,
- BRW_SURFACEFORMAT_R32G32B32A32_UNORM
+ ISL_FORMAT_R32_UNORM,
+ ISL_FORMAT_R32G32_UNORM,
+ ISL_FORMAT_R32G32B32_UNORM,
+ ISL_FORMAT_R32G32B32A32_UNORM
};
static const GLuint uint_types_scale[5] = {
0,
- BRW_SURFACEFORMAT_R32_USCALED,
- BRW_SURFACEFORMAT_R32G32_USCALED,
- BRW_SURFACEFORMAT_R32G32B32_USCALED,
- BRW_SURFACEFORMAT_R32G32B32A32_USCALED
+ ISL_FORMAT_R32_USCALED,
+ ISL_FORMAT_R32G32_USCALED,
+ ISL_FORMAT_R32G32B32_USCALED,
+ ISL_FORMAT_R32G32B32A32_USCALED
};
static const GLuint int_types_direct[5] = {
0,
- BRW_SURFACEFORMAT_R32_SINT,
- BRW_SURFACEFORMAT_R32G32_SINT,
- BRW_SURFACEFORMAT_R32G32B32_SINT,
- BRW_SURFACEFORMAT_R32G32B32A32_SINT
+ ISL_FORMAT_R32_SINT,
+ ISL_FORMAT_R32G32_SINT,
+ ISL_FORMAT_R32G32B32_SINT,
+ ISL_FORMAT_R32G32B32A32_SINT
};
static const GLuint int_types_norm[5] = {
0,
- BRW_SURFACEFORMAT_R32_SNORM,
- BRW_SURFACEFORMAT_R32G32_SNORM,
- BRW_SURFACEFORMAT_R32G32B32_SNORM,
- BRW_SURFACEFORMAT_R32G32B32A32_SNORM
+ ISL_FORMAT_R32_SNORM,
+ ISL_FORMAT_R32G32_SNORM,
+ ISL_FORMAT_R32G32B32_SNORM,
+ ISL_FORMAT_R32G32B32A32_SNORM
};
static const GLuint int_types_scale[5] = {
0,
- BRW_SURFACEFORMAT_R32_SSCALED,
- BRW_SURFACEFORMAT_R32G32_SSCALED,
- BRW_SURFACEFORMAT_R32G32B32_SSCALED,
- BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
+ ISL_FORMAT_R32_SSCALED,
+ ISL_FORMAT_R32G32_SSCALED,
+ ISL_FORMAT_R32G32B32_SSCALED,
+ ISL_FORMAT_R32G32B32A32_SSCALED
};
static const GLuint ushort_types_direct[5] = {
0,
- BRW_SURFACEFORMAT_R16_UINT,
- BRW_SURFACEFORMAT_R16G16_UINT,
- BRW_SURFACEFORMAT_R16G16B16A16_UINT,
- BRW_SURFACEFORMAT_R16G16B16A16_UINT
+ ISL_FORMAT_R16_UINT,
+ ISL_FORMAT_R16G16_UINT,
+ ISL_FORMAT_R16G16B16_UINT,
+ ISL_FORMAT_R16G16B16A16_UINT
};
static const GLuint ushort_types_norm[5] = {
0,
- BRW_SURFACEFORMAT_R16_UNORM,
- BRW_SURFACEFORMAT_R16G16_UNORM,
- BRW_SURFACEFORMAT_R16G16B16_UNORM,
- BRW_SURFACEFORMAT_R16G16B16A16_UNORM
+ ISL_FORMAT_R16_UNORM,
+ ISL_FORMAT_R16G16_UNORM,
+ ISL_FORMAT_R16G16B16_UNORM,
+ ISL_FORMAT_R16G16B16A16_UNORM
};
static const GLuint ushort_types_scale[5] = {
0,
- BRW_SURFACEFORMAT_R16_USCALED,
- BRW_SURFACEFORMAT_R16G16_USCALED,
- BRW_SURFACEFORMAT_R16G16B16_USCALED,
- BRW_SURFACEFORMAT_R16G16B16A16_USCALED
+ ISL_FORMAT_R16_USCALED,
+ ISL_FORMAT_R16G16_USCALED,
+ ISL_FORMAT_R16G16B16_USCALED,
+ ISL_FORMAT_R16G16B16A16_USCALED
};
static const GLuint short_types_direct[5] = {
0,
- BRW_SURFACEFORMAT_R16_SINT,
- BRW_SURFACEFORMAT_R16G16_SINT,
- BRW_SURFACEFORMAT_R16G16B16A16_SINT,
- BRW_SURFACEFORMAT_R16G16B16A16_SINT
+ ISL_FORMAT_R16_SINT,
+ ISL_FORMAT_R16G16_SINT,
+ ISL_FORMAT_R16G16B16_SINT,
+ ISL_FORMAT_R16G16B16A16_SINT
};
static const GLuint short_types_norm[5] = {
0,
- BRW_SURFACEFORMAT_R16_SNORM,
- BRW_SURFACEFORMAT_R16G16_SNORM,
- BRW_SURFACEFORMAT_R16G16B16_SNORM,
- BRW_SURFACEFORMAT_R16G16B16A16_SNORM
+ ISL_FORMAT_R16_SNORM,
+ ISL_FORMAT_R16G16_SNORM,
+ ISL_FORMAT_R16G16B16_SNORM,
+ ISL_FORMAT_R16G16B16A16_SNORM
};
static const GLuint short_types_scale[5] = {
0,
- BRW_SURFACEFORMAT_R16_SSCALED,
- BRW_SURFACEFORMAT_R16G16_SSCALED,
- BRW_SURFACEFORMAT_R16G16B16_SSCALED,
- BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
+ ISL_FORMAT_R16_SSCALED,
+ ISL_FORMAT_R16G16_SSCALED,
+ ISL_FORMAT_R16G16B16_SSCALED,
+ ISL_FORMAT_R16G16B16A16_SSCALED
};
static const GLuint ubyte_types_direct[5] = {
0,
- BRW_SURFACEFORMAT_R8_UINT,
- BRW_SURFACEFORMAT_R8G8_UINT,
- BRW_SURFACEFORMAT_R8G8B8A8_UINT,
- BRW_SURFACEFORMAT_R8G8B8A8_UINT
+ ISL_FORMAT_R8_UINT,
+ ISL_FORMAT_R8G8_UINT,
+ ISL_FORMAT_R8G8B8_UINT,
+ ISL_FORMAT_R8G8B8A8_UINT
};
static const GLuint ubyte_types_norm[5] = {
0,
- BRW_SURFACEFORMAT_R8_UNORM,
- BRW_SURFACEFORMAT_R8G8_UNORM,
- BRW_SURFACEFORMAT_R8G8B8_UNORM,
- BRW_SURFACEFORMAT_R8G8B8A8_UNORM
+ ISL_FORMAT_R8_UNORM,
+ ISL_FORMAT_R8G8_UNORM,
+ ISL_FORMAT_R8G8B8_UNORM,
+ ISL_FORMAT_R8G8B8A8_UNORM
};
static const GLuint ubyte_types_scale[5] = {
0,
- BRW_SURFACEFORMAT_R8_USCALED,
- BRW_SURFACEFORMAT_R8G8_USCALED,
- BRW_SURFACEFORMAT_R8G8B8_USCALED,
- BRW_SURFACEFORMAT_R8G8B8A8_USCALED
+ ISL_FORMAT_R8_USCALED,
+ ISL_FORMAT_R8G8_USCALED,
+ ISL_FORMAT_R8G8B8_USCALED,
+ ISL_FORMAT_R8G8B8A8_USCALED
};
static const GLuint byte_types_direct[5] = {
0,
- BRW_SURFACEFORMAT_R8_SINT,
- BRW_SURFACEFORMAT_R8G8_SINT,
- BRW_SURFACEFORMAT_R8G8B8A8_SINT,
- BRW_SURFACEFORMAT_R8G8B8A8_SINT
+ ISL_FORMAT_R8_SINT,
+ ISL_FORMAT_R8G8_SINT,
+ ISL_FORMAT_R8G8B8_SINT,
+ ISL_FORMAT_R8G8B8A8_SINT
};
static const GLuint byte_types_norm[5] = {
0,
- BRW_SURFACEFORMAT_R8_SNORM,
- BRW_SURFACEFORMAT_R8G8_SNORM,
- BRW_SURFACEFORMAT_R8G8B8_SNORM,
- BRW_SURFACEFORMAT_R8G8B8A8_SNORM
+ ISL_FORMAT_R8_SNORM,
+ ISL_FORMAT_R8G8_SNORM,
+ ISL_FORMAT_R8G8B8_SNORM,
+ ISL_FORMAT_R8G8B8A8_SNORM
};
static const GLuint byte_types_scale[5] = {
0,
- BRW_SURFACEFORMAT_R8_SSCALED,
- BRW_SURFACEFORMAT_R8G8_SSCALED,
- BRW_SURFACEFORMAT_R8G8B8_SSCALED,
- BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
+ ISL_FORMAT_R8_SSCALED,
+ ISL_FORMAT_R8G8_SSCALED,
+ ISL_FORMAT_R8G8B8_SSCALED,
+ ISL_FORMAT_R8G8B8A8_SSCALED
};
+static GLuint
+double_types(int size, GLboolean doubles)
+{
+ /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
+ * "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
+ * 64-bit components are stored in the URB without any conversion."
+ * Also included in the BDW PRM, Volume 7, page 470, table "Source Element
+ * Formats Supported in VF Unit".
+ *
+ * Previous PRMs don't include those references, so for gen7 we can't use
+ * the PASSTHRU formats directly. Even so, we prefer to return passthru
+ * here, because that reflects what we want to achieve, even though
+ * gen < 8 needs a workaround.
+ */
+ return (doubles
+ ? double_types_passthru[size]
+ : double_types_float[size]);
+}
/**
* Given vertex array type/size/format/normalized info, return
* the appropriate hardware surface type.
*/
unsigned
brw_get_vertex_surface_type(struct brw_context *brw,
- const struct gl_client_array *glarray)
+ const struct gl_vertex_format *glformat)
{
- int size = glarray->Size;
+ int size = glformat->Size;
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
+ const bool is_ivybridge_or_older =
+ devinfo->gen <= 7 && !devinfo->is_baytrail && !devinfo->is_haswell;
if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
fprintf(stderr, "type %s size %d normalized %d\n",
- _mesa_enum_to_string(glarray->Type),
- glarray->Size, glarray->Normalized);
+ _mesa_enum_to_string(glformat->Type),
+ glformat->Size, glformat->Normalized);
- if (glarray->Integer) {
- assert(glarray->Format == GL_RGBA); /* sanity check */
- switch (glarray->Type) {
+ if (glformat->Integer) {
+ assert(glformat->Format == GL_RGBA); /* sanity check */
+ switch (glformat->Type) {
case GL_INT: return int_types_direct[size];
- case GL_SHORT: return short_types_direct[size];
- case GL_BYTE: return byte_types_direct[size];
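+ /* Ivybridge and older apparently lack the 3-component 8- and 16-bit
+ * integer vertex formats, so size-3 attributes fall back to the
+ * 4-component variant below.
+ */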
+ case GL_SHORT:
+ if (is_ivybridge_or_older && size == 3)
+ return short_types_direct[4];
+ else
+ return short_types_direct[size];
+ case GL_BYTE:
+ if (is_ivybridge_or_older && size == 3)
+ return byte_types_direct[4];
+ else
+ return byte_types_direct[size];
case GL_UNSIGNED_INT: return uint_types_direct[size];
- case GL_UNSIGNED_SHORT: return ushort_types_direct[size];
- case GL_UNSIGNED_BYTE: return ubyte_types_direct[size];
+ case GL_UNSIGNED_SHORT:
+ if (is_ivybridge_or_older && size == 3)
+ return ushort_types_direct[4];
+ else
+ return ushort_types_direct[size];
+ case GL_UNSIGNED_BYTE:
+ if (is_ivybridge_or_older && size == 3)
+ return ubyte_types_direct[4];
+ else
+ return ubyte_types_direct[size];
default: unreachable("not reached");
}
- } else if (glarray->Type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
- return BRW_SURFACEFORMAT_R11G11B10_FLOAT;
- } else if (glarray->Normalized) {
- switch (glarray->Type) {
- case GL_DOUBLE: return double_types[size];
+ } else if (glformat->Type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
+ return ISL_FORMAT_R11G11B10_FLOAT;
+ } else if (glformat->Normalized) {
+ switch (glformat->Type) {
+ case GL_DOUBLE: return double_types(size, glformat->Doubles);
case GL_FLOAT: return float_types[size];
- case GL_HALF_FLOAT: return half_float_types[size];
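+ /* Gen < 6 appears to lack a 3-component half-float vertex format, so
+ * size-3 attributes use the 4-component variant instead.
+ */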
+ case GL_HALF_FLOAT:
+ case GL_HALF_FLOAT_OES:
+ if (devinfo->gen < 6 && size == 3)
+ return half_float_types[4];
+ else
+ return half_float_types[size];
case GL_INT: return int_types_norm[size];
case GL_SHORT: return short_types_norm[size];
case GL_BYTE: return byte_types_norm[size];
case GL_UNSIGNED_INT: return uint_types_norm[size];
case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
case GL_UNSIGNED_BYTE:
- if (glarray->Format == GL_BGRA) {
+ if (glformat->Format == GL_BGRA) {
/* See GL_EXT_vertex_array_bgra */
assert(size == 4);
- return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
+ return ISL_FORMAT_B8G8R8A8_UNORM;
}
else {
return ubyte_types_norm[size];
}
case GL_FIXED:
- if (brw->gen >= 8 || brw->is_haswell)
+ if (devinfo->gen >= 8 || devinfo->is_haswell)
return fixed_point_types[size];
/* This produces GL_FIXED inputs as values between INT32_MIN and
* INT32_MAX, which will be scaled down by 1/65536 by the VS.
*/
case GL_INT_2_10_10_10_REV:
assert(size == 4);
- if (brw->gen >= 8 || brw->is_haswell) {
- return glarray->Format == GL_BGRA
- ? BRW_SURFACEFORMAT_B10G10R10A2_SNORM
- : BRW_SURFACEFORMAT_R10G10B10A2_SNORM;
+ if (devinfo->gen >= 8 || devinfo->is_haswell) {
+ return glformat->Format == GL_BGRA
+ ? ISL_FORMAT_B10G10R10A2_SNORM
+ : ISL_FORMAT_R10G10B10A2_SNORM;
}
- return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
+ return ISL_FORMAT_R10G10B10A2_UINT;
case GL_UNSIGNED_INT_2_10_10_10_REV:
assert(size == 4);
- if (brw->gen >= 8 || brw->is_haswell) {
- return glarray->Format == GL_BGRA
- ? BRW_SURFACEFORMAT_B10G10R10A2_UNORM
- : BRW_SURFACEFORMAT_R10G10B10A2_UNORM;
+ if (devinfo->gen >= 8 || devinfo->is_haswell) {
+ return glformat->Format == GL_BGRA
+ ? ISL_FORMAT_B10G10R10A2_UNORM
+ : ISL_FORMAT_R10G10B10A2_UNORM;
}
- return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
+ return ISL_FORMAT_R10G10B10A2_UINT;
default: unreachable("not reached");
}
}
/* See GL_ARB_vertex_type_2_10_10_10_rev.
* W/A: Pre-Haswell, the hardware doesn't really support the formats we'd
* like to use here, so upload everything as UINT and fix
* it in the shader
*/
- if (glarray->Type == GL_INT_2_10_10_10_REV) {
+ if (glformat->Type == GL_INT_2_10_10_10_REV) {
assert(size == 4);
- if (brw->gen >= 8 || brw->is_haswell) {
- return glarray->Format == GL_BGRA
- ? BRW_SURFACEFORMAT_B10G10R10A2_SSCALED
- : BRW_SURFACEFORMAT_R10G10B10A2_SSCALED;
+ if (devinfo->gen >= 8 || devinfo->is_haswell) {
+ return glformat->Format == GL_BGRA
+ ? ISL_FORMAT_B10G10R10A2_SSCALED
+ : ISL_FORMAT_R10G10B10A2_SSCALED;
}
- return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
- } else if (glarray->Type == GL_UNSIGNED_INT_2_10_10_10_REV) {
+ return ISL_FORMAT_R10G10B10A2_UINT;
+ } else if (glformat->Type == GL_UNSIGNED_INT_2_10_10_10_REV) {
assert(size == 4);
- if (brw->gen >= 8 || brw->is_haswell) {
- return glarray->Format == GL_BGRA
- ? BRW_SURFACEFORMAT_B10G10R10A2_USCALED
- : BRW_SURFACEFORMAT_R10G10B10A2_USCALED;
+ if (devinfo->gen >= 8 || devinfo->is_haswell) {
+ return glformat->Format == GL_BGRA
+ ? ISL_FORMAT_B10G10R10A2_USCALED
+ : ISL_FORMAT_R10G10B10A2_USCALED;
}
- return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
+ return ISL_FORMAT_R10G10B10A2_UINT;
}
- assert(glarray->Format == GL_RGBA); /* sanity check */
- switch (glarray->Type) {
- case GL_DOUBLE: return double_types[size];
+ assert(glformat->Format == GL_RGBA); /* sanity check */
+ switch (glformat->Type) {
+ case GL_DOUBLE: return double_types(size, glformat->Doubles);
case GL_FLOAT: return float_types[size];
- case GL_HALF_FLOAT: return half_float_types[size];
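+ /* As above: no 3-component half-float vertex format before gen6. */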
+ case GL_HALF_FLOAT:
+ case GL_HALF_FLOAT_OES:
+ if (devinfo->gen < 6 && size == 3)
+ return half_float_types[4];
+ else
+ return half_float_types[size];
case GL_INT: return int_types_scale[size];
case GL_SHORT: return short_types_scale[size];
case GL_BYTE: return byte_types_scale[size];
case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
case GL_FIXED:
- if (brw->gen >= 8 || brw->is_haswell)
+ if (devinfo->gen >= 8 || devinfo->is_haswell)
return fixed_point_types[size];
/* This produces GL_FIXED inputs as values between INT32_MIN and
* INT32_MAX, which will be scaled down by 1/65536 by the VS.
*/
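+/**
+ * Copy the [min, max] vertex range of a user-space array into a freshly
+ * allocated upload buffer, repacking from src_stride to dst_stride.
+ */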
static void
copy_array_to_vbo_array(struct brw_context *brw,
- struct brw_vertex_element *element,
+ const uint8_t *const ptr, const int src_stride,
int min, int max,
struct brw_vertex_buffer *buffer,
GLuint dst_stride)
{
- const int src_stride = element->glarray->StrideB;
-
- /* If the source stride is zero, we just want to upload the current
- * attribute once and set the buffer's stride to 0. There's no need
- * to replicate it out.
- */
- if (src_stride == 0) {
- intel_upload_data(brw, element->glarray->Ptr,
- element->glarray->_ElementSize,
- element->glarray->_ElementSize,
- &buffer->bo, &buffer->offset);
-
- buffer->stride = 0;
- return;
- }
-
- const unsigned char *src = element->glarray->Ptr + min * src_stride;
+ const unsigned char *src = ptr + min * src_stride;
int count = max - min + 1;
GLuint size = count * dst_stride;
- uint8_t *dst = intel_upload_space(brw, size, dst_stride,
- &buffer->bo, &buffer->offset);
+ uint8_t *dst = brw_upload_space(&brw->upload, size, dst_stride,
+ &buffer->bo, &buffer->offset);
- if (dst_stride == src_stride) {
- memcpy(dst, src, size);
- } else {
- while (count--) {
- memcpy(dst, src, dst_stride);
- src += src_stride;
- dst += dst_stride;
+ /* The GL 4.5 spec says:
+ * "If any enabled array’s buffer binding is zero when DrawArrays or
+ * one of the other drawing commands defined in section 10.4 is called,
+ * the result is undefined."
+ *
+ * In this case, leave dst filled with undefined values.
+ */
+ if (ptr != NULL) {
+ if (dst_stride == src_stride) {
+ memcpy(dst, src, size);
+ } else {
+ while (count--) {
+ memcpy(dst, src, dst_stride);
+ src += src_stride;
+ dst += dst_stride;
+ }
}
}
buffer->stride = dst_stride;
+ buffer->size = size;
}
void
brw_prepare_vertices(struct brw_context *brw)
{
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
struct gl_context *ctx = &brw->ctx;
+ /* BRW_NEW_VERTEX_PROGRAM */
+ const struct gl_program *vp = brw->programs[MESA_SHADER_VERTEX];
/* BRW_NEW_VS_PROG_DATA */
- GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
- const unsigned char *ptr = NULL;
- GLuint interleaved = 0;
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
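+ /* inputs_read counts VS input slots, and 64-bit (dual-slot) attributes
+ * occupy two of them; fold the mask back to one bit per vertex attribute.
+ */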
+ const uint64_t vs_inputs64 =
+ nir_get_single_slot_attribs_mask(vs_prog_data->inputs_read,
+ vp->DualSlotInputs);
+ assert((vs_inputs64 & ~(uint64_t)VERT_BIT_ALL) == 0);
+ unsigned vs_inputs = (unsigned)vs_inputs64;
unsigned int min_index = brw->vb.min_index + brw->basevertex;
unsigned int max_index = brw->vb.max_index + brw->basevertex;
- int delta, i, j;
-
- struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
- GLuint nr_uploads = 0;
+ int delta, j;
/* _NEW_POLYGON
*
* On gen6+, edge flags don't end up in the VUE (either in or out of the
* VS). Instead, they're uploaded as the last vertex element, and the data
* is passed sideband through the fixed function units. So, we need to
* prepare the vertex buffer for it, but it's not present in inputs_read.
*/
- if (brw->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
+ if (devinfo->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
ctx->Polygon.BackMode != GL_FILL)) {
vs_inputs |= VERT_BIT_EDGEFLAG;
}
/* Accumulate the list of enabled arrays. */
brw->vb.nr_enabled = 0;
- while (vs_inputs) {
- GLuint i = ffsll(vs_inputs) - 1;
- struct brw_vertex_element *input = &brw->vb.inputs[i];
- vs_inputs &= ~BITFIELD64_BIT(i);
+ unsigned mask = vs_inputs;
+ while (mask) {
+ const gl_vert_attrib attr = u_bit_scan(&mask);
+ struct brw_vertex_element *input = &brw->vb.inputs[attr];
brw->vb.enabled[brw->vb.nr_enabled++] = input;
}
+ assert(brw->vb.nr_enabled <= VERT_ATTRIB_MAX);
if (brw->vb.nr_enabled == 0)
return;
if (brw->vb.nr_buffers)
return;
- for (i = j = 0; i < brw->vb.nr_enabled; i++) {
- struct brw_vertex_element *input = brw->vb.enabled[i];
- const struct gl_client_array *glarray = input->glarray;
-
- if (_mesa_is_bufferobj(glarray->BufferObj)) {
- struct intel_buffer_object *intel_buffer =
- intel_buffer_object(glarray->BufferObj);
- int k;
-
- /* If we have a VB set to be uploaded for this buffer object
- * already, reuse that VB state so that we emit fewer
- * relocations.
- */
- for (k = 0; k < i; k++) {
- const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
- if (glarray->BufferObj == other->BufferObj &&
- glarray->StrideB == other->StrideB &&
- glarray->InstanceDivisor == other->InstanceDivisor &&
- (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
- {
- input->buffer = brw->vb.enabled[k]->buffer;
- input->offset = glarray->Ptr - other->Ptr;
- break;
- }
- }
- if (k == i) {
- struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
-
- /* Named buffer object: Just reference its contents directly. */
- buffer->offset = (uintptr_t)glarray->Ptr;
- buffer->stride = glarray->StrideB;
- buffer->step_rate = glarray->InstanceDivisor;
-
- uint32_t offset, size;
- if (glarray->InstanceDivisor) {
- offset = buffer->offset;
- size = (buffer->stride * ((brw->num_instances /
- glarray->InstanceDivisor) - 1) +
- glarray->_ElementSize);
- } else {
- if (min_index == -1) {
- offset = 0;
- size = intel_buffer->Base.Size;
- } else {
- offset = buffer->offset + min_index * buffer->stride;
- size = (buffer->stride * (max_index - min_index) +
- glarray->_ElementSize);
- }
- }
- buffer->bo = intel_bufferobj_buffer(brw, intel_buffer,
- offset, size);
- drm_intel_bo_reference(buffer->bo);
-
- input->buffer = j++;
- input->offset = 0;
- }
-
- /* This is a common place to reach if the user mistakenly supplies
- * a pointer in place of a VBO offset. If we just let it go through,
- * we may end up dereferencing a pointer beyond the bounds of the
- * GTT.
- *
- * The VBO spec allows application termination in this case, and it's
- * probably a service to the poor programmer to do so rather than
- * trying to just not render.
- */
- assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
+ j = 0;
+ const struct gl_vertex_array_object *vao = ctx->Array._DrawVAO;
+
+ unsigned vbomask = vs_inputs & _mesa_draw_vbo_array_bits(ctx);
+ while (vbomask) {
+ const struct gl_vertex_buffer_binding *const glbinding =
+ _mesa_draw_buffer_binding(vao, ffs(vbomask) - 1);
+ const GLsizei stride = glbinding->Stride;
+
+ assert(glbinding->BufferObj);
+
+ /* Accumulate the range of a single vertex, starting with an inverted range */
+ uint32_t vertex_range_start = ~(uint32_t)0;
+ uint32_t vertex_range_end = 0;
+
+ const unsigned boundmask = _mesa_draw_bound_attrib_bits(glbinding);
+ unsigned attrmask = vbomask & boundmask;
+ /* Mark those attributes as processed */
+ vbomask ^= attrmask;
+ /* We can assume that we have an array for the binding */
+ assert(attrmask);
+ /* Walk attributes belonging to the binding */
+ while (attrmask) {
+ const gl_vert_attrib attr = u_bit_scan(&attrmask);
+ const struct gl_array_attributes *const glattrib =
+ _mesa_draw_array_attrib(vao, attr);
+ const uint32_t rel_offset =
+ _mesa_draw_attributes_relative_offset(glattrib);
+ const uint32_t rel_end = rel_offset + glattrib->Format._ElementSize;
+
+ vertex_range_start = MIN2(vertex_range_start, rel_offset);
+ vertex_range_end = MAX2(vertex_range_end, rel_end);
+
+ struct brw_vertex_element *input = &brw->vb.inputs[attr];
+ input->glformat = &glattrib->Format;
+ input->buffer = j;
+ input->is_dual_slot = (vp->DualSlotInputs & BITFIELD64_BIT(attr)) != 0;
+ input->offset = rel_offset;
+ }
+ assert(vertex_range_start <= vertex_range_end);
+
+ struct intel_buffer_object *intel_buffer =
+ intel_buffer_object(glbinding->BufferObj);
+ struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
+
+ const uint32_t offset = _mesa_draw_binding_offset(glbinding);
+
+ /* If nothing else is known, take the buffer size and offset as a bound */
+ uint32_t start = vertex_range_start;
+ uint32_t range = intel_buffer->Base.Size - offset - vertex_range_start;
+ /* Check if we can get a narrower range */
+ if (glbinding->InstanceDivisor) {
+ if (brw->num_instances) {
+ const uint32_t vertex_size = vertex_range_end - vertex_range_start;
+ start = vertex_range_start + stride * brw->baseinstance;
+ range = (stride * ((brw->num_instances - 1) /
+ glbinding->InstanceDivisor) +
+ vertex_size);
+ }
} else {
- /* Queue the buffer object up to be uploaded in the next pass,
- * when we've decided if we're doing interleaved or not.
- */
- if (nr_uploads == 0) {
- interleaved = glarray->StrideB;
- ptr = glarray->Ptr;
- }
- else if (interleaved != glarray->StrideB ||
- glarray->Ptr < ptr ||
- (uintptr_t)(glarray->Ptr - ptr) + glarray->_ElementSize > interleaved)
- {
- /* If our stride is different from the first attribute's stride,
- * or if the first attribute's stride didn't cover our element,
- * disable the interleaved upload optimization. The second case
- * can most commonly occur in cases where there is a single vertex
- * and, for example, the data is stored on the application's
- * stack.
- *
- * NOTE: This will also disable the optimization in cases where
- * the data is in a different order than the array indices.
- * Something like:
- *
- * float data[...];
- * glVertexAttribPointer(0, 4, GL_FLOAT, 32, &data[4]);
- * glVertexAttribPointer(1, 4, GL_FLOAT, 32, &data[0]);
- */
- interleaved = 0;
- }
-
- upload[nr_uploads++] = input;
+ if (brw->vb.index_bounds_valid) {
+ const uint32_t vertex_size = vertex_range_end - vertex_range_start;
+ start = vertex_range_start + stride * min_index;
+ range = (stride * (max_index - min_index) +
+ vertex_size);
+ }
}
+
+ buffer->offset = offset;
+ buffer->size = start + range;
+ buffer->stride = stride;
+ buffer->step_rate = glbinding->InstanceDivisor;
+
+ buffer->bo = intel_bufferobj_buffer(brw, intel_buffer, offset + start,
+ range, false);
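+ /* intel_bufferobj_buffer() does not return a reference, so take one for
+ * as long as we hold this vertex buffer entry.
+ */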
+ brw_bo_reference(buffer->bo);
+
+ j++;
}
/* If we need to upload all the arrays, then we can trim those arrays to
* only the used elements [min_index, max_index] so long as we adjust all
* the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
*/
brw->vb.start_vertex_bias = 0;
delta = min_index;
- if (nr_uploads == brw->vb.nr_enabled) {
+ if ((vs_inputs & _mesa_draw_vbo_array_bits(ctx)) == 0) {
brw->vb.start_vertex_bias = -delta;
delta = 0;
}
- /* Handle any arrays to be uploaded. */
- if (nr_uploads > 1) {
- if (interleaved) {
- struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
- /* All uploads are interleaved, so upload the arrays together as
- * interleaved. First, upload the contents and set up upload[0].
- */
- copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
- buffer, interleaved);
- buffer->offset -= delta * interleaved;
-
- for (i = 0; i < nr_uploads; i++) {
- /* Then, just point upload[i] at upload[0]'s buffer. */
- upload[i]->offset =
- ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
- upload[i]->buffer = j;
- }
- j++;
-
- nr_uploads = 0;
+ unsigned usermask = vs_inputs & _mesa_draw_user_array_bits(ctx);
+ while (usermask) {
+ const struct gl_vertex_buffer_binding *const glbinding =
+ _mesa_draw_buffer_binding(vao, ffs(usermask) - 1);
+ const GLsizei stride = glbinding->Stride;
+
+ assert(!glbinding->BufferObj);
+ assert(brw->vb.index_bounds_valid);
+
+ /* Accumulate the range of a single vertex, starting with an inverted range */
+ uint32_t vertex_range_start = ~(uint32_t)0;
+ uint32_t vertex_range_end = 0;
+
+ const unsigned boundmask = _mesa_draw_bound_attrib_bits(glbinding);
+ unsigned attrmask = usermask & boundmask;
+ /* Mark those attributes as processed */
+ usermask ^= attrmask;
+ /* We can assume that we have an array for the binding */
+ assert(attrmask);
+ /* Walk attributes belonging to the binding */
+ while (attrmask) {
+ const gl_vert_attrib attr = u_bit_scan(&attrmask);
+ const struct gl_array_attributes *const glattrib =
+ _mesa_draw_array_attrib(vao, attr);
+ const uint32_t rel_offset =
+ _mesa_draw_attributes_relative_offset(glattrib);
+ const uint32_t rel_end = rel_offset + glattrib->Format._ElementSize;
+
+ vertex_range_start = MIN2(vertex_range_start, rel_offset);
+ vertex_range_end = MAX2(vertex_range_end, rel_end);
+
+ struct brw_vertex_element *input = &brw->vb.inputs[attr];
+ input->glformat = &glattrib->Format;
+ input->buffer = j;
+ input->is_dual_slot = (vp->DualSlotInputs & BITFIELD64_BIT(attr)) != 0;
+ input->offset = rel_offset;
}
- }
- /* Upload non-interleaved arrays */
- for (i = 0; i < nr_uploads; i++) {
+ assert(vertex_range_start <= vertex_range_end);
+
struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
- if (upload[i]->glarray->InstanceDivisor == 0) {
- copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
- buffer, upload[i]->glarray->_ElementSize);
+
+ const uint8_t *ptr = (const uint8_t*)_mesa_draw_binding_offset(glbinding);
+ ptr += vertex_range_start;
+ const uint32_t vertex_size = vertex_range_end - vertex_range_start;
+ if (glbinding->Stride == 0) {
+ /* If the source stride is zero, we just want to upload the current
+ * attribute once and set the buffer's stride to 0. There's no need
+ * to replicate it out.
+ */
+ copy_array_to_vbo_array(brw, ptr, 0, 0, 0, buffer, vertex_size);
+ } else if (glbinding->InstanceDivisor == 0) {
+ copy_array_to_vbo_array(brw, ptr, stride, min_index,
+ max_index, buffer, vertex_size);
} else {
/* This is an instanced attribute, since its InstanceDivisor
* is not zero. Therefore, its data will be stepped after the
* instanced draw has been run InstanceDivisor times.
*/
uint32_t instanced_attr_max_index =
- (brw->num_instances - 1) / upload[i]->glarray->InstanceDivisor;
- copy_array_to_vbo_array(brw, upload[i], 0, instanced_attr_max_index,
- buffer, upload[i]->glarray->_ElementSize);
+ (brw->num_instances - 1) / glbinding->InstanceDivisor;
+ copy_array_to_vbo_array(brw, ptr, stride, 0,
+ instanced_attr_max_index, buffer, vertex_size);
}
- buffer->offset -= delta * buffer->stride;
- buffer->step_rate = upload[i]->glarray->InstanceDivisor;
- upload[i]->buffer = j++;
- upload[i]->offset = 0;
- }
-
- brw->vb.nr_buffers = j;
-}
-
-void
-brw_prepare_shader_draw_parameters(struct brw_context *brw)
-{
- /* For non-indirect draws, upload gl_BaseVertex. */
- if (brw->vs.prog_data->uses_vertexid && brw->draw.draw_params_bo == NULL) {
- intel_upload_data(brw, &brw->draw.gl_basevertex, 4, 4,
- &brw->draw.draw_params_bo,
- &brw->draw.draw_params_offset);
- }
-}
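+ /* The upload skipped the first min_index vertices (delta) and
+ * vertex_range_start bytes within each vertex, so bias the binding so
+ * the original indices and element offsets still line up.
+ */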
+ buffer->offset -= delta * buffer->stride + vertex_range_start;
+ buffer->size += delta * buffer->stride + vertex_range_start;
+ buffer->step_rate = glbinding->InstanceDivisor;
-/**
- * Emit a VERTEX_BUFFER_STATE entry (part of 3DSTATE_VERTEX_BUFFERS).
- */
-static uint32_t *
-emit_vertex_buffer_state(struct brw_context *brw,
- unsigned buffer_nr,
- drm_intel_bo *bo,
- unsigned bo_ending_address,
- unsigned bo_offset,
- unsigned stride,
- unsigned step_rate,
- uint32_t *__map)
-{
- struct gl_context *ctx = &brw->ctx;
- uint32_t dw0;
-
- if (brw->gen >= 6) {
- dw0 = (buffer_nr << GEN6_VB0_INDEX_SHIFT) |
- (step_rate ? GEN6_VB0_ACCESS_INSTANCEDATA
- : GEN6_VB0_ACCESS_VERTEXDATA);
- } else {
- dw0 = (buffer_nr << BRW_VB0_INDEX_SHIFT) |
- (step_rate ? BRW_VB0_ACCESS_INSTANCEDATA
- : BRW_VB0_ACCESS_VERTEXDATA);
+ j++;
}
- if (brw->gen >= 7)
- dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
+ /* Upload the current values */
+ unsigned curmask = vs_inputs & _mesa_draw_current_bits(ctx);
+ if (curmask) {
+ /* For each attribute, upload the maximum possible size. */
+ uint8_t data[VERT_ATTRIB_MAX * sizeof(GLdouble) * 4];
+ uint8_t *cursor = data;
+
+ do {
+ const gl_vert_attrib attr = u_bit_scan(&curmask);
+ const struct gl_array_attributes *const glattrib =
+ _mesa_draw_current_attrib(ctx, attr);
+ const unsigned size = glattrib->Format._ElementSize;
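+ /* Round each value up to GLdouble alignment and zero the padding so
+ * every attribute in this scratch buffer stays aligned.
+ */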
+ const unsigned alignment = align(size, sizeof(GLdouble));
+ memcpy(cursor, glattrib->Ptr, size);
+ if (alignment != size)
+ memset(cursor + size, 0, alignment - size);
+
+ struct brw_vertex_element *input = &brw->vb.inputs[attr];
+ input->glformat = &glattrib->Format;
+ input->buffer = j;
+ input->is_dual_slot = (vp->DualSlotInputs & BITFIELD64_BIT(attr)) != 0;
+ input->offset = cursor - data;
+
+ cursor += alignment;
+ } while (curmask);
- if (brw->gen == 7)
- dw0 |= GEN7_MOCS_L3 << 16;
+ struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
+ const unsigned size = cursor - data;
+ brw_upload_data(&brw->upload, data, size, size,
+ &buffer->bo, &buffer->offset);
+ buffer->stride = 0;
+ buffer->size = size;
+ buffer->step_rate = 0;
- WARN_ONCE(stride >= (brw->gen >= 5 ? 2048 : 2047),
- "VBO stride %d too large, bad rendering may occur\n",
- stride);
- OUT_BATCH(dw0 | (stride << BRW_VB0_PITCH_SHIFT));
- OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, bo_offset);
- if (brw->gen >= 5) {
- OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, bo_ending_address);
- } else {
- OUT_BATCH(0);
+ j++;
}
- OUT_BATCH(step_rate);
-
- return __map;
+ brw->vb.nr_buffers = j;
}
-#define EMIT_VERTEX_BUFFER_STATE(...) __map = emit_vertex_buffer_state(__VA_ARGS__, __map)
-static void
-brw_emit_vertices(struct brw_context *brw)
+void
+brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
- GLuint i;
-
- brw_prepare_vertices(brw);
- brw_prepare_shader_draw_parameters(brw);
-
- brw_emit_query_begin(brw);
-
- unsigned nr_elements = brw->vb.nr_enabled;
- if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid)
- ++nr_elements;
-
- /* If the VS doesn't read any inputs (calculating vertex position from
- * a state variable for some reason, for example), emit a single pad
- * VERTEX_ELEMENT struct and bail.
- *
- * The stale VB state stays in place, but they don't do anything unless
- * a VE loads from them.
- */
- if (nr_elements == 0) {
- BEGIN_BATCH(3);
- OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
- if (brw->gen >= 6) {
- OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
- GEN6_VE0_VALID |
- (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
- (0 << BRW_VE0_SRC_OFFSET_SHIFT));
- } else {
- OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
- BRW_VE0_VALID |
- (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
- (0 << BRW_VE0_SRC_OFFSET_SHIFT));
- }
- OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
- (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
- (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
- (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
- ADVANCE_BATCH();
- return;
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
+
+ /* For non-indirect draws, upload the shader draw parameters */
+ if ((vs_prog_data->uses_firstvertex || vs_prog_data->uses_baseinstance) &&
+ brw->draw.draw_params_bo == NULL) {
+ brw_upload_data(&brw->upload,
+ &brw->draw.params, sizeof(brw->draw.params), 4,
+ &brw->draw.draw_params_bo,
+ &brw->draw.draw_params_offset);
}
- /* Now emit VB and VEP state packets.
- */
-
- unsigned nr_buffers =
- brw->vb.nr_buffers + brw->vs.prog_data->uses_vertexid;
-
- if (nr_buffers) {
- if (brw->gen >= 6) {
- assert(nr_buffers <= 33);
- } else {
- assert(nr_buffers <= 17);
- }
-
- BEGIN_BATCH(1 + 4 * nr_buffers);
- OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4 * nr_buffers - 1));
- for (i = 0; i < brw->vb.nr_buffers; i++) {
- struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
- EMIT_VERTEX_BUFFER_STATE(brw, i, buffer->bo, buffer->bo->size - 1,
- buffer->offset, buffer->stride,
- buffer->step_rate);
-
- }
-
- if (brw->vs.prog_data->uses_vertexid) {
- EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers,
- brw->draw.draw_params_bo,
- brw->draw.draw_params_bo->size - 1,
- brw->draw.draw_params_offset,
- 0, /* stride */
- 0); /* step rate */
- }
- ADVANCE_BATCH();
- }
-
- /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS, presumably
- * for VertexID/InstanceID.
- */
- if (brw->gen >= 6) {
- assert(nr_elements <= 34);
- } else {
- assert(nr_elements <= 18);
- }
-
- struct brw_vertex_element *gen6_edgeflag_input = NULL;
-
- BEGIN_BATCH(1 + nr_elements * 2);
- OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
- for (i = 0; i < brw->vb.nr_enabled; i++) {
- struct brw_vertex_element *input = brw->vb.enabled[i];
- uint32_t format = brw_get_vertex_surface_type(brw, input->glarray);
- uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
- uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
- uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
- uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;
-
- if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
- /* Gen6+ passes edgeflag as sideband along with the vertex, instead
- * of in the VUE. We have to upload it sideband as the last vertex
- * element according to the B-Spec.
- */
- if (brw->gen >= 6) {
- gen6_edgeflag_input = input;
- continue;
- }
- }
-
- switch (input->glarray->Size) {
- case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
- case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
- case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
- case 3: comp3 = input->glarray->Integer ? BRW_VE1_COMPONENT_STORE_1_INT
- : BRW_VE1_COMPONENT_STORE_1_FLT;
- break;
- }
-
- if (brw->gen >= 6) {
- OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
- GEN6_VE0_VALID |
- (format << BRW_VE0_FORMAT_SHIFT) |
- (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
- } else {
- OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
- BRW_VE0_VALID |
- (format << BRW_VE0_FORMAT_SHIFT) |
- (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
- }
-
- if (brw->gen >= 5)
- OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
- (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
- (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
- (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
- else
- OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
- (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
- (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
- (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
- ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
+ if (vs_prog_data->uses_drawid || vs_prog_data->uses_is_indexed_draw) {
+ brw_upload_data(&brw->upload,
+ &brw->draw.derived_params, sizeof(brw->draw.derived_params), 4,
+ &brw->draw.derived_draw_params_bo,
+ &brw->draw.derived_draw_params_offset);
}
-
- if (brw->gen >= 6 && gen6_edgeflag_input) {
- uint32_t format =
- brw_get_vertex_surface_type(brw, gen6_edgeflag_input->glarray);
-
- OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
- GEN6_VE0_VALID |
- GEN6_VE0_EDGE_FLAG_ENABLE |
- (format << BRW_VE0_FORMAT_SHIFT) |
- (gen6_edgeflag_input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
- OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
- (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
- (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
- (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
- }
-
- if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid) {
- uint32_t dw0 = 0, dw1 = 0;
- uint32_t comp0 = BRW_VE1_COMPONENT_STORE_0;
- uint32_t comp1 = BRW_VE1_COMPONENT_STORE_0;
- uint32_t comp2 = BRW_VE1_COMPONENT_STORE_0;
- uint32_t comp3 = BRW_VE1_COMPONENT_STORE_0;
-
- if (brw->vs.prog_data->uses_vertexid) {
- comp0 = BRW_VE1_COMPONENT_STORE_SRC;
- comp2 = BRW_VE1_COMPONENT_STORE_VID;
- }
-
- if (brw->vs.prog_data->uses_instanceid) {
- comp3 = BRW_VE1_COMPONENT_STORE_IID;
- }
-
- dw1 = (comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
- (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
- (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
- (comp3 << BRW_VE1_COMPONENT_3_SHIFT);
-
- if (brw->gen >= 6) {
- dw0 |= GEN6_VE0_VALID |
- brw->vb.nr_buffers << GEN6_VE0_INDEX_SHIFT |
- BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT;
- } else {
- dw0 |= BRW_VE0_VALID |
- brw->vb.nr_buffers << BRW_VE0_INDEX_SHIFT |
- BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT;
- dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
- }
-
- /* Note that for gl_VertexID, gl_InstanceID, and gl_PrimitiveID values,
- * the format is ignored and the value is always int.
- */
-
- OUT_BATCH(dw0);
- OUT_BATCH(dw1);
- }
-
- ADVANCE_BATCH();
}
-const struct brw_tracked_state brw_vertices = {
- .dirty = {
- .mesa = _NEW_POLYGON,
- .brw = BRW_NEW_BATCH |
- BRW_NEW_VERTICES |
- BRW_NEW_VS_PROG_DATA,
- },
- .emit = brw_emit_vertices,
-};
-
static void
brw_upload_indices(struct brw_context *brw)
{
- struct gl_context *ctx = &brw->ctx;
const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
GLuint ib_size;
- drm_intel_bo *old_bo = brw->ib.bo;
+ struct brw_bo *old_bo = brw->ib.bo;
struct gl_buffer_object *bufferobj;
GLuint offset;
GLuint ib_type_size;
if (index_buffer == NULL)
return;
- ib_type_size = _mesa_sizeof_type(index_buffer->type);
- ib_size = ib_type_size * index_buffer->count;
+ ib_type_size = 1 << index_buffer->index_size_shift;
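+ /* A zero count presumably means the real index count is not known up
+ * front (e.g. an indirect draw), so bound by the whole buffer instead.
+ */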
+ ib_size = index_buffer->count ? ib_type_size * index_buffer->count :
+ index_buffer->obj->Size;
bufferobj = index_buffer->obj;
/* Turn into a proper VBO:
*/
- if (!_mesa_is_bufferobj(bufferobj)) {
+ if (!bufferobj) {
/* Get new bufferobj, offset:
*/
- intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
- &brw->ib.bo, &offset);
+ brw_upload_data(&brw->upload, index_buffer->ptr, ib_size, ib_type_size,
+ &brw->ib.bo, &offset);
+ brw->ib.size = brw->ib.bo->size;
} else {
offset = (GLuint) (unsigned long) index_buffer->ptr;
- /* If the index buffer isn't aligned to its element size, we have to
- * rebase it into a temporary.
- */
- if ((ib_type_size - 1) & offset) {
- perf_debug("copying index buffer to a temporary to work around "
- "misaligned offset %d\n", offset);
-
- GLubyte *map = ctx->Driver.MapBufferRange(ctx,
- offset,
- ib_size,
- GL_MAP_READ_BIT,
- bufferobj,
- MAP_INTERNAL);
-
- intel_upload_data(brw, map, ib_size, ib_type_size,
- &brw->ib.bo, &offset);
-
- ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
- } else {
- drm_intel_bo *bo =
- intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
- offset, ib_size);
- if (bo != brw->ib.bo) {
- drm_intel_bo_unreference(brw->ib.bo);
- brw->ib.bo = bo;
- drm_intel_bo_reference(bo);
- }
+ struct brw_bo *bo =
+ intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
+ offset, ib_size, false);
+ if (bo != brw->ib.bo) {
+ brw_bo_unreference(brw->ib.bo);
+ brw->ib.bo = bo;
+ brw->ib.size = bufferobj->Size;
+ brw_bo_reference(bo);
}
}
if (brw->ib.bo != old_bo)
brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
- if (index_buffer->type != brw->ib.type) {
- brw->ib.type = index_buffer->type;
+ unsigned index_size = 1 << index_buffer->index_size_shift;
+ if (index_size != brw->ib.index_size) {
+ brw->ib.index_size = index_size;
brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
}
-}
-const struct brw_tracked_state brw_indices = {
- .dirty = {
- .mesa = 0,
- .brw = BRW_NEW_INDICES,
- },
- .emit = brw_upload_indices,
-};
-
-static void
-brw_emit_index_buffer(struct brw_context *brw)
-{
- const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
- GLuint cut_index_setting;
-
- if (index_buffer == NULL)
- return;
-
- if (brw->prim_restart.enable_cut_index && !brw->is_haswell) {
- cut_index_setting = BRW_CUT_INDEX_ENABLE;
- } else {
- cut_index_setting = 0;
+ /* We need to re-emit index buffer state whenever the cut index
+ * flag changes.
+ */
+ if (brw->prim_restart.enable_cut_index != brw->ib.enable_cut_index) {
+ brw->ib.enable_cut_index = brw->prim_restart.enable_cut_index;
+ brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
}
-
- BEGIN_BATCH(3);
- OUT_BATCH(CMD_INDEX_BUFFER << 16 |
- cut_index_setting |
- brw_get_index_type(index_buffer->type) |
- 1);
- OUT_RELOC(brw->ib.bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- 0);
- OUT_RELOC(brw->ib.bo,
- I915_GEM_DOMAIN_VERTEX, 0,
- brw->ib.bo->size - 1);
- ADVANCE_BATCH();
}
-const struct brw_tracked_state brw_index_buffer = {
+const struct brw_tracked_state brw_indices = {
.dirty = {
.mesa = 0,
- .brw = BRW_NEW_BATCH |
- BRW_NEW_INDEX_BUFFER,
+ .brw = BRW_NEW_BLORP |
+ BRW_NEW_INDICES,
},
- .emit = brw_emit_index_buffer,
+ .emit = brw_upload_indices,
};