0,
BRW_SURFACEFORMAT_R16_FLOAT,
BRW_SURFACEFORMAT_R16G16_FLOAT,
- BRW_SURFACEFORMAT_R16G16B16A16_FLOAT,
+ BRW_SURFACEFORMAT_R16G16B16_FLOAT,
BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};
0,
BRW_SURFACEFORMAT_R16_UINT,
BRW_SURFACEFORMAT_R16G16_UINT,
- BRW_SURFACEFORMAT_R16G16B16A16_UINT,
+ BRW_SURFACEFORMAT_R16G16B16_UINT,
BRW_SURFACEFORMAT_R16G16B16A16_UINT
};
0,
BRW_SURFACEFORMAT_R16_SINT,
BRW_SURFACEFORMAT_R16G16_SINT,
- BRW_SURFACEFORMAT_R16G16B16A16_SINT,
+ BRW_SURFACEFORMAT_R16G16B16_SINT,
BRW_SURFACEFORMAT_R16G16B16A16_SINT
};
0,
BRW_SURFACEFORMAT_R8_UINT,
BRW_SURFACEFORMAT_R8G8_UINT,
- BRW_SURFACEFORMAT_R8G8B8A8_UINT,
+ BRW_SURFACEFORMAT_R8G8B8_UINT,
BRW_SURFACEFORMAT_R8G8B8A8_UINT
};
0,
BRW_SURFACEFORMAT_R8_SINT,
BRW_SURFACEFORMAT_R8G8_SINT,
- BRW_SURFACEFORMAT_R8G8B8A8_SINT,
+ BRW_SURFACEFORMAT_R8G8B8_SINT,
BRW_SURFACEFORMAT_R8G8B8A8_SINT
};
* 64-bit components are stored in the URB without any conversion."
* Also included on BDW PRM, Volume 7, page 470, table "Source Element
* Formats Supported in VF Unit"
- * Previous PRMs don't include those references.
+ *
+ * Previous PRMs don't include those references, so for gen7 we can't use
+ * PASSTHRU formats directly. Even so, we prefer to return the passthru
+ * format, because it reflects what we want to achieve, even though we
+ * need to work around it on gen < 8.
*/
- return (brw->gen >= 8 && doubles
+ return (doubles
? double_types_passthru[size]
: double_types_float[size]);
}
+static bool
+is_passthru_format(uint32_t format)
+{
+ switch (format) {
+ case BRW_SURFACEFORMAT_R64_PASSTHRU:
+ case BRW_SURFACEFORMAT_R64G64_PASSTHRU:
+ case BRW_SURFACEFORMAT_R64G64B64_PASSTHRU:
+ case BRW_SURFACEFORMAT_R64G64B64A64_PASSTHRU:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int
+uploads_needed(uint32_t format)
+{
+ if (!is_passthru_format(format))
+ return 1;
+
+ switch (format) {
+ case BRW_SURFACEFORMAT_R64_PASSTHRU:
+ case BRW_SURFACEFORMAT_R64G64_PASSTHRU:
+ return 1;
+ case BRW_SURFACEFORMAT_R64G64B64_PASSTHRU:
+ case BRW_SURFACEFORMAT_R64G64B64A64_PASSTHRU:
+ return 2;
+ default:
+ unreachable("not reached");
+ }
+}
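+
+/* Illustrative mapping: a dvec2 attribute uses
+ * BRW_SURFACEFORMAT_R64G64_PASSTHRU, which at 128 bits still fits in a
+ * single vertex element, so one upload suffices; dvec3 and dvec4 (192
+ * and 256 bits) exceed one 128-bit element and need two.
+ */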
+
+/*
+ * Returns the number of components associated with a format that is used in
+ * a 64-bit to 32-bit format split. See downsize_format_if_needed().
+ */
+static int
+upload_format_size(uint32_t upload_format)
+{
+ switch (upload_format) {
+ case BRW_SURFACEFORMAT_R32G32_FLOAT:
+ return 2;
+ case BRW_SURFACEFORMAT_R32G32B32A32_FLOAT:
+ return 4;
+ default:
+ unreachable("not reached");
+ }
+}
+
+/*
+ * Returns the format that we are finally going to use when uploading a
+ * vertex element. It will only change if we are using *64*PASSTHRU formats,
+ * as for gen < 8 they need to be split into two *32*FLOAT formats.
+ *
+ * @upload indicates which upload pass this is. Valid values are [0,1].
+ */
+static uint32_t
+downsize_format_if_needed(uint32_t format,
+ int upload)
+{
+ assert(upload == 0 || upload == 1);
+
+ if (!is_passthru_format(format))
+ return format;
+
+ switch (format) {
+ case BRW_SURFACEFORMAT_R64_PASSTHRU:
+ return BRW_SURFACEFORMAT_R32G32_FLOAT;
+ case BRW_SURFACEFORMAT_R64G64_PASSTHRU:
+ return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
+ case BRW_SURFACEFORMAT_R64G64B64_PASSTHRU:
+ return !upload ? BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
+ : BRW_SURFACEFORMAT_R32G32_FLOAT;
+ case BRW_SURFACEFORMAT_R64G64B64A64_PASSTHRU:
+ return BRW_SURFACEFORMAT_R32G32B32A32_FLOAT;
+ default:
+ unreachable("not reached");
+ }
+}
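+
+/* Illustrative example: a dvec3 attribute (R64G64B64_PASSTHRU) is split
+ * into upload 0 as R32G32B32A32_FLOAT, carrying the first two doubles,
+ * and upload 1 as R32G32_FLOAT, carrying the third double, read 16 bytes
+ * (one full entry) further into the element.
+ */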
+
/**
* Given vertex array type/size/format/normalized info, return
* the appopriate hardware surface type.
*/
unsigned
brw_get_vertex_surface_type(struct brw_context *brw,
- const struct gl_client_array *glarray)
+ const struct gl_vertex_array *glarray)
{
int size = glarray->Size;
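+ /* On gen7 and earlier (except Bay Trail and Haswell) the VF unit lacks
+ * the 3-component 8-/16-bit integer formats, so below we fake them with
+ * the matching 4-component format; the vertex buffer gets 2 bytes of
+ * padding when bound so the extra component is safe to fetch.
+ */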
+ const bool is_ivybridge_or_older =
+ brw->gen <= 7 && !brw->is_baytrail && !brw->is_haswell;
if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
fprintf(stderr, "type %s size %d normalized %d\n",
assert(glarray->Format == GL_RGBA); /* sanity check */
switch (glarray->Type) {
case GL_INT: return int_types_direct[size];
- case GL_SHORT: return short_types_direct[size];
- case GL_BYTE: return byte_types_direct[size];
+ case GL_SHORT:
+ if (is_ivybridge_or_older && size == 3)
+ return short_types_direct[4];
+ else
+ return short_types_direct[size];
+ case GL_BYTE:
+ if (is_ivybridge_or_older && size == 3)
+ return byte_types_direct[4];
+ else
+ return byte_types_direct[size];
case GL_UNSIGNED_INT: return uint_types_direct[size];
- case GL_UNSIGNED_SHORT: return ushort_types_direct[size];
- case GL_UNSIGNED_BYTE: return ubyte_types_direct[size];
+ case GL_UNSIGNED_SHORT:
+ if (is_ivybridge_or_older && size == 3)
+ return ushort_types_direct[4];
+ else
+ return ushort_types_direct[size];
+ case GL_UNSIGNED_BYTE:
+ if (is_ivybridge_or_older && size == 3)
+ return ubyte_types_direct[4];
+ else
+ return ubyte_types_direct[size];
default: unreachable("not reached");
}
} else if (glarray->Type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
switch (glarray->Type) {
case GL_DOUBLE: return double_types(brw, size, glarray->Doubles);
case GL_FLOAT: return float_types[size];
- case GL_HALF_FLOAT: return half_float_types[size];
+ case GL_HALF_FLOAT:
+ case GL_HALF_FLOAT_OES:
+ if (brw->gen < 6 && size == 3)
+ return half_float_types[4];
+ else
+ return half_float_types[size];
case GL_INT: return int_types_norm[size];
case GL_SHORT: return short_types_norm[size];
case GL_BYTE: return byte_types_norm[size];
switch (glarray->Type) {
case GL_DOUBLE: return double_types(brw, size, glarray->Doubles);
case GL_FLOAT: return float_types[size];
- case GL_HALF_FLOAT: return half_float_types[size];
+ case GL_HALF_FLOAT:
+ case GL_HALF_FLOAT_OES:
+ if (brw->gen < 6 && size == 3)
+ return half_float_types[4];
+ else
+ return half_float_types[size];
case GL_INT: return int_types_scale[size];
case GL_SHORT: return short_types_scale[size];
case GL_BYTE: return byte_types_scale[size];
&buffer->bo, &buffer->offset);
buffer->stride = 0;
+ buffer->size = element->glarray->_ElementSize;
return;
}
uint8_t *dst = intel_upload_space(brw, size, dst_stride,
&buffer->bo, &buffer->offset);
- if (dst_stride == src_stride) {
- memcpy(dst, src, size);
- } else {
- while (count--) {
- memcpy(dst, src, dst_stride);
- src += src_stride;
- dst += dst_stride;
+ /* The GL 4.5 spec says:
+ * "If any enabled array’s buffer binding is zero when DrawArrays or
+ * one of the other drawing commands defined in section 10.4 is called,
+ * the result is undefined."
+ *
+ * In this case, we just leave dst with undefined values.
+ */
+ if (src != NULL) {
+ if (dst_stride == src_stride) {
+ memcpy(dst, src, size);
+ } else {
+ while (count--) {
+ memcpy(dst, src, dst_stride);
+ src += src_stride;
+ dst += dst_stride;
+ }
}
}
buffer->stride = dst_stride;
+ buffer->size = size;
}
void
{
struct gl_context *ctx = &brw->ctx;
/* BRW_NEW_VS_PROG_DATA */
- GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
+ GLbitfield64 vs_inputs = vs_prog_data->inputs_read;
const unsigned char *ptr = NULL;
GLuint interleaved = 0;
unsigned int min_index = brw->vb.min_index + brw->basevertex;
/* Accumulate the list of enabled arrays. */
brw->vb.nr_enabled = 0;
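+ /* Each dual-slot (64-bit) input occupies two bits in inputs_read and
+ * double_inputs_read, but only one brw_vertex_element. Illustrative
+ * example: if attribute 0 is a dvec4, double_inputs_read has bits 0
+ * and 1 set, so for the following attribute first = 2 and the
+ * DIV_ROUND_UP term maps it to index 1.
+ */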
while (vs_inputs) {
- GLuint index = ffsll(vs_inputs) - 1;
+ GLuint first = ffsll(vs_inputs) - 1;
+ assert(first < 64);
+ GLuint index =
+ first - DIV_ROUND_UP(_mesa_bitcount_64(vs_prog_data->double_inputs_read &
+ BITFIELD64_MASK(first)), 2);
struct brw_vertex_element *input = &brw->vb.inputs[index];
-
- vs_inputs &= ~BITFIELD64_BIT(index);
+ input->is_dual_slot =
+ (vs_prog_data->double_inputs_read & BITFIELD64_BIT(first)) != 0;
+ vs_inputs &= ~BITFIELD64_BIT(first);
+ if (input->is_dual_slot)
+ vs_inputs &= ~BITFIELD64_BIT(first + 1);
brw->vb.enabled[brw->vb.nr_enabled++] = input;
}
for (i = j = 0; i < brw->vb.nr_enabled; i++) {
struct brw_vertex_element *input = brw->vb.enabled[i];
- const struct gl_client_array *glarray = input->glarray;
+ const struct gl_vertex_array *glarray = input->glarray;
if (_mesa_is_bufferobj(glarray->BufferObj)) {
struct intel_buffer_object *intel_buffer =
if (glarray->InstanceDivisor) {
if (brw->num_instances) {
start = offset + glarray->StrideB * brw->baseinstance;
- range = (glarray->StrideB * ((brw->num_instances /
- glarray->InstanceDivisor) - 1) +
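+ /* Illustrative example: with num_instances = 4 and InstanceDivisor = 3,
+ * instances 0..2 read element 0 and instance 3 reads element 1, so the
+ * range must cover StrideB * 1 + _ElementSize bytes; the old formula
+ * underestimated this.
+ */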
+ range = (glarray->StrideB * ((brw->num_instances - 1) /
+ glarray->InstanceDivisor) +
glarray->_ElementSize);
}
} else {
*/
unsigned k;
for (k = 0; k < i; k++) {
- const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
+ const struct gl_vertex_array *other = brw->vb.enabled[k]->glarray;
if (glarray->BufferObj == other->BufferObj &&
glarray->StrideB == other->StrideB &&
glarray->InstanceDivisor == other->InstanceDivisor &&
input->buffer = brw->vb.enabled[k]->buffer;
input->offset = glarray->Ptr - other->Ptr;
- buffer_range_start[k] = MIN2(buffer_range_start[k], start);
- buffer_range_end[k] = MAX2(buffer_range_end[k], start + range);
+ buffer_range_start[input->buffer] =
+ MIN2(buffer_range_start[input->buffer], start);
+ buffer_range_end[input->buffer] =
+ MAX2(buffer_range_end[input->buffer], start + range);
break;
}
}
buffer->offset = offset;
buffer->stride = glarray->StrideB;
buffer->step_rate = glarray->InstanceDivisor;
+ buffer->size = glarray->BufferObj->Size - offset;
enabled_buffer[j] = intel_buffer;
buffer_range_start[j] = start;
input->buffer = j++;
input->offset = 0;
}
-
- /* This is a common place to reach if the user mistakenly supplies
- * a pointer in place of a VBO offset. If we just let it go through,
- * we may end up dereferencing a pointer beyond the bounds of the
- * GTT.
- *
- * The VBO spec allows application termination in this case, and it's
- * probably a service to the poor programmer to do so rather than
- * trying to just not render.
- */
- assert(input->offset < intel_buffer->Base.Size);
} else {
/* Queue the buffer object up to be uploaded in the next pass,
* when we've decided if we're doing interleaved or not.
copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
buffer, interleaved);
buffer->offset -= delta * interleaved;
+ buffer->size += delta * interleaved;
for (i = 0; i < nr_uploads; i++) {
/* Then, just point upload[i] at upload[0]'s buffer. */
buffer, upload[i]->glarray->_ElementSize);
}
buffer->offset -= delta * buffer->stride;
+ buffer->size += delta * buffer->stride;
buffer->step_rate = upload[i]->glarray->InstanceDivisor;
upload[i]->buffer = j++;
upload[i]->offset = 0;
void
brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
+
/* For non-indirect draws, upload gl_BaseVertex. */
- if ((brw->vs.prog_data->uses_basevertex ||
- brw->vs.prog_data->uses_baseinstance) &&
+ if ((vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) &&
brw->draw.draw_params_bo == NULL) {
intel_upload_data(brw, &brw->draw.params, sizeof(brw->draw.params), 4,
&brw->draw.draw_params_bo,
&brw->draw.draw_params_offset);
}
- if (brw->vs.prog_data->uses_drawid) {
+ if (vs_prog_data->uses_drawid) {
intel_upload_data(brw, &brw->draw.gl_drawid, sizeof(brw->draw.gl_drawid), 4,
&brw->draw.draw_id_bo,
&brw->draw.draw_id_offset);
/**
* Emit a VERTEX_BUFFER_STATE entry (part of 3DSTATE_VERTEX_BUFFERS).
*/
-static uint32_t *
-emit_vertex_buffer_state(struct brw_context *brw,
- unsigned buffer_nr,
- drm_intel_bo *bo,
- unsigned bo_ending_address,
- unsigned bo_offset,
- unsigned stride,
- unsigned step_rate,
- uint32_t *__map)
+uint32_t *
+brw_emit_vertex_buffer_state(struct brw_context *brw,
+ unsigned buffer_nr,
+ drm_intel_bo *bo,
+ unsigned start_offset,
+ unsigned end_offset,
+ unsigned stride,
+ unsigned step_rate,
+ uint32_t *__map)
{
struct gl_context *ctx = &brw->ctx;
uint32_t dw0;
- if (brw->gen >= 6) {
+ if (brw->gen >= 8) {
+ dw0 = buffer_nr << GEN6_VB0_INDEX_SHIFT;
+ } else if (brw->gen >= 6) {
dw0 = (buffer_nr << GEN6_VB0_INDEX_SHIFT) |
(step_rate ? GEN6_VB0_ACCESS_INSTANCEDATA
: GEN6_VB0_ACCESS_VERTEXDATA);
if (brw->gen >= 7)
dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
- if (brw->gen == 7)
+ switch (brw->gen) {
+ case 7:
dw0 |= GEN7_MOCS_L3 << 16;
+ break;
+ case 8:
+ dw0 |= BDW_MOCS_WB << 16;
+ break;
+ case 9:
+ dw0 |= SKL_MOCS_WB << 16;
+ break;
+ }
WARN_ONCE(stride >= (brw->gen >= 5 ? 2048 : 2047),
"VBO stride %d too large, bad rendering may occur\n",
stride);
OUT_BATCH(dw0 | (stride << BRW_VB0_PITCH_SHIFT));
- OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, bo_offset);
- if (brw->gen >= 5) {
- OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, bo_ending_address);
+ if (brw->gen >= 8) {
+ OUT_RELOC64(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
+ /* From the BSpec: 3D Pipeline Stages - 3D Pipeline Geometry -
+ * Vertex Fetch (VF) Stage - State
+ *
+ * Instead of "VBState.StartingBufferAddress + VBState.MaxIndex x
+ * VBState.BufferPitch", the address of the byte immediately beyond the
+ * last valid byte of the buffer is determined by
+ * "VBState.StartingBufferAddress + VBState.BufferSize".
+ */
+ OUT_BATCH(end_offset - start_offset);
+ } else if (brw->gen >= 5) {
+ OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
+ /* From the BSpec: 3D Pipeline Stages - 3D Pipeline Geometry -
+ * Vertex Fetch (VF) Stage - State
+ *
+ * Instead of "VBState.StartingBufferAddress + VBState.MaxIndex x
+ * VBState.BufferPitch", the address of the byte immediately beyond the
+ * last valid byte of the buffer is determined by
+ * "VBState.EndAddress + 1".
+ */
+ OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, end_offset - 1);
+ OUT_BATCH(step_rate);
} else {
+ OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
OUT_BATCH(0);
+ OUT_BATCH(step_rate);
}
- OUT_BATCH(step_rate);
return __map;
}
-#define EMIT_VERTEX_BUFFER_STATE(...) __map = emit_vertex_buffer_state(__VA_ARGS__, __map)
+#define EMIT_VERTEX_BUFFER_STATE(...) __map = brw_emit_vertex_buffer_state(__VA_ARGS__, __map)
static void
brw_emit_vertices(struct brw_context *brw)
brw_emit_query_begin(brw);
+ const struct brw_vs_prog_data *vs_prog_data =
+ brw_vs_prog_data(brw->vs.base.prog_data);
+
unsigned nr_elements = brw->vb.nr_enabled;
- if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid ||
- brw->vs.prog_data->uses_basevertex || brw->vs.prog_data->uses_baseinstance)
+ if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
+ vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
++nr_elements;
- if (brw->vs.prog_data->uses_drawid)
+ if (vs_prog_data->uses_drawid)
nr_elements++;
+ /* If any enabled format in vb.enabled needs more than one upload, we
+ * need to account for it in nr_elements.
+ */
+ unsigned extra_uploads = 0;
+ for (unsigned i = 0; i < brw->vb.nr_enabled; i++) {
+ struct brw_vertex_element *input = brw->vb.enabled[i];
+ uint32_t format = brw_get_vertex_surface_type(brw, input->glarray);
+
+ if (uploads_needed(format) > 1)
+ extra_uploads++;
+ }
+ nr_elements += extra_uploads;
+
/* If the VS doesn't read any inputs (calculating vertex position from
* a state variable for some reason, for example), emit a single pad
* VERTEX_ELEMENT struct and bail.
*/
const bool uses_draw_params =
- brw->vs.prog_data->uses_basevertex ||
- brw->vs.prog_data->uses_baseinstance;
+ vs_prog_data->uses_basevertex ||
+ vs_prog_data->uses_baseinstance;
const unsigned nr_buffers = brw->vb.nr_buffers +
- uses_draw_params + brw->vs.prog_data->uses_drawid;
+ uses_draw_params + vs_prog_data->uses_drawid;
if (nr_buffers) {
if (brw->gen >= 6) {
OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4 * nr_buffers - 1));
for (i = 0; i < brw->vb.nr_buffers; i++) {
struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
- EMIT_VERTEX_BUFFER_STATE(brw, i, buffer->bo, buffer->bo->size - 1,
- buffer->offset, buffer->stride,
- buffer->step_rate);
+ /* Prior to Haswell and Bay Trail we have to use 4-component formats
+ * to fake 3-component ones. In particular, we do this for
+ * half-float and 8- and 16-bit integer formats. This means that the
+ * vertex element may poke over the end of the buffer by 2 bytes.
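+ * Illustrative example: a 3-component half-float attribute occupies 6
+ * bytes per vertex, but fetching it as a 4-component format reads 8.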
+ */
+ unsigned padding =
+ (brw->gen <= 7 && !brw->is_baytrail && !brw->is_haswell) * 2;
+ EMIT_VERTEX_BUFFER_STATE(brw, i, buffer->bo, buffer->offset,
+ buffer->offset + buffer->size + padding,
+ buffer->stride, buffer->step_rate);
}
if (uses_draw_params) {
EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers,
brw->draw.draw_params_bo,
- brw->draw.draw_params_bo->size - 1,
brw->draw.draw_params_offset,
+ brw->draw.draw_params_bo->size,
0, /* stride */
0); /* step rate */
}
- if (brw->vs.prog_data->uses_drawid) {
+ if (vs_prog_data->uses_drawid) {
EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers + 1,
brw->draw.draw_id_bo,
- brw->draw.draw_id_bo->size - 1,
brw->draw.draw_id_offset,
+ brw->draw.draw_id_bo->size,
0, /* stride */
0); /* step rate */
}
uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;
+ unsigned num_uploads = uploads_needed(format);
+ unsigned c;
+
if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
/* Gen6+ passes edgeflag as sideband along with the vertex, instead
}
}
- switch (input->glarray->Size) {
- case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
- case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
- case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
- case 3: comp3 = input->glarray->Integer ? BRW_VE1_COMPONENT_STORE_1_INT
- : BRW_VE1_COMPONENT_STORE_1_FLT;
- break;
- }
+ for (c = 0; c < num_uploads; c++) {
+ uint32_t upload_format = downsize_format_if_needed(format, c);
+ /* If we need more than one upload, the offset stride is 128 bits
+ * (16 bytes), since each prior upload consumed a full vertex
+ * element entry. */
+ unsigned int offset = input->offset + c * 16;
+ int size = input->glarray->Size;
+
+ if (is_passthru_format(format))
+ size = upload_format_size(upload_format);
+
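+ /* Deliberate switch fall-through: components the array does not
+ * provide are stored as 0, and a missing w as 1 (integer or float
+ * as appropriate); e.g. size == 2 keeps x/y from the source and
+ * stores 0, 1 in z, w.
+ */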
+ switch (size) {
+ case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
+ case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
+ case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
+ case 3: comp3 = input->glarray->Integer
+ ? BRW_VE1_COMPONENT_STORE_1_INT
+ : BRW_VE1_COMPONENT_STORE_1_FLT;
+ break;
+ }
- if (brw->gen >= 6) {
- OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
- GEN6_VE0_VALID |
- (format << BRW_VE0_FORMAT_SHIFT) |
- (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
- } else {
- OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
- BRW_VE0_VALID |
- (format << BRW_VE0_FORMAT_SHIFT) |
- (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
- }
+ if (brw->gen >= 6) {
+ OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
+ GEN6_VE0_VALID |
+ (upload_format << BRW_VE0_FORMAT_SHIFT) |
+ (offset << BRW_VE0_SRC_OFFSET_SHIFT));
+ } else {
+ OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
+ BRW_VE0_VALID |
+ (upload_format << BRW_VE0_FORMAT_SHIFT) |
+ (offset << BRW_VE0_SRC_OFFSET_SHIFT));
+ }
- if (brw->gen >= 5)
- OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
- (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
- (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
- (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
- else
- OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
- (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
- (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
- (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
- ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
+ if (brw->gen >= 5)
+ OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
+ (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
+ (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
+ (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
+ else
+ OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
+ (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
+ (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
+ (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
+ ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
+ }
}
- if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid ||
- brw->vs.prog_data->uses_basevertex || brw->vs.prog_data->uses_baseinstance) {
+ if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
+ vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) {
uint32_t dw0 = 0, dw1 = 0;
uint32_t comp0 = BRW_VE1_COMPONENT_STORE_0;
uint32_t comp1 = BRW_VE1_COMPONENT_STORE_0;
uint32_t comp2 = BRW_VE1_COMPONENT_STORE_0;
uint32_t comp3 = BRW_VE1_COMPONENT_STORE_0;
- if (brw->vs.prog_data->uses_basevertex)
+ if (vs_prog_data->uses_basevertex)
comp0 = BRW_VE1_COMPONENT_STORE_SRC;
- if (brw->vs.prog_data->uses_baseinstance)
+ if (vs_prog_data->uses_baseinstance)
comp1 = BRW_VE1_COMPONENT_STORE_SRC;
- if (brw->vs.prog_data->uses_vertexid)
+ if (vs_prog_data->uses_vertexid)
comp2 = BRW_VE1_COMPONENT_STORE_VID;
- if (brw->vs.prog_data->uses_instanceid)
+ if (vs_prog_data->uses_instanceid)
comp3 = BRW_VE1_COMPONENT_STORE_IID;
dw1 = (comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
OUT_BATCH(dw1);
}
- if (brw->vs.prog_data->uses_drawid) {
+ if (vs_prog_data->uses_drawid) {
uint32_t dw0 = 0, dw1 = 0;
dw1 = (BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
return;
ib_type_size = _mesa_sizeof_type(index_buffer->type);
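+ /* An index count of 0 means the real count is not known up front
+ * (e.g. for indirect draws), so fall back to the size of the whole
+ * buffer object.
+ */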
- ib_size = ib_type_size * index_buffer->count;
+ ib_size = index_buffer->count ? ib_type_size * index_buffer->count :
+ index_buffer->obj->Size;
bufferobj = index_buffer->obj;
/* Turn into a proper VBO:
*/
intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
&brw->ib.bo, &offset);
+ brw->ib.size = brw->ib.bo->size;
} else {
offset = (GLuint) (unsigned long) index_buffer->ptr;
intel_upload_data(brw, map, ib_size, ib_type_size,
&brw->ib.bo, &offset);
+ brw->ib.size = brw->ib.bo->size;
ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
} else {
if (bo != brw->ib.bo) {
drm_intel_bo_unreference(brw->ib.bo);
brw->ib.bo = bo;
+ brw->ib.size = bufferobj->Size;
drm_intel_bo_reference(bo);
}
}
0);
OUT_RELOC(brw->ib.bo,
I915_GEM_DOMAIN_VERTEX, 0,
- brw->ib.bo->size - 1);
+ brw->ib.size - 1);
ADVANCE_BATCH();
}