* IN THE SOFTWARE.
*/
-#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
+static bool
+is_passthru_format(uint32_t format)
+{
+ switch (format) {
+ case BRW_SURFACEFORMAT_R64_PASSTHRU:
+ case BRW_SURFACEFORMAT_R64G64_PASSTHRU:
+ case BRW_SURFACEFORMAT_R64G64B64_PASSTHRU:
+ case BRW_SURFACEFORMAT_R64G64B64A64_PASSTHRU:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void
gen8_emit_vertices(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
- uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
+ bool uses_edge_flag;
brw_prepare_vertices(brw);
brw_prepare_shader_draw_parameters(brw);
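+ /* Edge flags are only consumed when one of the polygon faces is drawn
+ * as points or lines, i.e. when glPolygonMode() is not GL_FILL.
+ */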
+ uses_edge_flag = (ctx->Polygon.FrontMode != GL_FILL ||
+ ctx->Polygon.BackMode != GL_FILL);
+
if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid) {
unsigned vue = brw->vb.nr_enabled;
- WARN_ONCE(brw->vs.prog_data->inputs_read & VERT_BIT_EDGEFLAG,
- "Using VID/IID with edgeflags, need to reorder the "
- "vertex attributes");
+ /* The element for the edge flags must always be last, so we have to
+ * insert the SGVS before it in that case.
+ */
+ if (uses_edge_flag) {
+ assert(vue > 0);
+ vue--;
+ }
+
WARN_ONCE(vue >= 33,
"Trying to insert VID/IID past 33rd vertex element, "
"need to reorder the vertex attrbutes.");
BEGIN_BATCH(3);
OUT_BATCH(_3DSTATE_VF_INSTANCING << 16 | (3 - 2));
- OUT_BATCH(brw->vb.nr_buffers | GEN8_VF_INSTANCING_ENABLE);
+ OUT_BATCH(vue | GEN8_VF_INSTANCING_ENABLE);
OUT_BATCH(0);
ADVANCE_BATCH();
} else {
}
/* Now emit 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS packets. */
- unsigned nr_buffers = brw->vb.nr_buffers + brw->vs.prog_data->uses_vertexid;
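+ /* The base vertex/instance values share a single extra vertex buffer;
+ * gl_DrawID lives in a separate buffer and needs one of its own.
+ */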
+ const bool uses_draw_params =
+ brw->vs.prog_data->uses_basevertex ||
+ brw->vs.prog_data->uses_baseinstance;
+ const unsigned nr_buffers = brw->vb.nr_buffers +
+ uses_draw_params + brw->vs.prog_data->uses_drawid;
+
if (nr_buffers) {
assert(nr_buffers <= 33);
BEGIN_BATCH(1 + 4 * nr_buffers);
OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4 * nr_buffers - 1));
for (unsigned i = 0; i < brw->vb.nr_buffers; i++) {
- struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
- uint32_t dw0 = 0;
-
- dw0 |= i << GEN6_VB0_INDEX_SHIFT;
- dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
- dw0 |= buffer->stride << BRW_VB0_PITCH_SHIFT;
- dw0 |= mocs_wb << 16;
+ const struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
+ EMIT_VERTEX_BUFFER_STATE(brw, i, buffer->bo,
+ buffer->offset,
+ buffer->offset + buffer->size,
+ buffer->stride, 0 /* unused */);
+ }
- OUT_BATCH(dw0);
- OUT_RELOC64(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
- OUT_BATCH(buffer->bo->size);
+ if (uses_draw_params) {
+ EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers,
+ brw->draw.draw_params_bo,
+ brw->draw.draw_params_offset,
+ brw->draw.draw_params_bo->size,
+ 0 /* stride */,
+ 0 /* unused */);
}
- if (brw->vs.prog_data->uses_vertexid) {
- OUT_BATCH(brw->vb.nr_buffers << GEN6_VB0_INDEX_SHIFT |
- GEN7_VB0_ADDRESS_MODIFYENABLE |
- mocs_wb << 16);
- OUT_RELOC64(brw->draw.draw_params_bo, I915_GEM_DOMAIN_VERTEX, 0,
- brw->draw.draw_params_offset);
- OUT_BATCH(brw->draw.draw_params_bo->size);
+ if (brw->vs.prog_data->uses_drawid) {
+ EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers + 1,
+ brw->draw.draw_id_bo,
+ brw->draw.draw_id_offset,
+ brw->draw.draw_id_bo->size,
+ 0 /* stride */,
+ 0 /* unused */);
}
ADVANCE_BATCH();
}
- unsigned nr_elements = brw->vb.nr_enabled + brw->vs.prog_data->uses_vertexid;
+ /* Normally we don't need an element for the SGVS attribute because the
+ * 3DSTATE_VF_SGVS instruction lets you store the generated attribute in an
+ * element that is past the list in 3DSTATE_VERTEX_ELEMENTS. However, if
+ * we're using draw parameters then we need an element for those
+ * values. Additionally, if there is an edge flag element then the SGVS
+ * can't be inserted past that, so we need a dummy element to ensure that
+ * the edge flag is the last one.
+ */
+ const bool needs_sgvs_element = (brw->vs.prog_data->uses_basevertex ||
+ brw->vs.prog_data->uses_baseinstance ||
+ ((brw->vs.prog_data->uses_instanceid ||
+ brw->vs.prog_data->uses_vertexid) &&
+ uses_edge_flag));
+ const unsigned nr_elements =
+ brw->vb.nr_enabled + needs_sgvs_element + brw->vs.prog_data->uses_drawid;
/* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
* presumably for VertexID/InstanceID.
*/
uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;
+ /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
+ * "Any SourceElementFormat of *64*_PASSTHRU cannot be used with an
+ * element which has edge flag enabled."
+ */
+ assert(!(is_passthru_format(format) && uses_edge_flag));
+
/* The gen4 driver expects edgeflag to come in as a float, and passes
* that float on to the tests in the clipper. Mesa's current vertex
* attribute value for EdgeFlag is stored as a float, which works out.
*/
break;
}
+ /* From the BDW PRM, Volume 2d, page 586 (VERTEX_ELEMENT_STATE):
+ *
+ * "When SourceElementFormat is set to one of the *64*_PASSTHRU
+ * formats, 64-bit components are stored in the URB without any
+ * conversion. In this case, vertex elements must be written as 128
+ * or 256 bits, with VFCOMP_STORE_0 being used to pad the output
+ * as required. E.g., if R64_PASSTHRU is used to copy a 64-bit Red
+ * component into the URB, Component 1 must be specified as
+ * VFCOMP_STORE_0 (with Components 2,3 set to VFCOMP_NOSTORE)
+ * in order to output a 128-bit vertex element, or Components 1-3 must
+ * be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
+ * element. Likewise, use of R64G64B64_PASSTHRU requires Component 3
+ * to be specified as VFCOMP_STORE_0 in order to output a 256-bit vertex
+ * element."
+ */
+ if (input->glarray->Doubles) {
+ switch (input->glarray->Size) {
+ case 0:
+ case 1:
+ case 2:
+ /* Use 128-bits instead of 256-bits to write double and dvec2
+ * vertex elements.
+ */
+ comp2 = BRW_VE1_COMPONENT_NOSTORE;
+ comp3 = BRW_VE1_COMPONENT_NOSTORE;
+ break;
+ case 3:
+ /* Pad the output using VFCOMP_STORE_0 as suggested
+ * by the BDW PRM.
+ */
+ comp3 = BRW_VE1_COMPONENT_STORE_0;
+ break;
+ }
+ }
+
OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
GEN6_VE0_VALID |
(format << BRW_VE0_FORMAT_SHIFT) |
(comp3 << BRW_VE1_COMPONENT_3_SHIFT));
}
+ if (needs_sgvs_element) {
+ if (brw->vs.prog_data->uses_basevertex ||
+ brw->vs.prog_data->uses_baseinstance) {
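+ /* Read both 32-bit draw parameters (base vertex and base instance)
+ * from the R32G32_UINT buffer element and zero components 2 and 3.
+ */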
+ OUT_BATCH(GEN6_VE0_VALID |
+ brw->vb.nr_buffers << GEN6_VE0_INDEX_SHIFT |
+ BRW_SURFACEFORMAT_R32G32_UINT << BRW_VE0_FORMAT_SHIFT);
+ OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_1_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
+ } else {
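+ /* VertexID/InstanceID are generated by the SGVS unit itself, so this
+ * is just a dummy element to keep the edge flag element last.
+ */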
+ OUT_BATCH(GEN6_VE0_VALID);
+ OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
+ }
+ }
+
+ if (brw->vs.prog_data->uses_drawid) {
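+ /* gl_DrawID comes in as a single 32-bit value from its own buffer. */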
+ OUT_BATCH(GEN6_VE0_VALID |
+ ((brw->vb.nr_buffers + 1) << GEN6_VE0_INDEX_SHIFT) |
+ (BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT));
+ OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
+ }
+
if (gen6_edgeflag_input) {
uint32_t format =
brw_get_vertex_surface_type(brw, gen6_edgeflag_input->glarray);
(BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
(BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
}
-
- if (brw->vs.prog_data->uses_vertexid) {
- OUT_BATCH(GEN6_VE0_VALID |
- brw->vb.nr_buffers << GEN6_VE0_INDEX_SHIFT |
- BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT);
- OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
- (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
- (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
- (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
- }
ADVANCE_BATCH();
- for (unsigned i = 0; i < brw->vb.nr_enabled; i++) {
+ for (unsigned i = 0, j = 0; i < brw->vb.nr_enabled; i++) {
const struct brw_vertex_element *input = brw->vb.enabled[i];
const struct brw_vertex_buffer *buffer = &brw->vb.buffers[input->buffer];
+ unsigned element_index;
+
+ /* The edge flag element is reordered to be the last one in the code
+ * above, so we need to compensate for that in the element indices used
+ * below.
+ */
+ if (input == gen6_edgeflag_input)
+ element_index = nr_elements - 1;
+ else
+ element_index = j++;
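+ /* A non-zero step rate is the glVertexAttribDivisor() value and
+ * enables instanced fetching for this element.
+ */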
BEGIN_BATCH(3);
OUT_BATCH(_3DSTATE_VF_INSTANCING << 16 | (3 - 2));
- OUT_BATCH(i | (buffer->step_rate ? GEN8_VF_INSTANCING_ENABLE : 0));
+ OUT_BATCH(element_index |
+ (buffer->step_rate ? GEN8_VF_INSTANCING_ENABLE : 0));
OUT_BATCH(buffer->step_rate);
ADVANCE_BATCH();
}
+
+ if (brw->vs.prog_data->uses_drawid) {
+ const unsigned element = brw->vb.nr_enabled + needs_sgvs_element;
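+ /* gl_DrawID is constant across the draw, so instancing stays disabled
+ * and the step rate is zero.
+ */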
+ BEGIN_BATCH(3);
+ OUT_BATCH(_3DSTATE_VF_INSTANCING << 16 | (3 - 2));
+ OUT_BATCH(element);
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+ }
}
const struct brw_tracked_state gen8_vertices = {
.dirty = {
.mesa = _NEW_POLYGON,
.brw = BRW_NEW_BATCH |
+ BRW_NEW_BLORP |
BRW_NEW_VERTICES |
BRW_NEW_VS_PROG_DATA,
},
BEGIN_BATCH(5);
OUT_BATCH(CMD_INDEX_BUFFER << 16 | (5 - 2));
- OUT_BATCH(brw_get_index_type(index_buffer->type) << 8 | mocs_wb);
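+ /* brw_get_index_type() now returns the index format already shifted
+ * into its DWord 1 position, so no << 8 is needed here.
+ */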
+ OUT_BATCH(brw_get_index_type(index_buffer->type) | mocs_wb);
OUT_RELOC64(brw->ib.bo, I915_GEM_DOMAIN_VERTEX, 0, 0);
- OUT_BATCH(brw->ib.bo->size);
+ OUT_BATCH(brw->ib.size);
ADVANCE_BATCH();
}
.dirty = {
.mesa = 0,
.brw = BRW_NEW_BATCH |
+ BRW_NEW_BLORP |
BRW_NEW_INDEX_BUFFER,
- .cache = 0,
},
.emit = gen8_emit_index_buffer,
};
const struct brw_tracked_state gen8_vf_topology = {
.dirty = {
.mesa = 0,
- .brw = BRW_NEW_PRIMITIVE,
- .cache = 0,
+ .brw = BRW_NEW_BLORP |
+ BRW_NEW_PRIMITIVE,
},
.emit = gen8_emit_vf_topology,
};