struct gl_context *ctx = &brw->ctx;
uint32_t dw0;
- if (brw->gen >= 6) {
+ if (brw->gen >= 8) {
+ dw0 = buffer_nr << GEN6_VB0_INDEX_SHIFT;
+ } else if (brw->gen >= 6) {
dw0 = (buffer_nr << GEN6_VB0_INDEX_SHIFT) |
(step_rate ? GEN6_VB0_ACCESS_INSTANCEDATA
                    : GEN6_VB0_ACCESS_VERTEXDATA);
   } else {
      dw0 = (buffer_nr << BRW_VB0_INDEX_SHIFT) |
            (step_rate ? BRW_VB0_ACCESS_INSTANCEDATA
                       : BRW_VB0_ACCESS_VERTEXDATA);
   }
if (brw->gen >= 7)
dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
- if (brw->gen == 7)
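+ /* Memory Object Control State (MOCS): request L3 caching on Gen7 and
+  * write-back caching on Gen8/Gen9. */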
+ switch (brw->gen) {
+ case 7:
dw0 |= GEN7_MOCS_L3 << 16;
+ break;
+ case 8:
+ dw0 |= BDW_MOCS_WB << 16;
+ break;
+ case 9:
+ dw0 |= SKL_MOCS_WB << 16;
+ break;
+ }
WARN_ONCE(stride >= (brw->gen >= 5 ? 2048 : 2047),
"VBO stride %d too large, bad rendering may occur\n",
stride);
OUT_BATCH(dw0 | (stride << BRW_VB0_PITCH_SHIFT));
- OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
- if (brw->gen >= 5) {
+ if (brw->gen >= 8) {
+ OUT_RELOC64(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
+ /* From the BSpec: 3D Pipeline Stages - 3D Pipeline Geometry -
+ * Vertex Fetch (VF) Stage - State
+ *
+ * Instead of "VBState.StartingBufferAddress + VBState.MaxIndex x
+ * VBState.BufferPitch", the address of the byte immediately beyond the
+ * last valid byte of the buffer is determined by
+ * "VBState.StartingBufferAddress + VBState.BufferSize".
+ */
+ OUT_BATCH(end_offset - start_offset);
+ } else if (brw->gen >= 5) {
+ OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
/* From the BSpec: 3D Pipeline Stages - 3D Pipeline Geometry -
 * Vertex Fetch (VF) Stage - State
 *
 * The address of the byte immediately beyond the last valid byte of
 * the buffer is "VBState.EndAddress + 1", so the End Address emitted
 * below must point at the last valid byte: end_offset - 1.
 */
OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, end_offset - 1);
+ OUT_BATCH(step_rate);
} else {
+ OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
OUT_BATCH(0);
+ OUT_BATCH(step_rate);
}
- OUT_BATCH(step_rate);
return __map;
}
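
/* For context: the EMIT_VERTEX_BUFFER_STATE() calls below assume a thin
 * wrapper macro along these lines (a sketch; the exact upstream definition
 * may differ), which threads the batchbuffer write pointer __map through
 * the helper -- hence the "return __map" above:
 *
 *   #define EMIT_VERTEX_BUFFER_STATE(...) \
 *      __map = emit_vertex_buffer_state(__VA_ARGS__, __map)
 */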
static void
gen8_emit_vertices(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
- uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
bool uses_edge_flag;
brw_prepare_vertices(brw);
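/* Each vertex buffer takes four DWords of state plus one DWord for the
 * packet header; nr_buffers (computed above, not shown here) must also
 * count the optional draw-parameters and draw-ID buffers emitted below.
 * As usual for i965 state packets, the DWord Length field in the header
 * is the total packet size minus two.
 */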
BEGIN_BATCH(1 + 4 * nr_buffers);
OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4 * nr_buffers - 1));
for (unsigned i = 0; i < brw->vb.nr_buffers; i++) {
- struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
- uint32_t dw0 = 0;
-
- dw0 |= i << GEN6_VB0_INDEX_SHIFT;
- dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;
- dw0 |= buffer->stride << BRW_VB0_PITCH_SHIFT;
- dw0 |= mocs_wb << 16;
-
- OUT_BATCH(dw0);
- OUT_RELOC64(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
- OUT_BATCH(buffer->size);
+ const struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
+ EMIT_VERTEX_BUFFER_STATE(brw, i, buffer->bo,
+ buffer->offset,
+ buffer->offset + buffer->size,
+ buffer->stride, 0 /* unused */);
}
if (uses_draw_params) {
- OUT_BATCH(brw->vb.nr_buffers << GEN6_VB0_INDEX_SHIFT |
- GEN7_VB0_ADDRESS_MODIFYENABLE |
- mocs_wb << 16);
- OUT_RELOC64(brw->draw.draw_params_bo, I915_GEM_DOMAIN_VERTEX, 0,
- brw->draw.draw_params_offset);
- OUT_BATCH(brw->draw.draw_params_bo->size);
+ EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers,
+ brw->draw.draw_params_bo,
+ brw->draw.draw_params_offset,
+ brw->draw.draw_params_bo->size,
+ 0 /* stride */,
+ 0 /* unused */);
}
if (brw->vs.prog_data->uses_drawid) {
- OUT_BATCH((brw->vb.nr_buffers + 1) << GEN6_VB0_INDEX_SHIFT |
- GEN7_VB0_ADDRESS_MODIFYENABLE |
- mocs_wb << 16);
- OUT_RELOC64(brw->draw.draw_id_bo, I915_GEM_DOMAIN_VERTEX, 0,
- brw->draw.draw_id_offset);
- OUT_BATCH(brw->draw.draw_id_bo->size);
+ EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers + 1,
+ brw->draw.draw_id_bo,
+ brw->draw.draw_id_offset,
+ brw->draw.draw_id_bo->size,
+ 0 /* stride */,
+ 0 /* unused */);
}
ADVANCE_BATCH();
}
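
/* Sketch of the per-buffer state emitted above, as derived from the code
 * (four DWords per buffer on every generation):
 *
 *   Gen8+:  DW0 = index | modify-enable | MOCS | pitch
 *           DW1..DW2 = 64-bit start address (OUT_RELOC64)
 *           DW3 = buffer size in bytes
 *
 *   Gen5-7: DW0 = index | access type | [modify-enable/MOCS on Gen7] | pitch
 *           DW1 = start address
 *           DW2 = address of the last valid byte
 *           DW3 = instance step rate
 *
 *   Gen4 matches Gen5-7 except that DW2 is emitted as 0.
 */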