static void
emit_vertex_bo(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *bo, uint32_t offset,
+ struct anv_address addr,
uint32_t size, uint32_t index)
{
uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
GENX(3DSTATE_VERTEX_BUFFERS));
GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
&(struct GENX(VERTEX_BUFFER_STATE)) {
.VertexBufferIndex = index,
.AddressModifyEnable = true,
.BufferPitch = 0,
#if (GEN_GEN >= 8)
.MemoryObjectControlState = GENX(MOCS),
- .BufferStartingAddress = { bo, offset },
+ .BufferStartingAddress = addr,
.BufferSize = size
#else
.VertexBufferMemoryObjectControlState = GENX(MOCS),
- .BufferStartingAddress = { bo, offset },
- .EndAddress = { bo, offset + size },
+ .BufferStartingAddress = addr,
+ .EndAddress = anv_address_add(addr, size),
#endif
});
}
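
/* For reference (not part of this diff): a minimal sketch of the
 * anv_address type and helper the patch moves to, as declared in
 * anv_private.h. Field widths here are assumptions, not copied from
 * the tree.
 */
struct anv_address {
struct anv_bo *bo;
uint32_t offset;
};

static inline struct anv_address
anv_address_add(struct anv_address addr, uint64_t offset)
{
addr.offset += offset;
return addr;
}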
static void
emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *bo, uint32_t offset)
+ struct anv_address addr)
{
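/* 8 bytes: the { base_vertex, base_instance } pair of uint32s that
 * backs the base-vertex/base-instance system-value vertex buffer. */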
- emit_vertex_bo(cmd_buffer, bo, offset, 8, ANV_SVGS_VB_INDEX);
+ emit_vertex_bo(cmd_buffer, addr, 8, ANV_SVGS_VB_INDEX);
}
static void
emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
uint32_t base_vertex, uint32_t base_instance)
anv_state_flush(cmd_buffer->device, id_state);
- emit_base_vertex_instance_bo(cmd_buffer,
- &cmd_buffer->device->dynamic_state_pool.block_pool.bo, id_state.offset);
+ struct anv_address addr = {
+ .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .offset = id_state.offset,
+ };
+
+ emit_base_vertex_instance_bo(cmd_buffer, addr);
}
static void
emit_draw_index(struct anv_cmd_buffer *cmd_buffer, uint32_t draw_index)
anv_state_flush(cmd_buffer->device, state);
- emit_vertex_bo(cmd_buffer,
- &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
- state.offset, 4, ANV_DRAWID_VB_INDEX);
+ struct anv_address addr = {
+ .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .offset = state.offset,
+ };
+
+ emit_vertex_bo(cmd_buffer, addr, 4, ANV_DRAWID_VB_INDEX);
}
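
/* For reference (not part of this diff): the register-load helpers used
 * by the hunks below, as defined earlier in this file. This is a
 * paraphrased sketch; the exact genxml field names are assumptions.
 */
static void
emit_lrm(struct anv_batch *batch,
uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
lrm.RegisterAddress = reg;
lrm.MemoryAddress = (struct anv_address) { bo, offset };
}
}

static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
lri.RegisterOffset = reg;
lri.DataDWord = imm;
}
}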
void genX(CmdDraw)(
static void
load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
- struct anv_buffer *buffer, uint64_t offset,
+ struct anv_address addr,
bool indexed)
{
struct anv_batch *batch = &cmd_buffer->batch;
- struct anv_bo *bo = buffer->bo;
- uint32_t bo_offset = buffer->offset + offset;
- emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
+ emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, addr.bo, addr.offset);
unsigned view_count = anv_subpass_view_count(cmd_buffer->state.subpass);
if (view_count > 1) {
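/* Multiview is implemented with instancing, so the instance count read
 * from the indirect buffer must be scaled by the view count on the GPU
 * before it lands in 3DPRIM_INSTANCE_COUNT. */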
#if GEN_IS_HASWELL || GEN_GEN >= 8
- emit_lrm(batch, CS_GPR(0), bo, bo_offset + 4);
+ emit_lrm(batch, CS_GPR(0), addr.bo, addr.offset + 4);
emit_mul_gpr0(batch, view_count);
emit_lrr(batch, GEN7_3DPRIM_INSTANCE_COUNT, CS_GPR(0));
#else
anv_finishme("Multiview + indirect draw requires MI_MATH; "
"MI_MATH is not supported on Ivy Bridge");
- emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, addr.bo, addr.offset + 4);
#endif
} else {
- emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+ emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, addr.bo, addr.offset + 4);
}
- emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
+ emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, addr.bo, addr.offset + 8);
if (indexed) {
- emit_lrm(batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
- emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
+ emit_lrm(batch, GEN7_3DPRIM_BASE_VERTEX, addr.bo, addr.offset + 12);
+ emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, addr.bo, addr.offset + 16);
} else {
- emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
+ emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, addr.bo, addr.offset + 12);
emit_lri(batch, GEN7_3DPRIM_BASE_VERTEX, 0);
}
}
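
/* For reference (not part of this diff): the offsets above follow the
 * Vulkan indirect draw layouts, which is why the indexed path reads
 * BASE_VERTEX at +12 and START_INSTANCE at +16 while the non-indexed
 * path reads START_INSTANCE at +12:
 *
 *    typedef struct VkDrawIndirectCommand {
 *       uint32_t vertexCount;    // +0  -> 3DPRIM_VERTEX_COUNT
 *       uint32_t instanceCount;  // +4  -> 3DPRIM_INSTANCE_COUNT
 *       uint32_t firstVertex;    // +8  -> 3DPRIM_START_VERTEX
 *       uint32_t firstInstance;  // +12 -> 3DPRIM_START_INSTANCE
 *    } VkDrawIndirectCommand;
 *
 *    typedef struct VkDrawIndexedIndirectCommand {
 *       uint32_t indexCount;     // +0  -> 3DPRIM_VERTEX_COUNT
 *       uint32_t instanceCount;  // +4  -> 3DPRIM_INSTANCE_COUNT
 *       uint32_t firstIndex;     // +8  -> 3DPRIM_START_VERTEX
 *       int32_t  vertexOffset;   // +12 -> 3DPRIM_BASE_VERTEX
 *       uint32_t firstInstance;  // +16 -> 3DPRIM_START_INSTANCE
 *    } VkDrawIndexedIndirectCommand;
 */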
genX(cmd_buffer_flush_state)(cmd_buffer);
for (uint32_t i = 0; i < drawCount; i++) {
- struct anv_bo *bo = buffer->bo;
- uint32_t bo_offset = buffer->offset + offset;
+ struct anv_address draw = {
+ .bo = buffer->bo,
+ .offset = buffer->offset + offset,
+ };
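/* firstVertex (+8) and firstInstance (+12) are adjacent uint32s in
 * VkDrawIndirectCommand, so the 8-byte system-value vertex buffer can
 * point straight into the application's indirect buffer. */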
if (vs_prog_data->uses_firstvertex ||
vs_prog_data->uses_baseinstance)
- emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
+ emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 8));
if (vs_prog_data->uses_drawid)
emit_draw_index(cmd_buffer, i);
- load_indirect_parameters(cmd_buffer, buffer, offset, false);
+ load_indirect_parameters(cmd_buffer, draw, false);
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
prim.IndirectParameterEnable = true;
genX(cmd_buffer_flush_state)(cmd_buffer);
for (uint32_t i = 0; i < drawCount; i++) {
- struct anv_bo *bo = buffer->bo;
- uint32_t bo_offset = buffer->offset + offset;
+ struct anv_address draw = {
+ .bo = buffer->bo,
+ .offset = buffer->offset + offset,
+ };
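/* Here the pair at +12 is { vertexOffset, firstInstance } from
 * VkDrawIndexedIndirectCommand, again adjacent in memory. */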
/* TODO: We need to stomp base vertex to 0 somehow */
if (vs_prog_data->uses_firstvertex ||
vs_prog_data->uses_baseinstance)
- emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
+ emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 12));
if (vs_prog_data->uses_drawid)
emit_draw_index(cmd_buffer, i);
- load_indirect_parameters(cmd_buffer, buffer, offset, true);
+ load_indirect_parameters(cmd_buffer, draw, true);
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
prim.IndirectParameterEnable = true;
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
- struct anv_bo *bo = buffer->bo;
- uint32_t bo_offset = buffer->offset + offset;
+ struct anv_address addr = {
+ .bo = buffer->bo,
+ .offset = buffer->offset + offset,
+ };
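/* addr now points at a VkDispatchIndirectCommand: x at +0, y at +4,
 * z at +8, matching the three DISPATCHDIM loads below. */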
struct anv_batch *batch = &cmd_buffer->batch;
anv_cmd_buffer_push_base_group_id(cmd_buffer, 0, 0, 0);
return;
#endif
- if (prog_data->uses_num_work_groups) {
- cmd_buffer->state.compute.num_workgroups = (struct anv_address) {
- .bo = bo,
- .offset = bo_offset,
- };
- }
+ if (prog_data->uses_num_work_groups)
+ cmd_buffer->state.compute.num_workgroups = addr;
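/* Compute shaders that read gl_NumWorkGroups fetch it from this address;
 * the compute state flush below surfaces it to the kernel. */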
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
- emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
- emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
- emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
+ emit_lrm(batch, GPGPU_DISPATCHDIMX, addr.bo, addr.offset);
+ emit_lrm(batch, GPGPU_DISPATCHDIMY, addr.bo, addr.offset + 4);
+ emit_lrm(batch, GPGPU_DISPATCHDIMZ, addr.bo, addr.offset + 8);
#if GEN_GEN <= 7
/* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
/* Load compute_dispatch_indirect_x_size into SRC0 */
- emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
+ emit_lrm(batch, MI_PREDICATE_SRC0, addr.bo, addr.offset + 0);
/* predicate = (compute_dispatch_indirect_x_size == 0); */
anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
mip.LoadOperation = LOAD_LOAD;
mip.CombineOperation = COMBINE_SET;
mip.CompareOperation = COMPARE_SRCS_EQUAL;
}
/* Load compute_dispatch_indirect_y_size into SRC0 */
- emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
+ emit_lrm(batch, MI_PREDICATE_SRC0, addr.bo, addr.offset + 4);
/* predicate |= (compute_dispatch_indirect_y_size == 0); */
anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
mip.LoadOperation = LOAD_LOAD;
mip.CombineOperation = COMBINE_OR;
mip.CompareOperation = COMPARE_SRCS_EQUAL;
}
/* Load compute_dispatch_indirect_z_size into SRC0 */
- emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
+ emit_lrm(batch, MI_PREDICATE_SRC0, addr.bo, addr.offset + 8);
/* predicate |= (compute_dispatch_indirect_z_size == 0); */
anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {