anv_foreach_stage(s, stages) {
if (cmd_buffer->state.samplers[s].alloc_size > 0) {
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS),
- ._3DCommandSubOpcode = sampler_state_opcodes[s],
- .PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
+ ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
+ ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
+ }
}
/* Always emit binding table pointers if we're asked to, since on SKL
* this is what flushes push constants. */
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_BINDING_TABLE_POINTERS_VS),
- ._3DCommandSubOpcode = binding_table_opcodes[s],
- .PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
+ btp._3DCommandSubOpcode = binding_table_opcodes[s];
+ btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
+ }
}
}
}
}
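For readers new to the block-style macro: anv_batch_emit_blk() replaces the
old designated-initializer call with a block whose body fills in a named
template struct, which gets packed into the batch when the block closes.
Its definition is not part of this hunk; the following is only a minimal
sketch of the mechanism, assuming the anv_batch_emit_dwords() and
__anv_cmd_*() helpers from anv_private.h:

   /* One-iteration for loop: declare the command template `name`, reserve
    * space in the batch, let the user's block fill in the fields, then
    * pack the template into the reserved dwords.  Sketch only; the
    * in-tree definition may differ (e.g. Valgrind annotations). */
   #define anv_batch_emit_blk(batch, cmd, name)                           \
      for (struct cmd name = { __anv_cmd_header(cmd) },                   \
           *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd));   \
           __builtin_expect(_dst != NULL, 1);                             \
           ({ __anv_cmd_pack(cmd)(batch, _dst, &name);                    \
              _dst = NULL; }))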
- anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_SCISSOR_STATE_POINTERS,
- .ScissorRectPointer = scissor_state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GEN7_3DSTATE_SCISSOR_STATE_POINTERS, ssp) {
+ ssp.ScissorRectPointer = scissor_state.offset;
+ }
if (!cmd_buffer->device->info.has_llc)
anv_state_clflush(scissor_state);
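The *_STATE_POINTERS packets above program offsets of states suballocated
from the dynamic state pool, while the clflush operates on the CPU mapping
of the same allocation.  For reference, the state handle looks roughly like
this (field layout per anv_private.h, an assumption here, not taken from
this patch):

   /* A suballocation from a state pool: `offset` is relative to the
    * pool's base (what *_STATE_POINTERS packets consume) and `map` is the
    * CPU mapping that must be clflushed on non-LLC parts.  Sketch only. */
   struct anv_state {
      uint32_t offset;
      uint32_t alloc_size;
      void *map;
   };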
unsigned push_constant_regs = reg_aligned_constant_size / 32;
if (push_state.alloc_size) {
- anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
- .CURBETotalDataLength = push_state.alloc_size,
- .CURBEDataStartAddress = push_state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
+ curbe.CURBETotalDataLength = push_state.alloc_size;
+ curbe.CURBEDataStartAddress = push_state.offset;
+ }
}
assert(prog_data->total_shared <= 64 * 1024);
pipeline->cs_thread_width_max);
const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
- anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
- .InterfaceDescriptorTotalLength = size,
- .InterfaceDescriptorDataStartAddress = state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), idl) {
+ idl.InterfaceDescriptorTotalLength = size;
+ idl.InterfaceDescriptorDataStartAddress = state.offset;
+ }
return VK_SUCCESS;
}
-#define emit_lri(batch, reg, imm) \
- anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), \
- .RegisterOffset = __anv_reg_num(reg), \
- .DataDWord = imm)
-
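With the emit_lri() helper gone, the register writes below open-code
MI_LOAD_REGISTER_IMM and take the MMIO offset from the generated
GENX(..._num) constants rather than the old __anv_reg_num() wrapper.  The
register values themselves are still built with anv_pack_struct(), as in
the L3CNTLREG3 lines further down; its definition is not shown here, but a
helper of that shape is typically just (a sketch, assuming the
genxml-generated <Name>_pack() functions):

   /* Build a designated-initializer template for the given genxml struct
    * and pack it into `dst`.  Sketch after anv_private.h conventions; the
    * in-tree macro may add Valgrind checks. */
   #define anv_pack_struct(dst, struc, ...) do {                          \
         struct struc __template = {                                      \
            __VA_ARGS__                                                   \
         };                                                               \
         struc ## _pack(NULL, (dst), &__template);                        \
      } while (0)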
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
* flushed, which involves a first PIPE_CONTROL flush which stalls the
* pipeline...
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DCFlushEnable = true,
- .PostSyncOperation = NoWrite,
- .CommandStreamerStallEnable = true);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DCFlushEnable = true;
+ pc.CommandStreamerStallEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ }
/* ...followed by a second pipelined PIPE_CONTROL that initiates
* invalidation of the relevant caches. Note that because RO
* previous and subsequent PIPE_CONTROLs already guarantee that there is
* no concurrent GPGPU kernel execution (see SKL HSD 2132585).
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .TextureCacheInvalidationEnable = true,
- .ConstantCacheInvalidationEnable = true,
- .InstructionCacheInvalidateEnable = true,
- .StateCacheInvalidationEnable = true,
- .PostSyncOperation = NoWrite);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.TextureCacheInvalidationEnable = true;
+ pc.ConstantCacheInvalidationEnable = true;
+ pc.InstructionCacheInvalidateEnable = true;
+ pc.StateCacheInvalidationEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ }
/* Now send a third stalling flush to make sure that invalidation is
* complete when the L3 configuration registers are modified.
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DCFlushEnable = true,
- .PostSyncOperation = NoWrite,
- .CommandStreamerStallEnable = true);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DCFlushEnable = true;
+ pc.CommandStreamerStallEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ }
anv_finishme("write GEN7_L3SQCREG1");
- emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2), l3cr2_val);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+ lri.RegisterOffset = GENX(L3CNTLREG2_num);
+ lri.DataDWord = l3cr2_val;
+ }
uint32_t l3cr3_slm, l3cr3_noslm;
anv_pack_struct(&l3cr3_noslm, GENX(L3CNTLREG3),
.CAllocation = 8,
.TAllocation = 8);
const uint32_t l3cr3_val = enable_slm ? l3cr3_slm : l3cr3_noslm;
- emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3), l3cr3_val);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+ lri.RegisterOffset = GENX(L3CNTLREG3_num);
+ lri.DataDWord = l3cr3_val;
+ }
cmd_buffer->state.current_l3_config = l3cr2_val;
}
if (!cmd_buffer->device->info.has_llc)
anv_state_clflush(cc_state);
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_CC_STATE_POINTERS),
- .ColorCalcStatePointer = cc_state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
+ ccp.ColorCalcStatePointer = cc_state.offset;
+ }
}
if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
pipeline->gen7.depth_stencil_state,
GENX(DEPTH_STENCIL_STATE_length), 64);
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS),
- .PointertoDEPTH_STENCIL_STATE = ds_state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), dsp) {
+ dsp.PointertoDEPTH_STENCIL_STATE = ds_state.offset;
+ }
}
if (cmd_buffer->state.gen7.index_buffer &&
uint32_t offset = cmd_buffer->state.gen7.index_offset;
#if GEN_IS_HASWELL
- anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF,
- .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
- .CutIndex = cmd_buffer->state.restart_index);
+ anv_batch_emit_blk(&cmd_buffer->batch, GEN75_3DSTATE_VF, vf) {
+ vf.IndexedDrawCutIndexEnable = pipeline->primitive_restart;
+ vf.CutIndex = cmd_buffer->state.restart_index;
+ }
#endif
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
#if !GEN_IS_HASWELL
- .CutIndexEnable = pipeline->primitive_restart,
+ ib.CutIndexEnable = pipeline->primitive_restart;
#endif
- .IndexFormat = cmd_buffer->state.gen7.index_type,
- .MemoryObjectControlState = GENX(MOCS),
- .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
- .BufferEndingAddress = { buffer->bo, buffer->offset + buffer->size });
+ ib.IndexFormat = cmd_buffer->state.gen7.index_type;
+ ib.MemoryObjectControlState = GENX(MOCS);
+
+ ib.BufferStartingAddress =
+ (struct anv_address) { buffer->bo, buffer->offset + offset };
+ ib.BufferEndingAddress =
+ (struct anv_address) { buffer->bo, buffer->offset + buffer->size };
+ }
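The index-buffer hunk also makes the address initializers explicit compound
literals of struct anv_address, since the block form assigns one field per
statement.  That type is just a BO/offset pair (field names per
anv_private.h, assumed here rather than taken from this patch):

   /* A GPU address as buffer object plus byte offset, so the relocation
    * machinery can patch in the final address at execbuf time.  Sketch
    * only; treat the exact layout as an assumption. */
   struct anv_bo;
   struct anv_address {
      struct anv_bo *bo;
      uint32_t offset;
   };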
}
cmd_buffer->state.dirty = 0;