anv_state_clflush(cc_state);
}
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
- .CCViewportPointer = cc_state.offset);
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
- .SFClipViewportPointer = sf_clip_state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), cc) {
+ cc.CCViewportPointer = cc_state.offset;
+ }
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), clip) {
+ clip.SFClipViewportPointer = sf_clip_state.offset;
+ }
}
#endif
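For context: anv_batch_emit_blk() wraps the attached block in a for loop that runs exactly once. The command struct is declared and header-initialized in the loop initializer, the block body fills in fields by name as ordinary statements (hence the trailing semicolons), and the loop's step expression packs the struct into the batch when the closing brace is reached. A minimal sketch of the macro, assuming the same __anv_cmd_* helpers used by the old variadic anv_batch_emit() macro:

#define anv_batch_emit_blk(batch, cmd, name)                           \
   for (struct cmd name = { __anv_cmd_header(cmd) },                   \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd));   \
        __builtin_expect(_dst != NULL, 1);                             \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name);                    \
           _dst = NULL; /* ensure a single iteration */                \
        }))

Compared to the designated-initializer style being removed, this keeps field assignments as real statements, so conditionals and address arithmetic can appear inline before the command is packed.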
-#define emit_lri(batch, reg, imm) \
- anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), \
- .RegisterOffset = __anv_reg_num(reg), \
- .DataDWord = imm)
-
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
{
* flushed, which involves a first PIPE_CONTROL flush which stalls the
* pipeline...
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DCFlushEnable = true,
- .PostSyncOperation = NoWrite,
- .CommandStreamerStallEnable = true);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DCFlushEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ pc.CommandStreamerStallEnable = true;
+ }
/* ...followed by a second pipelined PIPE_CONTROL that initiates
* invalidation of the relevant caches. Because read-only cache
* invalidation happens at the top of the pipeline, it cannot be combined
* with the previous stalling flush. No CS stall is needed on this one: the
* previous and subsequent PIPE_CONTROLs already guarantee that there is
* no concurrent GPGPU kernel execution (see SKL HSD 2132585).
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .TextureCacheInvalidationEnable = true,
- .ConstantCacheInvalidationEnable = true,
- .InstructionCacheInvalidateEnable = true,
- .StateCacheInvalidationEnable = true,
- .PostSyncOperation = NoWrite);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.TextureCacheInvalidationEnable = true;
+ pc.ConstantCacheInvalidationEnable = true;
+ pc.InstructionCacheInvalidateEnable = true;
+ pc.StateCacheInvalidationEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ }
/* Now send a third stalling flush to make sure that invalidation is
* complete when the L3 configuration registers are modified.
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DCFlushEnable = true,
- .PostSyncOperation = NoWrite,
- .CommandStreamerStallEnable = true);
-
- emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG), l3cr_val);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DCFlushEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ pc.CommandStreamerStallEnable = true;
+ }
+
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+ lri.RegisterOffset = GENX(L3CNTLREG_num);
+ lri.DataDWord = l3cr_val;
+ }
cmd_buffer->state.current_l3_config = l3cr_val;
}
}
if (!cmd_buffer->device->info.has_llc)
anv_state_clflush(cc_state);
- anv_batch_emit(&cmd_buffer->batch,
- GENX(3DSTATE_CC_STATE_POINTERS),
- .ColorCalcStatePointer = cc_state.offset,
- .ColorCalcStatePointerValid = true);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
+ ccp.ColorCalcStatePointer = cc_state.offset;
+ ccp.ColorCalcStatePointerValid = true;
+ }
}
if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
if (!cmd_buffer->device->info.has_llc)
anv_state_clflush(cc_state);
- anv_batch_emit(&cmd_buffer->batch,
- GEN9_3DSTATE_CC_STATE_POINTERS,
- .ColorCalcStatePointer = cc_state.offset,
- .ColorCalcStatePointerValid = true);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GEN9_3DSTATE_CC_STATE_POINTERS, ccp) {
+ ccp.ColorCalcStatePointer = cc_state.offset;
+ ccp.ColorCalcStatePointerValid = true;
+ }
}
if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
ANV_CMD_DIRTY_INDEX_BUFFER)) {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
- .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
- .CutIndex = cmd_buffer->state.restart_index,
- );
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_VF), vf) {
+ vf.IndexedDrawCutIndexEnable = pipeline->primitive_restart;
+ vf.CutIndex = cmd_buffer->state.restart_index;
+ }
}
cmd_buffer->state.dirty = 0;
cmd_buffer->state.restart_index = restart_index_for_type[indexType];
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
- .IndexFormat = vk_to_gen_index_type[indexType],
- .MemoryObjectControlState = GENX(MOCS),
- .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
- .BufferSize = buffer->size - offset);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
+ ib.IndexFormat = vk_to_gen_index_type[indexType];
+ ib.MemoryObjectControlState = GENX(MOCS);
+ ib.BufferStartingAddress =
+ (struct anv_address) { buffer->bo, buffer->offset + offset };
+ ib.BufferSize = buffer->size - offset;
+ }
cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
}
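This is the vkCmdBindIndexBuffer() path: the cut index recorded here is consumed by the 3DSTATE_VF packet above whenever the pipeline enables primitive restart. An illustrative application-side call, assuming cmd and buffer are a valid VkCommandBuffer and VkBuffer:

/* Bind 16-bit indices starting 256 bytes into the buffer; with primitive
 * restart enabled, index 0xffff then acts as the cut index. */
vkCmdBindIndexBuffer(cmd, buffer, 256, VK_INDEX_TYPE_UINT16);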
unsigned push_constant_regs = reg_aligned_constant_size / 32;
if (push_state.alloc_size) {
- anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
- .CURBETotalDataLength = push_state.alloc_size,
- .CURBEDataStartAddress = push_state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
+ curbe.CURBETotalDataLength = push_state.alloc_size;
+ curbe.CURBEDataStartAddress = push_state.offset;
+ }
}
assert(prog_data->total_shared <= 64 * 1024);
pipeline->cs_thread_width_max);
uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
- anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
- .InterfaceDescriptorTotalLength = size,
- .InterfaceDescriptorDataStartAddress = state.offset);
+ anv_batch_emit_blk(&cmd_buffer->batch,
+ GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
+ mid.InterfaceDescriptorTotalLength = size;
+ mid.InterfaceDescriptorDataStartAddress = state.offset;
+ }
return VK_SUCCESS;
}
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_event, event, _event);
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DestinationAddressType = DAT_PPGTT,
- .PostSyncOperation = WriteImmediateData,
- .Address = {
- &cmd_buffer->device->dynamic_state_block_pool.bo,
- event->state.offset
- },
- .ImmediateData = VK_EVENT_SET);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DestinationAddressType = DAT_PPGTT;
+ pc.PostSyncOperation = WriteImmediateData;
+ pc.Address = (struct anv_address) {
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ event->state.offset
+ };
+ pc.ImmediateData = VK_EVENT_SET;
+ }
}
void genX(CmdResetEvent)(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_event, event, _event);
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .DestinationAddressType = DAT_PPGTT,
- .PostSyncOperation = WriteImmediateData,
- .Address = {
- &cmd_buffer->device->dynamic_state_block_pool.bo,
- event->state.offset
- },
- .ImmediateData = VK_EVENT_RESET);
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DestinationAddressType = DAT_PPGTT;
+ pc.PostSyncOperation = WriteImmediateData;
+ pc.Address = (struct anv_address) {
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ event->state.offset
+ };
+ pc.ImmediateData = VK_EVENT_RESET;
+ }
}
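Together with CmdWaitEvents below, these give VkEvent its GPU-side semantics: the PIPE_CONTROL post-sync write stores VK_EVENT_SET or VK_EVENT_RESET into the event's dword in the dynamic state pool, and the wait path polls that same dword with MI_SEMAPHORE_WAIT until it reads VK_EVENT_SET. An illustrative application-side sequence, assuming cmd and event are a valid VkCommandBuffer and VkEvent:

/* Record a set followed by a wait; the wait is satisfied once the
 * PIPE_CONTROL write of VK_EVENT_SET reaches memory. */
vkCmdSetEvent(cmd, event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
vkCmdWaitEvents(cmd, 1, &event,
                VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, /* srcStageMask */
                VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,    /* dstStageMask */
                0, NULL,   /* memory barriers */
                0, NULL,   /* buffer memory barriers */
                0, NULL);  /* image memory barriers */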
void genX(CmdWaitEvents)(
for (uint32_t i = 0; i < eventCount; i++) {
ANV_FROM_HANDLE(anv_event, event, pEvents[i]);
- anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT),
- .WaitMode = PollingMode,
- .CompareOperation = COMPARE_SAD_EQUAL_SDD,
- .SemaphoreDataDword = VK_EVENT_SET,
- .SemaphoreAddress = {
- &cmd_buffer->device->dynamic_state_block_pool.bo,
- event->state.offset
- });
+ anv_batch_emit_blk(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
+ sem.WaitMode = PollingMode;
+ sem.CompareOperation = COMPARE_SAD_EQUAL_SDD;
+ sem.SemaphoreDataDword = VK_EVENT_SET;
+ sem.SemaphoreAddress = (struct anv_address) {
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ event->state.offset
+ };
+ }
}
genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,