* this, we get GPU hangs when using multi-level command buffers which
* clear depth, reset state base address, and then go render stuff.
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .RenderTargetCacheFlushEnable = true);
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.RenderTargetCacheFlushEnable = true;
+ }
#endif
- anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS),
- .GeneralStateBaseAddress = { scratch_bo, 0 },
- .GeneralStateMemoryObjectControlState = GENX(MOCS),
- .GeneralStateBaseAddressModifyEnable = true,
+ anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
+ sba.GeneralStateBaseAddress = (struct anv_address) { scratch_bo, 0 };
+ sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
+ sba.GeneralStateBaseAddressModifyEnable = true;
- .SurfaceStateBaseAddress = anv_cmd_buffer_surface_base_address(cmd_buffer),
- .SurfaceStateMemoryObjectControlState = GENX(MOCS),
- .SurfaceStateBaseAddressModifyEnable = true,
+ sba.SurfaceStateBaseAddress =
+ anv_cmd_buffer_surface_base_address(cmd_buffer);
+ sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
+ sba.SurfaceStateBaseAddressModifyEnable = true;
- .DynamicStateBaseAddress = { &device->dynamic_state_block_pool.bo, 0 },
- .DynamicStateMemoryObjectControlState = GENX(MOCS),
- .DynamicStateBaseAddressModifyEnable = true,
+ sba.DynamicStateBaseAddress =
+ (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
+ sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
+ sba.DynamicStateBaseAddressModifyEnable = true;
- .IndirectObjectBaseAddress = { NULL, 0 },
- .IndirectObjectMemoryObjectControlState = GENX(MOCS),
- .IndirectObjectBaseAddressModifyEnable = true,
+ sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
+ sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
+ sba.IndirectObjectBaseAddressModifyEnable = true;
- .InstructionBaseAddress = { &device->instruction_block_pool.bo, 0 },
- .InstructionMemoryObjectControlState = GENX(MOCS),
- .InstructionBaseAddressModifyEnable = true,
+ sba.InstructionBaseAddress =
+ (struct anv_address) { &device->instruction_block_pool.bo, 0 };
+ sba.InstructionMemoryObjectControlState = GENX(MOCS);
+ sba.InstructionBaseAddressModifyEnable = true;
# if (GEN_GEN >= 8)
/* Broadwell requires that we specify a buffer size for a bunch of
* these fields. However, since we will be growing the BO's live, we
* just set them all to the maximum.
*/
- .GeneralStateBufferSize = 0xfffff,
- .GeneralStateBufferSizeModifyEnable = true,
- .DynamicStateBufferSize = 0xfffff,
- .DynamicStateBufferSizeModifyEnable = true,
- .IndirectObjectBufferSize = 0xfffff,
- .IndirectObjectBufferSizeModifyEnable = true,
- .InstructionBufferSize = 0xfffff,
- .InstructionBuffersizeModifyEnable = true,
+ sba.GeneralStateBufferSize = 0xfffff;
+ sba.GeneralStateBufferSizeModifyEnable = true;
+ sba.DynamicStateBufferSize = 0xfffff;
+ sba.DynamicStateBufferSizeModifyEnable = true;
+ sba.IndirectObjectBufferSize = 0xfffff;
+ sba.IndirectObjectBufferSizeModifyEnable = true;
+ sba.InstructionBufferSize = 0xfffff;
+ sba.InstructionBuffersizeModifyEnable = true;
# endif
- );
+ }
/* After re-setting the surface state base address, we have to do some
* cache flushing so that the sampler engine will pick up the new
* SURFACE_STATE objects and binding tables. The theory is that all of the
* sampling/rendering units cache the binding table in the texture cache.
* However, we have yet to be able to actually confirm this.
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
- .TextureCacheInvalidationEnable = true);
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.TextureCacheInvalidationEnable = true;
+ }
+}
+
+void
+genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
+{
+ enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;
+
+ /* Flushes are pipelined while invalidations are handled immediately.
+ * Therefore, if we're flushing anything then we need to schedule a stall
+ * before any invalidations can happen.
+ */
+ if (bits & ANV_PIPE_FLUSH_BITS)
+ bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;
+
+ /* If we're going to do an invalidate and we have a pending CS stall that
+ * has yet to be resolved, we do the CS stall now.
+ */
+ if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
+ (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
+ bits |= ANV_PIPE_CS_STALL_BIT;
+ bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
+ }
+
+ if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
+ pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
+ pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
+ pipe.RenderTargetCacheFlushEnable =
+ bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
+
+ pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
+ pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
+ pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
+
+ /*
+ * According to the Broadwell documentation, any PIPE_CONTROL with the
+ * "Command Streamer Stall" bit set must also have another bit set,
+ * with one of the following options:
+ *
+ * - Render Target Cache Flush
+ * - Depth Cache Flush
+ * - Stall at Pixel Scoreboard
+ * - Post-Sync Operation
+ * - Depth Stall
+ * - DC Flush Enable
+ *
+ * I chose "Stall at Pixel Scoreboard" since that's what we use in
+ * mesa and it seems to work fine. The choice is fairly arbitrary.
+ */
+ if ((bits & ANV_PIPE_CS_STALL_BIT) &&
+ !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT |
+ ANV_PIPE_STALL_AT_SCOREBOARD_BIT)))
+ pipe.StallAtPixelScoreboard = true;
+ }
+
+ bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
+ }
+
+ if (bits & ANV_PIPE_INVALIDATE_BITS) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
+ pipe.StateCacheInvalidationEnable =
+ bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
+ pipe.ConstantCacheInvalidationEnable =
+ bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
+ pipe.VFCacheInvalidationEnable =
+ bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
+ pipe.TextureCacheInvalidationEnable =
+ bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
+ pipe.InstructionCacheInvalidateEnable =
+ bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
+ }
+
+ bits &= ~ANV_PIPE_INVALIDATE_BITS;
+ }
+
+ cmd_buffer->state.pending_pipe_bits = bits;
}
void genX(CmdPipelineBarrier)(
const VkImageMemoryBarrier* pImageMemoryBarriers)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- uint32_t b, *dw;
+ uint32_t b;
/* XXX: Right now, we're really dumb and just flush whatever categories
* the app asks for. One of these days we may make this a bit better
dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
}
- /* Mask out the Source access flags we care about */
- const uint32_t src_mask =
- VK_ACCESS_SHADER_WRITE_BIT |
- VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_TRANSFER_WRITE_BIT;
-
- src_flags = src_flags & src_mask;
-
- /* Mask out the destination access flags we care about */
- const uint32_t dst_mask =
- VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
- VK_ACCESS_INDEX_READ_BIT |
- VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
- VK_ACCESS_UNIFORM_READ_BIT |
- VK_ACCESS_SHADER_READ_BIT |
- VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- VK_ACCESS_TRANSFER_READ_BIT;
-
- dst_flags = dst_flags & dst_mask;
-
- /* The src flags represent how things were used previously. This is
- * what we use for doing flushes.
- */
- struct GENX(PIPE_CONTROL) flush_cmd = {
- GENX(PIPE_CONTROL_header),
- .PostSyncOperation = NoWrite,
- };
+ enum anv_pipe_bits pipe_bits = 0;
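+
+ /* The source access flags describe how data was written before the
+ * barrier, so they determine which caches need to be flushed.
+ */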
for_each_bit(b, src_flags) {
switch ((VkAccessFlagBits)(1 << b)) {
case VK_ACCESS_SHADER_WRITE_BIT:
- flush_cmd.DCFlushEnable = true;
+ pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
break;
case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
- flush_cmd.RenderTargetCacheFlushEnable = true;
+ pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
break;
case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
- flush_cmd.DepthCacheFlushEnable = true;
+ pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
break;
case VK_ACCESS_TRANSFER_WRITE_BIT:
- flush_cmd.RenderTargetCacheFlushEnable = true;
- flush_cmd.DepthCacheFlushEnable = true;
+ pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
+ pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
break;
default:
- unreachable("should've masked this out by now");
+ break; /* Nothing to do */
}
}
- /* If we end up doing two PIPE_CONTROLs, the first, flusing one also has to
- * stall and wait for the flushing to finish, so we don't re-dirty the
- * caches with in-flight rendering after the second PIPE_CONTROL
- * invalidates.
- */
-
- if (dst_flags)
- flush_cmd.CommandStreamerStallEnable = true;
-
- if (src_flags && dst_flags) {
- dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
- GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
- }
-
- /* The dst flags represent how things will be used in the future. This
- * is what we use for doing cache invalidations.
- */
- struct GENX(PIPE_CONTROL) invalidate_cmd = {
- GENX(PIPE_CONTROL_header),
- .PostSyncOperation = NoWrite,
- };
-
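+ /* The destination access flags describe how data will be read after the
+ * barrier, so they determine which caches need to be invalidated.
+ */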
for_each_bit(b, dst_flags) {
switch ((VkAccessFlagBits)(1 << b)) {
case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
case VK_ACCESS_INDEX_READ_BIT:
case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
- invalidate_cmd.VFCacheInvalidationEnable = true;
+ pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
break;
case VK_ACCESS_UNIFORM_READ_BIT:
- invalidate_cmd.ConstantCacheInvalidationEnable = true;
- /* fallthrough */
- case VK_ACCESS_SHADER_READ_BIT:
- invalidate_cmd.TextureCacheInvalidationEnable = true;
+ pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
+ pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
break;
+ case VK_ACCESS_SHADER_READ_BIT:
case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
- invalidate_cmd.TextureCacheInvalidationEnable = true;
- break;
case VK_ACCESS_TRANSFER_READ_BIT:
- invalidate_cmd.TextureCacheInvalidationEnable = true;
+ pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
break;
default:
- unreachable("should've masked this out by now");
+ break; /* Nothing to do */
}
}
- if (dst_flags) {
- dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
- GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
+ cmd_buffer->state.pending_pipe_bits |= pipe_bits;
+}
+
+static void
+cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
+{
+ VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;
+
+ /* In order to avoid thrash, we assume that vertex and fragment stages
+ * always exist. In the rare case where one is missing *and* the other
+ * uses push constants, this may be suboptimal. However, avoiding stalls
+ * seems more important.
+ */
+ stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;
+
+ if (stages == cmd_buffer->state.push_constant_stages)
+ return;
+
+#if GEN_GEN >= 8
+ const unsigned push_constant_kb = 32;
+#elif GEN_IS_HASWELL
+ const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
+#else
+ const unsigned push_constant_kb = 16;
+#endif
+
+ const unsigned num_stages =
+ _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
+ unsigned size_per_stage = push_constant_kb / num_stages;
+
+ /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
+ * units of 2KB. Incidentally, these are the same platforms that have
+ * 32KB worth of push constant space.
+ */
+ if (push_constant_kb == 32)
+ size_per_stage &= ~1u;
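+ /* For example (numbers purely illustrative): 16KB shared by just VS and
+ * FS gives 8KB per stage, while 32KB shared by four stages gives 8KB per
+ * stage, which is already a multiple of 2KB so no rounding is needed.
+ */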
+
+ uint32_t kb_used = 0;
+ for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
+ unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
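+ /* The 3DSTATE_PUSH_CONSTANT_ALLOC_* packets share one layout and have
+ * consecutive sub-opcodes starting at 18 for the VS, so we emit the VS
+ * packet as a template and patch the sub-opcode for each stage.
+ */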
+ anv_batch_emit(&cmd_buffer->batch,
+ GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
+ alloc._3DCommandSubOpcode = 18 + i;
+ alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
+ alloc.ConstantBufferSize = push_size;
+ }
+ kb_used += push_size;
}
+
+ anv_batch_emit(&cmd_buffer->batch,
+ GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
+ alloc.ConstantBufferOffset = kb_used;
+ alloc.ConstantBufferSize = push_constant_kb - kb_used;
+ }
+
+ cmd_buffer->state.push_constant_stages = stages;
+
+ /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
+ *
+ * "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
+ * the next 3DPRIMITIVE command after programming the
+ * 3DSTATE_PUSH_CONSTANT_ALLOC_VS"
+ *
+ * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
+ * pipeline setup, we need to dirty push constants.
+ */
+ cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
+}
+
+static uint32_t
+cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
+{
+ static const uint32_t push_constant_opcodes[] = {
+ [MESA_SHADER_VERTEX] = 21,
+ [MESA_SHADER_TESS_CTRL] = 25, /* HS */
+ [MESA_SHADER_TESS_EVAL] = 26, /* DS */
+ [MESA_SHADER_GEOMETRY] = 22,
+ [MESA_SHADER_FRAGMENT] = 23,
+ [MESA_SHADER_COMPUTE] = 0,
+ };
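+
+ /* These are the 3DCommandSubOpcode values of the per-stage
+ * 3DSTATE_CONSTANT_* packets; the 3DSTATE_CONSTANT_VS packet below is
+ * emitted for every stage with the sub-opcode swapped in from this table.
+ */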
+
+ VkShaderStageFlags flushed = 0;
+
+ anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
+ if (stage == MESA_SHADER_COMPUTE)
+ continue;
+
+ struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
+
+ if (state.offset == 0) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
+ c._3DCommandSubOpcode = push_constant_opcodes[stage];
+ } else {
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
+ c._3DCommandSubOpcode = push_constant_opcodes[stage];
+ c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
+#if GEN_GEN >= 9
+ .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
+ .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
+#else
+ .PointerToConstantBuffer0 = { .offset = state.offset },
+ .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
+#endif
+ };
+ }
+ }
+
+ flushed |= mesa_to_vk_shader_stage(stage);
+ }
+
+ cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
+
+ return flushed;
+}
+
+void
+genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
+{
+ struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ uint32_t *p;
+
+ uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
+
+ assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
+
+ genX(cmd_buffer_config_l3)(cmd_buffer, pipeline);
+
+ genX(flush_pipeline_select_3d)(cmd_buffer);
+
+ if (vb_emit) {
+ const uint32_t num_buffers = __builtin_popcount(vb_emit);
+ const uint32_t num_dwords = 1 + num_buffers * 4;
+
+ p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
+ GENX(3DSTATE_VERTEX_BUFFERS));
+ uint32_t vb, i = 0;
+ for_each_bit(vb, vb_emit) {
+ struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
+ uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
+
+ struct GENX(VERTEX_BUFFER_STATE) state = {
+ .VertexBufferIndex = vb,
+
+#if GEN_GEN >= 8
+ .MemoryObjectControlState = GENX(MOCS),
+#else
+ .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
+ .InstanceDataStepRate = 1,
+ .VertexBufferMemoryObjectControlState = GENX(MOCS),
+#endif
+
+ .AddressModifyEnable = true,
+ .BufferPitch = pipeline->binding_stride[vb],
+ .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
+
+#if GEN_GEN >= 8
+ .BufferSize = buffer->size - offset
+#else
+ .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
+#endif
+ };
+
+ GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
+ i++;
+ }
+ }
+
+ cmd_buffer->state.vb_dirty &= ~vb_emit;
+
+ if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
+ /* If somebody compiled a pipeline after starting a command buffer the
+ * scratch bo may have grown since we started this cmd buffer (and
+ * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
+ * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
+ if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
+ anv_cmd_buffer_emit_state_base_address(cmd_buffer);
+
+ anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+
+ /* If the pipeline changed, we may need to re-allocate push constant
+ * space in the URB.
+ */
+ cmd_buffer_alloc_push_constants(cmd_buffer);
+ }
+
+#if GEN_GEN <= 7
+ if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
+ cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
+ /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
+ *
+ * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
+ * stall needs to be sent just prior to any 3DSTATE_VS,
+ * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
+ * 3DSTATE_BINDING_TABLE_POINTER_VS,
+ * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
+ * PIPE_CONTROL needs to be sent before any combination of VS
+ * associated 3DSTATE."
+ */
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DepthStallEnable = true;
+ pc.PostSyncOperation = WriteImmediateData;
+ pc.Address =
+ (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
+ }
+ }
+#endif
+
+ /* We emit the binding tables and sampler tables first, then emit push
+ * constants and then finally emit binding table and sampler table
+ * pointers. It has to happen in this order, since emitting the binding
+ * tables may change the push constants (in case of storage images). After
+ * emitting push constants, on SKL+ we have to emit the corresponding
+ * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
+ */
+ uint32_t dirty = 0;
+ if (cmd_buffer->state.descriptors_dirty)
+ dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
+
+ if (cmd_buffer->state.push_constants_dirty) {
+#if GEN_GEN >= 9
+ /* On Sky Lake and later, the binding table pointers commands are
+ * what actually flush the changes to push constant state so we need
+ * to dirty them so they get re-emitted below.
+ */
+ dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
+#else
+ cmd_buffer_flush_push_constants(cmd_buffer);
+#endif
+ }
+
+ if (dirty)
+ gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
+
+ if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
+ gen8_cmd_buffer_emit_viewport(cmd_buffer);
+
+ if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
+ gen7_cmd_buffer_emit_scissor(cmd_buffer);
+
+ genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
+
+ genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}
static void
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
genX(cmd_buffer_flush_state)(cmd_buffer);
- if (cmd_buffer->state.pipeline->vs_prog_data.uses_basevertex ||
- cmd_buffer->state.pipeline->vs_prog_data.uses_baseinstance)
+ if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
- anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
- .VertexAccessType = SEQUENTIAL,
- .PrimitiveTopologyType = pipeline->topology,
- .VertexCountPerInstance = vertexCount,
- .StartVertexLocation = firstVertex,
- .InstanceCount = instanceCount,
- .StartInstanceLocation = firstInstance,
- .BaseVertexLocation = 0);
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+ prim.VertexAccessType = SEQUENTIAL;
+ prim.PrimitiveTopologyType = pipeline->topology;
+ prim.VertexCountPerInstance = vertexCount;
+ prim.StartVertexLocation = firstVertex;
+ prim.InstanceCount = instanceCount;
+ prim.StartInstanceLocation = firstInstance;
+ prim.BaseVertexLocation = 0;
+ }
}
void genX(CmdDrawIndexed)(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
genX(cmd_buffer_flush_state)(cmd_buffer);
- if (cmd_buffer->state.pipeline->vs_prog_data.uses_basevertex ||
- cmd_buffer->state.pipeline->vs_prog_data.uses_baseinstance)
+ if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
- anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
- .VertexAccessType = RANDOM,
- .PrimitiveTopologyType = pipeline->topology,
- .VertexCountPerInstance = indexCount,
- .StartVertexLocation = firstIndex,
- .InstanceCount = instanceCount,
- .StartInstanceLocation = firstInstance,
- .BaseVertexLocation = vertexOffset);
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+ prim.VertexAccessType = RANDOM;
+ prim.PrimitiveTopologyType = pipeline->topology;
+ prim.VertexCountPerInstance = indexCount;
+ prim.StartVertexLocation = firstIndex;
+ prim.InstanceCount = instanceCount;
+ prim.StartInstanceLocation = firstInstance;
+ prim.BaseVertexLocation = vertexOffset;
+ }
}
/* Auto-Draw / Indirect Registers */
emit_lrm(struct anv_batch *batch,
uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
- anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
- .RegisterAddress = reg,
- .MemoryAddress = { bo, offset });
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = reg;
+ lrm.MemoryAddress = (struct anv_address) { bo, offset };
+ }
}
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
- anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
- .RegisterOffset = reg,
- .DataDWord = imm);
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+ lri.RegisterOffset = reg;
+ lri.DataDWord = imm;
+ }
}
void genX(CmdDrawIndirect)(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
struct anv_bo *bo = buffer->bo;
uint32_t bo_offset = buffer->offset + offset;
genX(cmd_buffer_flush_state)(cmd_buffer);
- if (cmd_buffer->state.pipeline->vs_prog_data.uses_basevertex ||
- cmd_buffer->state.pipeline->vs_prog_data.uses_baseinstance)
+ if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
- anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
- .IndirectParameterEnable = true,
- .VertexAccessType = SEQUENTIAL,
- .PrimitiveTopologyType = pipeline->topology);
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+ prim.IndirectParameterEnable = true;
+ prim.VertexAccessType = SEQUENTIAL;
+ prim.PrimitiveTopologyType = pipeline->topology;
+ }
}
void genX(CmdDrawIndexedIndirect)(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
struct anv_bo *bo = buffer->bo;
uint32_t bo_offset = buffer->offset + offset;
genX(cmd_buffer_flush_state)(cmd_buffer);
/* TODO: We need to stomp base vertex to 0 somehow */
- if (cmd_buffer->state.pipeline->vs_prog_data.uses_basevertex ||
- cmd_buffer->state.pipeline->vs_prog_data.uses_baseinstance)
+ if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
- anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
- .IndirectParameterEnable = true,
- .VertexAccessType = RANDOM,
- .PrimitiveTopologyType = pipeline->topology);
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
+ prim.IndirectParameterEnable = true;
+ prim.VertexAccessType = RANDOM;
+ prim.PrimitiveTopologyType = pipeline->topology;
+ }
+}
+
+#if GEN_GEN == 7
+
+static bool
+verify_cmd_parser(const struct anv_device *device,
+ int required_version,
+ const char *function)
+{
+ if (device->instance->physicalDevice.cmd_parser_version < required_version) {
+ vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
+ "cmd parser version %d is required for %s",
+ required_version, function);
+ return false;
+ } else {
+ return true;
+ }
}
+#endif
void genX(CmdDispatch)(
VkCommandBuffer commandBuffer,
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
- struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
+ const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
if (prog_data->uses_num_work_groups) {
struct anv_state state =
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
- anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER),
- .SIMDSize = prog_data->simd_size / 16,
- .ThreadDepthCounterMaximum = 0,
- .ThreadHeightCounterMaximum = 0,
- .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
- .ThreadGroupIDXDimension = x,
- .ThreadGroupIDYDimension = y,
- .ThreadGroupIDZDimension = z,
- .RightExecutionMask = pipeline->cs_right_mask,
- .BottomExecutionMask = 0xffffffff);
+ anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
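+ /* The SIMDSize field appears to encode SIMD8/16/32 as 0/1/2, hence the
+ * division of the dispatch width by 16.
+ */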
+ ggw.SIMDSize = prog_data->simd_size / 16;
+ ggw.ThreadDepthCounterMaximum = 0;
+ ggw.ThreadHeightCounterMaximum = 0;
+ ggw.ThreadWidthCounterMaximum = prog_data->threads - 1;
+ ggw.ThreadGroupIDXDimension = x;
+ ggw.ThreadGroupIDYDimension = y;
+ ggw.ThreadGroupIDZDimension = z;
+ ggw.RightExecutionMask = pipeline->cs_right_mask;
+ ggw.BottomExecutionMask = 0xffffffff;
+ }
- anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH));
+ anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
}
#define GPGPU_DISPATCHDIMX 0x2500
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
- struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
+ const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
struct anv_bo *bo = buffer->bo;
uint32_t bo_offset = buffer->offset + offset;
struct anv_batch *batch = &cmd_buffer->batch;
+#if GEN_GEN == 7
+ /* Linux 4.4 added command parser version 5 which allows the GPGPU
+ * indirect dispatch registers to be written.
+ */
+ if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
+ return;
+#endif
+
if (prog_data->uses_num_work_groups) {
cmd_buffer->state.num_workgroups_offset = bo_offset;
cmd_buffer->state.num_workgroups_bo = bo;
emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
/* predicate = (compute_dispatch_indirect_x_size == 0); */
- anv_batch_emit(batch, GENX(MI_PREDICATE),
- .LoadOperation = LOAD_LOAD,
- .CombineOperation = COMBINE_SET,
- .CompareOperation = COMPARE_SRCS_EQUAL);
+ anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
+ mip.LoadOperation = LOAD_LOAD;
+ mip.CombineOperation = COMBINE_SET;
+ mip.CompareOperation = COMPARE_SRCS_EQUAL;
+ }
/* Load compute_dispatch_indirect_y_size into SRC0 */
emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
/* predicate |= (compute_dispatch_indirect_y_size == 0); */
- anv_batch_emit(batch, GENX(MI_PREDICATE),
- .LoadOperation = LOAD_LOAD,
- .CombineOperation = COMBINE_OR,
- .CompareOperation = COMPARE_SRCS_EQUAL);
+ anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
+ mip.LoadOperation = LOAD_LOAD;
+ mip.CombineOperation = COMBINE_OR;
+ mip.CompareOperation = COMPARE_SRCS_EQUAL;
+ }
/* Load compute_dispatch_indirect_z_size into SRC0 */
emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
/* predicate |= (compute_dispatch_indirect_z_size == 0); */
- anv_batch_emit(batch, GENX(MI_PREDICATE),
- .LoadOperation = LOAD_LOAD,
- .CombineOperation = COMBINE_OR,
- .CompareOperation = COMPARE_SRCS_EQUAL);
+ anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
+ mip.LoadOperation = LOAD_LOAD;
+ mip.CombineOperation = COMBINE_OR;
+ mip.CompareOperation = COMPARE_SRCS_EQUAL;
+ }
/* predicate = !predicate; */
#define COMPARE_FALSE 1
- anv_batch_emit(batch, GENX(MI_PREDICATE),
- .LoadOperation = LOAD_LOADINV,
- .CombineOperation = COMBINE_OR,
- .CompareOperation = COMPARE_FALSE);
+ anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
+ mip.LoadOperation = LOAD_LOADINV;
+ mip.CombineOperation = COMBINE_OR;
+ mip.CompareOperation = COMPARE_FALSE;
+ }
#endif
- anv_batch_emit(batch, GENX(GPGPU_WALKER),
- .IndirectParameterEnable = true,
- .PredicateEnable = GEN_GEN <= 7,
- .SIMDSize = prog_data->simd_size / 16,
- .ThreadDepthCounterMaximum = 0,
- .ThreadHeightCounterMaximum = 0,
- .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
- .RightExecutionMask = pipeline->cs_right_mask,
- .BottomExecutionMask = 0xffffffff);
+ anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
+ ggw.IndirectParameterEnable = true;
+ ggw.PredicateEnable = GEN_GEN <= 7;
+ ggw.SIMDSize = prog_data->simd_size / 16;
+ ggw.ThreadDepthCounterMaximum = 0;
+ ggw.ThreadHeightCounterMaximum = 0;
+ ggw.ThreadWidthCounterMaximum = prog_data->threads - 1;
+ ggw.RightExecutionMask = pipeline->cs_right_mask;
+ ggw.BottomExecutionMask = 0xffffffff;
+ }
+
+ anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
+}
+
+static void
+flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
+ uint32_t pipeline)
+{
+#if GEN_GEN >= 8 && GEN_GEN < 10
+ /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
+ *
+ * Software must clear the COLOR_CALC_STATE Valid field in
+ * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
+ * with Pipeline Select set to GPGPU.
+ *
+ * The internal hardware docs recommend the same workaround for Gen9
+ * hardware too.
+ */
+ if (pipeline == GPGPU)
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
+#elif GEN_GEN <= 7
+ /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
+ * PIPELINE_SELECT [DevBWR+]":
+ *
+ * Project: DEVSNB+
+ *
+ * Software must ensure all the write caches are flushed through a
+ * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
+ * command to invalidate read only caches prior to programming
+ * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
+ */
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.RenderTargetCacheFlushEnable = true;
+ pc.DepthCacheFlushEnable = true;
+ pc.DCFlushEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ pc.CommandStreamerStallEnable = true;
+ }
- anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH));
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.TextureCacheInvalidationEnable = true;
+ pc.ConstantCacheInvalidationEnable = true;
+ pc.StateCacheInvalidationEnable = true;
+ pc.InstructionCacheInvalidateEnable = true;
+ pc.PostSyncOperation = NoWrite;
+ }
+#endif
}
void
genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
{
if (cmd_buffer->state.current_pipeline != _3D) {
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
+ flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
#if GEN_GEN >= 9
- .MaskBits = 3,
+ ps.MaskBits = 3;
#endif
- .PipelineSelection = _3D);
+ ps.PipelineSelection = _3D;
+ }
+
cmd_buffer->state.current_pipeline = _3D;
}
}
+void
+genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
+{
+ if (cmd_buffer->state.current_pipeline != GPGPU) {
+ flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
+#if GEN_GEN >= 9
+ ps.MaskBits = 3;
+#endif
+ ps.PipelineSelection = GPGPU;
+ }
+
+ cmd_buffer->state.current_pipeline = GPGPU;
+ }
+}
+
+struct anv_state
+genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_framebuffer *fb)
+{
+ struct anv_state state =
+ anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
+
+ struct GENX(RENDER_SURFACE_STATE) null_ss = {
+ .SurfaceType = SURFTYPE_NULL,
+ .SurfaceArray = fb->layers > 0,
+ .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
+#if GEN_GEN >= 8
+ .TileMode = YMAJOR,
+#else
+ .TiledSurface = true,
+#endif
+ .Width = fb->width - 1,
+ .Height = fb->height - 1,
+ .Depth = fb->layers - 1,
+ .RenderTargetViewExtent = fb->layers - 1,
+ };
+
+ GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(state);
+
+ return state;
+}
+
static void
cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
{
const struct anv_image_view *iview =
anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
const struct anv_image *image = iview ? iview->image : NULL;
- const struct anv_format *anv_format =
- iview ? anv_format_for_vk_format(iview->vk_format) : NULL;
- const bool has_depth = iview && anv_format->has_depth;
- const bool has_stencil = iview && anv_format->has_stencil;
+ const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
+ const bool has_stencil =
+ image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
/* FIXME: Implement the PMA stall W/A */
/* FIXME: Width and Height are wrong */
/* Emit 3DSTATE_DEPTH_BUFFER */
if (has_depth) {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
- .SurfaceType = SURFTYPE_2D,
- .DepthWriteEnable = true,
- .StencilWriteEnable = has_stencil,
- .HierarchicalDepthBufferEnable = false,
- .SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
- &image->depth_surface.isl),
- .SurfacePitch = image->depth_surface.isl.row_pitch - 1,
- .SurfaceBaseAddress = {
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
+ db.SurfaceType = SURFTYPE_2D;
+ db.DepthWriteEnable = true;
+ db.StencilWriteEnable = has_stencil;
+ db.HierarchicalDepthBufferEnable = false;
+
+ db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
+ &image->depth_surface.isl);
+
+ db.SurfaceBaseAddress = (struct anv_address) {
.bo = image->bo,
.offset = image->offset + image->depth_surface.offset,
- },
- .Height = fb->height - 1,
- .Width = fb->width - 1,
- .LOD = 0,
- .Depth = 1 - 1,
- .MinimumArrayElement = 0,
- .DepthBufferObjectControlState = GENX(MOCS),
+ };
+ db.DepthBufferObjectControlState = GENX(MOCS);
+
+ db.SurfacePitch = image->depth_surface.isl.row_pitch - 1;
+ db.Height = fb->height - 1;
+ db.Width = fb->width - 1;
+ db.LOD = 0;
+ db.Depth = 1 - 1;
+ db.MinimumArrayElement = 0;
+
#if GEN_GEN >= 8
- .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2,
+ db.SurfaceQPitch =
+ isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2;
#endif
- .RenderTargetViewExtent = 1 - 1);
+ db.RenderTargetViewExtent = 1 - 1;
+ }
} else {
/* Even when no depth buffer is present, the hardware requires that
* 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
*
* The PRM is wrong, though. The width and height must be programmed to the
* actual framebuffer's width and height, even when neither depth buffer
- * nor stencil buffer is present.
+ * nor stencil buffer is present. Also, D16_UNORM is not allowed to
+ * be combined with a stencil buffer so we use D32_FLOAT instead.
*/
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
- .SurfaceType = SURFTYPE_2D,
- .SurfaceFormat = D16_UNORM,
- .Width = fb->width - 1,
- .Height = fb->height - 1,
- .StencilWriteEnable = has_stencil);
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
+ db.SurfaceType = SURFTYPE_2D;
+ db.SurfaceFormat = D32_FLOAT;
+ db.Width = fb->width - 1;
+ db.Height = fb->height - 1;
+ db.StencilWriteEnable = has_stencil;
+ }
}
/* Emit 3DSTATE_STENCIL_BUFFER */
if (has_stencil) {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER),
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
#if GEN_GEN >= 8 || GEN_IS_HASWELL
- .StencilBufferEnable = true,
+ sb.StencilBufferEnable = true;
#endif
- .StencilBufferObjectControlState = GENX(MOCS),
+ sb.StencilBufferObjectControlState = GENX(MOCS);
/* Stencil buffers have strange pitch. The PRM says:
*
* The pitch must be set to 2x the value computed based on width,
* as the stencil buffer is stored with two rows interleaved.
*/
- .SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,
+ sb.SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1;
#if GEN_GEN >= 8
- .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2,
+ sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2;
#endif
- .SurfaceBaseAddress = {
+ sb.SurfaceBaseAddress = (struct anv_address) {
.bo = image->bo,
.offset = image->offset + image->stencil_surface.offset,
- });
+ };
+ }
} else {
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER));
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
}
/* Disable hierarchical depth buffers. */
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER));
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);
/* Clear the clear params. */
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS));
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
}
/**
cmd_buffer->state.framebuffer = framebuffer;
cmd_buffer->state.pass = pass;
+ cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
genX(flush_pipeline_select_3d)(cmd_buffer);
- const VkRect2D *render_area = &pRenderPassBegin->renderArea;
-
- anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE),
- .ClippedDrawingRectangleYMin = render_area->offset.y,
- .ClippedDrawingRectangleXMin = render_area->offset.x,
- .ClippedDrawingRectangleYMax =
- render_area->offset.y + render_area->extent.height - 1,
- .ClippedDrawingRectangleXMax =
- render_area->offset.x + render_area->extent.width - 1,
- .DrawingRectangleOriginY = 0,
- .DrawingRectangleOriginX = 0);
-
genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
anv_cmd_buffer_clear_subpass(cmd_buffer);
}
anv_cmd_buffer_resolve_subpass(cmd_buffer);
}
+
+static void
+emit_ps_depth_count(struct anv_batch *batch,
+ struct anv_bo *bo, uint32_t offset)
+{
+ anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
+ pc.DestinationAddressType = DAT_PPGTT;
+ pc.PostSyncOperation = WritePSDepthCount;
+ pc.DepthStallEnable = true;
+ pc.Address = (struct anv_address) { bo, offset };
+ }
+}
+
+static void
+emit_query_availability(struct anv_batch *batch,
+ struct anv_bo *bo, uint32_t offset)
+{
+ anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
+ pc.DestinationAddressType = DAT_PPGTT;
+ pc.PostSyncOperation = WriteImmediateData;
+ pc.Address = (struct anv_address) { bo, offset };
+ pc.ImmediateData = 1;
+ }
+}
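+
+ /* Query pool slots are assumed to be laid out as three 64-bit values: the
+ * begin count at offset 0, the end count at offset 8 and an availability
+ * flag at offset 16, which is what the offsets below rely on.
+ */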
+
+void genX(CmdBeginQuery)(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query,
+ VkQueryControlFlags flags)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+
+ /* Workaround: When meta uses the pipeline with the VS disabled, it seems
+ * that the pipelining of the depth write breaks. What we see is that
+ * samples from the render pass clear leak into the first query
+ * immediately after the clear. Doing a pipecontrol with a post-sync
+ * operation and DepthStallEnable seems to work around the issue.
+ */
+ if (cmd_buffer->state.need_query_wa) {
+ cmd_buffer->state.need_query_wa = false;
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DepthCacheFlushEnable = true;
+ pc.DepthStallEnable = true;
+ }
+ }
+
+ switch (pool->type) {
+ case VK_QUERY_TYPE_OCCLUSION:
+ emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
+ query * sizeof(struct anv_query_pool_slot));
+ break;
+
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+ default:
+ unreachable("");
+ }
+}
+
+void genX(CmdEndQuery)(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t query)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+
+ switch (pool->type) {
+ case VK_QUERY_TYPE_OCCLUSION:
+ emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
+ query * sizeof(struct anv_query_pool_slot) + 8);
+
+ emit_query_availability(&cmd_buffer->batch, &pool->bo,
+ query * sizeof(struct anv_query_pool_slot) + 16);
+ break;
+
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+ default:
+ unreachable("");
+ }
+}
+
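+ /* TIMESTAMP is assumed to be the render command streamer's 64-bit
+ * timestamp register, read as two dwords at 0x2358 and 0x235C.
+ */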
+#define TIMESTAMP 0x2358
+
+void genX(CmdWriteTimestamp)(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool,
+ uint32_t query)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+ uint32_t offset = query * sizeof(struct anv_query_pool_slot);
+
+ assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
+
+ switch (pipelineStage) {
+ case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+ srm.RegisterAddress = TIMESTAMP;
+ srm.MemoryAddress = (struct anv_address) { &pool->bo, offset };
+ }
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+ srm.RegisterAddress = TIMESTAMP + 4;
+ srm.MemoryAddress = (struct anv_address) { &pool->bo, offset + 4 };
+ }
+ break;
+
+ default:
+ /* Everything else is bottom-of-pipe */
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DestinationAddressType = DAT_PPGTT;
+ pc.PostSyncOperation = WriteTimestamp;
+ pc.Address = (struct anv_address) { &pool->bo, offset };
+ }
+ break;
+ }
+
+ emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
+}
+
+#if GEN_GEN > 7 || GEN_IS_HASWELL
+
+#define alu_opcode(v) __gen_uint((v), 20, 31)
+#define alu_operand1(v) __gen_uint((v), 10, 19)
+#define alu_operand2(v) __gen_uint((v), 0, 9)
+#define alu(opcode, operand1, operand2) \
+ alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
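+
+ /* Each MI_MATH ALU instruction is a single dword: the opcode in bits
+ * 31:20, the first operand in bits 19:10 and the second operand in bits
+ * 9:0, as the macros above encode.
+ */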
+
+#define OPCODE_NOOP 0x000
+#define OPCODE_LOAD 0x080
+#define OPCODE_LOADINV 0x480
+#define OPCODE_LOAD0 0x081
+#define OPCODE_LOAD1 0x481
+#define OPCODE_ADD 0x100
+#define OPCODE_SUB 0x101
+#define OPCODE_AND 0x102
+#define OPCODE_OR 0x103
+#define OPCODE_XOR 0x104
+#define OPCODE_STORE 0x180
+#define OPCODE_STOREINV 0x580
+
+#define OPERAND_R0 0x00
+#define OPERAND_R1 0x01
+#define OPERAND_R2 0x02
+#define OPERAND_R3 0x03
+#define OPERAND_R4 0x04
+#define OPERAND_SRCA 0x20
+#define OPERAND_SRCB 0x21
+#define OPERAND_ACCU 0x31
+#define OPERAND_ZF 0x32
+#define OPERAND_CF 0x33
+
+#define CS_GPR(n) (0x2600 + (n) * 8)
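+ /* The command streamer's general purpose registers are 64 bits wide and
+ * start at 0x2600, so CS_GPR(n) is the address of register n's low dword.
+ */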
+
+static void
+emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
+ struct anv_bo *bo, uint32_t offset)
+{
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = reg;
+ lrm.MemoryAddress = (struct anv_address) { bo, offset };
+ }
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = reg + 4;
+ lrm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
+ }
+}
+
+static void
+store_query_result(struct anv_batch *batch, uint32_t reg,
+ struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
+{
+ anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+ srm.RegisterAddress = reg;
+ srm.MemoryAddress = (struct anv_address) { bo, offset };
+ }
+
+ if (flags & VK_QUERY_RESULT_64_BIT) {
+ anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+ srm.RegisterAddress = reg + 4;
+ srm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
+ }
+ }
+}
+
+void genX(CmdCopyQueryPoolResults)(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ VkBuffer destBuffer,
+ VkDeviceSize destOffset,
+ VkDeviceSize destStride,
+ VkQueryResultFlags flags)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+ ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
+ uint32_t slot_offset, dst_offset;
+
+ if (flags & VK_QUERY_RESULT_WAIT_BIT) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.CommandStreamerStallEnable = true;
+ pc.StallAtPixelScoreboard = true;
+ }
+ }
+
+ dst_offset = buffer->offset + destOffset;
+ for (uint32_t i = 0; i < queryCount; i++) {
+
+ slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
+ switch (pool->type) {
+ case VK_QUERY_TYPE_OCCLUSION:
+ emit_load_alu_reg_u64(&cmd_buffer->batch,
+ CS_GPR(0), &pool->bo, slot_offset);
+ emit_load_alu_reg_u64(&cmd_buffer->batch,
+ CS_GPR(1), &pool->bo, slot_offset + 8);
+
+ /* FIXME: We need to clamp the result for 32 bit. */
+
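+ /* Compute R2 = R1 - R0, i.e. the end count minus the begin count, which
+ * is the occlusion query result.
+ */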
+ uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
+ dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
+ dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
+ dw[3] = alu(OPCODE_SUB, 0, 0);
+ dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
+ break;
+
+ case VK_QUERY_TYPE_TIMESTAMP:
+ emit_load_alu_reg_u64(&cmd_buffer->batch,
+ CS_GPR(2), &pool->bo, slot_offset);
+ break;
+
+ default:
+ unreachable("unhandled query type");
+ }
+
+ store_query_result(&cmd_buffer->batch,
+ CS_GPR(2), buffer->bo, dst_offset, flags);
+
+ if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
+ emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
+ &pool->bo, slot_offset + 16);
+ if (flags & VK_QUERY_RESULT_64_BIT)
+ store_query_result(&cmd_buffer->batch,
+ CS_GPR(0), buffer->bo, dst_offset + 8, flags);
+ else
+ store_query_result(&cmd_buffer->batch,
+ CS_GPR(0), buffer->bo, dst_offset + 4, flags);
+ }
+
+ dst_offset += destStride;
+ }
+}
+
+#endif