{
struct anv_device *device = cmd_buffer->device;
-/* XXX: Do we need this on more than just BDW? */
-#if (GEN_GEN >= 8)
/* Emit a render target cache flush.
*
* This isn't documented anywhere in the PRM. However, it seems to be
* necessary prior to changing the surface state base address. Without
* this, we get GPU hangs when using multi-level command buffers which
* clear depth, reset state base address, and then go render stuff.
*/
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.DCFlushEnable = true;
pc.RenderTargetCacheFlushEnable = true;
+ pc.CommandStreamerStallEnable = true;
}
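+ /* A hedged gloss on the bits set above, based on common PIPE_CONTROL
+  * usage rather than a specific PRM citation: RenderTargetCacheFlushEnable
+  * flushes dirty render-target data, DCFlushEnable flushes the data cache
+  * behind it, and CommandStreamerStallEnable makes the whole thing
+  * synchronous by stalling command parsing until the flush completes, so
+  * no writes are in flight when STATE_BASE_ADDRESS is emitted below.
+  */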
-#endif
anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
*/
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
pc.TextureCacheInvalidationEnable = true;
+ pc.ConstantCacheInvalidationEnable = true;
+ pc.StateCacheInvalidationEnable = true;
}
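+ /* Presumably these invalidations are required because the texture,
+  * constant, and state caches hold entries looked up relative to the base
+  * addresses programmed above; once STATE_BASE_ADDRESS moves those bases,
+  * previously cached entries would resolve to stale memory. A reading of
+  * the pattern, not a PRM citation.
+  */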
}
att_state->fast_clear = false;
}
- if (isl_format_supports_lossless_compression(&device->info,
- iview->isl.format)) {
+ /**
+ * TODO: Consider using a heuristic to determine if temporarily enabling
+ * CCS_E for this image view would be beneficial.
+ *
+ * While fast-clear resolves and partial resolves are fairly cheap in the
+ * case where you render to most of the pixels, full resolves are not
+ * because they potentially involve reading and writing the entire
+ * framebuffer. If we can't texture with CCS_E, we should leave it off and
+ * limit ourselves to fast clears.
+ */
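+ /* Background (an assumption from ISL's aux-usage naming, not stated in
+  * this patch): CCS_D only accelerates fast clears and must be fully
+  * resolved before the surface can be sampled, while CCS_E additionally
+  * provides lossless render-target compression the sampler can read
+  * directly. That asymmetry is why the full-resolve cost above only
+  * matters when we can't texture with CCS_E.
+  */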
+ if (iview->image->aux_usage == ISL_AUX_USAGE_CCS_E) {
att_state->aux_usage = ISL_AUX_USAGE_CCS_E;
att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E;
} else if (att_state->fast_clear) {
att_state->aux_usage = ISL_AUX_USAGE_CCS_D;
- if (GEN_GEN >= 9) {
+ if (GEN_GEN >= 9 &&
+ !isl_format_supports_ccs_e(&device->info, iview->isl.format)) {
/* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode:
*
* "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D
if (!(att->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT))
return false;
- /* We only allocate input attachment states for color and depth surfaces.
- * Stencil doesn't allow compression so we can just use the texture surface
- * state from the view
+ /* We only allocate input attachment states for color surfaces. Compression
+ * is not yet enabled for depth textures and stencil doesn't allow
+ * compression so we can just use the texture surface state from the view.
*/
- return vk_format_is_color(att->format) || vk_format_has_depth(att->format);
+ return vk_format_is_color(att->format);
}
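+/* The only caller below gates input-attachment surface-state allocation
+ * on this check; depth and stencil input attachments instead reuse the
+ * view's sampler_surface_state when descriptors are flushed (see the
+ * INPUT_ATTACHMENT case further down).
+ */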
static enum isl_aux_usage
-layout_to_hiz_usage(VkImageLayout layout)
+layout_to_hiz_usage(VkImageLayout layout, uint8_t samples)
{
switch (layout) {
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
return ISL_AUX_USAGE_HIZ;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ if (anv_can_sample_with_hiz(GEN_GEN, samples))
+ return ISL_AUX_USAGE_HIZ;
+ /* Fall-through */
+ case VK_IMAGE_LAYOUT_GENERAL:
+ /* This buffer could be used as a source or destination in a transfer
+ * operation. Transfer operations currently don't perform HiZ-enabled reads
+ * and writes.
+ */
default:
return ISL_AUX_USAGE_NONE;
}
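+/* Informal summary of the mapping above:
+ *
+ *   DEPTH_STENCIL_ATTACHMENT_OPTIMAL             -> HIZ
+ *   DEPTH_STENCIL_READ_ONLY / SHADER_READ_ONLY   -> HIZ, but only if the
+ *      hardware can sample with HiZ at this gen and sample count
+ *   GENERAL (and anything else)                  -> NONE
+ */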
{
assert(image);
- if (image->aux_usage != ISL_AUX_USAGE_HIZ)
+ if (image->aux_usage != ISL_AUX_USAGE_HIZ || final_layout == initial_layout)
return;
- const bool hiz_enabled = layout_to_hiz_usage(initial_layout) ==
+ const bool hiz_enabled = layout_to_hiz_usage(initial_layout, image->samples) ==
ISL_AUX_USAGE_HIZ;
- const bool enable_hiz = layout_to_hiz_usage(final_layout) ==
+ const bool enable_hiz = layout_to_hiz_usage(final_layout, image->samples) ==
ISL_AUX_USAGE_HIZ;
- /* We've already initialized the aux HiZ buffer at BindImageMemory time,
- * so there's no need to perform a HIZ resolve or clear to avoid GPU hangs.
- * This initial layout indicates that the user doesn't care about the data
- * that's currently in the buffer, so no resolves are necessary.
- */
- if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED)
- return;
-
- if (hiz_enabled == enable_hiz) {
- /* The same buffer will be used, no resolves are necessary */
+ enum blorp_hiz_op hiz_op;
+ if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
+ /* We've already initialized the aux HiZ buffer at BindImageMemory time,
+ * so there's no need to perform a HIZ resolve or clear to avoid GPU hangs.
+ * This initial layout indicates that the user doesn't care about the data
+ * that's currently in the buffer, so resolves are not necessary except
+ * for the special case noted below.
+ */
+ hiz_op = BLORP_HIZ_OP_NONE;
} else if (hiz_enabled && !enable_hiz) {
- anv_gen8_hiz_op_resolve(cmd_buffer, image, BLORP_HIZ_OP_DEPTH_RESOLVE);
+ hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE;
+ } else if (!hiz_enabled && enable_hiz) {
+ hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE;
} else {
- assert(!hiz_enabled && enable_hiz);
+ assert(hiz_enabled == enable_hiz);
+ /* If the same buffer will be used, no resolves are necessary except for
+ * the special case noted below.
+ */
+ hiz_op = BLORP_HIZ_OP_NONE;
+ }
+
+ if (hiz_op != BLORP_HIZ_OP_NONE)
+ anv_gen8_hiz_op_resolve(cmd_buffer, image, hiz_op);
+
+ /* Images that are sampled with HiZ enabled cause all shader sampling to
+ * read data through the HiZ buffer. Therefore, in the case of transitioning to
+ * the general layout - which currently routes all writes to the depth
+ * buffer - we must ensure that the HiZ buffer remains consistent with the
+ * depth buffer by performing an additional HIZ resolve if the operation
+ * required by this transition was not already a HiZ resolve.
+ */
+ if (final_layout == VK_IMAGE_LAYOUT_GENERAL &&
+ anv_can_sample_with_hiz(GEN_GEN, image->samples) &&
+ hiz_op != BLORP_HIZ_OP_HIZ_RESOLVE) {
anv_gen8_hiz_op_resolve(cmd_buffer, image, BLORP_HIZ_OP_HIZ_RESOLVE);
}
}
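+/* Net effect of the logic above, as a quick reference:
+ *
+ *   initial == UNDEFINED     -> no resolve (contents are don't-care)
+ *   HiZ -> non-HiZ layout    -> depth resolve (make depth self-contained)
+ *   non-HiZ -> HiZ layout    -> HiZ resolve (rebuild HiZ from depth)
+ *   usage unchanged          -> no resolve
+ *
+ * and, orthogonally, any transition to GENERAL on hardware that samples
+ * with HiZ ends in a HiZ resolve so the HiZ buffer matches the depth
+ * writes that the general layout routes directly to the depth buffer.
+ */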
}
}
+ state->attachments[i].current_layout = att->initial_layout;
state->attachments[i].pending_clear_aspects = clear_aspects;
if (clear_aspects)
state->attachments[i].clear_value = begin->pClearValues[i];
struct isl_view view = iview->isl;
view.usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;
+ view.swizzle = anv_swizzle_for_render(view.swizzle);
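+ /* anv_swizzle_for_render is defined elsewhere; presumably it normalizes
+  * view swizzles that fake RGB as RGBA for texturing (alpha mapped to
+  * ONE), since whatever is rendered to the alpha channel of such a view
+  * is discarded anyway.
+  */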
isl_surf_fill_state(isl_dev,
state->attachments[i].color_rt_state.map,
.surf = &iview->image->color_surface.isl,
state->attachments[i].aux_usage,
state->attachments[i].color_rt_state);
} else {
- state->attachments[i].aux_usage = iview->image->aux_usage;
+ if (iview->image->aux_usage == ISL_AUX_USAGE_HIZ) {
+ state->attachments[i].aux_usage =
+ layout_to_hiz_usage(att->initial_layout, iview->image->samples);
+ } else {
+ state->attachments[i].aux_usage = ISL_AUX_USAGE_NONE;
+ }
state->attachments[i].input_aux_usage = ISL_AUX_USAGE_NONE;
}
if (need_input_attachment_state(&pass->attachments[i])) {
- const struct isl_surf *surf;
- if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
- surf = &iview->image->color_surface.isl;
- } else {
- surf = &iview->image->depth_surface.isl;
- }
-
struct isl_view view = iview->isl;
view.usage |= ISL_SURF_USAGE_TEXTURE_BIT;
isl_surf_fill_state(isl_dev,
state->attachments[i].input_att_state.map,
- .surf = surf,
+ .surf = &iview->image->color_surface.isl,
.view = &view,
.aux_surf = &iview->image->aux_surface.isl,
.aux_usage = state->attachments[i].input_aux_usage,
}
if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(state->render_pass_states);
+ anv_state_flush(state->render_pass_states);
}
}
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ /* We want every command buffer to start with the PMA fix in a known state,
+ * so we disable it at the end of the command buffer.
+ */
+ genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);
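+ /* MMIO state written from a batch survives into the next one, so a
+  * consumer of this command buffer can't know what PMA state it would
+  * otherwise inherit. (On BDW the fix appears to boil down to an
+  * MI_LOAD_REGISTER_IMM toggling a CACHE_MODE_1 bit.)
+  */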
+
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
anv_cmd_buffer_end_batch_buffer(cmd_buffer);
assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+ /* The secondary command buffers will assume that the PMA fix is disabled
+ * when they begin executing. Make sure this is true.
+ */
+ genX(cmd_buffer_enable_pma_fix)(primary, false);
+
for (uint32_t i = 0; i < commandBufferCount; i++) {
ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
src_flags |= pImageMemoryBarriers[i].srcAccessMask;
dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
+ ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image);
+ if (pImageMemoryBarriers[i].subresourceRange.aspectMask &
+ VK_IMAGE_ASPECT_DEPTH_BIT) {
+ transition_depth_buffer(cmd_buffer, image,
+ pImageMemoryBarriers[i].oldLayout,
+ pImageMemoryBarriers[i].newLayout);
+ }
}
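+ /* Only the depth aspect is considered above: HiZ is a depth-only
+  * auxiliary surface and stencil never carries one, so barriers without
+  * DEPTH_BIT in their aspectMask have no layout-driven resolve to do.
+  */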
enum anv_pipe_bits pipe_bits = 0;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
assert(stage == MESA_SHADER_FRAGMENT);
- if (desc->image_view->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
- /* For stencil input attachments, we treat it like any old texture
- * that a user may have bound.
+ if (desc->image_view->aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) {
+ /* For depth and stencil input attachments, we treat it like any
+ * old texture that a user may have bound.
*/
surface_state = desc->image_view->sampler_surface_state;
assert(surface_state.alloc_size);
desc->image_view->image->aux_usage,
surface_state);
} else {
- /* For depth and color input attachments, we create the surface
- * state at vkBeginRenderPass time so that we can include aux
- * and clear color information.
+ /* For color input attachments, we create the surface state at
+ * vkBeginRenderPass time so that we can include aux and clear
+ * color information.
*/
assert(binding->input_attachment_index < subpass->input_count);
const unsigned subpass_att = binding->input_attachment_index;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
- surface_state = desc->image_view->storage_surface_state;
+ surface_state = (binding->write_only)
+ ? desc->image_view->writeonly_storage_surface_state
+ : desc->image_view->storage_surface_state;
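+ /* A hypothesis for why two surface states exist per view: hardware
+  * supports typed writes to more formats than typed reads, so the
+  * write-only state can presumably keep the image's real format while
+  * the readable state may need a lowered format that also supports
+  * typed reads.
+  */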
assert(surface_state.alloc_size);
add_image_view_relocs(cmd_buffer, desc->image_view,
desc->image_view->image->aux_usage,
break;
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- surface_state = desc->buffer_view->storage_surface_state;
+ surface_state = (binding->write_only)
+ ? desc->buffer_view->writeonly_storage_surface_state
+ : desc->buffer_view->storage_surface_state;
assert(surface_state.alloc_size);
add_surface_state_reloc(cmd_buffer, surface_state,
desc->buffer_view->bo,
out:
if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(*bt_state);
+ anv_state_flush(*bt_state);
return VK_SUCCESS;
}
}
if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(*state);
+ anv_state_flush(*state);
return VK_SUCCESS;
}
}
static void
-emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *bo, uint32_t offset)
+emit_vertex_bo(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_bo *bo, uint32_t offset,
+ uint32_t size, uint32_t index)
{
uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
GENX(3DSTATE_VERTEX_BUFFERS));
GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
&(struct GENX(VERTEX_BUFFER_STATE)) {
- .VertexBufferIndex = 32, /* Reserved for this */
+ .VertexBufferIndex = index,
.AddressModifyEnable = true,
.BufferPitch = 0,
#if (GEN_GEN >= 8)
.MemoryObjectControlState = GENX(MOCS),
.BufferStartingAddress = { bo, offset },
- .BufferSize = 8
+ .BufferSize = size
#else
.VertexBufferMemoryObjectControlState = GENX(MOCS),
.BufferStartingAddress = { bo, offset },
- .EndAddress = { bo, offset + 8 },
+ .EndAddress = { bo, offset + size },
#endif
});
}
+static void
+emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_bo *bo, uint32_t offset)
+{
+ emit_vertex_bo(cmd_buffer, bo, offset, 8, ANV_SVGS_VB_INDEX);
+}
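+/* ANV_SVGS_VB_INDEX and ANV_DRAWID_VB_INDEX are defined elsewhere;
+ * presumably they name vertex-buffer slots beyond those exposed to the
+ * API (the old code hard-coded index 32) so the 8-byte base
+ * vertex/instance pair and the 4-byte draw index never collide with user
+ * bindings.
+ */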
+
static void
emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
uint32_t base_vertex, uint32_t base_instance)
((uint32_t *)id_state.map)[1] = base_instance;
if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(id_state);
+ anv_state_flush(id_state);
emit_base_vertex_instance_bo(cmd_buffer,
&cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
}
+static void
+emit_draw_index(struct anv_cmd_buffer *cmd_buffer, uint32_t draw_index)
+{
+ struct anv_state state =
+ anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 4, 4);
+
+ ((uint32_t *)state.map)[0] = draw_index;
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_flush(state);
+
+ emit_vertex_bo(cmd_buffer,
+ &cmd_buffer->device->dynamic_state_block_pool.bo,
+ state.offset, 4, ANV_DRAWID_VB_INDEX);
+}
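+/* Every call site below passes draw_index 0: direct draws are single
+ * draws by definition and, presumably, multiDrawIndirect is not yet
+ * supported here, so gl_DrawID (flagged by the compiler as
+ * vs_prog_data->uses_drawid) never needs a nonzero value in this file.
+ */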
+
void genX(CmdDraw)(
VkCommandBuffer commandBuffer,
uint32_t vertexCount,
if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
+ if (vs_prog_data->uses_drawid)
+ emit_draw_index(cmd_buffer, 0);
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
prim.VertexAccessType = SEQUENTIAL;
if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
+ if (vs_prog_data->uses_drawid)
+ emit_draw_index(cmd_buffer, 0);
anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
prim.VertexAccessType = RANDOM;
if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
+ if (vs_prog_data->uses_drawid)
+ emit_draw_index(cmd_buffer, 0);
emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
/* TODO: We need to stomp base vertex to 0 somehow */
if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
+ if (vs_prog_data->uses_drawid)
+ emit_draw_index(cmd_buffer, 0);
emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
#if GEN_GEN == 7
-static bool
+static VkResult
verify_cmd_parser(const struct anv_device *device,
int required_version,
const char *function)
{
if (device->instance->physicalDevice.cmd_parser_version < required_version) {
- vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
- "cmd parser version %d is required for %s",
- required_version, function);
- return false;
+ return vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
+ "cmd parser version %d is required for %s",
+ required_version, function);
} else {
- return true;
+ return VK_SUCCESS;
}
}
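+/* Returning a VkResult instead of a bool lets callers propagate
+ * VK_ERROR_FEATURE_NOT_PRESENT if they want to; vk_errorf both logs and
+ * returns the error code, which keeps both paths above single
+ * statements.
+ */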
sizes[1] = y;
sizes[2] = z;
if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(state);
+ anv_state_flush(state);
cmd_buffer->state.num_workgroups_offset = state.offset;
cmd_buffer->state.num_workgroups_bo =
&cmd_buffer->device->dynamic_state_block_pool.bo;
/* Linux 4.4 added command parser version 5 which allows the GPGPU
* indirect dispatch registers to be written.
*/
- if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
+ if (verify_cmd_parser(cmd_buffer->device, 5,
+ "vkCmdDispatchIndirect") != VK_SUCCESS)
return;
#endif
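+/* The kernel's command parser whitelists which registers a userspace
+ * batch may write; loading the GPGPU dispatch dimension registers from a
+ * buffer is only allowed from parser version 5 onward, hence a runtime
+ * check rather than a compile-time one.
+ */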
const bool has_stencil =
image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
- /* FIXME: Implement the PMA stall W/A */
+ cmd_buffer->state.hiz_enabled = has_hiz;
+
/* FIXME: Width and Height are wrong */
genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);
anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp) {
if (has_hiz) {
cp.DepthClearValueValid = true;
- const uint32_t ds =
- cmd_buffer->state.subpass->depth_stencil_attachment;
- cp.DepthClearValue =
- cmd_buffer->state.attachments[ds].clear_value.depthStencil.depth;
+ cp.DepthClearValue = ANV_HZ_FC_VAL;
}
}
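+ /* ANV_HZ_FC_VAL is anv's fixed HiZ fast-clear value (1.0f, if memory
+  * serves); standardizing on one clear value means 3DSTATE_CLEAR_PARAMS
+  * no longer has to be re-emitted with each attachment's clear value.
+  */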
}
const struct anv_image_view *iview =
anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
- if (iview) {
- anv_gen8_hiz_op_resolve(cmd_buffer, iview->image,
- BLORP_HIZ_OP_HIZ_RESOLVE);
+ if (iview && iview->image->aux_usage == ISL_AUX_USAGE_HIZ) {
+ const uint32_t ds = subpass->depth_stencil_attachment;
+ transition_depth_buffer(cmd_buffer, iview->image,
+ cmd_buffer->state.attachments[ds].current_layout,
+ cmd_buffer->state.subpass->depth_stencil_layout);
+ cmd_buffer->state.attachments[ds].current_layout =
+ cmd_buffer->state.subpass->depth_stencil_layout;
+ cmd_buffer->state.attachments[ds].aux_usage =
+ layout_to_hiz_usage(cmd_buffer->state.subpass->depth_stencil_layout,
+ iview->image->samples);
}
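+ /* At subpass start the depth attachment moves from its tracked
+  * current_layout to the subpass's depth_stencil_layout, and aux usage is
+  * recomputed so subsequent draws agree on whether HiZ is in use.
+  */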
cmd_buffer_emit_depth_stencil(cmd_buffer);
const struct anv_image_view *iview =
anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
- if (iview) {
- anv_gen8_hiz_op_resolve(cmd_buffer, iview->image,
- BLORP_HIZ_OP_DEPTH_RESOLVE);
+ if (iview && iview->image->aux_usage == ISL_AUX_USAGE_HIZ) {
+ const uint32_t ds = cmd_buffer->state.subpass->depth_stencil_attachment;
+
+ if (cmd_buffer->state.subpass - cmd_buffer->state.pass->subpasses ==
+ cmd_buffer->state.pass->attachments[ds].last_subpass_idx) {
+ transition_depth_buffer(cmd_buffer, iview->image,
+ cmd_buffer->state.attachments[ds].current_layout,
+ cmd_buffer->state.pass->attachments[ds].final_layout);
+ }
}
anv_cmd_buffer_resolve_subpass(cmd_buffer);
const struct anv_image_view *iview =
anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
- if (iview) {
- anv_gen8_hiz_op_resolve(cmd_buffer, iview->image,
- BLORP_HIZ_OP_DEPTH_RESOLVE);
+ if (iview && iview->image->aux_usage == ISL_AUX_USAGE_HIZ) {
+ const uint32_t ds = cmd_buffer->state.subpass->depth_stencil_attachment;
+
+ if (cmd_buffer->state.subpass - cmd_buffer->state.pass->subpasses ==
+ cmd_buffer->state.pass->attachments[ds].last_subpass_idx) {
+ transition_depth_buffer(cmd_buffer, iview->image,
+ cmd_buffer->state.attachments[ds].current_layout,
+ cmd_buffer->state.pass->attachments[ds].final_layout);
+ }
}
anv_cmd_buffer_resolve_subpass(cmd_buffer);
+ cmd_buffer->state.hiz_enabled = false;
+
#ifndef NDEBUG
anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
#endif
}
-
-static void
-emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *bo, uint32_t offset)
-{
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
- pc.DestinationAddressType = DAT_PPGTT;
- pc.PostSyncOperation = WritePSDepthCount;
- pc.DepthStallEnable = true;
- pc.Address = (struct anv_address) { bo, offset };
-
- if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
- pc.CommandStreamerStallEnable = true;
- }
-}
-
-static void
-emit_query_availability(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *bo, uint32_t offset)
-{
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
- pc.DestinationAddressType = DAT_PPGTT;
- pc.PostSyncOperation = WriteImmediateData;
- pc.Address = (struct anv_address) { bo, offset };
- pc.ImmediateData = 1;
- }
-}
-
-void genX(CmdBeginQuery)(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t query,
- VkQueryControlFlags flags)
-{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
-
- /* Workaround: When meta uses the pipeline with the VS disabled, it seems
- * that the pipelining of the depth write breaks. What we see is that
- * samples from the render pass clear leaks into the first query
- * immediately after the clear. Doing a pipecontrol with a post-sync
- * operation and DepthStallEnable seems to work around the issue.
- */
- if (cmd_buffer->state.need_query_wa) {
- cmd_buffer->state.need_query_wa = false;
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
- pc.DepthCacheFlushEnable = true;
- pc.DepthStallEnable = true;
- }
- }
-
- switch (pool->type) {
- case VK_QUERY_TYPE_OCCLUSION:
- emit_ps_depth_count(cmd_buffer, &pool->bo,
- query * sizeof(struct anv_query_pool_slot));
- break;
-
- case VK_QUERY_TYPE_PIPELINE_STATISTICS:
- default:
- unreachable("");
- }
-}
-
-void genX(CmdEndQuery)(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t query)
-{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
-
- switch (pool->type) {
- case VK_QUERY_TYPE_OCCLUSION:
- emit_ps_depth_count(cmd_buffer, &pool->bo,
- query * sizeof(struct anv_query_pool_slot) + 8);
-
- emit_query_availability(cmd_buffer, &pool->bo,
- query * sizeof(struct anv_query_pool_slot) + 16);
- break;
-
- case VK_QUERY_TYPE_PIPELINE_STATISTICS:
- default:
- unreachable("");
- }
-}
-
-#define TIMESTAMP 0x2358
-
-void genX(CmdWriteTimestamp)(
- VkCommandBuffer commandBuffer,
- VkPipelineStageFlagBits pipelineStage,
- VkQueryPool queryPool,
- uint32_t query)
-{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
- uint32_t offset = query * sizeof(struct anv_query_pool_slot);
-
- assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
-
- switch (pipelineStage) {
- case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
- anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = TIMESTAMP;
- srm.MemoryAddress = (struct anv_address) { &pool->bo, offset };
- }
- anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = TIMESTAMP + 4;
- srm.MemoryAddress = (struct anv_address) { &pool->bo, offset + 4 };
- }
- break;
-
- default:
- /* Everything else is bottom-of-pipe */
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
- pc.DestinationAddressType = DAT_PPGTT;
- pc.PostSyncOperation = WriteTimestamp;
- pc.Address = (struct anv_address) { &pool->bo, offset };
-
- if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
- pc.CommandStreamerStallEnable = true;
- }
- break;
- }
-
- emit_query_availability(cmd_buffer, &pool->bo, query + 16);
-}
-
-#if GEN_GEN > 7 || GEN_IS_HASWELL
-
-#define alu_opcode(v) __gen_uint((v), 20, 31)
-#define alu_operand1(v) __gen_uint((v), 10, 19)
-#define alu_operand2(v) __gen_uint((v), 0, 9)
-#define alu(opcode, operand1, operand2) \
- alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
-
-#define OPCODE_NOOP 0x000
-#define OPCODE_LOAD 0x080
-#define OPCODE_LOADINV 0x480
-#define OPCODE_LOAD0 0x081
-#define OPCODE_LOAD1 0x481
-#define OPCODE_ADD 0x100
-#define OPCODE_SUB 0x101
-#define OPCODE_AND 0x102
-#define OPCODE_OR 0x103
-#define OPCODE_XOR 0x104
-#define OPCODE_STORE 0x180
-#define OPCODE_STOREINV 0x580
-
-#define OPERAND_R0 0x00
-#define OPERAND_R1 0x01
-#define OPERAND_R2 0x02
-#define OPERAND_R3 0x03
-#define OPERAND_R4 0x04
-#define OPERAND_SRCA 0x20
-#define OPERAND_SRCB 0x21
-#define OPERAND_ACCU 0x31
-#define OPERAND_ZF 0x32
-#define OPERAND_CF 0x33
-
-#define CS_GPR(n) (0x2600 + (n) * 8)
-
-static void
-emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
- struct anv_bo *bo, uint32_t offset)
-{
- anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
- lrm.RegisterAddress = reg,
- lrm.MemoryAddress = (struct anv_address) { bo, offset };
- }
- anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
- lrm.RegisterAddress = reg + 4;
- lrm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
- }
-}
-
-static void
-store_query_result(struct anv_batch *batch, uint32_t reg,
- struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
-{
- anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = reg;
- srm.MemoryAddress = (struct anv_address) { bo, offset };
- }
-
- if (flags & VK_QUERY_RESULT_64_BIT) {
- anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = reg + 4;
- srm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
- }
- }
-}
-
-void genX(CmdCopyQueryPoolResults)(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t firstQuery,
- uint32_t queryCount,
- VkBuffer destBuffer,
- VkDeviceSize destOffset,
- VkDeviceSize destStride,
- VkQueryResultFlags flags)
-{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
- ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
- uint32_t slot_offset, dst_offset;
-
- if (flags & VK_QUERY_RESULT_WAIT_BIT) {
- anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
- pc.CommandStreamerStallEnable = true;
- pc.StallAtPixelScoreboard = true;
- }
- }
-
- dst_offset = buffer->offset + destOffset;
- for (uint32_t i = 0; i < queryCount; i++) {
-
- slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
- switch (pool->type) {
- case VK_QUERY_TYPE_OCCLUSION:
- emit_load_alu_reg_u64(&cmd_buffer->batch,
- CS_GPR(0), &pool->bo, slot_offset);
- emit_load_alu_reg_u64(&cmd_buffer->batch,
- CS_GPR(1), &pool->bo, slot_offset + 8);
-
- /* FIXME: We need to clamp the result for 32 bit. */
-
- uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
- dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
- dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
- dw[3] = alu(OPCODE_SUB, 0, 0);
- dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
- break;
-
- case VK_QUERY_TYPE_TIMESTAMP:
- emit_load_alu_reg_u64(&cmd_buffer->batch,
- CS_GPR(2), &pool->bo, slot_offset);
- break;
-
- default:
- unreachable("unhandled query type");
- }
-
- store_query_result(&cmd_buffer->batch,
- CS_GPR(2), buffer->bo, dst_offset, flags);
-
- if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
- emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
- &pool->bo, slot_offset + 16);
- if (flags & VK_QUERY_RESULT_64_BIT)
- store_query_result(&cmd_buffer->batch,
- CS_GPR(0), buffer->bo, dst_offset + 8, flags);
- else
- store_query_result(&cmd_buffer->batch,
- CS_GPR(0), buffer->bo, dst_offset + 4, flags);
- }
-
- dst_offset += destStride;
- }
-}
-
-#else
-void genX(CmdCopyQueryPoolResults)(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t firstQuery,
- uint32_t queryCount,
- VkBuffer destBuffer,
- VkDeviceSize destOffset,
- VkDeviceSize destStride,
- VkQueryResultFlags flags)
-{
- anv_finishme("Queries not yet supported on Ivy Bridge");
-}
-#endif