X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fintel%2Fvulkan%2FgenX_cmd_buffer.c;h=6131cfb60e38e7d2a9ffceb8e9c09a6b27aceb23;hb=d6545f234593fb00d02fdc07f9b2a803d2b569f6;hp=64172ca377bdfb6c31ab184d515c544e21916f1d;hpb=3a83c176eab8c513eb723554a240af1a7308d701;p=mesa.git diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c index 64172ca377b..6131cfb60e3 100644 --- a/src/intel/vulkan/genX_cmd_buffer.c +++ b/src/intel/vulkan/genX_cmd_buffer.c @@ -25,20 +25,35 @@ #include #include "anv_private.h" +#include "vk_format_info.h" +#include "common/gen_l3_config.h" #include "genxml/gen_macros.h" #include "genxml/genX_pack.h" +static void +emit_lrm(struct anv_batch *batch, + uint32_t reg, struct anv_bo *bo, uint32_t offset) +{ + anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) { + lrm.RegisterAddress = reg; + lrm.MemoryAddress = (struct anv_address) { bo, offset }; + } +} + +static void +emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm) +{ + anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) { + lri.RegisterOffset = reg; + lri.DataDWord = imm; + } +} + void genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer) { struct anv_device *device = cmd_buffer->device; - struct anv_bo *scratch_bo = NULL; - - cmd_buffer->state.scratch_size = - anv_block_pool_size(&device->scratch_block_pool); - if (cmd_buffer->state.scratch_size > 0) - scratch_bo = &device->scratch_block_pool.bo; /* XXX: Do we need this on more than just BDW? */ #if (GEN_GEN >= 8) @@ -55,7 +70,7 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer) #endif anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) { - sba.GeneralStateBaseAddress = (struct anv_address) { scratch_bo, 0 }; + sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 }; sba.GeneralStateMemoryObjectControlState = GENX(MOCS); sba.GeneralStateBaseAddressModifyEnable = true; @@ -66,8 +81,8 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer) sba.DynamicStateBaseAddress = (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 }; - sba.DynamicStateMemoryObjectControlState = GENX(MOCS), - sba.DynamicStateBaseAddressModifyEnable = true, + sba.DynamicStateMemoryObjectControlState = GENX(MOCS); + sba.DynamicStateBaseAddressModifyEnable = true; sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 }; sba.IndirectObjectMemoryObjectControlState = GENX(MOCS); @@ -136,6 +151,610 @@ genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer) } } +static void +add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer, + struct anv_state state, + struct anv_bo *bo, uint32_t offset) +{ + const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev; + + anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc, + state.offset + isl_dev->ss.addr_offset, bo, offset); +} + +static void +add_image_view_relocs(struct anv_cmd_buffer *cmd_buffer, + const struct anv_image_view *iview, + enum isl_aux_usage aux_usage, + struct anv_state state) +{ + const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev; + + anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc, + state.offset + isl_dev->ss.addr_offset, + iview->bo, iview->offset); + + if (aux_usage != ISL_AUX_USAGE_NONE) { + uint32_t aux_offset = iview->offset + iview->image->aux_surface.offset; + + /* On gen7 and prior, the bottom 12 bits of the MCS base address are + * used to store other information. 
This should be ok, however, because + * surface buffer addresses are always 4K page alinged. + */ + assert((aux_offset & 0xfff) == 0); + uint32_t *aux_addr_dw = state.map + isl_dev->ss.aux_addr_offset; + aux_offset += *aux_addr_dw & 0xfff; + + anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc, + state.offset + isl_dev->ss.aux_addr_offset, + iview->bo, aux_offset); + } +} + +static bool +color_is_zero_one(VkClearColorValue value, enum isl_format format) +{ + if (isl_format_has_int_channel(format)) { + for (unsigned i = 0; i < 4; i++) { + if (value.int32[i] != 0 && value.int32[i] != 1) + return false; + } + } else { + for (unsigned i = 0; i < 4; i++) { + if (value.float32[i] != 0.0f && value.float32[i] != 1.0f) + return false; + } + } + + return true; +} + +static void +color_attachment_compute_aux_usage(struct anv_device *device, + struct anv_attachment_state *att_state, + struct anv_image_view *iview, + VkRect2D render_area, + union isl_color_value *fast_clear_color) +{ + if (iview->image->aux_surface.isl.size == 0) { + att_state->aux_usage = ISL_AUX_USAGE_NONE; + att_state->input_aux_usage = ISL_AUX_USAGE_NONE; + att_state->fast_clear = false; + return; + } + + assert(iview->image->aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT); + + att_state->clear_color_is_zero_one = + color_is_zero_one(att_state->clear_value.color, iview->isl.format); + + if (att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT) { + /* Start off assuming fast clears are possible */ + att_state->fast_clear = true; + + /* Potentially, we could do partial fast-clears but doing so has crazy + * alignment restrictions. It's easier to just restrict to full size + * fast clears for now. + */ + if (render_area.offset.x != 0 || + render_area.offset.y != 0 || + render_area.extent.width != iview->extent.width || + render_area.extent.height != iview->extent.height) + att_state->fast_clear = false; + + if (GEN_GEN <= 7) { + /* On gen7, we can't do multi-LOD or multi-layer fast-clears. We + * technically can, but it comes with crazy restrictions that we + * don't want to deal with now. + */ + if (iview->isl.base_level > 0 || + iview->isl.base_array_layer > 0 || + iview->isl.array_len > 1) + att_state->fast_clear = false; + } + + /* On Broadwell and earlier, we can only handle 0/1 clear colors */ + if (GEN_GEN <= 8 && !att_state->clear_color_is_zero_one) + att_state->fast_clear = false; + + if (att_state->fast_clear) { + memcpy(fast_clear_color->u32, att_state->clear_value.color.uint32, + sizeof(fast_clear_color->u32)); + } + } else { + att_state->fast_clear = false; + } + + if (isl_format_supports_lossless_compression(&device->info, + iview->isl.format)) { + att_state->aux_usage = ISL_AUX_USAGE_CCS_E; + att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E; + } else if (att_state->fast_clear) { + att_state->aux_usage = ISL_AUX_USAGE_CCS_D; + if (GEN_GEN >= 9) { + /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode: + * + * "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D + * setting is only allowed if Surface Format supported for Fast + * Clear. In addition, if the surface is bound to the sampling + * engine, Surface Format must be supported for Render Target + * Compression for surfaces bound to the sampling engine." + * + * In other words, we can't sample from a fast-cleared image if it + * doesn't also support color compression. 
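+ * Sampling therefore gets no aux usage here, while CCS_D is still used for rendering.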
+ */ + att_state->input_aux_usage = ISL_AUX_USAGE_NONE; + } else if (GEN_GEN == 8) { + /* Broadwell can sample from fast-cleared images */ + att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D; + } else { + /* Ivy Bridge and Haswell cannot */ + att_state->input_aux_usage = ISL_AUX_USAGE_NONE; + } + } else { + att_state->aux_usage = ISL_AUX_USAGE_NONE; + att_state->input_aux_usage = ISL_AUX_USAGE_NONE; + } +} + +static bool +need_input_attachment_state(const struct anv_render_pass_attachment *att) +{ + if (!(att->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) + return false; + + /* We only allocate input attachment states for color and depth surfaces. + * Stencil doesn't allow compression so we can just use the texture surface + * state from the view + */ + return vk_format_is_color(att->format) || vk_format_has_depth(att->format); +} + +/** + * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass. + */ +static void +genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer, + struct anv_render_pass *pass, + const VkRenderPassBeginInfo *begin) +{ + const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev; + struct anv_cmd_state *state = &cmd_buffer->state; + + vk_free(&cmd_buffer->pool->alloc, state->attachments); + + if (pass->attachment_count == 0) { + state->attachments = NULL; + return; + } + + state->attachments = vk_alloc(&cmd_buffer->pool->alloc, + pass->attachment_count * + sizeof(state->attachments[0]), + 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + if (state->attachments == NULL) { + /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */ + abort(); + } + + bool need_null_state = false; + unsigned num_states = 0; + for (uint32_t i = 0; i < pass->attachment_count; ++i) { + if (vk_format_is_color(pass->attachments[i].format)) { + num_states++; + } else { + /* We need a null state for any depth-stencil-only subpasses. 
+ * Importantly, this includes depth/stencil clears so we create one + * whenever we have depth or stencil + */ + need_null_state = true; + } + + if (need_input_attachment_state(&pass->attachments[i])) + num_states++; + } + num_states += need_null_state; + + const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align); + state->render_pass_states = + anv_state_stream_alloc(&cmd_buffer->surface_state_stream, + num_states * ss_stride, isl_dev->ss.align); + + struct anv_state next_state = state->render_pass_states; + next_state.alloc_size = isl_dev->ss.size; + + if (need_null_state) { + state->null_surface_state = next_state; + next_state.offset += ss_stride; + next_state.map += ss_stride; + } + + for (uint32_t i = 0; i < pass->attachment_count; ++i) { + if (vk_format_is_color(pass->attachments[i].format)) { + state->attachments[i].color_rt_state = next_state; + next_state.offset += ss_stride; + next_state.map += ss_stride; + } + + if (need_input_attachment_state(&pass->attachments[i])) { + state->attachments[i].input_att_state = next_state; + next_state.offset += ss_stride; + next_state.map += ss_stride; + } + } + assert(next_state.offset == state->render_pass_states.offset + + state->render_pass_states.alloc_size); + + if (begin) { + ANV_FROM_HANDLE(anv_framebuffer, framebuffer, begin->framebuffer); + assert(pass->attachment_count == framebuffer->attachment_count); + + if (need_null_state) { + struct GENX(RENDER_SURFACE_STATE) null_ss = { + .SurfaceType = SURFTYPE_NULL, + .SurfaceArray = framebuffer->layers > 0, + .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM, +#if GEN_GEN >= 8 + .TileMode = YMAJOR, +#else + .TiledSurface = true, +#endif + .Width = framebuffer->width - 1, + .Height = framebuffer->height - 1, + .Depth = framebuffer->layers - 1, + .RenderTargetViewExtent = framebuffer->layers - 1, + }; + GENX(RENDER_SURFACE_STATE_pack)(NULL, state->null_surface_state.map, + &null_ss); + } + + for (uint32_t i = 0; i < pass->attachment_count; ++i) { + struct anv_render_pass_attachment *att = &pass->attachments[i]; + VkImageAspectFlags att_aspects = vk_format_aspects(att->format); + VkImageAspectFlags clear_aspects = 0; + + if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) { + /* color attachment */ + if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { + clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT; + } + } else { + /* depthstencil attachment */ + if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && + att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { + clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT; + } + if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) && + att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { + clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT; + } + } + + state->attachments[i].pending_clear_aspects = clear_aspects; + if (clear_aspects) + state->attachments[i].clear_value = begin->pClearValues[i]; + + struct anv_image_view *iview = framebuffer->attachments[i]; + assert(iview->vk_format == att->format); + + union isl_color_value clear_color = { .u32 = { 0, } }; + if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) { + color_attachment_compute_aux_usage(cmd_buffer->device, + &state->attachments[i], + iview, begin->renderArea, + &clear_color); + + struct isl_view view = iview->isl; + view.usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT; + isl_surf_fill_state(isl_dev, + state->attachments[i].color_rt_state.map, + .surf = &iview->image->color_surface.isl, + .view = &view, + .aux_surf = &iview->image->aux_surface.isl, + .aux_usage = state->attachments[i].aux_usage, + .clear_color = clear_color, + .mocs = 
cmd_buffer->device->default_mocs); + + add_image_view_relocs(cmd_buffer, iview, + state->attachments[i].aux_usage, + state->attachments[i].color_rt_state); + } else { + state->attachments[i].aux_usage = ISL_AUX_USAGE_NONE; + state->attachments[i].input_aux_usage = ISL_AUX_USAGE_NONE; + } + + if (need_input_attachment_state(&pass->attachments[i])) { + const struct isl_surf *surf; + if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) { + surf = &iview->image->color_surface.isl; + } else { + surf = &iview->image->depth_surface.isl; + } + + struct isl_view view = iview->isl; + view.usage |= ISL_SURF_USAGE_TEXTURE_BIT; + isl_surf_fill_state(isl_dev, + state->attachments[i].input_att_state.map, + .surf = surf, + .view = &view, + .aux_surf = &iview->image->aux_surface.isl, + .aux_usage = state->attachments[i].input_aux_usage, + .clear_color = clear_color, + .mocs = cmd_buffer->device->default_mocs); + + add_image_view_relocs(cmd_buffer, iview, + state->attachments[i].input_aux_usage, + state->attachments[i].input_att_state); + } + } + + if (!cmd_buffer->device->info.has_llc) + anv_state_clflush(state->render_pass_states); + } +} + +VkResult +genX(BeginCommandBuffer)( + VkCommandBuffer commandBuffer, + const VkCommandBufferBeginInfo* pBeginInfo) +{ + ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); + + /* If this is the first vkBeginCommandBuffer, we must *initialize* the + * command buffer's state. Otherwise, we must *reset* its state. In both + * cases we reset it. + * + * From the Vulkan 1.0 spec: + * + * If a command buffer is in the executable state and the command buffer + * was allocated from a command pool with the + * VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then + * vkBeginCommandBuffer implicitly resets the command buffer, behaving + * as if vkResetCommandBuffer had been called with + * VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts + * the command buffer in the recording state. 
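+ * An unconditional reset therefore covers both the initialize and the reset case.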
+ */ + anv_cmd_buffer_reset(cmd_buffer); + + cmd_buffer->usage_flags = pBeginInfo->flags; + + assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY || + !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)); + + genX(cmd_buffer_emit_state_base_address)(cmd_buffer); + + if (cmd_buffer->usage_flags & + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { + cmd_buffer->state.pass = + anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass); + cmd_buffer->state.subpass = + &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass]; + cmd_buffer->state.framebuffer = NULL; + + genX(cmd_buffer_setup_attachments)(cmd_buffer, cmd_buffer->state.pass, + NULL); + + cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS; + } + + return VK_SUCCESS; +} + +VkResult +genX(EndCommandBuffer)( + VkCommandBuffer commandBuffer) +{ + ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); + + genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer); + + anv_cmd_buffer_end_batch_buffer(cmd_buffer); + + return VK_SUCCESS; +} + +void +genX(CmdExecuteCommands)( + VkCommandBuffer commandBuffer, + uint32_t commandBufferCount, + const VkCommandBuffer* pCmdBuffers) +{ + ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer); + + assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY); + + for (uint32_t i = 0; i < commandBufferCount; i++) { + ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]); + + assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY); + + if (secondary->usage_flags & + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { + /* If we're continuing a render pass from the primary, we need to + * copy the surface states for the current subpass into the storage + * we allocated for them in BeginCommandBuffer. + */ + struct anv_bo *ss_bo = &primary->device->surface_state_block_pool.bo; + struct anv_state src_state = primary->state.render_pass_states; + struct anv_state dst_state = secondary->state.render_pass_states; + assert(src_state.alloc_size == dst_state.alloc_size); + + genX(cmd_buffer_gpu_memcpy)(primary, ss_bo, dst_state.offset, + ss_bo, src_state.offset, + src_state.alloc_size); + } + + anv_cmd_buffer_add_secondary(primary, secondary); + } + + /* Each of the secondary command buffers will use its own state base + * address. We need to re-emit state base address for the primary after + * all of the secondaries are done. + * + * TODO: Maybe we want to make this a dirty bit to avoid extra state base + * address calls? + */ + genX(cmd_buffer_emit_state_base_address)(primary); +} + +#define IVB_L3SQCREG1_SQGHPCI_DEFAULT 0x00730000 +#define VLV_L3SQCREG1_SQGHPCI_DEFAULT 0x00d30000 +#define HSW_L3SQCREG1_SQGHPCI_DEFAULT 0x00610000 + +/** + * Program the hardware to use the specified L3 configuration. + */ +void +genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, + const struct gen_l3_config *cfg) +{ + assert(cfg); + if (cfg == cmd_buffer->state.current_l3_config) + return; + + if (unlikely(INTEL_DEBUG & DEBUG_L3)) { + fprintf(stderr, "L3 config transition: "); + gen_dump_l3_config(cfg, stderr); + } + + const bool has_slm = cfg->n[GEN_L3P_SLM]; + + /* According to the hardware docs, the L3 partitioning can only be changed + * while the pipeline is completely drained and the caches are flushed, + * which involves a first PIPE_CONTROL flush which stalls the pipeline... 
+ */ + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { + pc.DCFlushEnable = true; + pc.PostSyncOperation = NoWrite; + pc.CommandStreamerStallEnable = true; + } + + /* ...followed by a second pipelined PIPE_CONTROL that initiates + * invalidation of the relevant caches. Note that because RO invalidation + * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL + * command is processed by the CS) we cannot combine it with the previous + * stalling flush as the hardware documentation suggests, because that + * would cause the CS to stall on previous rendering *after* RO + * invalidation and wouldn't prevent the RO caches from being polluted by + * concurrent rendering before the stall completes. This intentionally + * doesn't implement the SKL+ hardware workaround suggesting to enable CS + * stall on PIPE_CONTROLs with the texture cache invalidation bit set for + * GPGPU workloads because the previous and subsequent PIPE_CONTROLs + * already guarantee that there is no concurrent GPGPU kernel execution + * (see SKL HSD 2132585). + */ + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { + pc.TextureCacheInvalidationEnable = true; + pc.ConstantCacheInvalidationEnable = true; + pc.InstructionCacheInvalidateEnable = true; + pc.StateCacheInvalidationEnable = true; + pc.PostSyncOperation = NoWrite; + } + + /* Now send a third stalling flush to make sure that invalidation is + * complete when the L3 configuration registers are modified. + */ + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { + pc.DCFlushEnable = true; + pc.PostSyncOperation = NoWrite; + pc.CommandStreamerStallEnable = true; + } + +#if GEN_GEN >= 8 + + assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]); + + uint32_t l3cr; + anv_pack_struct(&l3cr, GENX(L3CNTLREG), + .SLMEnable = has_slm, + .URBAllocation = cfg->n[GEN_L3P_URB], + .ROAllocation = cfg->n[GEN_L3P_RO], + .DCAllocation = cfg->n[GEN_L3P_DC], + .AllAllocation = cfg->n[GEN_L3P_ALL]); + + /* Set up the L3 partitioning. */ + emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG_num), l3cr); + +#else + + const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL]; + const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] || + cfg->n[GEN_L3P_ALL]; + const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] || + cfg->n[GEN_L3P_ALL]; + const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] || + cfg->n[GEN_L3P_ALL]; + + assert(!cfg->n[GEN_L3P_ALL]); + + /* When enabled SLM only uses a portion of the L3 on half of the banks, + * the matching space on the remaining banks has to be allocated to a + * client (URB for all validated configurations) set to the + * lower-bandwidth 2-bank address hashing mode. + */ + const struct gen_device_info *devinfo = &cmd_buffer->device->info; + const bool urb_low_bw = has_slm && !devinfo->is_baytrail; + assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]); + + /* Minimum number of ways that can be allocated to the URB. */ + MAYBE_UNUSED const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0; + assert(cfg->n[GEN_L3P_URB] >= n0_urb); + + uint32_t l3sqcr1, l3cr2, l3cr3; + anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1), + .ConvertDC_UC = !has_dc, + .ConvertIS_UC = !has_is, + .ConvertC_UC = !has_c, + .ConvertT_UC = !has_t); + l3sqcr1 |= + GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT : + devinfo->is_baytrail ? 
VLV_L3SQCREG1_SQGHPCI_DEFAULT : + IVB_L3SQCREG1_SQGHPCI_DEFAULT; + + anv_pack_struct(&l3cr2, GENX(L3CNTLREG2), + .SLMEnable = has_slm, + .URBLowBandwidth = urb_low_bw, + .URBAllocation = cfg->n[GEN_L3P_URB], +#if !GEN_IS_HASWELL + .ALLAllocation = cfg->n[GEN_L3P_ALL], +#endif + .ROAllocation = cfg->n[GEN_L3P_RO], + .DCAllocation = cfg->n[GEN_L3P_DC]); + + anv_pack_struct(&l3cr3, GENX(L3CNTLREG3), + .ISAllocation = cfg->n[GEN_L3P_IS], + .ISLowBandwidth = 0, + .CAllocation = cfg->n[GEN_L3P_C], + .CLowBandwidth = 0, + .TAllocation = cfg->n[GEN_L3P_T], + .TLowBandwidth = 0); + + /* Set up the L3 partitioning. */ + emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1); + emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2); + emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3); + +#if GEN_IS_HASWELL + if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) { + /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep + * them disabled to avoid crashing the system hard. + */ + uint32_t scratch1, chicken3; + anv_pack_struct(&scratch1, GENX(SCRATCH1), + .L3AtomicDisable = !has_dc); + anv_pack_struct(&chicken3, GENX(CHICKEN3), + .L3AtomicDisableMask = true, + .L3AtomicDisable = !has_dc); + emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1); + emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3); + } +#endif + +#endif + + cmd_buffer->state.current_l3_config = cfg; +} + void genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer) { @@ -283,7 +902,7 @@ void genX(CmdPipelineBarrier)( pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT; break; case VK_ACCESS_SHADER_READ_BIT: - case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: + case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: case VK_ACCESS_TRANSFER_READ_BIT: pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT; break; @@ -361,6 +980,340 @@ cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer) cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS; } +static VkResult +emit_binding_table(struct anv_cmd_buffer *cmd_buffer, + gl_shader_stage stage, + struct anv_state *bt_state) +{ + struct anv_subpass *subpass = cmd_buffer->state.subpass; + struct anv_pipeline *pipeline; + uint32_t bias, state_offset; + + switch (stage) { + case MESA_SHADER_COMPUTE: + pipeline = cmd_buffer->state.compute_pipeline; + bias = 1; + break; + default: + pipeline = cmd_buffer->state.pipeline; + bias = 0; + break; + } + + if (!anv_pipeline_has_stage(pipeline, stage)) { + *bt_state = (struct anv_state) { 0, }; + return VK_SUCCESS; + } + + struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map; + if (bias + map->surface_count == 0) { + *bt_state = (struct anv_state) { 0, }; + return VK_SUCCESS; + } + + *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, + bias + map->surface_count, + &state_offset); + uint32_t *bt_map = bt_state->map; + + if (bt_state->map == NULL) + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + + if (stage == MESA_SHADER_COMPUTE && + get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) { + struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo; + uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset; + + struct anv_state surface_state; + surface_state = + anv_cmd_buffer_alloc_surface_state(cmd_buffer); + + const enum isl_format format = + anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER); + anv_fill_buffer_surface_state(cmd_buffer->device, surface_state, + format, bo_offset, 12, 1); + + bt_map[0] = 
surface_state.offset + state_offset; + add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset); + } + + if (map->surface_count == 0) + goto out; + + if (map->image_count > 0) { + VkResult result = + anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images); + if (result != VK_SUCCESS) + return result; + + cmd_buffer->state.push_constants_dirty |= 1 << stage; + } + + uint32_t image = 0; + for (uint32_t s = 0; s < map->surface_count; s++) { + struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s]; + + struct anv_state surface_state; + + if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) { + /* Color attachment binding */ + assert(stage == MESA_SHADER_FRAGMENT); + assert(binding->binding == 0); + if (binding->index < subpass->color_count) { + const unsigned att = subpass->color_attachments[binding->index]; + surface_state = cmd_buffer->state.attachments[att].color_rt_state; + } else { + surface_state = cmd_buffer->state.null_surface_state; + } + + bt_map[bias + s] = surface_state.offset + state_offset; + continue; + } + + struct anv_descriptor_set *set = + cmd_buffer->state.descriptors[binding->set]; + uint32_t offset = set->layout->binding[binding->binding].descriptor_index; + struct anv_descriptor *desc = &set->descriptors[offset + binding->index]; + + switch (desc->type) { + case VK_DESCRIPTOR_TYPE_SAMPLER: + /* Nothing for us to do here */ + continue; + + case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: + case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: + surface_state = desc->image_view->sampler_surface_state; + assert(surface_state.alloc_size); + add_image_view_relocs(cmd_buffer, desc->image_view, + desc->image_view->image->aux_usage, + surface_state); + break; + + case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: + assert(stage == MESA_SHADER_FRAGMENT); + if (desc->image_view->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) { + /* For stencil input attachments, we treat it like any old texture + * that a user may have bound. + */ + surface_state = desc->image_view->sampler_surface_state; + assert(surface_state.alloc_size); + add_image_view_relocs(cmd_buffer, desc->image_view, + desc->image_view->image->aux_usage, + surface_state); + } else { + /* For depth and color input attachments, we create the surface + * state at vkBeginRenderPass time so that we can include aux + * and clear color information. 
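+ * Look the state up from the attachments array filled in by genX(cmd_buffer_setup_attachments).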
+ */ + assert(binding->input_attachment_index < subpass->input_count); + const unsigned subpass_att = binding->input_attachment_index; + const unsigned att = subpass->input_attachments[subpass_att]; + surface_state = cmd_buffer->state.attachments[att].input_att_state; + } + break; + + case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { + surface_state = desc->image_view->storage_surface_state; + assert(surface_state.alloc_size); + add_image_view_relocs(cmd_buffer, desc->image_view, + desc->image_view->image->aux_usage, + surface_state); + + struct brw_image_param *image_param = + &cmd_buffer->state.push_constants[stage]->images[image++]; + + *image_param = desc->image_view->storage_image_param; + image_param->surface_idx = bias + s; + break; + } + + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: + case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: + case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: + case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: + surface_state = desc->buffer_view->surface_state; + assert(surface_state.alloc_size); + add_surface_state_reloc(cmd_buffer, surface_state, + desc->buffer_view->bo, + desc->buffer_view->offset); + break; + + case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: + surface_state = desc->buffer_view->storage_surface_state; + assert(surface_state.alloc_size); + add_surface_state_reloc(cmd_buffer, surface_state, + desc->buffer_view->bo, + desc->buffer_view->offset); + + struct brw_image_param *image_param = + &cmd_buffer->state.push_constants[stage]->images[image++]; + + *image_param = desc->buffer_view->storage_image_param; + image_param->surface_idx = bias + s; + break; + + default: + assert(!"Invalid descriptor type"); + continue; + } + + bt_map[bias + s] = surface_state.offset + state_offset; + } + assert(image == map->image_count); + + out: + if (!cmd_buffer->device->info.has_llc) + anv_state_clflush(*bt_state); + + return VK_SUCCESS; +} + +static VkResult +emit_samplers(struct anv_cmd_buffer *cmd_buffer, + gl_shader_stage stage, + struct anv_state *state) +{ + struct anv_pipeline *pipeline; + + if (stage == MESA_SHADER_COMPUTE) + pipeline = cmd_buffer->state.compute_pipeline; + else + pipeline = cmd_buffer->state.pipeline; + + if (!anv_pipeline_has_stage(pipeline, stage)) { + *state = (struct anv_state) { 0, }; + return VK_SUCCESS; + } + + struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map; + if (map->sampler_count == 0) { + *state = (struct anv_state) { 0, }; + return VK_SUCCESS; + } + + uint32_t size = map->sampler_count * 16; + *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32); + + if (state->map == NULL) + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + + for (uint32_t s = 0; s < map->sampler_count; s++) { + struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s]; + struct anv_descriptor_set *set = + cmd_buffer->state.descriptors[binding->set]; + uint32_t offset = set->layout->binding[binding->binding].descriptor_index; + struct anv_descriptor *desc = &set->descriptors[offset + binding->index]; + + if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER && + desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) + continue; + + struct anv_sampler *sampler = desc->sampler; + + /* This can happen if we have an unfilled slot since TYPE_SAMPLER + * happens to be zero. 
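+ * There is nothing to copy for such a slot, so it is simply skipped.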
+ */ + if (sampler == NULL) + continue; + + memcpy(state->map + (s * 16), + sampler->state, sizeof(sampler->state)); + } + + if (!cmd_buffer->device->info.has_llc) + anv_state_clflush(*state); + + return VK_SUCCESS; +} + +static uint32_t +flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer) +{ + VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty & + cmd_buffer->state.pipeline->active_stages; + + VkResult result = VK_SUCCESS; + anv_foreach_stage(s, dirty) { + result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]); + if (result != VK_SUCCESS) + break; + result = emit_binding_table(cmd_buffer, s, + &cmd_buffer->state.binding_tables[s]); + if (result != VK_SUCCESS) + break; + } + + if (result != VK_SUCCESS) { + assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY); + + result = anv_cmd_buffer_new_binding_table_block(cmd_buffer); + assert(result == VK_SUCCESS); + + /* Re-emit state base addresses so we get the new surface state base + * address before we start emitting binding tables etc. + */ + genX(cmd_buffer_emit_state_base_address)(cmd_buffer); + + /* Re-emit all active binding tables */ + dirty |= cmd_buffer->state.pipeline->active_stages; + anv_foreach_stage(s, dirty) { + result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]); + if (result != VK_SUCCESS) + return result; + result = emit_binding_table(cmd_buffer, s, + &cmd_buffer->state.binding_tables[s]); + if (result != VK_SUCCESS) + return result; + } + } + + cmd_buffer->state.descriptors_dirty &= ~dirty; + + return dirty; +} + +static void +cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer, + uint32_t stages) +{ + static const uint32_t sampler_state_opcodes[] = { + [MESA_SHADER_VERTEX] = 43, + [MESA_SHADER_TESS_CTRL] = 44, /* HS */ + [MESA_SHADER_TESS_EVAL] = 45, /* DS */ + [MESA_SHADER_GEOMETRY] = 46, + [MESA_SHADER_FRAGMENT] = 47, + [MESA_SHADER_COMPUTE] = 0, + }; + + static const uint32_t binding_table_opcodes[] = { + [MESA_SHADER_VERTEX] = 38, + [MESA_SHADER_TESS_CTRL] = 39, + [MESA_SHADER_TESS_EVAL] = 40, + [MESA_SHADER_GEOMETRY] = 41, + [MESA_SHADER_FRAGMENT] = 42, + [MESA_SHADER_COMPUTE] = 0, + }; + + anv_foreach_stage(s, stages) { + if (cmd_buffer->state.samplers[s].alloc_size > 0) { + anv_batch_emit(&cmd_buffer->batch, + GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) { + ssp._3DCommandSubOpcode = sampler_state_opcodes[s]; + ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset; + } + } + + /* Always emit binding table pointers if we're asked to, since on SKL + * this is what flushes push constants. 
*/ + anv_batch_emit(&cmd_buffer->batch, + GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) { + btp._3DCommandSubOpcode = binding_table_opcodes[s]; + btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset; + } + } +} + static uint32_t cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer) { @@ -417,7 +1370,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer) assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0); - genX(cmd_buffer_config_l3)(cmd_buffer, pipeline); + genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config); genX(flush_pipeline_select_3d)(cmd_buffer); @@ -462,15 +1415,14 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer) cmd_buffer->state.vb_dirty &= ~vb_emit; if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) { - /* If somebody compiled a pipeline after starting a command buffer the - * scratch bo may have grown since we started this cmd buffer (and - * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now, - * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */ - if (cmd_buffer->state.scratch_size < pipeline->total_scratch) - anv_cmd_buffer_emit_state_base_address(cmd_buffer); - anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch); + /* The exact descriptor layout is pulled from the pipeline, so we need + * to re-emit binding tables on every pipeline change. + */ + cmd_buffer->state.descriptors_dirty |= + cmd_buffer->state.pipeline->active_stages; + /* If the pipeline changed, we may need to re-allocate push constant * space in the URB. */ @@ -499,6 +1451,10 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer) } #endif + /* Render targets live in the same binding table as fragment descriptors */ + if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS) + cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT; + /* We emit the binding tables and sampler tables first, then emit push * constants and then finally emit binding table and sampler table * pointers. 
It has to happen in this order, since emitting the binding @@ -508,7 +1464,7 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer) */ uint32_t dirty = 0; if (cmd_buffer->state.descriptors_dirty) - dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer); + dirty = flush_descriptor_sets(cmd_buffer); if (cmd_buffer->state.push_constants_dirty) { #if GEN_GEN >= 9 @@ -523,11 +1479,17 @@ genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer) } if (dirty) - gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty); + cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty); if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) gen8_cmd_buffer_emit_viewport(cmd_buffer); + if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT | + ANV_CMD_DIRTY_PIPELINE)) { + gen8_cmd_buffer_emit_depth_viewport(cmd_buffer, + pipeline->depth_clamp_enable); + } + if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) gen7_cmd_buffer_emit_scissor(cmd_buffer); @@ -640,25 +1602,6 @@ void genX(CmdDrawIndexed)( #define GEN7_3DPRIM_START_INSTANCE 0x243C #define GEN7_3DPRIM_BASE_VERTEX 0x2440 -static void -emit_lrm(struct anv_batch *batch, - uint32_t reg, struct anv_bo *bo, uint32_t offset) -{ - anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) { - lrm.RegisterAddress = reg; - lrm.MemoryAddress = (struct anv_address) { bo, offset }; - } -} - -static void -emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm) -{ - anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) { - lri.RegisterOffset = reg; - lri.DataDWord = imm; - } -} - void genX(CmdDrawIndirect)( VkCommandBuffer commandBuffer, VkBuffer _buffer, @@ -724,6 +1667,106 @@ void genX(CmdDrawIndexedIndirect)( } } +static VkResult +flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer) +{ + struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; + struct anv_state surfaces = { 0, }, samplers = { 0, }; + VkResult result; + + result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces); + if (result != VK_SUCCESS) { + assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY); + result = anv_cmd_buffer_new_binding_table_block(cmd_buffer); + assert(result == VK_SUCCESS); + + /* Re-emit state base addresses so we get the new surface state base + * address before we start emitting binding tables etc. 
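+ * With a freshly allocated binding table block, the retry below is expected to succeed.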
+ */ + genX(cmd_buffer_emit_state_base_address)(cmd_buffer); + + result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces); + assert(result == VK_SUCCESS); + } + + result = emit_samplers(cmd_buffer, MESA_SHADER_COMPUTE, &samplers); + assert(result == VK_SUCCESS); + + uint32_t iface_desc_data_dw[GENX(INTERFACE_DESCRIPTOR_DATA_length)]; + struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = { + .BindingTablePointer = surfaces.offset, + .SamplerStatePointer = samplers.offset, + }; + GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, iface_desc_data_dw, &desc); + + struct anv_state state = + anv_cmd_buffer_merge_dynamic(cmd_buffer, iface_desc_data_dw, + pipeline->interface_descriptor_data, + GENX(INTERFACE_DESCRIPTOR_DATA_length), + 64); + + uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t); + anv_batch_emit(&cmd_buffer->batch, + GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) { + mid.InterfaceDescriptorTotalLength = size; + mid.InterfaceDescriptorDataStartAddress = state.offset; + } + + return VK_SUCCESS; +} + +void +genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer) +{ + struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; + MAYBE_UNUSED VkResult result; + + assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT); + + genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config); + + genX(flush_pipeline_select_gpgpu)(cmd_buffer); + + if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE) { + /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE: + * + * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless + * the only bits that are changed are scoreboard related: Scoreboard + * Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For + * these scoreboard related states, a MEDIA_STATE_FLUSH is + * sufficient." 
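+ * We do not track which of those fields changed, so a CS stall is always scheduled before re-emitting the pipeline batch.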
+ */ + cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT; + genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer); + + anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch); + } + + if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) || + (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) { + /* FIXME: figure out descriptors for gen7 */ + result = flush_compute_descriptor_set(cmd_buffer); + assert(result == VK_SUCCESS); + cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT; + } + + if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) { + struct anv_state push_state = + anv_cmd_buffer_cs_push_constants(cmd_buffer); + + if (push_state.alloc_size) { + anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) { + curbe.CURBETotalDataLength = push_state.alloc_size; + curbe.CURBEDataStartAddress = push_state.offset; + } + } + } + + cmd_buffer->state.compute_dirty = 0; + + genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer); +} + #if GEN_GEN == 7 static bool @@ -773,7 +1816,7 @@ void genX(CmdDispatch)( ggw.SIMDSize = prog_data->simd_size / 16; ggw.ThreadDepthCounterMaximum = 0; ggw.ThreadHeightCounterMaximum = 0; - ggw.ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1; + ggw.ThreadWidthCounterMaximum = prog_data->threads - 1; ggw.ThreadGroupIDXDimension = x; ggw.ThreadGroupIDYDimension = y; ggw.ThreadGroupIDZDimension = z; @@ -874,7 +1917,7 @@ void genX(CmdDispatchIndirect)( ggw.SIMDSize = prog_data->simd_size / 16; ggw.ThreadDepthCounterMaximum = 0; ggw.ThreadHeightCounterMaximum = 0; - ggw.ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1; + ggw.ThreadWidthCounterMaximum = prog_data->threads - 1; ggw.RightExecutionMask = pipeline->cs_right_mask; ggw.BottomExecutionMask = 0xffffffff; } @@ -961,34 +2004,78 @@ genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer) } } -struct anv_state -genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer, - struct anv_framebuffer *fb) +void +genX(cmd_buffer_emit_gen7_depth_flush)(struct anv_cmd_buffer *cmd_buffer) { - struct anv_state state = - anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64); - - struct GENX(RENDER_SURFACE_STATE) null_ss = { - .SurfaceType = SURFTYPE_NULL, - .SurfaceArray = fb->layers > 0, - .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM, -#if GEN_GEN >= 8 - .TileMode = YMAJOR, -#else - .TiledSurface = true, -#endif - .Width = fb->width - 1, - .Height = fb->height - 1, - .Depth = fb->layers - 1, - .RenderTargetViewExtent = fb->layers - 1, - }; - - GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss); + if (GEN_GEN >= 8) + return; - if (!cmd_buffer->device->info.has_llc) - anv_state_clflush(state); + /* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER: + * + * "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any + * combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, + * 3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first + * issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit + * set), followed by a pipelined depth cache flush (PIPE_CONTROL with + * Depth Flush Bit set, followed by another pipelined depth stall + * (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise + * guarantee that the pipeline from WM onwards is already flushed (e.g., + * via a preceding MI_FLUSH)." 
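+ * The three PIPE_CONTROLs below implement exactly that stall / flush / stall sequence.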
+ */ + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) { + pipe.DepthStallEnable = true; + } + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) { + pipe.DepthCacheFlushEnable = true; + } + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) { + pipe.DepthStallEnable = true; + } +} - return state; +static uint32_t +depth_stencil_surface_type(enum isl_surf_dim dim) +{ + switch (dim) { + case ISL_SURF_DIM_1D: + if (GEN_GEN >= 9) { + /* From the Sky Lake PRM, 3DSTATAE_DEPTH_BUFFER::SurfaceType + * + * Programming Notes: + * The Surface Type of the depth buffer must be the same as the + * Surface Type of the render target(s) (defined in + * SURFACE_STATE), unless either the depth buffer or render + * targets are SURFTYPE_NULL (see exception below for SKL). 1D + * surface type not allowed for depth surface and stencil surface. + * + * Workaround: + * If depth/stencil is enabled with 1D render target, + * depth/stencil surface type needs to be set to 2D surface type + * and height set to 1. Depth will use (legacy) TileY and stencil + * will use TileW. For this case only, the Surface Type of the + * depth buffer can be 2D while the Surface Type of the render + * target(s) are 1D, representing an exception to a programming + * note above. + */ + return SURFTYPE_2D; + } else { + return SURFTYPE_1D; + } + case ISL_SURF_DIM_2D: + return SURFTYPE_2D; + case ISL_SURF_DIM_3D: + if (GEN_GEN >= 9) { + /* The Sky Lake docs list the value for 3D as "Reserved". However, + * they have the exact same layout as 2D arrays on gen9+, so we can + * just use 2D here. + */ + return SURFTYPE_2D; + } else { + return SURFTYPE_3D; + } + default: + unreachable("Invalid surface dimension"); + } } static void @@ -1000,19 +2087,28 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) anv_cmd_buffer_get_depth_stencil_view(cmd_buffer); const struct anv_image *image = iview ? 
iview->image : NULL; const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT); + const bool has_hiz = image != NULL && anv_image_has_hiz(image); const bool has_stencil = image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT); /* FIXME: Implement the PMA stall W/A */ /* FIXME: Width and Height are wrong */ + genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer); + /* Emit 3DSTATE_DEPTH_BUFFER */ if (has_depth) { anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) { - db.SurfaceType = SURFTYPE_2D; + db.SurfaceType = + depth_stencil_surface_type(image->depth_surface.isl.dim); db.DepthWriteEnable = true; db.StencilWriteEnable = has_stencil; - db.HierarchicalDepthBufferEnable = false; + + if (cmd_buffer->state.pass->subpass_count == 1) { + db.HierarchicalDepthBufferEnable = has_hiz; + } else { + anv_finishme("Multiple-subpass HiZ not implemented"); + } db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev, &image->depth_surface.isl); @@ -1021,18 +2117,18 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) .bo = image->bo, .offset = image->offset + image->depth_surface.offset, }; - db.DepthBufferObjectControlState = GENX(MOCS), + db.DepthBufferObjectControlState = GENX(MOCS); db.SurfacePitch = image->depth_surface.isl.row_pitch - 1; - db.Height = fb->height - 1; - db.Width = fb->width - 1; - db.LOD = 0; - db.Depth = 1 - 1; - db.MinimumArrayElement = 0; + db.Height = image->extent.height - 1; + db.Width = image->extent.width - 1; + db.LOD = iview->isl.base_level; + db.Depth = image->array_size - 1; /* FIXME: 3-D */ + db.MinimumArrayElement = iview->isl.base_array_layer; #if GEN_GEN >= 8 db.SurfaceQPitch = - isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2, + isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2; #endif db.RenderTargetViewExtent = 1 - 1; } @@ -1056,7 +2152,12 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) * be combined with a stencil buffer so we use D32_FLOAT instead. */ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) { - db.SurfaceType = SURFTYPE_2D; + if (has_stencil) { + db.SurfaceType = + depth_stencil_surface_type(image->stencil_surface.isl.dim); + } else { + db.SurfaceType = SURFTYPE_2D; + } db.SurfaceFormat = D32_FLOAT; db.Width = fb->width - 1; db.Height = fb->height - 1; @@ -1064,23 +2165,48 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) } } + if (has_hiz) { + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb) { + hdb.HierarchicalDepthBufferObjectControlState = GENX(MOCS); + hdb.SurfacePitch = image->aux_surface.isl.row_pitch - 1; + hdb.SurfaceBaseAddress = (struct anv_address) { + .bo = image->bo, + .offset = image->offset + image->aux_surface.offset, + }; +#if GEN_GEN >= 8 + /* From the SKL PRM Vol2a: + * + * The interpretation of this field is dependent on Surface Type + * as follows: + * - SURFTYPE_1D: distance in pixels between array slices + * - SURFTYPE_2D/CUBE: distance in rows between array slices + * - SURFTYPE_3D: distance in rows between R - slices + * + * Unfortunately, the docs aren't 100% accurate here. They fail to + * mention that the 1-D rule only applies to linear 1-D images. + * Since depth and HiZ buffers are always tiled, they are treated as + * 2-D images. Prior to Sky Lake, this field is always in rows. 
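+ * The field is programmed in units of four rows, hence the shift right by two below.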
+ */ + hdb.SurfaceQPitch = + isl_surf_get_array_pitch_el_rows(&image->aux_surface.isl) >> 2; +#endif + } + } else { + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb); + } + /* Emit 3DSTATE_STENCIL_BUFFER */ if (has_stencil) { anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) { #if GEN_GEN >= 8 || GEN_IS_HASWELL - sb.StencilBufferEnable = true, + sb.StencilBufferEnable = true; #endif - sb.StencilBufferObjectControlState = GENX(MOCS), + sb.StencilBufferObjectControlState = GENX(MOCS); - /* Stencil buffers have strange pitch. The PRM says: - * - * The pitch must be set to 2x the value computed based on width, - * as the stencil buffer is stored with two rows interleaved. - */ - sb.SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1, + sb.SurfacePitch = image->stencil_surface.isl.row_pitch - 1; #if GEN_GEN >= 8 - sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2, + sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2; #endif sb.SurfaceBaseAddress = (struct anv_address) { .bo = image->bo, @@ -1091,25 +2217,42 @@ cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb); } - /* Disable hierarchial depth buffers. */ - anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz); - - /* Clear the clear params. */ - anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp); + /* From the IVB PRM Vol2P1, 11.5.5.4 3DSTATE_CLEAR_PARAMS: + * + * 3DSTATE_CLEAR_PARAMS must always be programmed in the along with + * the other Depth/Stencil state commands(i.e. 3DSTATE_DEPTH_BUFFER, + * 3DSTATE_STENCIL_BUFFER, or 3DSTATE_HIER_DEPTH_BUFFER) + * + * Testing also shows that some variant of this restriction may exist HSW+. + * On BDW+, it is not possible to emit 2 of these packets consecutively when + * both have DepthClearValueValid set. An analysis of such state programming + * on SKL showed that the GPU doesn't register the latter packet's clear + * value. 
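+ * So the packet is emitted exactly once here, with the clear value set only when HiZ is in use.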
+ */ + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp) { + if (has_hiz) { + cp.DepthClearValueValid = true; + const uint32_t ds = + cmd_buffer->state.subpass->depth_stencil_attachment; + cp.DepthClearValue = + cmd_buffer->state.attachments[ds].clear_value.depthStencil.depth; + } + } } -/** - * @see anv_cmd_buffer_set_subpass() - */ -void +static void genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer, struct anv_subpass *subpass) { cmd_buffer->state.subpass = subpass; - cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT; + cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS; cmd_buffer_emit_depth_stencil(cmd_buffer); + genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_HIZ_RESOLVE); + genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_CLEAR); + + anv_cmd_buffer_clear_subpass(cmd_buffer); } void genX(CmdBeginRenderPass)( @@ -1124,25 +2267,11 @@ void genX(CmdBeginRenderPass)( cmd_buffer->state.framebuffer = framebuffer; cmd_buffer->state.pass = pass; cmd_buffer->state.render_area = pRenderPassBegin->renderArea; - anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin); + genX(cmd_buffer_setup_attachments)(cmd_buffer, pass, pRenderPassBegin); genX(flush_pipeline_select_3d)(cmd_buffer); - const VkRect2D *render_area = &pRenderPassBegin->renderArea; - - anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE), r) { - r.ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0); - r.ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0); - r.ClippedDrawingRectangleYMax = - render_area->offset.y + render_area->extent.height - 1; - r.ClippedDrawingRectangleXMax = - render_area->offset.x + render_area->extent.width - 1; - r.DrawingRectangleOriginY = 0; - r.DrawingRectangleOriginX = 0; - } - genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses); - anv_cmd_buffer_clear_subpass(cmd_buffer); } void genX(CmdNextSubpass)( @@ -1155,7 +2284,6 @@ void genX(CmdNextSubpass)( anv_cmd_buffer_resolve_subpass(cmd_buffer); genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1); - anv_cmd_buffer_clear_subpass(cmd_buffer); } void genX(CmdEndRenderPass)( @@ -1163,26 +2291,34 @@ void genX(CmdEndRenderPass)( { ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); + genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_RESOLVE); anv_cmd_buffer_resolve_subpass(cmd_buffer); + +#ifndef NDEBUG + anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer); +#endif } static void -emit_ps_depth_count(struct anv_batch *batch, +emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer, struct anv_bo *bo, uint32_t offset) { - anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) { + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { pc.DestinationAddressType = DAT_PPGTT; pc.PostSyncOperation = WritePSDepthCount; pc.DepthStallEnable = true; pc.Address = (struct anv_address) { bo, offset }; + + if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4) + pc.CommandStreamerStallEnable = true; } } static void -emit_query_availability(struct anv_batch *batch, +emit_query_availability(struct anv_cmd_buffer *cmd_buffer, struct anv_bo *bo, uint32_t offset) { - anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) { + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { pc.DestinationAddressType = DAT_PPGTT; pc.PostSyncOperation = WriteImmediateData; pc.Address = (struct anv_address) { bo, offset }; @@ -1215,7 +2351,7 @@ void genX(CmdBeginQuery)( switch (pool->type) { case VK_QUERY_TYPE_OCCLUSION: - 
emit_ps_depth_count(&cmd_buffer->batch, &pool->bo, + emit_ps_depth_count(cmd_buffer, &pool->bo, query * sizeof(struct anv_query_pool_slot)); break; @@ -1235,10 +2371,10 @@ void genX(CmdEndQuery)( switch (pool->type) { case VK_QUERY_TYPE_OCCLUSION: - emit_ps_depth_count(&cmd_buffer->batch, &pool->bo, + emit_ps_depth_count(cmd_buffer, &pool->bo, query * sizeof(struct anv_query_pool_slot) + 8); - emit_query_availability(&cmd_buffer->batch, &pool->bo, + emit_query_availability(cmd_buffer, &pool->bo, query * sizeof(struct anv_query_pool_slot) + 16); break; @@ -1277,14 +2413,17 @@ void genX(CmdWriteTimestamp)( default: /* Everything else is bottom-of-pipe */ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { - pc.DestinationAddressType = DAT_PPGTT, - pc.PostSyncOperation = WriteTimestamp, + pc.DestinationAddressType = DAT_PPGTT; + pc.PostSyncOperation = WriteTimestamp; pc.Address = (struct anv_address) { &pool->bo, offset }; + + if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4) + pc.CommandStreamerStallEnable = true; } break; } - emit_query_availability(&cmd_buffer->batch, &pool->bo, query + 16); + emit_query_availability(cmd_buffer, &pool->bo, query + 16); } #if GEN_GEN > 7 || GEN_IS_HASWELL @@ -1421,4 +2560,17 @@ void genX(CmdCopyQueryPoolResults)( } } +#else +void genX(CmdCopyQueryPoolResults)( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + VkBuffer destBuffer, + VkDeviceSize destOffset, + VkDeviceSize destStride, + VkQueryResultFlags flags) +{ + anv_finishme("Queries not yet supported on Ivy Bridge"); +} #endif