diff --git a/src/intel/vulkan/gen7_cmd_buffer.c b/src/intel/vulkan/gen7_cmd_buffer.c
index 32bae027725..4977cc97aae 100644
--- a/src/intel/vulkan/gen7_cmd_buffer.c
+++ b/src/intel/vulkan/gen7_cmd_buffer.c
@@ -28,102 +28,13 @@
 #include <unistd.h>
 
 #include "anv_private.h"
+#include "vk_format_info.h"
 
 #include "genxml/gen_macros.h"
 #include "genxml/genX_pack.h"
 
 #if GEN_GEN == 7 && !GEN_IS_HASWELL
-void
-gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
-                                         uint32_t stages)
-{
-   static const uint32_t sampler_state_opcodes[] = {
-      [MESA_SHADER_VERTEX]    = 43,
-      [MESA_SHADER_TESS_CTRL] = 44, /* HS */
-      [MESA_SHADER_TESS_EVAL] = 45, /* DS */
-      [MESA_SHADER_GEOMETRY]  = 46,
-      [MESA_SHADER_FRAGMENT]  = 47,
-      [MESA_SHADER_COMPUTE]   = 0,
-   };
-
-   static const uint32_t binding_table_opcodes[] = {
-      [MESA_SHADER_VERTEX]    = 38,
-      [MESA_SHADER_TESS_CTRL] = 39,
-      [MESA_SHADER_TESS_EVAL] = 40,
-      [MESA_SHADER_GEOMETRY]  = 41,
-      [MESA_SHADER_FRAGMENT]  = 42,
-      [MESA_SHADER_COMPUTE]   = 0,
-   };
-
-   anv_foreach_stage(s, stages) {
-      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
-         anv_batch_emit(&cmd_buffer->batch,
-                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
-            ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
-            ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
-         }
-      }
-
-      /* Always emit binding table pointers if we're asked to, since on SKL
-       * this is what flushes push constants. */
-      anv_batch_emit(&cmd_buffer->batch,
-                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
-         btp._3DCommandSubOpcode = binding_table_opcodes[s];
-         btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
-      }
-   }
-}
-
-uint32_t
-gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
-{
-   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
-                              cmd_buffer->state.pipeline->active_stages;
-
-   VkResult result = VK_SUCCESS;
-   anv_foreach_stage(s, dirty) {
-      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
-                                            &cmd_buffer->state.samplers[s]);
-      if (result != VK_SUCCESS)
-         break;
-      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
-                                                 &cmd_buffer->state.binding_tables[s]);
-      if (result != VK_SUCCESS)
-         break;
-   }
-
-   if (result != VK_SUCCESS) {
-      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
-
-      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
-      assert(result == VK_SUCCESS);
-
-      /* Re-emit state base addresses so we get the new surface state base
-       * address before we start emitting binding tables etc.
-       */
-      anv_cmd_buffer_emit_state_base_address(cmd_buffer);
-
-      /* Re-emit all active binding tables */
-      dirty |= cmd_buffer->state.pipeline->active_stages;
-      anv_foreach_stage(s, dirty) {
-         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
-                                               &cmd_buffer->state.samplers[s]);
-         if (result != VK_SUCCESS)
-            return result;
-         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
-                                                    &cmd_buffer->state.binding_tables[s]);
-         if (result != VK_SUCCESS)
-            return result;
-      }
-   }
-
-   cmd_buffer->state.descriptors_dirty &= ~dirty;
-
-   return dirty;
-}
-#endif /* GEN_GEN == 7 && !GEN_IS_HASWELL */
-
-static inline int64_t
+static int64_t
 clamp_int64(int64_t x, int64_t min, int64_t max)
 {
    if (x < min)
@@ -134,12 +45,12 @@ clamp_int64(int64_t x, int64_t min, int64_t max)
    return max;
 }
 
-#if GEN_GEN == 7 && !GEN_IS_HASWELL
 void
 gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
 {
-   uint32_t count = cmd_buffer->state.dynamic.scissor.count;
-   const VkRect2D *scissors = cmd_buffer->state.dynamic.scissor.scissors;
+   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+   uint32_t count = cmd_buffer->state.gfx.dynamic.scissor.count;
+   const VkRect2D *scissors = cmd_buffer->state.gfx.dynamic.scissor.scissors;
    struct anv_state scissor_state =
       anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);
 
@@ -159,12 +70,36 @@ gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
       };
 
       const int max = 0xffff;
+
+      uint32_t y_min = s->offset.y;
+      uint32_t x_min = s->offset.x;
+      uint32_t y_max = s->offset.y + s->extent.height - 1;
+      uint32_t x_max = s->offset.x + s->extent.width - 1;
+
+      /* Do this math using int64_t so overflow gets clamped correctly. */
+      if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+         y_min = clamp_int64((uint64_t) y_min,
+                             cmd_buffer->state.render_area.offset.y, max);
+         x_min = clamp_int64((uint64_t) x_min,
+                             cmd_buffer->state.render_area.offset.x, max);
+         y_max = clamp_int64((uint64_t) y_max, 0,
+                             cmd_buffer->state.render_area.offset.y +
+                             cmd_buffer->state.render_area.extent.height - 1);
+         x_max = clamp_int64((uint64_t) x_max, 0,
+                             cmd_buffer->state.render_area.offset.x +
+                             cmd_buffer->state.render_area.extent.width - 1);
+      } else if (fb) {
+         y_min = clamp_int64((uint64_t) y_min, 0, max);
+         x_min = clamp_int64((uint64_t) x_min, 0, max);
+         y_max = clamp_int64((uint64_t) y_max, 0, fb->height - 1);
+         x_max = clamp_int64((uint64_t) x_max, 0, fb->width - 1);
+      }
+
       struct GEN7_SCISSOR_RECT scissor = {
-         /* Do this math using int64_t so overflow gets clamped correctly. */
-         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
-         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
-         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
-         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
+         .ScissorRectangleYMin = y_min,
+         .ScissorRectangleXMin = x_min,
+         .ScissorRectangleYMax = y_max,
+         .ScissorRectangleXMax = x_max
       };
 
       if (s->extent.width <= 0 || s->extent.height <= 0) {
@@ -179,21 +114,36 @@ gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
                      GEN7_3DSTATE_SCISSOR_STATE_POINTERS, ssp) {
       ssp.ScissorRectPointer = scissor_state.offset;
    }
-
-   if (!cmd_buffer->device->info.has_llc)
-      anv_state_clflush(scissor_state);
 }
 #endif
 
-static const uint32_t vk_to_gen_index_type[] = {
-   [VK_INDEX_TYPE_UINT16]                       = INDEX_WORD,
-   [VK_INDEX_TYPE_UINT32]                       = INDEX_DWORD,
-};
+static uint32_t vk_to_gen_index_type(VkIndexType type)
+{
+   switch (type) {
+   case VK_INDEX_TYPE_UINT8_EXT:
+      return INDEX_BYTE;
+   case VK_INDEX_TYPE_UINT16:
+      return INDEX_WORD;
+   case VK_INDEX_TYPE_UINT32:
+      return INDEX_DWORD;
+   default:
+      unreachable("invalid index type");
+   }
+}
 
-static const uint32_t restart_index_for_type[] = {
-   [VK_INDEX_TYPE_UINT16]                    = UINT16_MAX,
-   [VK_INDEX_TYPE_UINT32]                    = UINT32_MAX,
-};
+static uint32_t restart_index_for_type(VkIndexType type)
+{
+   switch (type) {
+   case VK_INDEX_TYPE_UINT8_EXT:
+      return UINT8_MAX;
+   case VK_INDEX_TYPE_UINT16:
+      return UINT16_MAX;
+   case VK_INDEX_TYPE_UINT32:
+      return UINT32_MAX;
+   default:
+      unreachable("invalid index type");
+   }
+}
 
 void genX(CmdBindIndexBuffer)(
     VkCommandBuffer                             commandBuffer,
@@ -204,267 +154,103 @@ void genX(CmdBindIndexBuffer)(
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
 
-   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
+   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
    if (GEN_IS_HASWELL)
-      cmd_buffer->state.restart_index = restart_index_for_type[indexType];
-   cmd_buffer->state.gen7.index_buffer = buffer;
-   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
-   cmd_buffer->state.gen7.index_offset = offset;
+      cmd_buffer->state.restart_index = restart_index_for_type(indexType);
+   cmd_buffer->state.gfx.gen7.index_buffer = buffer;
+   cmd_buffer->state.gfx.gen7.index_type = vk_to_gen_index_type(indexType);
+   cmd_buffer->state.gfx.gen7.index_offset = offset;
 }
 
-static VkResult
-flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
+static uint32_t
+get_depth_format(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_device *device = cmd_buffer->device;
-   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
-   struct anv_state surfaces = { 0, }, samplers = { 0, };
-   VkResult result;
-
-   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
-                                         MESA_SHADER_COMPUTE, &samplers);
-   if (result != VK_SUCCESS)
-      return result;
-   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
-                                              MESA_SHADER_COMPUTE, &surfaces);
-   if (result != VK_SUCCESS)
-      return result;
-
-   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);
-
-   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
-   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
-
-   unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
-   unsigned push_constant_data_size =
-      (prog_data->nr_params + local_id_dwords) * 4;
-   unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
-   unsigned push_constant_regs = reg_aligned_constant_size / 32;
-
-   if (push_state.alloc_size) {
-      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
-         curbe.CURBETotalDataLength = push_state.alloc_size;
-         curbe.CURBEDataStartAddress = push_state.offset;
-      }
-   }
+   const struct anv_render_pass *pass = cmd_buffer->state.pass;
+   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
 
-   assert(prog_data->total_shared <= 64 * 1024);
-   uint32_t slm_size = 0;
-   if (prog_data->total_shared > 0) {
-      /* slm_size is in 4k increments, but must be a power of 2. */
-      slm_size = 4 * 1024;
-      while (slm_size < prog_data->total_shared)
-         slm_size <<= 1;
-      slm_size /= 4 * 1024;
-   }
+   if (!subpass->depth_stencil_attachment)
+      return D16_UNORM;
 
-   struct anv_state state =
-      anv_state_pool_emit(&device->dynamic_state_pool,
-                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
-                          .KernelStartPointer = pipeline->cs_simd,
-                          .BindingTablePointer = surfaces.offset,
-                          .SamplerStatePointer = samplers.offset,
-                          .ConstantURBEntryReadLength =
-                             push_constant_regs,
-#if !GEN_IS_HASWELL
-                          .ConstantURBEntryReadOffset = 0,
-#endif
-                          .BarrierEnable = cs_prog_data->uses_barrier,
-                          .SharedLocalMemorySize = slm_size,
-                          .NumberofThreadsinGPGPUThreadGroup =
-                             pipeline->cs_thread_width_max);
+   struct anv_render_pass_attachment *att =
+      &pass->attachments[subpass->depth_stencil_attachment->attachment];
 
-   const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
-   anv_batch_emit(&cmd_buffer->batch,
-                  GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), idl) {
-      idl.InterfaceDescriptorTotalLength = size;
-      idl.InterfaceDescriptorDataStartAddress = state.offset;
-   }
+   switch (att->format) {
+   case VK_FORMAT_D16_UNORM:
+   case VK_FORMAT_D16_UNORM_S8_UINT:
+      return D16_UNORM;
 
-   return VK_SUCCESS;
-}
+   case VK_FORMAT_X8_D24_UNORM_PACK32:
+   case VK_FORMAT_D24_UNORM_S8_UINT:
+      return D24_UNORM_X8_UINT;
 
-void
-genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, bool enable_slm)
-{
-   /* References for GL state:
-    *
-    * - commits e307cfa..228d5a3
-    * - src/mesa/drivers/dri/i965/gen7_l3_state.c
-    */
-
-   uint32_t l3cr2_slm, l3cr2_noslm;
-   anv_pack_struct(&l3cr2_noslm, GENX(L3CNTLREG2),
-                   .URBAllocation = 24,
-                   .ROAllocation = 0,
-                   .DCAllocation = 16);
-   anv_pack_struct(&l3cr2_slm, GENX(L3CNTLREG2),
-                   .SLMEnable = 1,
-                   .URBAllocation = 16,
-                   .URBLowBandwidth = 1,
-                   .ROAllocation = 0,
-                   .DCAllocation = 8);
-   const uint32_t l3cr2_val = enable_slm ? l3cr2_slm : l3cr2_noslm;
-   bool changed = cmd_buffer->state.current_l3_config != l3cr2_val;
-
-   if (changed) {
-      /* According to the hardware docs, the L3 partitioning can only be
-       * changed while the pipeline is completely drained and the caches are
-       * flushed, which involves a first PIPE_CONTROL flush which stalls the
-       * pipeline...
-       */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
-         pc.DCFlushEnable = true;
-         pc.CommandStreamerStallEnable = true;
-         pc.PostSyncOperation = NoWrite;
-      }
-
-      /* ...followed by a second pipelined PIPE_CONTROL that initiates
-       * invalidation of the relevant caches.  Note that because RO
-       * invalidation happens at the top of the pipeline (i.e. right away as
-       * the PIPE_CONTROL command is processed by the CS) we cannot combine it
-       * with the previous stalling flush as the hardware documentation
-       * suggests, because that would cause the CS to stall on previous
-       * rendering *after* RO invalidation and wouldn't prevent the RO caches
-       * from being polluted by concurrent rendering before the stall
-       * completes.  This intentionally doesn't implement the SKL+ hardware
-       * workaround suggesting to enable CS stall on PIPE_CONTROLs with the
-       * texture cache invalidation bit set for GPGPU workloads because the
-       * previous and subsequent PIPE_CONTROLs already guarantee that there is
-       * no concurrent GPGPU kernel execution (see SKL HSD 2132585).
-       */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
-         pc.TextureCacheInvalidationEnable = true;
-         pc.ConstantCacheInvalidationEnable = true;
-         pc.InstructionCacheInvalidateEnable = true;
-         pc.StateCacheInvalidationEnable = true;
-         pc.PostSyncOperation = NoWrite;
-      }
-
-      /* Now send a third stalling flush to make sure that invalidation is
-       * complete when the L3 configuration registers are modified.
-       */
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
-         pc.DCFlushEnable = true;
-         pc.CommandStreamerStallEnable = true;
-         pc.PostSyncOperation = NoWrite;
-      }
-
-      anv_finishme("write GEN7_L3SQCREG1");
-      anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
-         lri.RegisterOffset = GENX(L3CNTLREG2_num);
-         lri.DataDWord = l3cr2_val;
-      }
-
-      uint32_t l3cr3_slm, l3cr3_noslm;
-      anv_pack_struct(&l3cr3_noslm, GENX(L3CNTLREG3),
-                      .ISAllocation = 8,
-                      .CAllocation = 4,
-                      .TAllocation = 8);
-      anv_pack_struct(&l3cr3_slm, GENX(L3CNTLREG3),
-                      .ISAllocation = 8,
-                      .CAllocation = 8,
-                      .TAllocation = 8);
-      const uint32_t l3cr3_val = enable_slm ? l3cr3_slm : l3cr3_noslm;
-      anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
-         lri.RegisterOffset = GENX(L3CNTLREG3_num);
-         lri.DataDWord = l3cr3_val;
-      }
+   case VK_FORMAT_D32_SFLOAT:
+   case VK_FORMAT_D32_SFLOAT_S8_UINT:
+      return D32_FLOAT;
 
-      cmd_buffer->state.current_l3_config = l3cr2_val;
+   default:
+      return D16_UNORM;
    }
 }
 
-void
-genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
-{
-   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
-   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
-   MAYBE_UNUSED VkResult result;
-
-   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
-
-   bool needs_slm = cs_prog_data->base.total_shared > 0;
-   genX(cmd_buffer_config_l3)(cmd_buffer, needs_slm);
-
-   genX(flush_pipeline_select_gpgpu)(cmd_buffer);
-
-   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
-      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
-
-   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
-       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
-      /* FIXME: figure out descriptors for gen7 */
-      result = flush_compute_descriptor_set(cmd_buffer);
-      assert(result == VK_SUCCESS);
-      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
-   }
-
-   cmd_buffer->state.compute_dirty = 0;
-}
-
 void
 genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 {
-   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
-
-   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
-                                  ANV_CMD_DIRTY_RENDER_TARGETS |
-                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
-                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
-
-      const struct anv_image_view *iview =
-         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
-      const struct anv_image *image = iview ? iview->image : NULL;
-      const bool has_depth =
-         image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
-      const uint32_t depth_format = has_depth ?
-         isl_surf_get_depth_format(&cmd_buffer->device->isl_dev,
-                                   &image->depth_surface.isl) : D16_UNORM;
+   struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
+   struct anv_dynamic_state *d = &cmd_buffer->state.gfx.dynamic;
+   if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                      ANV_CMD_DIRTY_RENDER_TARGETS |
+                                      ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
+                                      ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {
       uint32_t sf_dw[GENX(3DSTATE_SF_length)];
       struct GENX(3DSTATE_SF) sf = {
          GENX(3DSTATE_SF_header),
-         .DepthBufferSurfaceFormat = depth_format,
-         .LineWidth = cmd_buffer->state.dynamic.line_width,
-         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
-         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
-         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
+         .DepthBufferSurfaceFormat = get_depth_format(cmd_buffer),
+         .LineWidth = d->line_width,
+         .GlobalDepthOffsetConstant = d->depth_bias.bias,
+         .GlobalDepthOffsetScale = d->depth_bias.slope,
+         .GlobalDepthOffsetClamp = d->depth_bias.clamp
       };
       GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
 
       anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
    }
 
-   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
-                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
-      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
+   if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
+                                      ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
       struct anv_state cc_state =
          anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                             GENX(COLOR_CALC_STATE_length) * 4,
                                             64);
       struct GENX(COLOR_CALC_STATE) cc = {
-         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
-         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
-         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
-         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
+         .BlendConstantColorRed = d->blend_constants[0],
+         .BlendConstantColorGreen = d->blend_constants[1],
+         .BlendConstantColorBlue = d->blend_constants[2],
+         .BlendConstantColorAlpha = d->blend_constants[3],
          .StencilReferenceValue = d->stencil_reference.front & 0xff,
-         .BackFaceStencilReferenceValue = d->stencil_reference.back & 0xff,
+         .BackfaceStencilReferenceValue = d->stencil_reference.back & 0xff,
       };
       GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
-      if (!cmd_buffer->device->info.has_llc)
-         anv_state_clflush(cc_state);
 
       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
          ccp.ColorCalcStatePointer = cc_state.offset;
       }
    }
 
-   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
-                                  ANV_CMD_DIRTY_RENDER_TARGETS |
-                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
-                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
+   if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE) {
+      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_LINE_STIPPLE), ls) {
+         ls.LineStipplePattern = d->line_stipple.pattern;
+         ls.LineStippleInverseRepeatCount =
+            1.0f / MAX2(1, d->line_stipple.factor);
+         ls.LineStippleRepeatCount = d->line_stipple.factor;
+      }
+   }
+
+   if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                      ANV_CMD_DIRTY_RENDER_TARGETS |
+                                      ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+                                      ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
       uint32_t depth_stencil_dw[GENX(DEPTH_STENCIL_STATE_length)];
-      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
 
       struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
          .StencilTestMask = d->stencil_compare_mask.front & 0xff,
@@ -472,6 +258,10 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
          .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
          .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
+
+         .StencilBufferWriteEnable =
+            (d->stencil_write_mask.front || d->stencil_write_mask.back) &&
+            pipeline->writes_stencil,
       };
       GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);
@@ -486,11 +276,11 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
       }
    }
 
-   if (cmd_buffer->state.gen7.index_buffer &&
-       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
-                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
-      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
-      uint32_t offset = cmd_buffer->state.gen7.index_offset;
+   if (cmd_buffer->state.gfx.gen7.index_buffer &&
+       cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_PIPELINE |
+                                      ANV_CMD_DIRTY_INDEX_BUFFER)) {
+      struct anv_buffer *buffer = cmd_buffer->state.gfx.gen7.index_buffer;
+      uint32_t offset = cmd_buffer->state.gfx.gen7.index_offset;
 
 #if GEN_IS_HASWELL
       anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF, vf) {
@@ -503,53 +293,23 @@ genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
 #if !GEN_IS_HASWELL
          ib.CutIndexEnable = pipeline->primitive_restart;
 #endif
-         ib.IndexFormat = cmd_buffer->state.gen7.index_type;
-         ib.MemoryObjectControlState = GENX(MOCS);
-
-         ib.BufferStartingAddress =
-            (struct anv_address) { buffer->bo, buffer->offset + offset };
-         ib.BufferEndingAddress =
-            (struct anv_address) { buffer->bo, buffer->offset + buffer->size };
+         ib.IndexFormat = cmd_buffer->state.gfx.gen7.index_type;
+         ib.MOCS = anv_mocs_for_bo(cmd_buffer->device,
+                                   buffer->address.bo);
+
+         ib.BufferStartingAddress = anv_address_add(buffer->address,
+                                                    offset);
+         ib.BufferEndingAddress = anv_address_add(buffer->address,
+                                                  buffer->size);
       }
    }
 
-   cmd_buffer->state.dirty = 0;
-}
-
-void genX(CmdSetEvent)(
-    VkCommandBuffer                             commandBuffer,
-    VkEvent                                     event,
-    VkPipelineStageFlags                        stageMask)
-{
-   stub();
+   cmd_buffer->state.gfx.dirty = 0;
 }
 
-void genX(CmdResetEvent)(
-    VkCommandBuffer                             commandBuffer,
-    VkEvent                                     event,
-    VkPipelineStageFlags                        stageMask)
-{
-   stub();
-}
-
-void genX(CmdWaitEvents)(
-    VkCommandBuffer                             commandBuffer,
-    uint32_t                                    eventCount,
-    const VkEvent*                              pEvents,
-    VkPipelineStageFlags                        srcStageMask,
-    VkPipelineStageFlags                        destStageMask,
-    uint32_t                                    memoryBarrierCount,
-    const VkMemoryBarrier*                      pMemoryBarriers,
-    uint32_t                                    bufferMemoryBarrierCount,
-    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
-    uint32_t                                    imageMemoryBarrierCount,
-    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
+void
+genX(cmd_buffer_enable_pma_fix)(struct anv_cmd_buffer *cmd_buffer,
+                                bool enable)
 {
-   stub();
-
-   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
-                            false, /* byRegion */
-                            memoryBarrierCount, pMemoryBarriers,
-                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
-                            imageMemoryBarrierCount, pImageMemoryBarriers);
+   /* The NP PMA fix doesn't exist on gen7 */
 }
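
Note on the new scissor math (a standalone sketch, not part of the commit):
VkRect2D carries an int32_t offset and a uint32_t extent, so the inclusive
maximum "offset + extent - 1" can exceed INT32_MAX; the patch therefore forms
the sum in 64 bits before clamp_int64() pulls it into the hardware range.
clamp_int64() below mirrors the helper the patch keeps; main(), offset_y and
extent_h are only a hypothetical driver-free harness.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same helper as in the file above (the diff only drops its "inline"). */
static int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}

int
main(void)
{
   int32_t  offset_y = INT32_MAX;   /* extreme but Vulkan-legal scissor */
   uint32_t extent_h = UINT32_MAX;
   const int64_t max = 0xffff;      /* gen7 SCISSOR_RECT field limit */

   /* Promoting to 64 bits keeps the sum exact, so the clamp lands on the
    * hardware maximum instead of on a wrapped 32-bit value. */
   int64_t y_max = clamp_int64((int64_t) offset_y + extent_h - 1, 0, max);
   printf("clamped y_max = %" PRId64 "\n", y_max);   /* prints 65535 */
   return 0;
}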
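
Note on the index-type helpers (an inference about the change, with a
hypothetical header-free demo): the likely reason the lookup tables become
switch statements is that VK_INDEX_TYPE_UINT8_EXT is an extension enumerant
with value 1000265000, which cannot reasonably index a small static array the
way the core values VK_INDEX_TYPE_UINT16 (0) and VK_INDEX_TYPE_UINT32 (1)
could. The enum below inlines those values so the sketch compiles without
the Vulkan headers; restart_index_for_type() mirrors the patch's helper.

#include <inttypes.h>
#include <stdio.h>

/* Numeric values as defined by vulkan_core.h. */
enum vk_index_type {
   INDEX_TYPE_UINT16    = 0,
   INDEX_TYPE_UINT32    = 1,
   INDEX_TYPE_UINT8_EXT = 1000265000,
};

/* A designated-initializer table indexed by this enum would need about a
 * billion entries to cover UINT8_EXT, so a switch is the sane mapping. */
static uint32_t
restart_index_for_type(enum vk_index_type type)
{
   switch (type) {
   case INDEX_TYPE_UINT8_EXT: return UINT8_MAX;
   case INDEX_TYPE_UINT16:    return UINT16_MAX;
   case INDEX_TYPE_UINT32:    return UINT32_MAX;
   default:                   return 0; /* the driver hits unreachable() here */
   }
}

int
main(void)
{
   printf("restart index for UINT8_EXT: 0x%" PRIx32 "\n",
          restart_index_for_type(INDEX_TYPE_UINT8_EXT));
   return 0;
}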