From: Kristian Høgsberg Kristensen Date: Thu, 26 Nov 2015 06:27:01 +0000 (-0800) Subject: vk: Add SKL support X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=cd4721c062a300739b107925f801c9b9ced5f9fa;p=mesa.git vk: Add SKL support Signed-off-by: Kristian Høgsberg Kristensen --- diff --git a/src/vulkan/Makefile.am b/src/vulkan/Makefile.am index 64b5e90ae33..3f6d4a3e2e9 100644 --- a/src/vulkan/Makefile.am +++ b/src/vulkan/Makefile.am @@ -37,7 +37,8 @@ check_LTLIBRARIES = libvulkan-test.la PER_GEN_LIBS = \ libanv-gen7.la \ libanv-gen75.la \ - libanv-gen8.la + libanv-gen8.la \ + libanv-gen9.la noinst_LTLIBRARIES = $(PER_GEN_LIBS) @@ -115,6 +116,13 @@ libanv_gen8_la_SOURCES = \ gen8_state.c libanv_gen8_la_CFLAGS = $(libvulkan_la_CFLAGS) -DANV_GENx10=80 +libanv_gen9_la_SOURCES = \ + genX_cmd_buffer.c \ + gen8_cmd_buffer.c \ + gen8_pipeline.c \ + gen8_state.c +libanv_gen9_la_CFLAGS = $(libvulkan_la_CFLAGS) -DANV_GENx10=90 + if HAVE_EGL_PLATFORM_WAYLAND BUILT_SOURCES += \ wayland-drm-protocol.c \ diff --git a/src/vulkan/anv_cmd_buffer.c b/src/vulkan/anv_cmd_buffer.c index 6dedc3f335f..66b2f65e9f7 100644 --- a/src/vulkan/anv_cmd_buffer.c +++ b/src/vulkan/anv_cmd_buffer.c @@ -250,6 +250,8 @@ anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer) return gen7_cmd_buffer_emit_state_base_address(cmd_buffer); case 8: return gen8_cmd_buffer_emit_state_base_address(cmd_buffer); + case 9: + return gen9_cmd_buffer_emit_state_base_address(cmd_buffer); default: unreachable("unsupported gen\n"); } @@ -788,6 +790,9 @@ anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer, case 8: gen8_cmd_buffer_begin_subpass(cmd_buffer, subpass); break; + case 9: + gen9_cmd_buffer_begin_subpass(cmd_buffer, subpass); + break; default: unreachable("unsupported gen\n"); } diff --git a/src/vulkan/anv_device.c b/src/vulkan/anv_device.c index e2b00c97c25..94bc4f11f27 100644 --- a/src/vulkan/anv_device.c +++ b/src/vulkan/anv_device.c @@ -87,8 +87,10 @@ anv_physical_device_init(struct anv_physical_device *device, fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n"); } else if (device->info->gen == 7 && !device->info->is_baytrail) { fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n"); + } else if (device->info->gen == 9) { + fprintf(stderr, "WARNING: Skylake Vulkan support is incomplete\n"); } else if (device->info->gen == 8 && !device->info->is_cherryview) { - /* Briadwell is as fully supported as anything */ + /* Broadwell is as fully supported as anything */ } else { result = vk_errorf(VK_UNSUPPORTED, "Vulkan not yet supported on %s", device->name); @@ -1448,6 +1450,9 @@ anv_fill_buffer_surface_state(struct anv_device *device, void *state, case 8: gen8_fill_buffer_surface_state(state, format, offset, range, stride); break; + case 9: + gen9_fill_buffer_surface_state(state, format, offset, range, stride); + break; default: unreachable("unsupported gen\n"); } diff --git a/src/vulkan/anv_image.c b/src/vulkan/anv_image.c index 60d23a17f5f..d344fcc2bea 100644 --- a/src/vulkan/anv_image.c +++ b/src/vulkan/anv_image.c @@ -573,6 +573,9 @@ anv_image_view_init(struct anv_image_view *iview, case 8: gen8_image_view_init(iview, device, pCreateInfo, cmd_buffer); break; + case 9: + gen9_image_view_init(iview, device, pCreateInfo, cmd_buffer); + break; default: unreachable("unsupported gen\n"); } diff --git a/src/vulkan/anv_pipeline.c b/src/vulkan/anv_pipeline.c index 3d9e0705626..cf4bf9f87a0 100644 --- a/src/vulkan/anv_pipeline.c +++ b/src/vulkan/anv_pipeline.c @@ -1082,6 +1082,8 
@@ anv_graphics_pipeline_create( return gen7_graphics_pipeline_create(_device, pCreateInfo, extra, pPipeline); case 8: return gen8_graphics_pipeline_create(_device, pCreateInfo, extra, pPipeline); + case 9: + return gen9_graphics_pipeline_create(_device, pCreateInfo, extra, pPipeline); default: unreachable("unsupported gen\n"); } @@ -1127,6 +1129,8 @@ static VkResult anv_compute_pipeline_create( return gen7_compute_pipeline_create(_device, pCreateInfo, pPipeline); case 8: return gen8_compute_pipeline_create(_device, pCreateInfo, pPipeline); + case 9: + return gen9_compute_pipeline_create(_device, pCreateInfo, pPipeline); default: unreachable("unsupported gen\n"); } diff --git a/src/vulkan/anv_private.h b/src/vulkan/anv_private.h index a3e63e4c0c0..36cee88602d 100644 --- a/src/vulkan/anv_private.h +++ b/src/vulkan/anv_private.h @@ -701,6 +701,20 @@ __gen_combine_address(struct anv_batch *batch, void *location, .AgeforQUADLRU = 0 \ } +/* Skylake: MOCS is now an index into an array of 62 different caching + * configurations programmed by the kernel. + */ + +#define GEN9_MOCS { \ + /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \ + .IndextoMOCSTables = 2 \ + } + +#define GEN9_MOCS_PTE { \ + /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \ + .IndextoMOCSTables = 1 \ + } + struct anv_device_memory { struct anv_bo bo; VkDeviceSize map_size; @@ -1079,6 +1093,7 @@ void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer); void gen7_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer); void gen75_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer); void gen8_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer); +void gen9_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer); void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer); @@ -1087,6 +1102,8 @@ void gen7_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer, void gen8_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer, struct anv_subpass *subpass); +void gen9_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer, + struct anv_subpass *subpass); void anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer, struct anv_subpass *subpass); @@ -1184,7 +1201,7 @@ struct anv_pipeline { struct { uint32_t sf[4]; uint32_t raster[5]; - uint32_t wm_depth_stencil[3]; + uint32_t wm_depth_stencil[4]; } gen8; }; @@ -1230,6 +1247,11 @@ gen8_graphics_pipeline_create(VkDevice _device, const struct anv_graphics_pipeline_create_info *extra, VkPipeline *pPipeline); VkResult +gen9_graphics_pipeline_create(VkDevice _device, + const VkGraphicsPipelineCreateInfo *pCreateInfo, + const struct anv_graphics_pipeline_create_info *extra, + VkPipeline *pPipeline); +VkResult gen7_compute_pipeline_create(VkDevice _device, const VkComputePipelineCreateInfo *pCreateInfo, VkPipeline *pPipeline); @@ -1242,6 +1264,10 @@ VkResult gen8_compute_pipeline_create(VkDevice _device, const VkComputePipelineCreateInfo *pCreateInfo, VkPipeline *pPipeline); +VkResult +gen9_compute_pipeline_create(VkDevice _device, + const VkComputePipelineCreateInfo *pCreateInfo, + VkPipeline *pPipeline); struct anv_format { const VkFormat vk_format; @@ -1405,6 +1431,12 @@ gen8_image_view_init(struct anv_image_view *iview, const VkImageViewCreateInfo* pCreateInfo, struct anv_cmd_buffer *cmd_buffer); +void +gen9_image_view_init(struct anv_image_view *iview, + struct anv_device *device, + const VkImageViewCreateInfo* pCreateInfo, + struct anv_cmd_buffer *cmd_buffer); + void 
anv_fill_buffer_surface_state(struct anv_device *device, void *state, const struct anv_format *format, uint32_t offset, uint32_t range, @@ -1419,6 +1451,9 @@ void gen75_fill_buffer_surface_state(void *state, const struct anv_format *forma void gen8_fill_buffer_surface_state(void *state, const struct anv_format *format, uint32_t offset, uint32_t range, uint32_t stride); +void gen9_fill_buffer_surface_state(void *state, const struct anv_format *format, + uint32_t offset, uint32_t range, + uint32_t stride); struct anv_sampler { uint32_t state[4]; diff --git a/src/vulkan/gen8_cmd_buffer.c b/src/vulkan/gen8_cmd_buffer.c index a02d7bb2321..09315319001 100644 --- a/src/vulkan/gen8_cmd_buffer.c +++ b/src/vulkan/gen8_cmd_buffer.c @@ -30,9 +30,10 @@ #include "anv_private.h" #include "gen8_pack.h" +#include "gen9_pack.h" static void -gen8_cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer) +cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer) { static const uint32_t push_constant_opcodes[] = { [VK_SHADER_STAGE_VERTEX] = 21, @@ -52,7 +53,7 @@ gen8_cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer) if (state.offset == 0) continue; - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_CONSTANT_VS, + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), ._3DCommandSubOpcode = push_constant_opcodes[stage], .ConstantBody = { .PointerToConstantBuffer0 = { .offset = state.offset }, @@ -65,6 +66,7 @@ gen8_cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer) cmd_buffer->state.push_constants_dirty &= ~flushed; } +#if ANV_GEN == 8 static void emit_viewport_state(struct anv_cmd_buffer *cmd_buffer, uint32_t count, const VkViewport *viewports) @@ -79,7 +81,7 @@ emit_viewport_state(struct anv_cmd_buffer *cmd_buffer, /* The gen7 state struct has just the matrix and guardband fields, the * gen8 struct adds the min/max viewport fields. 
*/ - struct GEN8_SF_CLIP_VIEWPORT sf_clip_viewport = { + struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = { .ViewportMatrixElementm00 = vp->width / 2, .ViewportMatrixElementm11 = vp->height / 2, .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2, @@ -96,21 +98,21 @@ emit_viewport_state(struct anv_cmd_buffer *cmd_buffer, .YMaxViewPort = vp->originY + vp->height - 1, }; - struct GEN8_CC_VIEWPORT cc_viewport = { + struct GENX(CC_VIEWPORT) cc_viewport = { .MinimumDepth = vp->minDepth, .MaximumDepth = vp->maxDepth }; - GEN8_SF_CLIP_VIEWPORT_pack(NULL, sf_clip_state.map + i * 64, + GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64, &sf_clip_viewport); - GEN8_CC_VIEWPORT_pack(NULL, cc_state.map + i * 32, &cc_viewport); + GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 32, &cc_viewport); } anv_batch_emit(&cmd_buffer->batch, - GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_CC, + GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), .CCViewportPointer = cc_state.offset); anv_batch_emit(&cmd_buffer->batch, - GEN8_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP, + GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), .SFClipViewportPointer = sf_clip_state.offset); } @@ -133,9 +135,10 @@ gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer) }); } } +#endif static void -gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) +cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) { struct anv_pipeline *pipeline = cmd_buffer->state.pipeline; uint32_t *p; @@ -145,7 +148,10 @@ gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0); if (cmd_buffer->state.current_pipeline != _3D) { - anv_batch_emit(&cmd_buffer->batch, GEN8_PIPELINE_SELECT, + anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), +#if ANV_GEN >= 9 + .MaskBits = 3, +#endif .PipelineSelection = _3D); cmd_buffer->state.current_pipeline = _3D; } @@ -155,22 +161,22 @@ gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) const uint32_t num_dwords = 1 + num_buffers * 4; p = anv_batch_emitn(&cmd_buffer->batch, num_dwords, - GEN8_3DSTATE_VERTEX_BUFFERS); + GENX(3DSTATE_VERTEX_BUFFERS)); uint32_t vb, i = 0; for_each_bit(vb, vb_emit) { struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer; uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset; - struct GEN8_VERTEX_BUFFER_STATE state = { + struct GENX(VERTEX_BUFFER_STATE) state = { .VertexBufferIndex = vb, - .MemoryObjectControlState = GEN8_MOCS, + .MemoryObjectControlState = GENX(MOCS), .AddressModifyEnable = true, .BufferPitch = pipeline->binding_stride[vb], .BufferStartingAddress = { buffer->bo, buffer->offset + offset }, .BufferSize = buffer->size - offset }; - GEN8_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state); + GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state); i++; } } @@ -186,11 +192,23 @@ gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch); } - if (cmd_buffer->state.descriptors_dirty) - gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer); +#if ANV_GEN >= 9 + /* On SKL+ the new constants don't take effect until the next corresponding + * 3DSTATE_BINDING_TABLE_POINTER_* command is parsed so we need to ensure + * that is sent. As it is, we re-emit binding tables but we could hold on + * to the offset of the most recent binding table and only re-emit the + * 3DSTATE_BINDING_TABLE_POINTER_* command. 
+ */ cmd_buffer->state.descriptors_dirty |= + cmd_buffer->state.push_constants_dirty & + cmd_buffer->state.pipeline->active_stages; +#endif if (cmd_buffer->state.push_constants_dirty) - gen8_cmd_buffer_flush_push_constants(cmd_buffer); + cmd_buffer_flush_push_constants(cmd_buffer); + + if (cmd_buffer->state.descriptors_dirty) + gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer); if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) gen8_cmd_buffer_emit_viewport(cmd_buffer); @@ -200,12 +218,13 @@ gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE | ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) { - uint32_t sf_dw[GEN8_3DSTATE_SF_length]; - struct GEN8_3DSTATE_SF sf = { - GEN8_3DSTATE_SF_header, + uint32_t sf_dw[GENX(3DSTATE_SF_length)]; + struct GENX(3DSTATE_SF) sf = { + GENX(3DSTATE_SF_header), .LineWidth = cmd_buffer->state.dynamic.line_width, }; - GEN8_3DSTATE_SF_pack(NULL, sf_dw, &sf); + GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf); + /* FIXME: gen9.fs */ anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen8.sf); } @@ -214,9 +233,9 @@ gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f || cmd_buffer->state.dynamic.depth_bias.slope_scaled != 0.0f; - uint32_t raster_dw[GEN8_3DSTATE_RASTER_length]; - struct GEN8_3DSTATE_RASTER raster = { - GEN8_3DSTATE_RASTER_header, + uint32_t raster_dw[GENX(3DSTATE_RASTER_length)]; + struct GENX(3DSTATE_RASTER) raster = { + GENX(3DSTATE_RASTER_header), .GlobalDepthOffsetEnableSolid = enable_bias, .GlobalDepthOffsetEnableWireframe = enable_bias, .GlobalDepthOffsetEnablePoint = enable_bias, @@ -224,11 +243,17 @@ gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope_scaled, .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp }; - GEN8_3DSTATE_RASTER_pack(NULL, raster_dw, &raster); + GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster); anv_batch_emit_merge(&cmd_buffer->batch, raster_dw, pipeline->gen8.raster); } + /* Stencil reference values were moved from COLOR_CALC_STATE in gen8 to + * 3DSTATE_WM_DEPTH_STENCIL in gen9. That means the dirty bits get split + * across different state packets for gen8 and gen9. We handle that by + * using a big old #if switch here.
+ */ +#if ANV_GEN == 8 if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS | ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) { struct anv_state cc_state = @@ -280,10 +305,55 @@ gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw, pipeline->gen8.wm_depth_stencil); } +#else + if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) { + struct anv_state cc_state = + anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, + GEN9_COLOR_CALC_STATE_length, 64); + struct GEN9_COLOR_CALC_STATE cc = { + .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0], + .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1], + .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2], + .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3], + }; + GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc); + + anv_batch_emit(&cmd_buffer->batch, + GEN9_3DSTATE_CC_STATE_POINTERS, + .ColorCalcStatePointer = cc_state.offset, + .ColorCalcStatePointerValid = true); + } + + if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE | + ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK | + ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK | + ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) { + uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length]; + struct anv_dynamic_state *d = &cmd_buffer->state.dynamic; + struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = { + GEN9_3DSTATE_WM_DEPTH_STENCIL_header, + + .StencilBufferWriteEnable = d->stencil_write_mask.front != 0, + + .StencilTestMask = d->stencil_compare_mask.front & 0xff, + .StencilWriteMask = d->stencil_write_mask.front & 0xff, + + .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff, + .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff, + + .StencilReferenceValue = d->stencil_reference.front, + .BackfaceStencilReferenceValue = d->stencil_reference.back + }; + GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil); + + anv_batch_emit_merge(&cmd_buffer->batch, dwords, + pipeline->gen8.wm_depth_stencil); + } +#endif if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE | ANV_CMD_DIRTY_INDEX_BUFFER)) { - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_VF, + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF), .IndexedDrawCutIndexEnable = pipeline->primitive_restart, .CutIndex = cmd_buffer->state.restart_index, ); @@ -293,7 +363,7 @@ gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer) cmd_buffer->state.dirty = 0; } -void gen8_CmdDraw( +void genX(CmdDraw)( VkCmdBuffer cmdBuffer, uint32_t vertexCount, uint32_t instanceCount, @@ -302,9 +372,9 @@ void gen8_CmdDraw( { ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer); - gen8_cmd_buffer_flush_state(cmd_buffer); + cmd_buffer_flush_state(cmd_buffer); - anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE, + anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), .VertexAccessType = SEQUENTIAL, .VertexCountPerInstance = vertexCount, .StartVertexLocation = firstVertex, @@ -313,7 +383,7 @@ void gen8_CmdDraw( .BaseVertexLocation = 0); } -void gen8_CmdDrawIndexed( +void genX(CmdDrawIndexed)( VkCmdBuffer cmdBuffer, uint32_t indexCount, uint32_t instanceCount, @@ -323,9 +393,9 @@ void gen8_CmdDrawIndexed( { ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer); - gen8_cmd_buffer_flush_state(cmd_buffer); + cmd_buffer_flush_state(cmd_buffer); - anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE, + anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), .VertexAccessType 
= RANDOM, .VertexCountPerInstance = indexCount, .StartVertexLocation = firstIndex, @@ -338,7 +408,7 @@ static void emit_lrm(struct anv_batch *batch, uint32_t reg, struct anv_bo *bo, uint32_t offset) { - anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM, + anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), .RegisterAddress = reg, .MemoryAddress = { bo, offset }); } @@ -346,7 +416,7 @@ emit_lrm(struct anv_batch *batch, static void emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm) { - anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_IMM, + anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), .RegisterOffset = reg, .DataDWord = imm); } @@ -359,7 +429,7 @@ emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm) #define GEN7_3DPRIM_START_INSTANCE 0x243C #define GEN7_3DPRIM_BASE_VERTEX 0x2440 -void gen8_CmdDrawIndirect( +void genX(CmdDrawIndirect)( VkCmdBuffer cmdBuffer, VkBuffer _buffer, VkDeviceSize offset, @@ -371,7 +441,7 @@ void gen8_CmdDrawIndirect( struct anv_bo *bo = buffer->bo; uint32_t bo_offset = buffer->offset + offset; - gen8_cmd_buffer_flush_state(cmd_buffer); + cmd_buffer_flush_state(cmd_buffer); emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset); emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4); @@ -379,12 +449,12 @@ void gen8_CmdDrawIndirect( emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12); emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0); - anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE, + anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), .IndirectParameterEnable = true, .VertexAccessType = SEQUENTIAL); } -void gen8_CmdBindIndexBuffer( +void genX(CmdBindIndexBuffer)( VkCmdBuffer cmdBuffer, VkBuffer _buffer, VkDeviceSize offset, @@ -405,9 +475,9 @@ void gen8_CmdBindIndexBuffer( cmd_buffer->state.restart_index = restart_index_for_type[indexType]; - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_INDEX_BUFFER, + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), .IndexFormat = vk_to_gen_index_type[indexType], - .MemoryObjectControlState = GEN8_MOCS, + .MemoryObjectControlState = GENX(MOCS), .BufferStartingAddress = { buffer->bo, buffer->offset + offset }, .BufferSize = buffer->size - offset); @@ -415,7 +485,7 @@ void gen8_CmdBindIndexBuffer( } static VkResult -gen8_flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer) +flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer) { struct anv_device *device = cmd_buffer->device; struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; @@ -431,7 +501,7 @@ gen8_flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer) if (result != VK_SUCCESS) return result; - struct GEN8_INTERFACE_DESCRIPTOR_DATA desc = { + struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = { .KernelStartPointer = pipeline->cs_simd, .KernelStartPointerHigh = 0, .BindingTablePointer = surfaces.offset, @@ -441,13 +511,13 @@ gen8_flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer) .NumberofThreadsinGPGPUThreadGroup = 0 /* FIXME: Really? 
*/ }; - uint32_t size = GEN8_INTERFACE_DESCRIPTOR_DATA_length * sizeof(uint32_t); + uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t); struct anv_state state = anv_state_pool_alloc(&device->dynamic_state_pool, size, 64); - GEN8_INTERFACE_DESCRIPTOR_DATA_pack(NULL, state.map, &desc); + GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, state.map, &desc); - anv_batch_emit(&cmd_buffer->batch, GEN8_MEDIA_INTERFACE_DESCRIPTOR_LOAD, + anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), .InterfaceDescriptorTotalLength = size, .InterfaceDescriptorDataStartAddress = state.offset); @@ -455,7 +525,7 @@ gen8_flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer) } static void -gen8_cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer) +cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer) { struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; VkResult result; @@ -463,7 +533,10 @@ gen8_cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer) assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT); if (cmd_buffer->state.current_pipeline != GPGPU) { - anv_batch_emit(&cmd_buffer->batch, GEN8_PIPELINE_SELECT, + anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), +#if ANV_GEN >= 9 + .MaskBits = 3, +#endif .PipelineSelection = GPGPU); cmd_buffer->state.current_pipeline = GPGPU; } @@ -473,7 +546,7 @@ gen8_cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer) if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) || (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) { - result = gen8_flush_compute_descriptor_set(cmd_buffer); + result = flush_compute_descriptor_set(cmd_buffer); assert(result == VK_SUCCESS); cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE; } @@ -481,7 +554,7 @@ gen8_cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer) cmd_buffer->state.compute_dirty = 0; } -void gen8_CmdDrawIndexedIndirect( +void genX(CmdDrawIndexedIndirect)( VkCmdBuffer cmdBuffer, VkBuffer _buffer, VkDeviceSize offset, @@ -493,7 +566,7 @@ void gen8_CmdDrawIndexedIndirect( struct anv_bo *bo = buffer->bo; uint32_t bo_offset = buffer->offset + offset; - gen8_cmd_buffer_flush_state(cmd_buffer); + cmd_buffer_flush_state(cmd_buffer); emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset); emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4); @@ -501,12 +574,12 @@ void gen8_CmdDrawIndexedIndirect( emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12); emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16); - anv_batch_emit(&cmd_buffer->batch, GEN8_3DPRIMITIVE, + anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), .IndirectParameterEnable = true, .VertexAccessType = RANDOM); } -void gen8_CmdDispatch( +void genX(CmdDispatch)( VkCmdBuffer cmdBuffer, uint32_t x, uint32_t y, @@ -516,9 +589,9 @@ void gen8_CmdDispatch( struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data; - gen8_cmd_buffer_flush_compute_state(cmd_buffer); + cmd_buffer_flush_compute_state(cmd_buffer); - anv_batch_emit(&cmd_buffer->batch, GEN8_GPGPU_WALKER, + anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), .SIMDSize = prog_data->simd_size / 16, .ThreadDepthCounterMaximum = 0, .ThreadHeightCounterMaximum = 0, @@ -529,14 +602,14 @@ void gen8_CmdDispatch( .RightExecutionMask = pipeline->cs_right_mask, .BottomExecutionMask = 0xffffffff); - 
anv_batch_emit(&cmd_buffer->batch, GEN8_MEDIA_STATE_FLUSH); + anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH)); } #define GPGPU_DISPATCHDIMX 0x2500 #define GPGPU_DISPATCHDIMY 0x2504 #define GPGPU_DISPATCHDIMZ 0x2508 -void gen8_CmdDispatchIndirect( +void genX(CmdDispatchIndirect)( VkCmdBuffer cmdBuffer, VkBuffer _buffer, VkDeviceSize offset) @@ -548,13 +621,13 @@ void gen8_CmdDispatchIndirect( struct anv_bo *bo = buffer->bo; uint32_t bo_offset = buffer->offset + offset; - gen8_cmd_buffer_flush_compute_state(cmd_buffer); + cmd_buffer_flush_compute_state(cmd_buffer); emit_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMX, bo, bo_offset); emit_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4); emit_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8); - anv_batch_emit(&cmd_buffer->batch, GEN8_GPGPU_WALKER, + anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), .IndirectParameterEnable = true, .SIMDSize = prog_data->simd_size / 16, .ThreadDepthCounterMaximum = 0, @@ -563,11 +636,11 @@ void gen8_CmdDispatchIndirect( .RightExecutionMask = pipeline->cs_right_mask, .BottomExecutionMask = 0xffffffff); - anv_batch_emit(&cmd_buffer->batch, GEN8_MEDIA_STATE_FLUSH); + anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH)); } static void -gen8_cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) +cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) { const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer; const struct anv_image_view *iview = @@ -581,7 +654,7 @@ gen8_cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) /* Emit 3DSTATE_DEPTH_BUFFER */ if (has_depth) { - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DEPTH_BUFFER, + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), .SurfaceType = SURFTYPE_2D, .DepthWriteEnable = iview->format->depth_format, .StencilWriteEnable = has_stencil, @@ -597,7 +670,7 @@ gen8_cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) .LOD = 0, .Depth = 1 - 1, .MinimumArrayElement = 0, - .DepthBufferObjectControlState = GEN8_MOCS, + .DepthBufferObjectControlState = GENX(MOCS), .RenderTargetViewExtent = 1 - 1, .SurfaceQPitch = image->depth_surface.qpitch >> 2); } else { @@ -618,7 +691,7 @@ gen8_cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) * actual framebuffer's width and height, even when neither depth buffer * nor stencil buffer is present. */ - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DEPTH_BUFFER, + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), .SurfaceType = SURFTYPE_2D, .SurfaceFormat = D16_UNORM, .Width = fb->width - 1, @@ -628,9 +701,9 @@ gen8_cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) /* Emit 3DSTATE_STENCIL_BUFFER */ if (has_stencil) { - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STENCIL_BUFFER, + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), .StencilBufferEnable = true, - .StencilBufferObjectControlState = GEN8_MOCS, + .StencilBufferObjectControlState = GENX(MOCS), /* Stencil buffers have strange pitch. The PRM says: * @@ -645,28 +718,28 @@ gen8_cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) }, .SurfaceQPitch = image->stencil_surface.stride >> 2); } else { - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_STENCIL_BUFFER); + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER)); } /* Disable hierarchial depth buffers. 
*/ - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_HIER_DEPTH_BUFFER); + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER)); /* Clear the clear params. */ - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_CLEAR_PARAMS); + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS)); } void -gen8_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer, - struct anv_subpass *subpass) +genX(cmd_buffer_begin_subpass)(struct anv_cmd_buffer *cmd_buffer, + struct anv_subpass *subpass) { cmd_buffer->state.subpass = subpass; cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT; - gen8_cmd_buffer_emit_depth_stencil(cmd_buffer); + cmd_buffer_emit_depth_stencil(cmd_buffer); } -void gen8_CmdBeginRenderPass( +void genX(CmdBeginRenderPass)( VkCmdBuffer cmdBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkRenderPassContents contents) @@ -680,7 +753,7 @@ void gen8_CmdBeginRenderPass( const VkRect2D *render_area = &pRenderPassBegin->renderArea; - anv_batch_emit(&cmd_buffer->batch, GEN8_3DSTATE_DRAWING_RECTANGLE, + anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE), .ClippedDrawingRectangleYMin = render_area->offset.y, .ClippedDrawingRectangleXMin = render_area->offset.x, .ClippedDrawingRectangleYMax = @@ -693,10 +766,10 @@ void gen8_CmdBeginRenderPass( anv_cmd_buffer_clear_attachments(cmd_buffer, pass, pRenderPassBegin->pClearValues); - gen8_cmd_buffer_begin_subpass(cmd_buffer, pass->subpasses); + genX(cmd_buffer_begin_subpass)(cmd_buffer, pass->subpasses); } -void gen8_CmdNextSubpass( +void genX(CmdNextSubpass)( VkCmdBuffer cmdBuffer, VkRenderPassContents contents) { @@ -704,10 +777,10 @@ void gen8_CmdNextSubpass( assert(cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY); - gen8_cmd_buffer_begin_subpass(cmd_buffer, cmd_buffer->state.subpass + 1); + genX(cmd_buffer_begin_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1); } -void gen8_CmdEndRenderPass( +void genX(CmdEndRenderPass)( VkCmdBuffer cmdBuffer) { ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer); @@ -717,7 +790,7 @@ void gen8_CmdEndRenderPass( * Eventually, we should do flushing based on image format transitions * or something of that nature. 
*/ - anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL, + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), .PostSyncOperation = NoWrite, .RenderTargetCacheFlushEnable = true, .InstructionCacheInvalidateEnable = true, @@ -731,13 +804,13 @@ static void emit_ps_depth_count(struct anv_batch *batch, struct anv_bo *bo, uint32_t offset) { - anv_batch_emit(batch, GEN8_PIPE_CONTROL, + anv_batch_emit(batch, GENX(PIPE_CONTROL), .DestinationAddressType = DAT_PPGTT, .PostSyncOperation = WritePSDepthCount, .Address = { bo, offset }); /* FIXME: This is only lower 32 bits */ } -void gen8_CmdBeginQuery( +void genX(CmdBeginQuery)( VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, @@ -758,7 +831,7 @@ void gen8_CmdBeginQuery( } } -void gen8_CmdEndQuery( +void genX(CmdEndQuery)( VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot) @@ -780,7 +853,7 @@ void gen8_CmdEndQuery( #define TIMESTAMP 0x2358 -void gen8_CmdWriteTimestamp( +void genX(CmdWriteTimestamp)( VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, @@ -792,16 +865,16 @@ void gen8_CmdWriteTimestamp( switch (timestampType) { case VK_TIMESTAMP_TYPE_TOP: - anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM, + anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), .RegisterAddress = TIMESTAMP, .MemoryAddress = { bo, buffer->offset + destOffset }); - anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM, + anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), .RegisterAddress = TIMESTAMP + 4, .MemoryAddress = { bo, buffer->offset + destOffset + 4 }); break; case VK_TIMESTAMP_TYPE_BOTTOM: - anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL, + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), .DestinationAddressType = DAT_PPGTT, .PostSyncOperation = WriteTimestamp, .Address = /* FIXME: This is only lower 32 bits */ @@ -849,15 +922,15 @@ static void emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg, struct anv_bo *bo, uint32_t offset) { - anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM, + anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), .RegisterAddress = reg, .MemoryAddress = { bo, offset }); - anv_batch_emit(batch, GEN8_MI_LOAD_REGISTER_MEM, + anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), .RegisterAddress = reg + 4, .MemoryAddress = { bo, offset + 4 }); } -void gen8_CmdCopyQueryPoolResults( +void genX(CmdCopyQueryPoolResults)( VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, @@ -882,7 +955,7 @@ void gen8_CmdCopyQueryPoolResults( /* FIXME: If we're not waiting, should we just do this on the CPU? */ if (flags & VK_QUERY_RESULT_WAIT_BIT) - anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL, + anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), .CommandStreamerStallEnable = true, .StallAtPixelScoreboard = true); @@ -896,19 +969,19 @@ void gen8_CmdCopyQueryPoolResults( /* FIXME: We need to clamp the result for 32 bit. 
*/ - uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GEN8_MI_MATH); + uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH)); dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1); dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0); dw[3] = alu(OPCODE_SUB, 0, 0); dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU); - anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM, + anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), .RegisterAddress = CS_GPR(2), /* FIXME: This is only lower 32 bits */ .MemoryAddress = { buffer->bo, dst_offset }); if (flags & VK_QUERY_RESULT_64_BIT) - anv_batch_emit(&cmd_buffer->batch, GEN8_MI_STORE_REGISTER_MEM, + anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), .RegisterAddress = CS_GPR(2) + 4, /* FIXME: This is only lower 32 bits */ .MemoryAddress = { buffer->bo, dst_offset + 4 }); diff --git a/src/vulkan/gen8_pipeline.c b/src/vulkan/gen8_pipeline.c index 6e2e65d6344..0038bca01b4 100644 --- a/src/vulkan/gen8_pipeline.c +++ b/src/vulkan/gen8_pipeline.c @@ -30,6 +30,7 @@ #include "anv_private.h" #include "gen8_pack.h" +#include "gen9_pack.h" static void emit_vertex_input(struct anv_pipeline *pipeline, @@ -38,9 +39,11 @@ emit_vertex_input(struct anv_pipeline *pipeline, const uint32_t num_dwords = 1 + info->attributeCount * 2; uint32_t *p; + static_assert(ANV_GEN >= 8, "should be compiling this for gen < 8"); + if (info->attributeCount > 0) { p = anv_batch_emitn(&pipeline->batch, num_dwords, - GEN8_3DSTATE_VERTEX_ELEMENTS); + GENX(3DSTATE_VERTEX_ELEMENTS)); } for (uint32_t i = 0; i < info->attributeCount; i++) { @@ -48,7 +51,7 @@ emit_vertex_input(struct anv_pipeline *pipeline, &info->pVertexAttributeDescriptions[i]; const struct anv_format *format = anv_format_for_vk_format(desc->format); - struct GEN8_VERTEX_ELEMENT_STATE element = { + struct GENX(VERTEX_ELEMENT_STATE) element = { .VertexBufferIndex = desc->binding, .Valid = true, .SourceElementFormat = format->surface_format, @@ -59,9 +62,9 @@ emit_vertex_input(struct anv_pipeline *pipeline, .Component2Control = format->num_channels >= 3 ? VFCOMP_STORE_SRC : VFCOMP_STORE_0, .Component3Control = format->num_channels >= 4 ? 
VFCOMP_STORE_SRC : VFCOMP_STORE_1_FP }; - GEN8_VERTEX_ELEMENT_STATE_pack(NULL, &p[1 + i * 2], &element); + GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + i * 2], &element); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_INSTANCING, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), .InstancingEnable = pipeline->instancing_enable[desc->binding], .VertexElementIndex = i, /* Vulkan so far doesn't have an instance divisor, so @@ -69,7 +72,7 @@ emit_vertex_input(struct anv_pipeline *pipeline, .InstanceDataStepRate = 1); } - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_SGVS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS), .VertexIDEnable = pipeline->vs_prog_data.uses_vertexid, .VertexIDComponentNumber = 2, .VertexIDElementOffset = info->bindingCount, @@ -83,7 +86,7 @@ emit_ia_state(struct anv_pipeline *pipeline, const VkPipelineInputAssemblyStateCreateInfo *info, const struct anv_graphics_pipeline_create_info *extra) { - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_TOPOLOGY, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), .PrimitiveTopologyType = pipeline->topology); } @@ -110,8 +113,8 @@ emit_rs_state(struct anv_pipeline *pipeline, [VK_FRONT_FACE_CW] = Clockwise }; - struct GEN8_3DSTATE_SF sf = { - GEN8_3DSTATE_SF_header, + struct GENX(3DSTATE_SF) sf = { + GENX(3DSTATE_SF_header), .ViewportTransformEnable = !(extra && extra->disable_viewport), .TriangleStripListProvokingVertexSelect = 0, .LineStripListProvokingVertexSelect = 0, @@ -122,19 +125,25 @@ emit_rs_state(struct anv_pipeline *pipeline, /* FINISHME: VkBool32 rasterizerDiscardEnable; */ - GEN8_3DSTATE_SF_pack(NULL, pipeline->gen8.sf, &sf); + GENX(3DSTATE_SF_pack)(NULL, pipeline->gen8.sf, &sf); - struct GEN8_3DSTATE_RASTER raster = { - GEN8_3DSTATE_RASTER_header, + struct GENX(3DSTATE_RASTER) raster = { + GENX(3DSTATE_RASTER_header), .FrontWinding = vk_to_gen_front_face[info->frontFace], .CullMode = vk_to_gen_cullmode[info->cullMode], .FrontFaceFillMode = vk_to_gen_fillmode[info->fillMode], .BackFaceFillMode = vk_to_gen_fillmode[info->fillMode], .ScissorRectangleEnable = !(extra && extra->disable_scissor), +#if ANV_GEN == 8 .ViewportZClipTestEnable = info->depthClipEnable +#else + /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */ + .ViewportZFarClipTestEnable = info->depthClipEnable, + .ViewportZNearClipTestEnable = info->depthClipEnable, +#endif }; - GEN8_3DSTATE_RASTER_pack(NULL, pipeline->gen8.raster, &raster); + GENX(3DSTATE_RASTER_pack)(NULL, pipeline->gen8.raster, &raster); } static void @@ -192,11 +201,11 @@ emit_cb_state(struct anv_pipeline *pipeline, [VK_BLEND_OP_MAX] = BLENDFUNCTION_MAX, }; - uint32_t num_dwords = GEN8_BLEND_STATE_length; + uint32_t num_dwords = GENX(BLEND_STATE_length); pipeline->blend_state = anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64); - struct GEN8_BLEND_STATE blend_state = { + struct GENX(BLEND_STATE) blend_state = { .AlphaToCoverageEnable = info->alphaToCoverageEnable, .AlphaToOneEnable = info->alphaToOneEnable, }; @@ -210,7 +219,7 @@ emit_cb_state(struct anv_pipeline *pipeline, blend_state.IndependentAlphaBlendEnable = true; } - blend_state.Entry[i] = (struct GEN8_BLEND_STATE_ENTRY) { + blend_state.Entry[i] = (struct GENX(BLEND_STATE_ENTRY)) { .LogicOpEnable = info->logicOpEnable, .LogicOpFunction = vk_to_gen_logic_op[info->logicOp], .ColorBufferBlendEnable = a->blendEnable, @@ -248,9 +257,9 @@ emit_cb_state(struct anv_pipeline *pipeline, } } - GEN8_BLEND_STATE_pack(NULL, pipeline->blend_state.map, 
&blend_state); + GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_BLEND_STATE_POINTERS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), .BlendStatePointer = pipeline->blend_state.offset, .BlendStatePointerValid = true); } @@ -285,6 +294,7 @@ emit_ds_state(struct anv_pipeline *pipeline, /* We're going to OR this together with the dynamic state. We need * to make sure it's initialized to something useful. */ + /* FIXME: gen9 wm_depth_stencil */ memset(pipeline->gen8.wm_depth_stencil, 0, sizeof(pipeline->gen8.wm_depth_stencil)); return; @@ -292,7 +302,7 @@ emit_ds_state(struct anv_pipeline *pipeline, /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */ - struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = { + struct GENX(3DSTATE_WM_DEPTH_STENCIL) wm_depth_stencil = { .DepthTestEnable = info->depthTestEnable, .DepthBufferWriteEnable = info->depthWriteEnable, .DepthTestFunction = vk_to_gen_compare_op[info->depthCompareOp], @@ -309,11 +319,11 @@ emit_ds_state(struct anv_pipeline *pipeline, .BackfaceStencilTestFunction = vk_to_gen_compare_op[info->back.stencilCompareOp], }; - GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, pipeline->gen8.wm_depth_stencil, &wm_depth_stencil); + GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, pipeline->gen8.wm_depth_stencil, &wm_depth_stencil); } VkResult -gen8_graphics_pipeline_create( +genX(graphics_pipeline_create)( VkDevice _device, const VkGraphicsPipelineCreateInfo* pCreateInfo, const struct anv_graphics_pipeline_create_info *extra, @@ -353,34 +363,34 @@ gen8_graphics_pipeline_create( emit_ds_state(pipeline, pCreateInfo->pDepthStencilState); emit_cb_state(pipeline, pCreateInfo->pColorBlendState); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VF_STATISTICS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_STATISTICS), .StatisticsEnable = true); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_HS, .Enable = false); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_TE, .TEEnable = false); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_DS, .FunctionEnable = false); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_STREAMOUT, .SOFunctionEnable = false); + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), .Enable = false); + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), .TEEnable = false); + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), .FunctionEnable = false); + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_STREAMOUT), .SOFunctionEnable = false); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), .ConstantBufferOffset = 0, .ConstantBufferSize = 4); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_GS), .ConstantBufferOffset = 4, .ConstantBufferSize = 4); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), .ConstantBufferOffset = 8, .ConstantBufferSize = 4); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM_CHROMAKEY, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM_CHROMAKEY), .ChromaKeyKillEnable = false); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_AA_LINE_PARAMETERS); + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_AA_LINE_PARAMETERS)); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_CLIP, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), .ClipEnable = true, 
.ViewportXYClipTestEnable = !(extra && extra->disable_viewport), .MinimumPointWidth = 0.125, .MaximumPointWidth = 255.875); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_WM, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), .StatisticsEnable = true, .LineEndCapAntialiasingRegionWidth = _05pixels, .LineAntialiasingRegionWidth = _10pixels, @@ -394,30 +404,30 @@ gen8_graphics_pipeline_create( uint32_t log2_samples = __builtin_ffs(samples) - 1; bool enable_sampling = samples > 1 ? true : false; - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_MULTISAMPLE, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), .PixelPositionOffsetEnable = enable_sampling, .PixelLocation = CENTER, .NumberofMultisamples = log2_samples); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SAMPLE_MASK, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), .SampleMask = 0xffff); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_VS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_VS), .VSURBStartingAddress = pipeline->urb.vs_start, .VSURBEntryAllocationSize = pipeline->urb.vs_size - 1, .VSNumberofURBEntries = pipeline->urb.nr_vs_entries); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_GS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_GS), .GSURBStartingAddress = pipeline->urb.gs_start, .GSURBEntryAllocationSize = pipeline->urb.gs_size - 1, .GSNumberofURBEntries = pipeline->urb.nr_gs_entries); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_HS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_HS), .HSURBStartingAddress = pipeline->urb.vs_start, .HSURBEntryAllocationSize = 0, .HSNumberofURBEntries = 0); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_URB_DS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_URB_DS), .DSURBStartingAddress = pipeline->urb.vs_start, .DSURBEntryAllocationSize = 0, .DSNumberofURBEntries = 0); @@ -427,9 +437,9 @@ gen8_graphics_pipeline_create( length = (gs_prog_data->base.vue_map.num_slots + 1) / 2 - offset; if (pipeline->gs_vec4 == NO_KERNEL) - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, .Enable = false); + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), .Enable = false); else - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_GS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), .SingleProgramFlow = false, .KernelStartPointer = pipeline->gs_vec4, .VectorMaskEnable = Dmask, @@ -475,14 +485,14 @@ gen8_graphics_pipeline_create( length = (vue_prog_data->vue_map.num_slots + 1) / 2 - offset; if (pipeline->vs_simd8 == NO_KERNEL || (extra && extra->disable_vs)) - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), .FunctionEnable = false, /* Even if VS is disabled, SBE still gets the amount of * vertex data to read from this field. 
*/ .VertexURBEntryOutputReadOffset = offset, .VertexURBEntryOutputLength = length); else - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_VS, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), .KernelStartPointer = pipeline->vs_simd8, .SingleVertexDispatch = Multiple, .VectorMaskEnable = Dmask, @@ -525,8 +535,8 @@ gen8_graphics_pipeline_create( else fs_input_map = &gs_prog_data->base.vue_map; - struct GEN8_3DSTATE_SBE_SWIZ swiz = { - GEN8_3DSTATE_SBE_SWIZ_header, + struct GENX(3DSTATE_SBE_SWIZ) swiz = { + GENX(3DSTATE_SBE_SWIZ_header), }; int max_source_attr = 0; @@ -548,20 +558,59 @@ gen8_graphics_pipeline_create( swiz.Attribute[input_index].SourceAttribute = source_attr; } - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_SBE, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), .AttributeSwizzleEnable = true, .ForceVertexURBEntryReadLength = false, .ForceVertexURBEntryReadOffset = false, .VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2), .PointSpriteTextureCoordinateOrigin = UPPERLEFT, .NumberofSFOutputAttributes = - wm_prog_data->num_varying_inputs); + wm_prog_data->num_varying_inputs, + +#if ANV_GEN >= 9 + .Attribute0ActiveComponentFormat = ACF_XYZW, + .Attribute1ActiveComponentFormat = ACF_XYZW, + .Attribute2ActiveComponentFormat = ACF_XYZW, + .Attribute3ActiveComponentFormat = ACF_XYZW, + .Attribute4ActiveComponentFormat = ACF_XYZW, + .Attribute5ActiveComponentFormat = ACF_XYZW, + .Attribute6ActiveComponentFormat = ACF_XYZW, + .Attribute7ActiveComponentFormat = ACF_XYZW, + .Attribute8ActiveComponentFormat = ACF_XYZW, + .Attribute9ActiveComponentFormat = ACF_XYZW, + .Attribute10ActiveComponentFormat = ACF_XYZW, + .Attribute11ActiveComponentFormat = ACF_XYZW, + .Attribute12ActiveComponentFormat = ACF_XYZW, + .Attribute13ActiveComponentFormat = ACF_XYZW, + .Attribute14ActiveComponentFormat = ACF_XYZW, + .Attribute15ActiveComponentFormat = ACF_XYZW, + /* wow, much field, very attribute */ + .Attribute16ActiveComponentFormat = ACF_XYZW, + .Attribute17ActiveComponentFormat = ACF_XYZW, + .Attribute18ActiveComponentFormat = ACF_XYZW, + .Attribute19ActiveComponentFormat = ACF_XYZW, + .Attribute20ActiveComponentFormat = ACF_XYZW, + .Attribute21ActiveComponentFormat = ACF_XYZW, + .Attribute22ActiveComponentFormat = ACF_XYZW, + .Attribute23ActiveComponentFormat = ACF_XYZW, + .Attribute24ActiveComponentFormat = ACF_XYZW, + .Attribute25ActiveComponentFormat = ACF_XYZW, + .Attribute26ActiveComponentFormat = ACF_XYZW, + .Attribute27ActiveComponentFormat = ACF_XYZW, + .Attribute28ActiveComponentFormat = ACF_XYZW, + .Attribute29ActiveComponentFormat = ACF_XYZW, + .Attribute28ActiveComponentFormat = ACF_XYZW, + .Attribute29ActiveComponentFormat = ACF_XYZW, + .Attribute30ActiveComponentFormat = ACF_XYZW, +#endif + ); uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch, - GEN8_3DSTATE_SBE_SWIZ_length); - GEN8_3DSTATE_SBE_SWIZ_pack(&pipeline->batch, dw, &swiz); + GENX(3DSTATE_SBE_SWIZ_length)); + GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz); - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS, + const int num_thread_bias = ANV_GEN == 8 ? 
2 : 1; + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), .KernelStartPointer0 = pipeline->ps_ksp0, .SingleProgramFlow = false, @@ -571,7 +620,7 @@ gen8_graphics_pipeline_create( .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_FRAGMENT], .PerThreadScratchSpace = ffs(wm_prog_data->base.total_scratch / 2048), - .MaximumNumberofThreadsPerPSD = 64 - 2, + .MaximumNumberofThreadsPerPSD = 64 - num_thread_bias, .PositionXYOffsetSelect = wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE: POSOFFSET_NONE, .PushConstantEnable = wm_prog_data->base.nr_params > 0, @@ -587,20 +636,25 @@ gen8_graphics_pipeline_create( .KernelStartPointer2 = pipeline->ps_ksp2); bool per_sample_ps = false; - anv_batch_emit(&pipeline->batch, GEN8_3DSTATE_PS_EXTRA, + anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), .PixelShaderValid = true, .PixelShaderKillsPixel = wm_prog_data->uses_kill, .PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode, .AttributeEnable = wm_prog_data->num_varying_inputs > 0, .oMaskPresenttoRenderTarget = wm_prog_data->uses_omask, - .PixelShaderIsPerSample = per_sample_ps); + .PixelShaderIsPerSample = per_sample_ps, +#if ANV_GEN >= 9 + .PixelShaderPullsBary = wm_prog_data->pulls_bary, + .InputCoverageMaskState = ICMS_NONE +#endif + ); *pPipeline = anv_pipeline_to_handle(pipeline); return VK_SUCCESS; } -VkResult gen8_compute_pipeline_create( +VkResult genX(compute_pipeline_create)( VkDevice _device, const VkComputePipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline) @@ -654,7 +708,7 @@ VkResult gen8_compute_pipeline_create( const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data; - anv_batch_emit(&pipeline->batch, GEN8_MEDIA_VFE_STATE, + anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), .ScratchSpaceBasePointer = pipeline->scratch_start[VK_SHADER_STAGE_COMPUTE], .PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048), .ScratchSpaceBasePointerHigh = 0, @@ -663,7 +717,9 @@ VkResult gen8_compute_pipeline_create( .MaximumNumberofThreads = device->info.max_cs_threads - 1, .NumberofURBEntries = 2, .ResetGatewayTimer = true, +#if ANV_GEN == 8 .BypassGatewayControl = true, +#endif .URBEntryAllocationSize = 2, .CURBEAllocationSize = 0); diff --git a/src/vulkan/gen8_state.c b/src/vulkan/gen8_state.c index 4e441797523..c7dd6b3f836 100644 --- a/src/vulkan/gen8_state.c +++ b/src/vulkan/gen8_state.c @@ -30,14 +30,15 @@ #include "anv_private.h" #include "gen8_pack.h" +#include "gen9_pack.h" void -gen8_fill_buffer_surface_state(void *state, const struct anv_format *format, - uint32_t offset, uint32_t range, uint32_t stride) +genX(fill_buffer_surface_state)(void *state, const struct anv_format *format, + uint32_t offset, uint32_t range, uint32_t stride) { uint32_t num_elements = range / stride; - struct GEN8_RENDER_SURFACE_STATE surface_state = { + struct GENX(RENDER_SURFACE_STATE) surface_state = { .SurfaceType = SURFTYPE_BUFFER, .SurfaceArray = false, .SurfaceFormat = format->surface_format, @@ -46,7 +47,7 @@ gen8_fill_buffer_surface_state(void *state, const struct anv_format *format, .TileMode = LINEAR, .SamplerL2BypassModeDisable = true, .RenderCacheReadWriteMode = WriteOnlyCache, - .MemoryObjectControlState = GEN8_MOCS, + .MemoryObjectControlState = GENX(MOCS), .Height = ((num_elements - 1) >> 7) & 0x3fff, .Width = (num_elements - 1) & 0x7f, .Depth = ((num_elements - 1) >> 21) & 0x3f, @@ -60,7 +61,7 @@ gen8_fill_buffer_surface_state(void *state, const struct anv_format *format, .SurfaceBaseAddress = { NULL, offset }, }; - 
GEN8_RENDER_SURFACE_STATE_pack(NULL, state, &surface_state); + GENX(RENDER_SURFACE_STATE_pack)(NULL, state, &surface_state); } static const uint8_t anv_halign[] = { @@ -76,8 +77,8 @@ static const uint8_t anv_valign[] = { }; static struct anv_state -gen8_alloc_surface_state(struct anv_device *device, - struct anv_cmd_buffer *cmd_buffer) +alloc_surface_state(struct anv_device *device, + struct anv_cmd_buffer *cmd_buffer) { if (cmd_buffer) { return anv_cmd_buffer_alloc_surface_state(cmd_buffer); @@ -87,10 +88,10 @@ gen8_alloc_surface_state(struct anv_device *device, } void -gen8_image_view_init(struct anv_image_view *iview, - struct anv_device *device, - const VkImageViewCreateInfo* pCreateInfo, - struct anv_cmd_buffer *cmd_buffer) +genX(image_view_init)(struct anv_image_view *iview, + struct anv_device *device, + const VkImageViewCreateInfo* pCreateInfo, + struct anv_cmd_buffer *cmd_buffer) { ANV_FROM_HANDLE(anv_image, image, pCreateInfo->image); @@ -173,7 +174,7 @@ gen8_image_view_init(struct anv_image_view *iview, [ISL_TILING_W] = WMAJOR, }; - struct GEN8_RENDER_SURFACE_STATE surface_state = { + struct GENX(RENDER_SURFACE_STATE) surface_state = { .SurfaceType = image->surface_type, .SurfaceArray = image->array_size > 1, .SurfaceFormat = format_info->surface_format, @@ -184,7 +185,7 @@ gen8_image_view_init(struct anv_image_view *iview, .VerticalLineStrideOffset = 0, .SamplerL2BypassModeDisable = true, .RenderCacheReadWriteMode = WriteOnlyCache, - .MemoryObjectControlState = GEN8_MOCS, + .MemoryObjectControlState = GENX(MOCS), /* The driver sets BaseMipLevel in SAMPLER_STATE, not here in * RENDER_SURFACE_STATE. The Broadwell PRM says "it is illegal to have @@ -221,7 +222,7 @@ gen8_image_view_init(struct anv_image_view *iview, if (image->needs_nonrt_surface_state) { iview->nonrt_surface_state = - gen8_alloc_surface_state(device, cmd_buffer); + alloc_surface_state(device, cmd_buffer); /* For non render target surfaces, the hardware interprets field * MIPCount/LOD as MIPCount. The range of levels accessible by the @@ -230,13 +231,13 @@ gen8_image_view_init(struct anv_image_view *iview, surface_state.SurfaceMinLOD = range->baseMipLevel; surface_state.MIPCountLOD = range->mipLevels - 1; - GEN8_RENDER_SURFACE_STATE_pack(NULL, iview->nonrt_surface_state.map, - &surface_state); + GENX(RENDER_SURFACE_STATE_pack)(NULL, iview->nonrt_surface_state.map, + &surface_state); } if (image->needs_color_rt_surface_state) { iview->color_rt_surface_state = - gen8_alloc_surface_state(device, cmd_buffer); + alloc_surface_state(device, cmd_buffer); /* For render target surfaces, the hardware interprets field * MIPCount/LOD as LOD. 
The Broadwell PRM says: @@ -247,12 +248,12 @@ gen8_image_view_init(struct anv_image_view *iview, surface_state.MIPCountLOD = range->baseMipLevel; surface_state.SurfaceMinLOD = 0; - GEN8_RENDER_SURFACE_STATE_pack(NULL, iview->color_rt_surface_state.map, - &surface_state); + GENX(RENDER_SURFACE_STATE_pack)(NULL, iview->color_rt_surface_state.map, + &surface_state); } } -VkResult gen8_CreateSampler( +VkResult genX(CreateSampler)( VkDevice _device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler) @@ -308,11 +309,13 @@ VkResult gen8_CreateSampler( max_anisotropy = RATIO21; } - struct GEN8_SAMPLER_STATE sampler_state = { + struct GENX(SAMPLER_STATE) sampler_state = { .SamplerDisable = false, .TextureBorderColorMode = DX10OGL, .LODPreClampMode = 0, +#if ANV_GEN == 8 .BaseMipLevel = 0.0, +#endif .MipModeFilter = vk_to_gen_mipmap_mode[pCreateInfo->mipMode], .MagModeFilter = mag_filter, .MinModeFilter = min_filter, @@ -345,7 +348,7 @@ VkResult gen8_CreateSampler( .TCZAddressControlMode = vk_to_gen_tex_address[pCreateInfo->addressModeW], }; - GEN8_SAMPLER_STATE_pack(NULL, sampler->state, &sampler_state); + GENX(SAMPLER_STATE_pack)(NULL, sampler->state, &sampler_state); *pSampler = anv_sampler_to_handle(sampler); diff --git a/src/vulkan/genX_cmd_buffer.c b/src/vulkan/genX_cmd_buffer.c index 66f7480a5e5..cba0515161a 100644 --- a/src/vulkan/genX_cmd_buffer.c +++ b/src/vulkan/genX_cmd_buffer.c @@ -26,7 +26,9 @@ #include "anv_private.h" -#if (ANV_GEN == 8) +#if (ANV_GEN == 9) +# include "gen9_pack.h" +#elif (ANV_GEN == 8) # include "gen8_pack.h" #elif (ANV_IS_HASWELL) # include "gen75_pack.h"