diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index e1d50163444..c89a6f139ba 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -524,6 +524,14 @@ radv_pipeline_compute_spi_color_formats(struct radv_pipeline *pipeline,
 		col_format |= cf << (4 * i);
 	}
 
+	if (!col_format && blend->need_src_alpha & (1 << 0)) {
+		/* When a subpass doesn't have any color attachments, write the
+		 * alpha channel of MRT0 when alpha coverage is enabled because
+		 * the depth attachment needs it.
+		 */
+		col_format |= V_028714_SPI_SHADER_32_AR;
+	}
+
 	/* If the i-th target format is set, all previous target formats must
 	 * be non-zero to avoid hangs.
 	 */
@@ -681,13 +689,15 @@ radv_pipeline_init_blend_state(struct radv_pipeline *pipeline,
 	else
 		blend.cb_color_control |= S_028808_ROP3(V_028808_ROP3_COPY);
 
-	blend.db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(2) |
-		S_028B70_ALPHA_TO_MASK_OFFSET1(2) |
-		S_028B70_ALPHA_TO_MASK_OFFSET2(2) |
-		S_028B70_ALPHA_TO_MASK_OFFSET3(2);
+	blend.db_alpha_to_mask = S_028B70_ALPHA_TO_MASK_OFFSET0(3) |
+		S_028B70_ALPHA_TO_MASK_OFFSET1(1) |
+		S_028B70_ALPHA_TO_MASK_OFFSET2(0) |
+		S_028B70_ALPHA_TO_MASK_OFFSET3(2) |
+		S_028B70_OFFSET_ROUND(1);
 
 	if (vkms && vkms->alphaToCoverageEnable) {
 		blend.db_alpha_to_mask |= S_028B70_ALPHA_TO_MASK_ENABLE(1);
+		blend.need_src_alpha |= 0x1;
 	}
 
 	blend.cb_target_mask = 0;
@@ -965,11 +975,11 @@ radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
 	};
 
 	if (pCreateInfo->pDepthStencilState &&
-	    subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
+	    subpass->depth_stencil_attachment) {
 		const VkPipelineDepthStencilStateCreateInfo *vkds =
 			pCreateInfo->pDepthStencilState;
 		struct radv_render_pass_attachment *attachment =
-			pass->attachments + subpass->depth_stencil_attachment.attachment;
+			pass->attachments + subpass->depth_stencil_attachment->attachment;
 		bool has_stencil = vk_format_is_stencil(attachment->format);
 		struct radv_dsa_order_invariance order_invariance[2];
 		struct radv_shader_variant *ps =
@@ -1234,25 +1244,6 @@ si_conv_prim_to_gs_out(enum VkPrimitiveTopology topology)
 	}
 }
 
-static unsigned si_map_swizzle(unsigned swizzle)
-{
-	switch (swizzle) {
-	case VK_SWIZZLE_Y:
-		return V_008F0C_SQ_SEL_Y;
-	case VK_SWIZZLE_Z:
-		return V_008F0C_SQ_SEL_Z;
-	case VK_SWIZZLE_W:
-		return V_008F0C_SQ_SEL_W;
-	case VK_SWIZZLE_0:
-		return V_008F0C_SQ_SEL_0;
-	case VK_SWIZZLE_1:
-		return V_008F0C_SQ_SEL_1;
-	default: /* VK_SWIZZLE_X */
-		return V_008F0C_SQ_SEL_X;
-	}
-}
-
-
 static unsigned radv_dynamic_state_mask(VkDynamicState state)
 {
 	switch(state) {
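Note on the first hunk above: with zero color attachments, col_format would normally stay 0, and alpha-to-coverage would then have no alpha source to build the coverage mask from. A standalone restatement of that decision, as a sketch only (the function name and the 0x3 encoding of V_028714_SPI_SHADER_32_AR are assumptions, not driver API):

    #include <stdbool.h>
    #include <stdint.h>

    #define SPI_SHADER_ZERO   0x0 /* assumed hw encodings, for illustration */
    #define SPI_SHADER_32_AR  0x3

    /* Mirror of the hunk's logic: if no MRT exports a color format but
     * alpha-to-coverage consumes source alpha, export R+A on MRT0 so the
     * coverage mask has something to read. */
    static uint32_t pick_mrt0_col_format(uint32_t col_format,
                                         bool need_src_alpha_mrt0)
    {
        if (!col_format && need_src_alpha_mrt0)
            return SPI_SHADER_32_AR;
        return col_format;
    }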
@@ -1374,15 +1365,7 @@ radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
 	 * disabled or if the subpass of the render pass the pipeline is
 	 * created against does not use any color attachments.
 	 */
-	bool uses_color_att = false;
-	for (unsigned i = 0; i < subpass->color_count; ++i) {
-		if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
-			uses_color_att = true;
-			break;
-		}
-	}
-
-	if (uses_color_att && states & RADV_DYNAMIC_BLEND_CONSTANTS) {
+	if (subpass->has_color_att && states & RADV_DYNAMIC_BLEND_CONSTANTS) {
 		assert(pCreateInfo->pColorBlendState);
 		typed_memcpy(dynamic->blend_constants,
 			     pCreateInfo->pColorBlendState->blendConstants, 4);
@@ -1400,8 +1383,7 @@ radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
 	 * disabled or if the subpass of the render pass the pipeline is created
 	 * against does not use a depth/stencil attachment.
 	 */
-	if (needed_states &&
-	    subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
+	if (needed_states && subpass->depth_stencil_attachment) {
 		assert(pCreateInfo->pDepthStencilState);
 
 		if (states & RADV_DYNAMIC_DEPTH_BOUNDS) {
@@ -1435,11 +1417,13 @@ radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
 	const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
 			vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
 
-	if (states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
+	if (needed_states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
 		dynamic->discard_rectangle.count = discard_rectangle_info->discardRectangleCount;
-		typed_memcpy(dynamic->discard_rectangle.rectangles,
-		             discard_rectangle_info->pDiscardRectangles,
-		             discard_rectangle_info->discardRectangleCount);
+		if (states & RADV_DYNAMIC_DISCARD_RECTANGLE) {
+			typed_memcpy(dynamic->discard_rectangle.rectangles,
+			             discard_rectangle_info->pDiscardRectangles,
+			             discard_rectangle_info->discardRectangleCount);
+		}
 	}
 
 	pipeline->dynamic_state.mask = states;
@@ -1574,11 +1558,11 @@ calculate_gs_ring_sizes(struct radv_pipeline *pipeline, const struct radv_gs_sta
 	unsigned num_se = device->physical_device->rad_info.max_se;
 	unsigned wave_size = 64;
 	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
-	/* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
-	 * On VI+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
+	/* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
+	 * On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
 	 */
 	unsigned gs_vertex_reuse =
-		(device->physical_device->rad_info.chip_class >= VI ? 32 : 16) * num_se;
+		(device->physical_device->rad_info.chip_class >= GFX8 ? 32 : 16) * num_se;
 	unsigned alignment = 256 * num_se;
 	/* The maximum size is 63.999 MB per SE. */
 	unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
@@ -1597,7 +1581,7 @@ calculate_gs_ring_sizes(struct radv_pipeline *pipeline, const struct radv_gs_sta
 	esgs_ring_size = align(esgs_ring_size, alignment);
 	gsvs_ring_size = align(gsvs_ring_size, alignment);
 
-	if (pipeline->device->physical_device->rad_info.chip_class <= VI)
+	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
 		pipeline->graphics.esgs_ring_size = CLAMP(esgs_ring_size,
 							  min_esgs_ring_size, max_size);
 
 	pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
@@ -1659,7 +1643,7 @@ calculate_tess_state(struct radv_pipeline *pipeline,
 
 	lds_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.lds_size;
 
-	if (pipeline->device->physical_device->rad_info.chip_class >= CIK) {
+	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
 		assert(lds_size <= 65536);
 		lds_size = align(lds_size, 512) / 512;
 	} else {
@@ -1705,11 +1689,11 @@ calculate_tess_state(struct radv_pipeline *pipeline,
 	}
 
 	bool ccw = tes->info.tes.ccw;
-	const VkPipelineTessellationDomainOriginStateCreateInfoKHR *domain_origin_state =
+	const VkPipelineTessellationDomainOriginStateCreateInfo *domain_origin_state =
 	              vk_find_struct_const(pCreateInfo->pTessellationState,
-	                                   PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR);
+	                                   PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO);
 
-	if (domain_origin_state && domain_origin_state->domainOrigin != VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR)
+	if (domain_origin_state && domain_origin_state->domainOrigin != VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT)
 		ccw = !ccw;
 
 	if (tes->info.tes.point_mode)
@@ -1814,6 +1798,10 @@ radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders)
 			nir_lower_io_arrays_to_elements(ordered_shaders[i],
 							ordered_shaders[i - 1]);
 
+			if (nir_link_opt_varyings(ordered_shaders[i],
+						  ordered_shaders[i - 1]))
+				radv_optimize_nir(ordered_shaders[i - 1], false, false);
+
 			nir_remove_dead_variables(ordered_shaders[i],
 						  nir_var_shader_out);
 			nir_remove_dead_variables(ordered_shaders[i - 1],
@@ -1841,6 +1829,20 @@ radv_link_shaders(struct radv_pipeline *pipeline, nir_shader **shaders)
 		}
 	}
 }
 
+static uint32_t
+radv_get_attrib_stride(const VkPipelineVertexInputStateCreateInfo *input_state,
+		       uint32_t attrib_binding)
+{
+	for (uint32_t i = 0; i < input_state->vertexBindingDescriptionCount; i++) {
+		const VkVertexInputBindingDescription *input_binding =
+			&input_state->pVertexBindingDescriptions[i];
+
+		if (input_binding->binding == attrib_binding)
+			return input_binding->stride;
+	}
+
+	return 0;
+}
 
 static struct radv_pipeline_key
 radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
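A quick usage sketch for the new radv_get_attrib_stride() helper, assuming it is visible to the caller (in the patch it is file-static); the input-state literal below is invented for illustration:

    #include <stdio.h>
    #include <vulkan/vulkan.h>

    int main(void)
    {
        VkVertexInputBindingDescription binding = {
            .binding = 0,
            .stride = 24, /* e.g. vec3 position + vec3 normal */
            .inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
        };
        VkVertexInputAttributeDescription attrib = {
            .location = 0,
            .binding = 0,
            .format = VK_FORMAT_R32G32B32_SFLOAT,
            .offset = 0,
        };
        VkPipelineVertexInputStateCreateInfo vi = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
            .vertexBindingDescriptionCount = 1,
            .pVertexBindingDescriptions = &binding,
            .vertexAttributeDescriptionCount = 1,
            .pVertexAttributeDescriptions = &attrib,
        };

        /* Resolves the attribute's binding to that binding's stride: 24. */
        printf("stride = %u\n",
               (unsigned)radv_get_attrib_stride(&vi, attrib.binding));
        return 0;
    }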
@@ -1878,14 +1880,31 @@ radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
 	}
 
 	for (unsigned i = 0; i < input_state->vertexAttributeDescriptionCount; ++i) {
-		unsigned location = input_state->pVertexAttributeDescriptions[i].location;
-		unsigned binding = input_state->pVertexAttributeDescriptions[i].binding;
+		const VkVertexInputAttributeDescription *desc =
+			&input_state->pVertexAttributeDescriptions[i];
+		const struct vk_format_description *format_desc;
+		unsigned location = desc->location;
+		unsigned binding = desc->binding;
+		unsigned num_format, data_format;
+		int first_non_void;
+
 		if (binding_input_rate & (1u << binding)) {
 			key.instance_rate_inputs |= 1u << location;
 			key.instance_rate_divisors[location] = instance_rate_divisors[binding];
 		}
 
-		if (pipeline->device->physical_device->rad_info.chip_class <= VI &&
+		format_desc = vk_format_description(desc->format);
+		first_non_void = vk_format_get_first_non_void_channel(desc->format);
+
+		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
+		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);
+
+		key.vertex_attribute_formats[location] = data_format | (num_format << 4);
+		key.vertex_attribute_bindings[location] = desc->binding;
+		key.vertex_attribute_offsets[location] = desc->offset;
+		key.vertex_attribute_strides[location] = radv_get_attrib_stride(input_state, desc->binding);
+
+		if (pipeline->device->physical_device->rad_info.chip_class <= GFX8 &&
 		    pipeline->device->physical_device->rad_info.family != CHIP_STONEY) {
 			VkFormat format = input_state->pVertexAttributeDescriptions[i].format;
 			uint64_t adjust;
@@ -1908,6 +1927,26 @@ radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
 			}
 			key.vertex_alpha_adjust |= adjust << (2 * location);
 		}
+
+		switch (desc->format) {
+		case VK_FORMAT_B8G8R8A8_UNORM:
+		case VK_FORMAT_B8G8R8A8_SNORM:
+		case VK_FORMAT_B8G8R8A8_USCALED:
+		case VK_FORMAT_B8G8R8A8_SSCALED:
+		case VK_FORMAT_B8G8R8A8_UINT:
+		case VK_FORMAT_B8G8R8A8_SINT:
+		case VK_FORMAT_B8G8R8A8_SRGB:
+		case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_SNORM_PACK32:
+		case VK_FORMAT_A2R10G10B10_USCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_SSCALED_PACK32:
+		case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+		case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+			key.vertex_post_shuffle |= 1 << location;
+			break;
+		default:
+			break;
+		}
 	}
 
 	if (pCreateInfo->pTessellationState)
@@ -1923,7 +1962,7 @@ radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
 	}
 
 	key.col_format = blend->spi_shader_col_format;
-	if (pipeline->device->physical_device->rad_info.chip_class < VI)
+	if (pipeline->device->physical_device->rad_info.chip_class < GFX8)
 		radv_pipeline_compute_get_int_clamp(pCreateInfo, &key.is_int8, &key.is_int10);
 
 	return key;
@@ -1936,8 +1975,14 @@ radv_fill_shader_keys(struct radv_shader_variant_key *keys,
 {
 	keys[MESA_SHADER_VERTEX].vs.instance_rate_inputs = key->instance_rate_inputs;
 	keys[MESA_SHADER_VERTEX].vs.alpha_adjust = key->vertex_alpha_adjust;
-	for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; ++i)
+	keys[MESA_SHADER_VERTEX].vs.post_shuffle = key->vertex_post_shuffle;
+	for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; ++i) {
 		keys[MESA_SHADER_VERTEX].vs.instance_rate_divisors[i] = key->instance_rate_divisors[i];
+		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_formats[i] = key->vertex_attribute_formats[i];
+		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_bindings[i] = key->vertex_attribute_bindings[i];
+		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_offsets[i] = key->vertex_attribute_offsets[i];
+		keys[MESA_SHADER_VERTEX].vs.vertex_attribute_strides[i] = key->vertex_attribute_strides[i];
+	}
 
 	if (nir[MESA_SHADER_TESS_CTRL]) {
 		keys[MESA_SHADER_VERTEX].vs.as_ls = true;
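The key above packs two 4-bit hardware enums into one byte per attribute: data_format | (num_format << 4). A minimal sketch of the packing and its inverse (names invented; the width assumption is that both GCN buffer-format enums fit in a nibble, which is what the shift relies on):

    #include <assert.h>
    #include <stdint.h>

    static uint8_t pack_vertex_format(uint32_t data_format, uint32_t num_format)
    {
        assert(data_format < 16 && num_format < 16);
        return (uint8_t)(data_format | (num_format << 4));
    }

    static uint32_t unpack_data_format(uint8_t packed) { return packed & 0xf; }
    static uint32_t unpack_num_format(uint8_t packed)  { return packed >> 4; }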
@@ -2004,13 +2049,53 @@ merge_tess_info(struct shader_info *tes_info,
 	tes_info->tess.point_mode |= tcs_info->tess.point_mode;
 }
 
+static
+void radv_init_feedback(const VkPipelineCreationFeedbackCreateInfoEXT *ext)
+{
+	if (!ext)
+		return;
+
+	if (ext->pPipelineCreationFeedback) {
+		ext->pPipelineCreationFeedback->flags = 0;
+		ext->pPipelineCreationFeedback->duration = 0;
+	}
+
+	for (unsigned i = 0; i < ext->pipelineStageCreationFeedbackCount; ++i) {
+		ext->pPipelineStageCreationFeedbacks[i].flags = 0;
+		ext->pPipelineStageCreationFeedbacks[i].duration = 0;
+	}
+}
+
+static
+void radv_start_feedback(VkPipelineCreationFeedbackEXT *feedback)
+{
+	if (!feedback)
+		return;
+
+	feedback->duration -= radv_get_current_time();
+	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
+}
+
+static
+void radv_stop_feedback(VkPipelineCreationFeedbackEXT *feedback, bool cache_hit)
+{
+	if (!feedback)
+		return;
+
+	feedback->duration += radv_get_current_time();
+	feedback->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT |
+		(cache_hit ? VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT : 0);
+}
+
 static
 void radv_create_shaders(struct radv_pipeline *pipeline,
                          struct radv_device *device,
                          struct radv_pipeline_cache *cache,
                          const struct radv_pipeline_key *key,
                          const VkPipelineShaderStageCreateInfo **pStages,
-                         const VkPipelineCreateFlags flags)
+                         const VkPipelineCreateFlags flags,
+                         VkPipelineCreationFeedbackEXT *pipeline_feedback,
+                         VkPipelineCreationFeedbackEXT **stage_feedbacks)
 {
 	struct radv_shader_module fs_m = {0};
 	struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
@@ -2020,6 +2105,8 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 	struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{0}}}};
 	unsigned char hash[20], gs_copy_hash[20];
 
+	radv_start_feedback(pipeline_feedback);
+
 	for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
 		if (pStages[i]) {
 			modules[i] = radv_shader_module_from_handle(pStages[i]->module);
@@ -2036,14 +2123,18 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 	memcpy(gs_copy_hash, hash, 20);
 	gs_copy_hash[0] ^= 1;
 
+	bool found_in_application_cache = true;
 	if (modules[MESA_SHADER_GEOMETRY]) {
 		struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
-		radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants);
+		radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants,
+								&found_in_application_cache);
 		pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
 	}
 
-	if (radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders) &&
+	if (radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
+							    &found_in_application_cache) &&
 	    (!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader)) {
+		radv_stop_feedback(pipeline_feedback, found_in_application_cache);
 		return;
 	}
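The start/stop pair above uses a signed trick: subtract the clock at start, add it at stop, so duration ends up as the elapsed time without storing a separate start stamp. A self-contained restatement (the now_ns() stand-in for radv_get_current_time() is an assumption):

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>
    #include <vulkan/vulkan.h>

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    }

    static void start_feedback(VkPipelineCreationFeedbackEXT *fb)
    {
        if (!fb)
            return;
        fb->duration -= now_ns(); /* completed by the matching stop */
        fb->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT;
    }

    static void stop_feedback(VkPipelineCreationFeedbackEXT *fb, bool cache_hit)
    {
        if (!fb)
            return;
        fb->duration += now_ns();
        fb->flags = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT |
                    (cache_hit ? VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT
                               : 0);
    }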
@@ -2061,10 +2152,12 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 		if (!modules[i])
 			continue;
 
+		radv_start_feedback(stage_feedbacks[i]);
+
 		nir[i] = radv_shader_compile_to_nir(device, modules[i],
 						    stage ? stage->pName : "main", i,
 						    stage ? stage->pSpecializationInfo : NULL,
-						    flags);
+						    flags, pipeline->layout);
 
 		/* We don't want to alter meta shaders IR directly so clone it
 		 * first.
@@ -2072,6 +2165,8 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 		if (nir[i]->info.name) {
 			nir[i] = nir_shader_clone(NULL, nir[i]);
 		}
+
+		radv_stop_feedback(stage_feedbacks[i], false);
 	}
 
 	if (nir[MESA_SHADER_TESS_CTRL]) {
@@ -2083,6 +2178,15 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 	radv_link_shaders(pipeline, nir);
 
 	for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
+		if (nir[i]) {
+			NIR_PASS_V(nir[i], nir_lower_bool_to_int32);
+			NIR_PASS_V(nir[i], nir_lower_non_uniform_access,
+				   nir_lower_non_uniform_ubo_access |
+				   nir_lower_non_uniform_ssbo_access |
+				   nir_lower_non_uniform_texture_access |
+				   nir_lower_non_uniform_image_access);
+		}
+
 		if (radv_can_dump_shader(device, modules[i], false))
 			nir_print_shader(nir[i], stderr);
 	}
@@ -2091,10 +2195,14 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 
 	if (nir[MESA_SHADER_FRAGMENT]) {
 		if (!pipeline->shaders[MESA_SHADER_FRAGMENT]) {
+			radv_start_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT]);
+
 			pipeline->shaders[MESA_SHADER_FRAGMENT] =
 			       radv_shader_variant_create(device, modules[MESA_SHADER_FRAGMENT], &nir[MESA_SHADER_FRAGMENT], 1,
 			                                  pipeline->layout, keys + MESA_SHADER_FRAGMENT,
 			                                  &codes[MESA_SHADER_FRAGMENT], &code_sizes[MESA_SHADER_FRAGMENT]);
+
+			radv_stop_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT], false);
 		}
 
 		/* TODO: These are no longer used as keys we should refactor this */
@@ -2113,10 +2221,15 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 			struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
 			struct radv_shader_variant_key key = keys[MESA_SHADER_TESS_CTRL];
 			key.tcs.vs_key = keys[MESA_SHADER_VERTEX].vs;
+
+			radv_start_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL]);
+
 			pipeline->shaders[MESA_SHADER_TESS_CTRL] = radv_shader_variant_create(device, modules[MESA_SHADER_TESS_CTRL], combined_nir, 2,
 			                                                                      pipeline->layout,
 			                                                                      &key, &codes[MESA_SHADER_TESS_CTRL],
 			                                                                      &code_sizes[MESA_SHADER_TESS_CTRL]);
+
+			radv_stop_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL], false);
 		}
 		modules[MESA_SHADER_VERTEX] = NULL;
 		keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
@@ -2127,10 +2240,15 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 		gl_shader_stage pre_stage = modules[MESA_SHADER_TESS_EVAL] ? MESA_SHADER_TESS_EVAL : MESA_SHADER_VERTEX;
 		if (!pipeline->shaders[MESA_SHADER_GEOMETRY]) {
 			struct nir_shader *combined_nir[] = {nir[pre_stage], nir[MESA_SHADER_GEOMETRY]};
+
+			radv_start_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY]);
+
 			pipeline->shaders[MESA_SHADER_GEOMETRY] = radv_shader_variant_create(device, modules[MESA_SHADER_GEOMETRY], combined_nir, 2,
 			                                                                     pipeline->layout,
 			                                                                     &keys[pre_stage], &codes[MESA_SHADER_GEOMETRY],
 			                                                                     &code_sizes[MESA_SHADER_GEOMETRY]);
+
+			radv_stop_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY], false);
 		}
 		modules[pre_stage] = NULL;
 	}
@@ -2144,10 +2262,15 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 				keys[MESA_SHADER_TESS_EVAL].tes.num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
 				keys[MESA_SHADER_TESS_EVAL].tes.tcs_num_outputs = util_last_bit64(pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.info.tcs.outputs_written);
 			}
+
+			radv_start_feedback(stage_feedbacks[i]);
+
 			pipeline->shaders[i] = radv_shader_variant_create(device, modules[i], &nir[i], 1,
 									  pipeline->layout,
 									  keys + i, &codes[i],
 									  &code_sizes[i]);
+
+			radv_stop_feedback(stage_feedbacks[i], false);
 		}
 	}
@@ -2197,6 +2320,8 @@ void radv_create_shaders(struct radv_pipeline *pipeline,
 
 	if (fs_m.nir)
 		ralloc_free(fs_m.nir);
+
+	radv_stop_feedback(pipeline_feedback, false);
 }
 
 static uint32_t
@@ -2497,8 +2622,8 @@ radv_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCr
 
 	extent = color_entry->extent;
 
-	if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
-		struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment.attachment;
+	if (subpass->depth_stencil_attachment) {
+		struct radv_render_pass_attachment *attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;
 
 		/* Coefficients taken from AMDVLK */
 		unsigned depth_coeff = vk_format_is_depth(attachment->format) ? 5 : 0;
@@ -2517,7 +2642,7 @@ radv_compute_bin_size(struct radv_pipeline *pipeline, const VkGraphicsPipelineCr
 }
 
 static void
-radv_pipeline_generate_binning_state(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_binning_state(struct radeon_cmdbuf *ctx_cs,
 				     struct radv_pipeline *pipeline,
 				     const VkGraphicsPipelineCreateInfo *pCreateInfo)
 {
@@ -2544,6 +2669,7 @@ radv_pipeline_generate_binning_state(struct radeon_cmdbuf *cs,
 		fpovs_per_batch = 63;
 		break;
 	case CHIP_RAVEN:
+	case CHIP_RAVEN2:
 		context_states_per_bin = 6;
 		persistent_states_per_bin = 32;
 		fpovs_per_batch = 63;
@@ -2566,15 +2692,15 @@ radv_pipeline_generate_binning_state(struct radeon_cmdbuf *cs,
 			S_028C44_OPTIMAL_BIN_SELECTION(1);
 	}
 
-	radeon_set_context_reg(cs, R_028C44_PA_SC_BINNER_CNTL_0,
+	radeon_set_context_reg(ctx_cs, R_028C44_PA_SC_BINNER_CNTL_0,
 			       pa_sc_binner_cntl_0);
-	radeon_set_context_reg(cs, R_028060_DB_DFSM_CONTROL,
+	radeon_set_context_reg(ctx_cs, R_028060_DB_DFSM_CONTROL,
 			       db_dfsm_control);
 }
 
 static void
-radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *ctx_cs,
 					   struct radv_pipeline *pipeline,
 					   const VkGraphicsPipelineCreateInfo *pCreateInfo,
 					   const struct radv_graphics_pipeline_create_info *extra)
@@ -2588,8 +2714,8 @@ radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *cs,
 	uint32_t db_render_control = 0, db_render_override2 = 0;
 	uint32_t db_render_override = 0;
 
-	if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED)
-		attachment = pass->attachments + subpass->depth_stencil_attachment.attachment;
+	if (subpass->depth_stencil_attachment)
+		attachment = pass->attachments + subpass->depth_stencil_attachment->attachment;
 
 	bool has_depth_attachment = attachment && vk_format_is_depth(attachment->format);
 	bool has_stencil_attachment = attachment && vk_format_is_stencil(attachment->format);
@@ -2631,8 +2757,7 @@ radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *cs,
 	db_render_override |= S_02800C_FORCE_HIS_ENABLE0(V_02800C_FORCE_DISABLE) |
 			      S_02800C_FORCE_HIS_ENABLE1(V_02800C_FORCE_DISABLE);
 
-	if (pipeline->device->enabled_extensions.EXT_depth_range_unrestricted &&
-	    !pCreateInfo->pRasterizationState->depthClampEnable &&
+	if (!pCreateInfo->pRasterizationState->depthClampEnable &&
 	    ps->info.info.ps.writes_z) {
 		/* From VK_EXT_depth_range_unrestricted spec:
 		 *
@@ -2647,35 +2772,35 @@ radv_pipeline_generate_depth_stencil_state(struct radeon_cmdbuf *cs,
 		db_render_override |= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
 	}
 
-	radeon_set_context_reg(cs, R_028800_DB_DEPTH_CONTROL, db_depth_control);
-	radeon_set_context_reg(cs, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);
+	radeon_set_context_reg(ctx_cs, R_028800_DB_DEPTH_CONTROL, db_depth_control);
+	radeon_set_context_reg(ctx_cs, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);
 
-	radeon_set_context_reg(cs, R_028000_DB_RENDER_CONTROL, db_render_control);
-	radeon_set_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
-	radeon_set_context_reg(cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
+	radeon_set_context_reg(ctx_cs, R_028000_DB_RENDER_CONTROL, db_render_control);
+	radeon_set_context_reg(ctx_cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
+	radeon_set_context_reg(ctx_cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
 }
 
 static void
-radv_pipeline_generate_blend_state(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_blend_state(struct radeon_cmdbuf *ctx_cs,
 				   struct radv_pipeline *pipeline,
 				   const struct radv_blend_state *blend)
 {
-	radeon_set_context_reg_seq(cs, R_028780_CB_BLEND0_CONTROL, 8);
-	radeon_emit_array(cs, blend->cb_blend_control,
+	radeon_set_context_reg_seq(ctx_cs, R_028780_CB_BLEND0_CONTROL, 8);
+	radeon_emit_array(ctx_cs, blend->cb_blend_control,
 			  8);
-	radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
-	radeon_set_context_reg(cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);
+	radeon_set_context_reg(ctx_cs, R_028808_CB_COLOR_CONTROL, blend->cb_color_control);
+	radeon_set_context_reg(ctx_cs, R_028B70_DB_ALPHA_TO_MASK, blend->db_alpha_to_mask);
 
 	if (pipeline->device->physical_device->has_rbplus) {
-		radeon_set_context_reg_seq(cs, R_028760_SX_MRT0_BLEND_OPT, 8);
-		radeon_emit_array(cs, blend->sx_mrt_blend_opt, 8);
+		radeon_set_context_reg_seq(ctx_cs, R_028760_SX_MRT0_BLEND_OPT, 8);
+		radeon_emit_array(ctx_cs, blend->sx_mrt_blend_opt, 8);
 	}
 
-	radeon_set_context_reg(cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);
+	radeon_set_context_reg(ctx_cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format);
 
-	radeon_set_context_reg(cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
-	radeon_set_context_reg(cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);
+	radeon_set_context_reg(ctx_cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask);
+	radeon_set_context_reg(ctx_cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask);
 
 	pipeline->graphics.col_format = blend->spi_shader_col_format;
 	pipeline->graphics.cb_target_mask = blend->cb_target_mask;
@@ -2693,23 +2818,30 @@ radv_get_conservative_raster_mode(const VkPipelineRasterizationStateCreateInfo *
 }
 
 static void
-radv_pipeline_generate_raster_state(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_raster_state(struct radeon_cmdbuf *ctx_cs,
 				    struct radv_pipeline *pipeline,
 				    const VkGraphicsPipelineCreateInfo *pCreateInfo)
 {
 	const VkPipelineRasterizationStateCreateInfo *vkraster = pCreateInfo->pRasterizationState;
 	const VkConservativeRasterizationModeEXT mode =
 		radv_get_conservative_raster_mode(vkraster);
-	uint32_t pa_sc_conservative_rast = 0;
+	uint32_t pa_sc_conservative_rast = S_028C4C_NULL_SQUAD_AA_MASK_ENABLE(1);
+	bool depth_clip_disable = vkraster->depthClampEnable;
+
+	const VkPipelineRasterizationDepthClipStateCreateInfoEXT *depth_clip_state =
+		vk_find_struct_const(vkraster->pNext, PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT);
+	if (depth_clip_state) {
+		depth_clip_disable = !depth_clip_state->depthClipEnable;
+	}
 
-	radeon_set_context_reg(cs, R_028810_PA_CL_CLIP_CNTL,
+	radeon_set_context_reg(ctx_cs, R_028810_PA_CL_CLIP_CNTL,
 			       S_028810_DX_CLIP_SPACE_DEF(1) | // vulkan uses DX conventions.
-			       S_028810_ZCLIP_NEAR_DISABLE(vkraster->depthClampEnable ? 1 : 0) |
-			       S_028810_ZCLIP_FAR_DISABLE(vkraster->depthClampEnable ? 1 : 0) |
+			       S_028810_ZCLIP_NEAR_DISABLE(depth_clip_disable ? 1 : 0) |
+			       S_028810_ZCLIP_FAR_DISABLE(depth_clip_disable ? 1 : 0) |
 			       S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
 			       S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));
 
-	radeon_set_context_reg(cs, R_0286D4_SPI_INTERP_CONTROL_0,
+	radeon_set_context_reg(ctx_cs, R_0286D4_SPI_INTERP_CONTROL_0,
 			       S_0286D4_FLAT_SHADE_ENA(1) |
 			       S_0286D4_PNT_SPRITE_ENA(1) |
 			       S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
@@ -2718,12 +2850,12 @@ radv_pipeline_generate_raster_state(struct radeon_cmdbuf *cs,
 			       S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
 			       S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
 
-	radeon_set_context_reg(cs, R_028BE4_PA_SU_VTX_CNTL,
+	radeon_set_context_reg(ctx_cs, R_028BE4_PA_SU_VTX_CNTL,
 			       S_028BE4_PIX_CENTER(1) | // TODO verify
 			       S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
 			       S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));
 
-	radeon_set_context_reg(cs, R_028814_PA_SU_SC_MODE_CNTL,
+	radeon_set_context_reg(ctx_cs, R_028814_PA_SU_SC_MODE_CNTL,
 			       S_028814_FACE(vkraster->frontFace) |
 			       S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
 			       S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
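The raster-state hunk above decouples depth clipping from depth clamping for VK_EXT_depth_clip_enable: clamping still implies no clipping by default, but an explicit VkPipelineRasterizationDepthClipStateCreateInfoEXT overrides that. Restated in isolation (helper name invented):

    #include <stdbool.h>
    #include <vulkan/vulkan.h>

    static bool
    depth_clip_disabled(const VkPipelineRasterizationStateCreateInfo *raster,
                        const VkPipelineRasterizationDepthClipStateCreateInfoEXT *clip)
    {
        bool disable = raster->depthClampEnable; /* legacy coupling */
        if (clip)
            disable = !clip->depthClipEnable;    /* explicit app intent wins */
        return disable;
    }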
@@ -2764,28 +2896,37 @@ radv_pipeline_generate_raster_state(struct radeon_cmdbuf *cs,
 		}
 	}
 
-	radeon_set_context_reg(cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
+	radeon_set_context_reg(ctx_cs, R_028C4C_PA_SC_CONSERVATIVE_RASTERIZATION_CNTL,
 				   pa_sc_conservative_rast);
 }
 
 static void
-radv_pipeline_generate_multisample_state(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_multisample_state(struct radeon_cmdbuf *ctx_cs,
 					 struct radv_pipeline *pipeline)
 {
 	struct radv_multisample_state *ms = &pipeline->graphics.ms;
 
-	radeon_set_context_reg_seq(cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
-	radeon_emit(cs, ms->pa_sc_aa_mask[0]);
-	radeon_emit(cs, ms->pa_sc_aa_mask[1]);
+	radeon_set_context_reg_seq(ctx_cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2);
+	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[0]);
+	radeon_emit(ctx_cs, ms->pa_sc_aa_mask[1]);
+
+	radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
+	radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
 
-	radeon_set_context_reg(cs, R_028804_DB_EQAA, ms->db_eqaa);
-	radeon_set_context_reg(cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
+	/* The exclusion bits can be set to improve rasterization efficiency
+	 * if no sample lies on the pixel boundary (-8 sample offset). It's
+	 * currently always TRUE because the driver doesn't support 16 samples.
+	 */
+	bool exclusion = pipeline->device->physical_device->rad_info.chip_class >= GFX7;
+	radeon_set_context_reg(ctx_cs, R_02882C_PA_SU_PRIM_FILTER_CNTL,
+			       S_02882C_XMAX_RIGHT_EXCLUSION(exclusion) |
+			       S_02882C_YMAX_BOTTOM_EXCLUSION(exclusion));
 }
 
 static void
-radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *cs,
-				   const struct radv_pipeline *pipeline)
+radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *ctx_cs,
+				   struct radv_pipeline *pipeline)
 {
 	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
@@ -2803,12 +2944,13 @@ radv_pipeline_generate_vgt_gs_mode(struct radeon_cmdbuf *cs,
 		vgt_primitiveid_en = true;
 	}
 
-	radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
-	radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
+	radeon_set_context_reg(ctx_cs, R_028A84_VGT_PRIMITIVEID_EN, vgt_primitiveid_en);
+	radeon_set_context_reg(ctx_cs, R_028A40_VGT_GS_MODE, vgt_gs_mode);
 }
 
 static void
-radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *ctx_cs,
+			     struct radeon_cmdbuf *cs,
 			     struct radv_pipeline *pipeline,
 			     struct radv_shader_variant *shader)
 {
@@ -2829,10 +2971,10 @@ radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *cs,
 		outinfo->writes_layer ||
 		outinfo->writes_viewport_index;
 
-	radeon_set_context_reg(cs, R_0286C4_SPI_VS_OUT_CONFIG,
+	radeon_set_context_reg(ctx_cs, R_0286C4_SPI_VS_OUT_CONFIG,
 			       S_0286C4_VS_EXPORT_COUNT(MAX2(1, outinfo->param_exports) - 1));
 
-	radeon_set_context_reg(cs, R_02870C_SPI_SHADER_POS_FORMAT,
+	radeon_set_context_reg(ctx_cs, R_02870C_SPI_SHADER_POS_FORMAT,
 			       S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) |
 			       S_02870C_POS1_EXPORT_FORMAT(outinfo->pos_exports > 1 ?
 							   V_02870C_SPI_SHADER_4COMP :
@@ -2844,13 +2986,13 @@ radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *cs,
 							   V_02870C_SPI_SHADER_4COMP :
 							   V_02870C_SPI_SHADER_NONE));
 
-	radeon_set_context_reg(cs, R_028818_PA_CL_VTE_CNTL,
+	radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
 			       S_028818_VTX_W0_FMT(1) |
 			       S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
 			       S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
 			       S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
 
-	radeon_set_context_reg(cs, R_02881C_PA_CL_VS_OUT_CNTL,
+	radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
 			       S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
 			       S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
 			       S_02881C_USE_VTX_VIEWPORT_INDX(outinfo->writes_viewport_index) |
@@ -2861,8 +3003,8 @@ radv_pipeline_generate_hw_vs(struct radeon_cmdbuf *cs,
 			       cull_dist_mask << 8 |
 			       clip_dist_mask);
 
-	if (pipeline->device->physical_device->rad_info.chip_class <= VI)
-		radeon_set_context_reg(cs, R_028AB4_VGT_REUSE_OFF,
+	if (pipeline->device->physical_device->rad_info.chip_class <= GFX8)
+		radeon_set_context_reg(ctx_cs, R_028AB4_VGT_REUSE_OFF,
 				       outinfo->writes_viewport_index);
 }
@@ -2894,7 +3036,7 @@ radv_pipeline_generate_hw_ls(struct radeon_cmdbuf *cs,
 	radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));
 
 	rsrc2 |= S_00B52C_LDS_SIZE(tess->lds_size);
-	if (pipeline->device->physical_device->rad_info.chip_class == CIK &&
+	if (pipeline->device->physical_device->rad_info.chip_class == GFX7 &&
 	    pipeline->device->physical_device->rad_info.family != CHIP_HAWAII)
 		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);
@@ -2930,7 +3072,8 @@ radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
 }
 
 static void
-radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *ctx_cs,
+				     struct radeon_cmdbuf *cs,
 				     struct radv_pipeline *pipeline,
 				     const struct radv_tessellation_state *tess)
 {
@@ -2946,11 +3089,12 @@ radv_pipeline_generate_vertex_shader(struct radeon_cmdbuf *cs,
 	else if (vs->info.vs.as_es)
 		radv_pipeline_generate_hw_es(cs, pipeline, vs);
 	else
-		radv_pipeline_generate_hw_vs(cs, pipeline, vs);
+		radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, vs);
 }
 
 static void
-radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *ctx_cs,
+				    struct radeon_cmdbuf *cs,
 				    struct radv_pipeline *pipeline,
 				    const struct radv_tessellation_state *tess)
 {
@@ -2966,24 +3110,25 @@ radv_pipeline_generate_tess_shaders(struct radeon_cmdbuf *cs,
 		if (tes->info.tes.as_es)
 			radv_pipeline_generate_hw_es(cs, pipeline, tes);
 		else
-			radv_pipeline_generate_hw_vs(cs, pipeline, tes);
+			radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);
 	}
 
 	radv_pipeline_generate_hw_hs(cs, pipeline, tcs, tess);
 
-	radeon_set_context_reg(cs, R_028B6C_VGT_TF_PARAM,
+	radeon_set_context_reg(ctx_cs, R_028B6C_VGT_TF_PARAM,
 			       tess->tf_param);
 
-	if (pipeline->device->physical_device->rad_info.chip_class >= CIK)
-		radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
+	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7)
+		radeon_set_context_reg_idx(ctx_cs, R_028B58_VGT_LS_HS_CONFIG, 2,
 					   tess->ls_hs_config);
 	else
-		radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
+		radeon_set_context_reg(ctx_cs, R_028B58_VGT_LS_HS_CONFIG,
 				       tess->ls_hs_config);
 }
 
 static void
-radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *ctx_cs,
+				       struct radeon_cmdbuf *cs,
 				       struct radv_pipeline *pipeline,
 				       const struct radv_gs_state *gs_state)
 {
@@ -3004,33 +3149,32 @@ radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *cs,
 
 	offset = num_components[0] * gs_max_out_vertices;
 
-	radeon_set_context_reg_seq(cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
-	radeon_emit(cs, offset);
+	radeon_set_context_reg_seq(ctx_cs, R_028A60_VGT_GSVS_RING_OFFSET_1, 3);
+	radeon_emit(ctx_cs, offset);
 	if (max_stream >= 1)
 		offset += num_components[1] * gs_max_out_vertices;
-	radeon_emit(cs, offset);
+	radeon_emit(ctx_cs, offset);
 	if (max_stream >= 2)
 		offset += num_components[2] * gs_max_out_vertices;
-	radeon_emit(cs, offset);
+	radeon_emit(ctx_cs, offset);
 	if (max_stream >= 3)
 		offset += num_components[3] * gs_max_out_vertices;
-	radeon_set_context_reg(cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);
+	radeon_set_context_reg(ctx_cs, R_028AB0_VGT_GSVS_RING_ITEMSIZE, offset);
 
-	radeon_set_context_reg(cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.gs.vertices_out);
+	radeon_set_context_reg(ctx_cs, R_028B38_VGT_GS_MAX_VERT_OUT, gs->info.gs.vertices_out);
 
-	uint32_t gs_vert_itemsize = gs->info.gs.gsvs_vertex_size;
-	radeon_set_context_reg_seq(cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
-	radeon_emit(cs, gs_vert_itemsize >> 2);
-	radeon_emit(cs, 0);
-	radeon_emit(cs, 0);
-	radeon_emit(cs, 0);
+	radeon_set_context_reg_seq(ctx_cs, R_028B5C_VGT_GS_VERT_ITEMSIZE, 4);
+	radeon_emit(ctx_cs, num_components[0]);
+	radeon_emit(ctx_cs, (max_stream >= 1) ? num_components[1] : 0);
+	radeon_emit(ctx_cs, (max_stream >= 2) ? num_components[2] : 0);
+	radeon_emit(ctx_cs, (max_stream >= 3) ? num_components[3] : 0);
 
 	uint32_t gs_num_invocations = gs->info.gs.invocations;
-	radeon_set_context_reg(cs, R_028B90_VGT_GS_INSTANCE_CNT,
+	radeon_set_context_reg(ctx_cs, R_028B90_VGT_GS_INSTANCE_CNT,
 			       S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
 			       S_028B90_ENABLE(gs_num_invocations > 0));
 
-	radeon_set_context_reg(cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
+	radeon_set_context_reg(ctx_cs, R_028AAC_VGT_ESGS_RING_ITEMSIZE,
 			       gs_state->vgt_esgs_ring_itemsize);
 
 	va = radv_buffer_get_va(gs->bo) + gs->bo_offset;
@@ -3044,8 +3188,8 @@ radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *cs,
 		radeon_emit(cs, gs->rsrc1);
 		radeon_emit(cs, gs->rsrc2 | S_00B22C_LDS_SIZE(gs_state->lds_size));
 
-		radeon_set_context_reg(cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs_state->vgt_gs_onchip_cntl);
-		radeon_set_context_reg(cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, gs_state->vgt_gs_max_prims_per_subgroup);
+		radeon_set_context_reg(ctx_cs, R_028A44_VGT_GS_ONCHIP_CNTL, gs_state->vgt_gs_onchip_cntl);
+		radeon_set_context_reg(ctx_cs, R_028A94_VGT_GS_MAX_PRIMS_PER_SUBGROUP, gs_state->vgt_gs_max_prims_per_subgroup);
 	} else {
 		radeon_set_sh_reg_seq(cs, R_00B220_SPI_SHADER_PGM_LO_GS, 4);
 		radeon_emit(cs, va >> 8);
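The VGT_GS_VERT_ITEMSIZE hunk above replaces the single aggregate vertex size with true per-stream item sizes, consistent with the ring offsets accumulated from num_components[]. The arithmetic, pulled out as a sketch (names invented; sizes are in dwords):

    #include <stdint.h>

    static void
    gsvs_ring_layout(const uint32_t num_components[4], uint32_t max_stream,
                     uint32_t verts_out, uint32_t offsets[3],
                     uint32_t itemsizes[4])
    {
        uint32_t offset = num_components[0] * verts_out;

        offsets[0] = offset;                 /* where stream 1 begins */
        if (max_stream >= 1)
            offset += num_components[1] * verts_out;
        offsets[1] = offset;                 /* where stream 2 begins */
        if (max_stream >= 2)
            offset += num_components[2] * verts_out;
        offsets[2] = offset;                 /* where stream 3 begins */

        /* Each active stream's itemsize is its own component count. */
        for (uint32_t s = 0; s < 4; s++)
            itemsizes[s] = (s <= max_stream) ? num_components[s] : 0;
    }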
@@ -3054,16 +3198,20 @@ radv_pipeline_generate_geometry_shader(struct radeon_cmdbuf *cs,
 		radeon_emit(cs, gs->rsrc2);
 	}
 
-	radv_pipeline_generate_hw_vs(cs, pipeline, pipeline->gs_copy_shader);
+	radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, pipeline->gs_copy_shader);
 }
 
-static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade)
+static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade, bool float16)
 {
 	uint32_t ps_input_cntl;
 	if (offset <= AC_EXP_PARAM_OFFSET_31) {
 		ps_input_cntl = S_028644_OFFSET(offset);
 		if (flat_shade)
 			ps_input_cntl |= S_028644_FLAT_SHADE(1);
+		if (float16) {
+			ps_input_cntl |= S_028644_FP16_INTERP_MODE(1) |
+			                 S_028644_ATTR0_VALID(1);
+		}
 	} else {
 		/* The input is a DEFAULT_VAL constant. */
 		assert(offset >= AC_EXP_PARAM_DEFAULT_VAL_0000 &&
@@ -3076,8 +3224,8 @@ static uint32_t offset_to_ps_input(uint32_t offset, bool flat_shade)
 }
 
 static void
-radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *cs,
-				 struct radv_pipeline *pipeline)
+radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *ctx_cs,
+				 struct radv_pipeline *pipeline)
 {
 	struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
 	const struct radv_vs_output_info *outinfo = get_vs_output_info(pipeline);
@@ -3088,7 +3236,7 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *cs,
 	if (ps->info.info.ps.prim_id_input) {
 		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID];
 		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
-			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true);
+			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
 			++ps_offset;
 		}
 	}
@@ -3098,9 +3246,9 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *cs,
 	    ps->info.info.needs_multiview_view_index) {
 		unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_LAYER];
 		if (vs_offset != AC_EXP_PARAM_UNDEFINED)
-			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true);
+			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false);
 		else
-			ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true);
+			ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false);
 		++ps_offset;
 	}
@@ -3116,14 +3264,14 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *cs,
 
 		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST0];
 		if (vs_offset != AC_EXP_PARAM_UNDEFINED) {
-			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false);
+			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false);
 			++ps_offset;
 		}
 
 		vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_CLIP_DIST1];
 		if (vs_offset != AC_EXP_PARAM_UNDEFINED &&
 		    ps->info.info.ps.num_input_clips_culls > 4) {
-			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false);
+			ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, false, false);
 			++ps_offset;
 		}
 	}
@@ -3131,6 +3279,7 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *cs,
 	for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.fs.input_mask; ++i) {
 		unsigned vs_offset;
 		bool flat_shade;
+		bool float16;
 		if (!(ps->info.fs.input_mask & (1u << i)))
 			continue;
@@ -3142,21 +3291,23 @@ radv_pipeline_generate_ps_inputs(struct radeon_cmdbuf *cs,
 		}
 
 		flat_shade = !!(ps->info.fs.flat_shaded_mask & (1u << ps_offset));
+		float16 = !!(ps->info.fs.float16_shaded_mask & (1u << ps_offset));
 
-		ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, flat_shade);
+		ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, flat_shade, float16);
 		++ps_offset;
 	}
 
 	if (ps_offset) {
-		radeon_set_context_reg_seq(cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
+		radeon_set_context_reg_seq(ctx_cs, R_028644_SPI_PS_INPUT_CNTL_0, ps_offset);
 		for (unsigned i = 0; i < ps_offset; i++) {
-			radeon_emit(cs, ps_input_cntl[i]);
+			radeon_emit(ctx_cs, ps_input_cntl[i]);
 		}
 	}
 }
 
 static uint32_t
 radv_compute_db_shader_control(const struct radv_device *device,
+			       const struct radv_pipeline *pipeline,
 			       const struct radv_shader_variant *ps)
 {
 	unsigned z_order;
@@ -3168,10 +3319,16 @@ radv_compute_db_shader_control(const struct radv_device *device,
 	bool disable_rbplus = device->physical_device->has_rbplus &&
 			      !device->physical_device->rbplus_allowed;
 
+	/* It shouldn't be needed to export gl_SampleMask when MSAA is disabled
+	 * but this appears to break Project Cars (DXVK). See
+	 * https://bugs.freedesktop.org/show_bug.cgi?id=109401
+	 */
+	bool mask_export_enable = ps->info.info.ps.writes_sample_mask;
+
 	return  S_02880C_Z_EXPORT_ENABLE(ps->info.info.ps.writes_z) |
 		S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.info.ps.writes_stencil) |
 		S_02880C_KILL_ENABLE(!!ps->info.fs.can_discard) |
-		S_02880C_MASK_EXPORT_ENABLE(ps->info.info.ps.writes_sample_mask) |
+		S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
 		S_02880C_Z_ORDER(z_order) |
 		S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) |
 		S_02880C_EXEC_ON_HIER_FAIL(ps->info.info.ps.writes_memory) |
@@ -3180,7 +3337,8 @@ radv_compute_db_shader_control(const struct radv_device *device,
 }
 
 static void
-radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *ctx_cs,
+				       struct radeon_cmdbuf *cs,
 				       struct radv_pipeline *pipeline)
 {
 	struct radv_shader_variant *ps;
@@ -3196,21 +3354,22 @@ radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *cs,
 	radeon_emit(cs, ps->rsrc1);
 	radeon_emit(cs, ps->rsrc2);
 
-	radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL,
-			       radv_compute_db_shader_control(pipeline->device, ps));
+	radeon_set_context_reg(ctx_cs, R_02880C_DB_SHADER_CONTROL,
+			       radv_compute_db_shader_control(pipeline->device,
+							      pipeline, ps));
 
-	radeon_set_context_reg(cs, R_0286CC_SPI_PS_INPUT_ENA,
+	radeon_set_context_reg(ctx_cs, R_0286CC_SPI_PS_INPUT_ENA,
 			       ps->config.spi_ps_input_ena);
 
-	radeon_set_context_reg(cs, R_0286D0_SPI_PS_INPUT_ADDR,
+	radeon_set_context_reg(ctx_cs, R_0286D0_SPI_PS_INPUT_ADDR,
 			       ps->config.spi_ps_input_addr);
 
-	radeon_set_context_reg(cs, R_0286D8_SPI_PS_IN_CONTROL,
+	radeon_set_context_reg(ctx_cs, R_0286D8_SPI_PS_IN_CONTROL,
 			       S_0286D8_NUM_INTERP(ps->info.fs.num_interp));
 
-	radeon_set_context_reg(cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);
+	radeon_set_context_reg(ctx_cs, R_0286E0_SPI_BARYC_CNTL, pipeline->graphics.spi_baryc_cntl);
 
-	radeon_set_context_reg(cs, R_028710_SPI_SHADER_Z_FORMAT,
+	radeon_set_context_reg(ctx_cs, R_028710_SPI_SHADER_Z_FORMAT,
 			       ac_get_spi_shader_z_format(ps->info.info.ps.writes_z,
 							  ps->info.info.ps.writes_stencil,
 							  ps->info.info.ps.writes_sample_mask));
@@ -3223,7 +3382,7 @@ radv_pipeline_generate_fragment_shader(struct radeon_cmdbuf *cs,
 }
 
 static void
-radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *cs,
+radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *ctx_cs,
 					struct radv_pipeline *pipeline)
 {
 	if (pipeline->device->physical_device->rad_info.family < CHIP_POLARIS10)
@@ -3234,7 +3393,7 @@ radv_pipeline_generate_vgt_vertex_reuse(struct radeon_cmdbuf *cs,
 	    radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL)->info.tes.spacing == TESS_SPACING_FRACTIONAL_ODD) {
 		vtx_reuse_depth = 14;
 	}
-	radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
+	radeon_set_context_reg(ctx_cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
 			       S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
 }
@@ -3304,38 +3463,46 @@ radv_pipeline_generate_pm4(struct radv_pipeline *pipeline,
 			   const struct radv_gs_state *gs,
 			   unsigned prim, unsigned gs_out)
 {
-	pipeline->cs.buf = malloc(4 * 256);
-	pipeline->cs.max_dw = 256;
-
-	radv_pipeline_generate_depth_stencil_state(&pipeline->cs, pipeline, pCreateInfo, extra);
-	radv_pipeline_generate_blend_state(&pipeline->cs, pipeline, blend);
-	radv_pipeline_generate_raster_state(&pipeline->cs, pipeline, pCreateInfo);
-	radv_pipeline_generate_multisample_state(&pipeline->cs, pipeline);
-	radv_pipeline_generate_vgt_gs_mode(&pipeline->cs, pipeline);
-	radv_pipeline_generate_vertex_shader(&pipeline->cs, pipeline, tess);
-	radv_pipeline_generate_tess_shaders(&pipeline->cs, pipeline, tess);
-	radv_pipeline_generate_geometry_shader(&pipeline->cs, pipeline, gs);
-	radv_pipeline_generate_fragment_shader(&pipeline->cs, pipeline);
-	radv_pipeline_generate_ps_inputs(&pipeline->cs, pipeline);
-	radv_pipeline_generate_vgt_vertex_reuse(&pipeline->cs, pipeline);
-	radv_pipeline_generate_binning_state(&pipeline->cs, pipeline, pCreateInfo);
-
-	radeon_set_context_reg(&pipeline->cs, R_0286E8_SPI_TMPRING_SIZE,
+	struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
+	struct radeon_cmdbuf *cs = &pipeline->cs;
+
+	cs->max_dw = 64;
+	ctx_cs->max_dw = 256;
+	cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
+	ctx_cs->buf = cs->buf + cs->max_dw;
+
+	radv_pipeline_generate_depth_stencil_state(ctx_cs, pipeline, pCreateInfo, extra);
+	radv_pipeline_generate_blend_state(ctx_cs, pipeline, blend);
+	radv_pipeline_generate_raster_state(ctx_cs, pipeline, pCreateInfo);
+	radv_pipeline_generate_multisample_state(ctx_cs, pipeline);
+	radv_pipeline_generate_vgt_gs_mode(ctx_cs, pipeline);
+	radv_pipeline_generate_vertex_shader(ctx_cs, cs, pipeline, tess);
+	radv_pipeline_generate_tess_shaders(ctx_cs, cs, pipeline, tess);
+	radv_pipeline_generate_geometry_shader(ctx_cs, cs, pipeline, gs);
+	radv_pipeline_generate_fragment_shader(ctx_cs, cs, pipeline);
+	radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
+	radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
+	radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo);
+
+	radeon_set_context_reg(ctx_cs, R_0286E8_SPI_TMPRING_SIZE,
 			       S_0286E8_WAVES(pipeline->max_waves) |
 			       S_0286E8_WAVESIZE(pipeline->scratch_bytes_per_wave >> 10));
 
-	radeon_set_context_reg(&pipeline->cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));
+	radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));
 
-	if (pipeline->device->physical_device->rad_info.chip_class >= CIK) {
-		radeon_set_uconfig_reg_idx(&pipeline->cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
+	if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
+		radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
 	} else {
-		radeon_set_config_reg(&pipeline->cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
+		radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
 	}
 
-	radeon_set_context_reg(&pipeline->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);
+	radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);
 
-	radeon_set_context_reg(&pipeline->cs, R_02820C_PA_SC_CLIPRECT_RULE, radv_compute_cliprect_rule(pCreateInfo));
+	radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, radv_compute_cliprect_rule(pCreateInfo));
 
-	assert(pipeline->cs.cdw <= pipeline->cs.max_dw);
+	pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);
+
+	assert(ctx_cs->cdw <= ctx_cs->max_dw);
+	assert(cs->cdw <= cs->max_dw);
 }
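The radv_pipeline_generate_pm4() rewrite above is the heart of this change: context-register writes move to a second stream (ctx_cs) carved out of the same allocation as cs, and ctx_cs is hashed so command-buffer emission can later skip re-sending identical context state. A minimal sketch of the carve-out, under the assumption that the struct mirrors struct radeon_cmdbuf:

    #include <stdint.h>
    #include <stdlib.h>

    struct cmdbuf {
        uint32_t *buf;
        unsigned  cdw;    /* dwords written so far */
        unsigned  max_dw; /* capacity in dwords */
    };

    static int carve_pipeline_streams(struct cmdbuf *cs, struct cmdbuf *ctx_cs)
    {
        cs->max_dw = 64;
        ctx_cs->max_dw = 256;

        /* One allocation backs both streams; ctx_cs starts right after
         * cs's capacity, exactly as in the hunk above. */
        cs->buf = malloc(4 * (cs->max_dw + ctx_cs->max_dw));
        if (!cs->buf)
            return -1;
        ctx_cs->buf = cs->buf + cs->max_dw;

        cs->cdw = 0;
        ctx_cs->cdw = 0;
        return 0;
    }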
@@ -3353,20 +3520,14 @@ radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
 	else
 		ia_multi_vgt_param.primgroup_size = 128; /* recommended without a GS */
 
-	ia_multi_vgt_param.partial_es_wave = false;
-	if (pipeline->device->has_distributed_tess) {
-		if (radv_pipeline_has_gs(pipeline)) {
-			if (device->physical_device->rad_info.chip_class <= VI)
-				ia_multi_vgt_param.partial_es_wave = true;
-		}
-	}
 	/* GS requirement. */
-	if (radv_pipeline_has_gs(pipeline) && device->physical_device->rad_info.chip_class <= VI)
+	ia_multi_vgt_param.partial_es_wave = false;
+	if (radv_pipeline_has_gs(pipeline) && device->physical_device->rad_info.chip_class <= GFX8)
 		if (SI_GS_PER_ES / ia_multi_vgt_param.primgroup_size >= pipeline->device->gs_table_depth - 3)
 			ia_multi_vgt_param.partial_es_wave = true;
 
 	ia_multi_vgt_param.wd_switch_on_eop = false;
-	if (device->physical_device->rad_info.chip_class >= CIK) {
+	if (device->physical_device->rad_info.chip_class >= GFX7) {
 		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
 		 * 4 shader engines. Set 1 to pass the assertion below.
 		 * The other cases are hardware requirements. */
@@ -3378,8 +3539,7 @@ radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
 		    (pipeline->graphics.prim_restart_enable &&
 		     (device->physical_device->rad_info.family < CHIP_POLARIS10 ||
 		      (prim != V_008958_DI_PT_POINTLIST &&
-		       prim != V_008958_DI_PT_LINESTRIP &&
-		       prim != V_008958_DI_PT_TRISTRIP))))
+		       prim != V_008958_DI_PT_LINESTRIP))))
 			ia_multi_vgt_param.wd_switch_on_eop = true;
 	}
@@ -3407,13 +3567,8 @@ radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
 		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
 		if (device->has_distributed_tess) {
 			if (radv_pipeline_has_gs(pipeline)) {
-				if (device->physical_device->rad_info.family == CHIP_TONGA ||
-				    device->physical_device->rad_info.family == CHIP_FIJI ||
-				    device->physical_device->rad_info.family == CHIP_POLARIS10 ||
-				    device->physical_device->rad_info.family == CHIP_POLARIS11 ||
-				    device->physical_device->rad_info.family == CHIP_POLARIS12 ||
-				    device->physical_device->rad_info.family == CHIP_VEGAM)
-					ia_multi_vgt_param.partial_vs_wave = true;
+				if (device->physical_device->rad_info.chip_class <= GFX8)
+					ia_multi_vgt_param.partial_es_wave = true;
 			} else {
 				ia_multi_vgt_param.partial_vs_wave = true;
 			}
@@ -3431,10 +3586,30 @@ radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
 			ia_multi_vgt_param.partial_vs_wave = true;
 	}
 
+	if (radv_pipeline_has_gs(pipeline)) {
+		/* On these chips there is the possibility of a hang if the
+		 * pipeline uses a GS and partial_vs_wave is not set.
+		 *
+		 * This mostly does not hit 4-SE chips, as those typically set
+		 * ia_switch_on_eoi and then partial_vs_wave is set for pipelines
+		 * with GS due to another workaround.
+		 *
+		 * Reproducer: https://bugs.freedesktop.org/show_bug.cgi?id=109242
+		 */
+		if (device->physical_device->rad_info.family == CHIP_TONGA ||
+		    device->physical_device->rad_info.family == CHIP_FIJI ||
+		    device->physical_device->rad_info.family == CHIP_POLARIS10 ||
+		    device->physical_device->rad_info.family == CHIP_POLARIS11 ||
+		    device->physical_device->rad_info.family == CHIP_POLARIS12 ||
+		    device->physical_device->rad_info.family == CHIP_VEGAM) {
+			ia_multi_vgt_param.partial_vs_wave = true;
+		}
+	}
+
 	ia_multi_vgt_param.base =
 		S_028AA8_PRIMGROUP_SIZE(ia_multi_vgt_param.primgroup_size - 1) |
 		/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
-		S_028AA8_MAX_PRIMGRP_IN_WAVE(device->physical_device->rad_info.chip_class == VI ? 2 : 0) |
+		S_028AA8_MAX_PRIMGRP_IN_WAVE(device->physical_device->rad_info.chip_class == GFX8 ? 2 : 0) |
 		S_030960_EN_INST_OPT_BASIC(device->physical_device->rad_info.chip_class >= GFX9) |
 		S_030960_EN_INST_OPT_ADV(device->physical_device->rad_info.chip_class >= GFX9);
@@ -3455,24 +3630,10 @@ radv_compute_vertex_input_state(struct radv_pipeline *pipeline,
 			&vi_info->pVertexAttributeDescriptions[i];
 		unsigned loc = desc->location;
 		const struct vk_format_description *format_desc;
-		int first_non_void;
-		uint32_t num_format, data_format;
-		format_desc = vk_format_description(desc->format);
-		first_non_void = vk_format_get_first_non_void_channel(desc->format);
 
-		num_format = radv_translate_buffer_numformat(format_desc, first_non_void);
-		data_format = radv_translate_buffer_dataformat(format_desc, first_non_void);
+		format_desc = vk_format_description(desc->format);
 
-		velems->rsrc_word3[loc] = S_008F0C_DST_SEL_X(si_map_swizzle(format_desc->swizzle[0])) |
-			S_008F0C_DST_SEL_Y(si_map_swizzle(format_desc->swizzle[1])) |
-			S_008F0C_DST_SEL_Z(si_map_swizzle(format_desc->swizzle[2])) |
-			S_008F0C_DST_SEL_W(si_map_swizzle(format_desc->swizzle[3])) |
-			S_008F0C_NUM_FORMAT(num_format) |
-			S_008F0C_DATA_FORMAT(data_format);
 		velems->format_size[loc] = format_desc->block.bits / 8;
-		velems->offset[loc] = desc->offset;
-		velems->binding[loc] = desc->binding;
-		velems->count = MAX2(velems->count, loc + 1);
 	}
 
 	for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
@@ -3480,7 +3641,25 @@ radv_compute_vertex_input_state(struct radv_pipeline *pipeline,
 			&vi_info->pVertexBindingDescriptions[i];
 
 		pipeline->binding_stride[desc->binding] = desc->stride;
+		pipeline->num_vertex_bindings =
+			MAX2(pipeline->num_vertex_bindings, desc->binding + 1);
+	}
+}
+
+static struct radv_shader_variant *
+radv_pipeline_get_streamout_shader(struct radv_pipeline *pipeline)
+{
+	int i;
+
+	for (i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
+		struct radv_shader_variant *shader =
+			radv_get_shader(pipeline, i);
+
+		if (shader && shader->info.info.so.num_outputs > 0)
+			return shader;
 	}
+
+	return NULL;
 }
 
 static VkResult
@@ -3488,8 +3667,7 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
 		   struct radv_device *device,
 		   struct radv_pipeline_cache *cache,
 		   const VkGraphicsPipelineCreateInfo *pCreateInfo,
-		   const struct radv_graphics_pipeline_create_info *extra,
-		   const VkAllocationCallbacks *alloc)
+		   const struct radv_graphics_pipeline_create_info *extra)
 {
 	VkResult result;
 	bool has_view_index = false;
@@ -3498,8 +3676,6 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
 	struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
 	if (subpass->view_mask)
 		has_view_index = true;
-	if (alloc == NULL)
-		alloc = &device->alloc;
 
 	pipeline->device = device;
 	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
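One detail worth calling out from the vertex-input hunk above: bindings may be declared sparsely and out of order, so the binding count kept on the pipeline is one past the highest index seen, not the array length. As a sketch (names invented):

    #include <stdint.h>

    #define MAX2(a, b) ((a) > (b) ? (a) : (b))

    static uint32_t
    count_vertex_bindings(const uint32_t *binding_indices, uint32_t n)
    {
        uint32_t count = 0;
        for (uint32_t i = 0; i < n; i++)
            count = MAX2(count, binding_indices[i] + 1);
        return count; /* e.g. bindings {0, 5} -> 6 slots */
    }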
@@ -3507,14 +3683,23 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
 
 	struct radv_blend_state blend = radv_pipeline_init_blend_state(pipeline, pCreateInfo, extra);
 
+	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
+		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
+	radv_init_feedback(creation_feedback);
+
+	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;
+
 	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
+	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
 	for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
 		gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
 		pStages[stage] = &pCreateInfo->pStages[i];
+		if(creation_feedback)
+			stage_feedbacks[stage] = &creation_feedback->pPipelineStageCreationFeedbacks[i];
 	}
 
 	struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend, has_view_index);
-	radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags);
+	radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);
 
 	pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
 	radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);
@@ -3598,6 +3783,9 @@ radv_pipeline_init(struct radv_pipeline *pipeline,
 			pipeline->graphics.vtx_emit_num = 2;
 	}
 
+	/* Find the last vertex shader stage that eventually uses streamout. */
+	pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);
+
 	result = radv_pipeline_scratch_init(device, pipeline);
 	radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, &gs, prim, gs_out);
@@ -3624,7 +3812,7 @@ radv_graphics_pipeline_create(
 		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
 	result = radv_pipeline_init(pipeline, device, cache,
-				    pCreateInfo, extra, pAllocator);
+				    pCreateInfo, extra);
 	if (result != VK_SUCCESS) {
 		radv_pipeline_destroy(device, pipeline, pAllocator);
 		return result;
@@ -3697,7 +3885,7 @@ radv_compute_generate_pm4(struct radv_pipeline *pipeline)
 	compute_resource_limits =
 		S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
 
-	if (device->physical_device->rad_info.chip_class >= CIK) {
+	if (device->physical_device->rad_info.chip_class >= GFX7) {
 		unsigned num_cu_per_se =
 			device->physical_device->rad_info.num_good_compute_units /
 			device->physical_device->rad_info.max_se;
@@ -3734,6 +3922,7 @@ static VkResult radv_compute_pipeline_create(
 	RADV_FROM_HANDLE(radv_device, device, _device);
 	RADV_FROM_HANDLE(radv_pipeline_cache, cache, _cache);
 	const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
+	VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
 	struct radv_pipeline *pipeline;
 	VkResult result;
@@ -3746,8 +3935,16 @@ static VkResult radv_compute_pipeline_create(
 	pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
 	assert(pipeline->layout);
 
+	const VkPipelineCreationFeedbackCreateInfoEXT *creation_feedback =
+		vk_find_struct_const(pCreateInfo->pNext, PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT);
+	radv_init_feedback(creation_feedback);
+
+	VkPipelineCreationFeedbackEXT *pipeline_feedback = creation_feedback ? creation_feedback->pPipelineCreationFeedback : NULL;
+	if (creation_feedback)
+		stage_feedbacks[MESA_SHADER_COMPUTE] = &creation_feedback->pPipelineStageCreationFeedbacks[0];
+
 	pStages[MESA_SHADER_COMPUTE] = &pCreateInfo->stage;
-	radv_create_shaders(pipeline, device, cache, &(struct radv_pipeline_key) {0}, pStages, pCreateInfo->flags);
+	radv_create_shaders(pipeline, device, cache, &(struct radv_pipeline_key) {0}, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);
 
 	pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
 	pipeline->need_indirect_descriptor_sets |= pipeline->shaders[MESA_SHADER_COMPUTE]->info.need_indirect_descriptor_sets;
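For context, the application-side shape that both the graphics and compute paths now service: chain a VkPipelineCreationFeedbackCreateInfoEXT into the create info and read the results back. A hedged usage sketch (wrapper name invented; assumes stageCount <= 8):

    #include <stdio.h>
    #include <vulkan/vulkan.h>

    VkResult create_with_feedback(VkDevice dev, VkPipelineCache cache,
                                  VkGraphicsPipelineCreateInfo *info,
                                  VkPipeline *pipeline)
    {
        VkPipelineCreationFeedbackEXT whole = {0};
        VkPipelineCreationFeedbackEXT per_stage[8] = {{0}};
        VkPipelineCreationFeedbackCreateInfoEXT feedback = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT,
            .pNext = info->pNext,
            .pPipelineCreationFeedback = &whole,
            .pipelineStageCreationFeedbackCount = info->stageCount,
            .pPipelineStageCreationFeedbacks = per_stage,
        };

        info->pNext = &feedback;
        VkResult res = vkCreateGraphicsPipelines(dev, cache, 1, info, NULL,
                                                 pipeline);

        if (res == VK_SUCCESS &&
            (whole.flags & VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT))
            printf("pipeline created in %llu ns (cache hit: %s)\n",
                   (unsigned long long)whole.duration,
                   (whole.flags & VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT)
                   ? "yes" : "no");
        return res;
    }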