#include "util/debug.h"
#include "ac_exp_param.h"
#include "ac_shader_util.h"
-#include "main/menums.h"
struct radv_blend_state {
uint32_t blend_enable_4bit;
uint32_t cb_blend_control[8];
uint32_t spi_shader_col_format;
+ uint32_t col_format_is_int8;
+ uint32_t col_format_is_int10;
uint32_t cb_shader_mask;
uint32_t db_alpha_to_mask;
if(pipeline->cs.buf)
free(pipeline->cs.buf);
- vk_free2(&device->alloc, allocator, pipeline);
+
+ vk_object_base_finish(&pipeline->base);
+ vk_free2(&device->vk.alloc, allocator, pipeline);
}
void radv_DestroyPipeline(
hash_flags |= RADV_HASH_SHADER_PS_WAVE32;
if (device->physical_device->ge_wave_size == 32)
hash_flags |= RADV_HASH_SHADER_GE_WAVE32;
- if (device->physical_device->use_aco)
- hash_flags |= RADV_HASH_SHADER_ACO;
+ if (device->physical_device->use_llvm)
+ hash_flags |= RADV_HASH_SHADER_LLVM;
return hash_flags;
}
}
}
-static unsigned si_choose_spi_color_format(VkFormat vk_format,
- bool blend_enable,
- bool blend_need_alpha)
+static unsigned radv_choose_spi_color_format(VkFormat vk_format,
+ bool blend_enable,
+ bool blend_need_alpha)
{
const struct vk_format_description *desc = vk_format_description(vk_format);
+ struct ac_spi_color_formats formats = {};
unsigned format, ntype, swap;
- /* Alpha is needed for alpha-to-coverage.
- * Blending may be with or without alpha.
- */
- unsigned normal = 0; /* most optimal, may not support blending or export alpha */
- unsigned alpha = 0; /* exports alpha, but may not support blending */
- unsigned blend = 0; /* supports blending, but may not export alpha */
- unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */
-
format = radv_translate_colorformat(vk_format);
ntype = radv_translate_color_numformat(vk_format, desc,
vk_format_get_first_non_void_channel(vk_format));
swap = radv_translate_colorswap(vk_format, false);
- /* Choose the SPI color formats. These are required values for Stoney/RB+.
- * Other chips have multiple choices, though they are not necessarily better.
- */
- switch (format) {
- case V_028C70_COLOR_5_6_5:
- case V_028C70_COLOR_1_5_5_5:
- case V_028C70_COLOR_5_5_5_1:
- case V_028C70_COLOR_4_4_4_4:
- case V_028C70_COLOR_10_11_11:
- case V_028C70_COLOR_11_11_10:
- case V_028C70_COLOR_8:
- case V_028C70_COLOR_8_8:
- case V_028C70_COLOR_8_8_8_8:
- case V_028C70_COLOR_10_10_10_2:
- case V_028C70_COLOR_2_10_10_10:
- if (ntype == V_028C70_NUMBER_UINT)
- alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
- else if (ntype == V_028C70_NUMBER_SINT)
- alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
- else
- alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
- break;
+ ac_choose_spi_color_formats(format, swap, ntype, false, &formats);
- case V_028C70_COLOR_16:
- case V_028C70_COLOR_16_16:
- case V_028C70_COLOR_16_16_16_16:
- if (ntype == V_028C70_NUMBER_UNORM ||
- ntype == V_028C70_NUMBER_SNORM) {
- /* UNORM16 and SNORM16 don't support blending */
- if (ntype == V_028C70_NUMBER_UNORM)
- normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
- else
- normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;
-
- /* Use 32 bits per channel for blending. */
- if (format == V_028C70_COLOR_16) {
- if (swap == V_028C70_SWAP_STD) { /* R */
- blend = V_028714_SPI_SHADER_32_R;
- blend_alpha = V_028714_SPI_SHADER_32_AR;
- } else if (swap == V_028C70_SWAP_ALT_REV) /* A */
- blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
- else
- assert(0);
- } else if (format == V_028C70_COLOR_16_16) {
- if (swap == V_028C70_SWAP_STD) { /* RG */
- blend = V_028714_SPI_SHADER_32_GR;
- blend_alpha = V_028714_SPI_SHADER_32_ABGR;
- } else if (swap == V_028C70_SWAP_ALT) /* RA */
- blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
- else
- assert(0);
- } else /* 16_16_16_16 */
- blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
- } else if (ntype == V_028C70_NUMBER_UINT)
- alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
- else if (ntype == V_028C70_NUMBER_SINT)
- alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
- else if (ntype == V_028C70_NUMBER_FLOAT)
- alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
- else
- assert(0);
- break;
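+ /* ac_choose_spi_color_formats() fills in four variants:
+ * normal (most optimal, may not support blending or alpha export),
+ * alpha (exports alpha, may not support blending),
+ * blend (supports blending, may not export alpha) and
+ * blend_alpha (supports blending and exports alpha).
+ */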
+ if (blend_enable && blend_need_alpha)
+ return formats.blend_alpha;
+ else if (blend_need_alpha)
+ return formats.alpha;
+ else if (blend_enable)
+ return formats.blend;
+ else
+ return formats.normal;
+}
- case V_028C70_COLOR_32:
- if (swap == V_028C70_SWAP_STD) { /* R */
- blend = normal = V_028714_SPI_SHADER_32_R;
- alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
- } else if (swap == V_028C70_SWAP_ALT_REV) /* A */
- alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
- else
- assert(0);
- break;
+static bool
+format_is_int8(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
+ int channel = vk_format_get_first_non_void_channel(format);
- case V_028C70_COLOR_32_32:
- if (swap == V_028C70_SWAP_STD) { /* RG */
- blend = normal = V_028714_SPI_SHADER_32_GR;
- alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
- } else if (swap == V_028C70_SWAP_ALT) /* RA */
- alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
- else
- assert(0);
- break;
+ return channel >= 0 && desc->channel[channel].pure_integer &&
+ desc->channel[channel].size == 8;
+}
- case V_028C70_COLOR_32_32_32_32:
- case V_028C70_COLOR_8_24:
- case V_028C70_COLOR_24_8:
- case V_028C70_COLOR_X24_8_32_FLOAT:
- alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
- break;
+static bool
+format_is_int10(VkFormat format)
+{
+ const struct vk_format_description *desc = vk_format_description(format);
- default:
- unreachable("unhandled blend format");
+ if (desc->nr_channels != 4)
+ return false;
+ for (unsigned i = 0; i < 4; i++) {
+ if (desc->channel[i].pure_integer && desc->channel[i].size == 10)
+ return true;
}
-
- if (blend_enable && blend_need_alpha)
- return blend_alpha;
- else if(blend_need_alpha)
- return alpha;
- else if(blend_enable)
- return blend;
- else
- return normal;
+ return false;
}
static void
{
RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
- unsigned col_format = 0;
+ unsigned col_format = 0, is_int8 = 0, is_int10 = 0;
unsigned num_targets;
for (unsigned i = 0; i < (blend->single_cb_enable ? 1 : subpass->color_count); ++i) {
unsigned cf;
- if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
+ if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED ||
+ !(blend->cb_target_mask & (0xfu << (i * 4)))) {
cf = V_028714_SPI_SHADER_ZERO;
} else {
struct radv_render_pass_attachment *attachment = pass->attachments + subpass->color_attachments[i].attachment;
bool blend_enable =
blend->blend_enable_4bit & (0xfu << (i * 4));
- cf = si_choose_spi_color_format(attachment->format,
- blend_enable,
- blend->need_src_alpha & (1 << i));
+ cf = radv_choose_spi_color_format(attachment->format,
+ blend_enable,
+ blend->need_src_alpha & (1 << i));
+
+ if (format_is_int8(attachment->format))
+ is_int8 |= 1 << i;
+ if (format_is_int10(attachment->format))
+ is_int10 |= 1 << i;
}
col_format |= cf << (4 * i);
if (blend->mrt0_is_dual_src)
col_format |= (col_format & 0xf) << 4;
- blend->cb_shader_mask = ac_get_cb_shader_mask(col_format);
blend->spi_shader_col_format = col_format;
-}
-
-static bool
-format_is_int8(VkFormat format)
-{
- const struct vk_format_description *desc = vk_format_description(format);
- int channel = vk_format_get_first_non_void_channel(format);
-
- return channel >= 0 && desc->channel[channel].pure_integer &&
- desc->channel[channel].size == 8;
-}
-
-static bool
-format_is_int10(VkFormat format)
-{
- const struct vk_format_description *desc = vk_format_description(format);
-
- if (desc->nr_channels != 4)
- return false;
- for (unsigned i = 0; i < 4; i++) {
- if (desc->channel[i].pure_integer && desc->channel[i].size == 10)
- return true;
- }
- return false;
+ blend->col_format_is_int8 = is_int8;
+ blend->col_format_is_int10 = is_int10;
}
/*
unsigned radv_format_meta_fs_key(VkFormat format)
{
- unsigned col_format = si_choose_spi_color_format(format, false, false);
+ unsigned col_format = radv_choose_spi_color_format(format, false, false);
assert(col_format != V_028714_SPI_SHADER_32_AR);
if (col_format >= V_028714_SPI_SHADER_32_AR)
return col_format + (is_int8 ? 3 : is_int10 ? 5 : 0);
}
-static void
-radv_pipeline_compute_get_int_clamp(const VkGraphicsPipelineCreateInfo *pCreateInfo,
- unsigned *is_int8, unsigned *is_int10)
-{
- RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
- struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
- *is_int8 = 0;
- *is_int10 = 0;
-
- for (unsigned i = 0; i < subpass->color_count; ++i) {
- struct radv_render_pass_attachment *attachment;
-
- if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
- continue;
-
- attachment = pass->attachments + subpass->color_attachments[i].attachment;
-
- if (format_is_int8(attachment->format))
- *is_int8 |= 1 << i;
- if (format_is_int10(attachment->format))
- *is_int10 |= 1 << i;
- }
-}
-
static void
radv_blend_check_commutativity(struct radv_blend_state *blend,
VkBlendOp op, VkBlendFactor src,
return blend;
}
-static uint32_t si_translate_stencil_op(enum VkStencilOp op)
-{
- switch (op) {
- case VK_STENCIL_OP_KEEP:
- return V_02842C_STENCIL_KEEP;
- case VK_STENCIL_OP_ZERO:
- return V_02842C_STENCIL_ZERO;
- case VK_STENCIL_OP_REPLACE:
- return V_02842C_STENCIL_REPLACE_TEST;
- case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
- return V_02842C_STENCIL_ADD_CLAMP;
- case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
- return V_02842C_STENCIL_SUB_CLAMP;
- case VK_STENCIL_OP_INVERT:
- return V_02842C_STENCIL_INVERT;
- case VK_STENCIL_OP_INCREMENT_AND_WRAP:
- return V_02842C_STENCIL_ADD_WRAP;
- case VK_STENCIL_OP_DECREMENT_AND_WRAP:
- return V_02842C_STENCIL_SUB_WRAP;
- default:
- return 0;
- }
-}
-
static uint32_t si_translate_fill(VkPolygonMode func)
{
switch(func) {
}
if (vkms->sampleShadingEnable) {
- ps_iter_samples = ceil(vkms->minSampleShading * num_samples);
+ ps_iter_samples = ceilf(vkms->minSampleShading * num_samples);
ps_iter_samples = util_next_power_of_two(ps_iter_samples);
}
return ps_iter_samples;
radv_order_invariant_stencil_op(state->failOp));
}
+static bool
+radv_pipeline_has_dynamic_ds_states(const VkGraphicsPipelineCreateInfo *pCreateInfo)
+{
+ VkDynamicState ds_states[] = {
+ VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
+ VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
+ VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT,
+ VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,
+ VK_DYNAMIC_STATE_STENCIL_OP_EXT,
+ };
+
+ if (pCreateInfo->pDynamicState) {
+ uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
+ for (uint32_t i = 0; i < count; i++) {
+ for (uint32_t j = 0; j < ARRAY_SIZE(ds_states); j++) {
+ if (pCreateInfo->pDynamicState->pDynamicStates[i] == ds_states[j])
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
static bool
radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
struct radv_blend_state *blend,
if (colormask && vkblend && vkblend->logicOpEnable)
return false;
+ /* Be conservative if an extended dynamic depth/stencil state is
+ * enabled because the driver can't update out-of-order rasterization
+ * dynamically.
+ */
+ if (radv_pipeline_has_dynamic_ds_states(pCreateInfo))
+ return false;
+
/* Default depth/stencil invariance when no attachment is bound. */
struct radv_dsa_order_invariance dsa_order_invariant = {
.zs = true, .pass_set = true
radv_pipeline_out_of_order_rast(pipeline, blend, pCreateInfo);
}
- ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
ms->pa_sc_aa_config = 0;
ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
S_028804_INCOHERENT_EQAA_READS(1) |
unsigned log_z_samples = util_logbase2(z_samples);
unsigned log_ps_iter_samples = util_logbase2(ps_iter_samples);
ms->pa_sc_mode_cntl_0 |= S_028A48_MSAA_ENABLE(1);
- ms->pa_sc_line_cntl |= S_028BDC_EXPAND_LINE_WIDTH(1); /* CM_R_028BDC_PA_SC_LINE_CNTL */
ms->db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_z_samples) |
S_028804_PS_ITER_SAMPLES(log_ps_iter_samples) |
S_028804_MASK_EXPORT_NUM_SAMPLES(log_samples) |
S_028804_ALPHA_TO_MASK_NUM_SAMPLES(log_samples);
ms->pa_sc_aa_config |= S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
S_028BE0_MAX_SAMPLE_DIST(radv_get_default_max_sample_dist(log_samples)) |
- S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples); /* CM_R_028BE0_PA_SC_AA_CONFIG */
+ S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples) | /* CM_R_028BE0_PA_SC_AA_CONFIG */
+ S_028BE0_COVERED_CENTROID_IS_CENTER_GFX103(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3);
ms->pa_sc_mode_cntl_1 |= S_028A4C_PS_ITER_SAMPLE(ps_iter_samples > 1);
if (ps_iter_samples > 1)
pipeline->graphics.spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(2);
}
}
-static uint32_t
-si_translate_prim(enum VkPrimitiveTopology topology)
-{
- switch (topology) {
- case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
- return V_008958_DI_PT_POINTLIST;
- case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
- return V_008958_DI_PT_LINELIST;
- case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
- return V_008958_DI_PT_LINESTRIP;
- case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
- return V_008958_DI_PT_TRILIST;
- case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
- return V_008958_DI_PT_TRISTRIP;
- case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
- return V_008958_DI_PT_TRIFAN;
- case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
- return V_008958_DI_PT_LINELIST_ADJ;
- case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
- return V_008958_DI_PT_LINESTRIP_ADJ;
- case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
- return V_008958_DI_PT_TRILIST_ADJ;
- case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
- return V_008958_DI_PT_TRISTRIP_ADJ;
- case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
- return V_008958_DI_PT_PATCH;
- default:
- assert(0);
- return 0;
- }
-}
-
static uint32_t
si_conv_gl_prim_to_gs_out(unsigned gl_prim)
{
{
switch(state) {
case VK_DYNAMIC_STATE_VIEWPORT:
+ case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
return RADV_DYNAMIC_VIEWPORT;
case VK_DYNAMIC_STATE_SCISSOR:
+ case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
return RADV_DYNAMIC_SCISSOR;
case VK_DYNAMIC_STATE_LINE_WIDTH:
return RADV_DYNAMIC_LINE_WIDTH;
return RADV_DYNAMIC_SAMPLE_LOCATIONS;
case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
return RADV_DYNAMIC_LINE_STIPPLE;
+ case VK_DYNAMIC_STATE_CULL_MODE_EXT:
+ return RADV_DYNAMIC_CULL_MODE;
+ case VK_DYNAMIC_STATE_FRONT_FACE_EXT:
+ return RADV_DYNAMIC_FRONT_FACE;
+ case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
+ return RADV_DYNAMIC_PRIMITIVE_TOPOLOGY;
+ case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:
+ return RADV_DYNAMIC_DEPTH_TEST_ENABLE;
+ case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:
+ return RADV_DYNAMIC_DEPTH_WRITE_ENABLE;
+ case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:
+ return RADV_DYNAMIC_DEPTH_COMPARE_OP;
+ case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:
+ return RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
+ case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:
+ return RADV_DYNAMIC_STENCIL_TEST_ENABLE;
+ case VK_DYNAMIC_STATE_STENCIL_OP_EXT:
+ return RADV_DYNAMIC_STENCIL_OP;
+ case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:
+ return RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;
default:
unreachable("Unhandled dynamic state");
}
{
uint32_t states = RADV_DYNAMIC_ALL;
- /* If rasterization is disabled we do not care about any of the dynamic states,
- * since they are all rasterization related only. */
+ /* If rasterization is disabled we do not care about any of the
+ * dynamic states, since they are all rasterization related,
+ * except for primitive topology and vertex input binding stride.
+ */
if (pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
- return 0;
+ return RADV_DYNAMIC_PRIMITIVE_TOPOLOGY |
+ RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;
if (!pCreateInfo->pRasterizationState->depthBiasEnable)
states &= ~RADV_DYNAMIC_DEPTH_BIAS;
static void
radv_pipeline_init_dynamic_state(struct radv_pipeline *pipeline,
- const VkGraphicsPipelineCreateInfo *pCreateInfo)
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const struct radv_graphics_pipeline_create_info *extra)
{
uint32_t needed_states = radv_pipeline_needed_dynamic_state(pCreateInfo);
uint32_t states = needed_states;
pCreateInfo->pColorBlendState->blendConstants, 4);
}
+ if (states & RADV_DYNAMIC_CULL_MODE) {
+ dynamic->cull_mode =
+ pCreateInfo->pRasterizationState->cullMode;
+ }
+
+ if (states & RADV_DYNAMIC_FRONT_FACE) {
+ dynamic->front_face =
+ pCreateInfo->pRasterizationState->frontFace;
+ }
+
+ if (states & RADV_DYNAMIC_PRIMITIVE_TOPOLOGY) {
+ dynamic->primitive_topology =
+ si_translate_prim(pCreateInfo->pInputAssemblyState->topology);
+ if (extra && extra->use_rectlist) {
+ dynamic->primitive_topology = V_008958_DI_PT_RECTLIST;
+ }
+ }
+
/* If there is no depthstencil attachment, then don't read
* pDepthStencilState. The Vulkan spec states that pDepthStencilState may
* be NULL in this case. Even if pDepthStencilState is non-NULL, there is
dynamic->stencil_reference.back =
pCreateInfo->pDepthStencilState->back.reference;
}
+
+ if (states & RADV_DYNAMIC_DEPTH_TEST_ENABLE) {
+ dynamic->depth_test_enable =
+ pCreateInfo->pDepthStencilState->depthTestEnable;
+ }
+
+ if (states & RADV_DYNAMIC_DEPTH_WRITE_ENABLE) {
+ dynamic->depth_write_enable =
+ pCreateInfo->pDepthStencilState->depthWriteEnable;
+ }
+
+ if (states & RADV_DYNAMIC_DEPTH_COMPARE_OP) {
+ dynamic->depth_compare_op =
+ pCreateInfo->pDepthStencilState->depthCompareOp;
+ }
+
+ if (states & RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) {
+ dynamic->depth_bounds_test_enable =
+ pCreateInfo->pDepthStencilState->depthBoundsTestEnable;
+ }
+
+ if (states & RADV_DYNAMIC_STENCIL_TEST_ENABLE) {
+ dynamic->stencil_test_enable =
+ pCreateInfo->pDepthStencilState->stencilTestEnable;
+ }
+
+ if (states & RADV_DYNAMIC_STENCIL_OP) {
+ dynamic->stencil_op.front.compare_op =
+ pCreateInfo->pDepthStencilState->front.compareOp;
+ dynamic->stencil_op.front.fail_op =
+ pCreateInfo->pDepthStencilState->front.failOp;
+ dynamic->stencil_op.front.pass_op =
+ pCreateInfo->pDepthStencilState->front.passOp;
+ dynamic->stencil_op.front.depth_fail_op =
+ pCreateInfo->pDepthStencilState->front.depthFailOp;
+
+ dynamic->stencil_op.back.compare_op =
+ pCreateInfo->pDepthStencilState->back.compareOp;
+ dynamic->stencil_op.back.fail_op =
+ pCreateInfo->pDepthStencilState->back.failOp;
+ dynamic->stencil_op.back.pass_op =
+ pCreateInfo->pDepthStencilState->back.passOp;
+ dynamic->stencil_op.back.depth_fail_op =
+ pCreateInfo->pDepthStencilState->back.depthFailOp;
+ }
}
const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
dynamic->line_stipple.pattern = rast_line_info->lineStipplePattern;
}
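+ /* The stride bit is cleared from the states mask when the application
+ * lists VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT, so its
+ * absence here means the stride comes from vkCmdBindVertexBuffers2EXT.
+ */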
+ if (!(states & RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE))
+ pipeline->graphics.uses_dynamic_stride = true;
+
pipeline->dynamic_state.mask = states;
}
return tess;
}
-static const struct radv_prim_vertex_count prim_size_table[] = {
- [V_008958_DI_PT_NONE] = {0, 0},
- [V_008958_DI_PT_POINTLIST] = {1, 1},
- [V_008958_DI_PT_LINELIST] = {2, 2},
- [V_008958_DI_PT_LINESTRIP] = {2, 1},
- [V_008958_DI_PT_TRILIST] = {3, 3},
- [V_008958_DI_PT_TRIFAN] = {3, 1},
- [V_008958_DI_PT_TRISTRIP] = {3, 1},
- [V_008958_DI_PT_LINELIST_ADJ] = {4, 4},
- [V_008958_DI_PT_LINESTRIP_ADJ] = {4, 1},
- [V_008958_DI_PT_TRILIST_ADJ] = {6, 6},
- [V_008958_DI_PT_TRISTRIP_ADJ] = {6, 2},
- [V_008958_DI_PT_RECTLIST] = {3, 3},
- [V_008958_DI_PT_LINELOOP] = {2, 1},
- [V_008958_DI_PT_POLYGON] = {3, 1},
- [V_008958_DI_PT_2D_TRI_STRIP] = {0, 0},
-};
-
static const struct radv_vs_output_info *get_vs_output_info(const struct radv_pipeline *pipeline)
{
if (radv_pipeline_has_gs(pipeline))
radv_optimize_nir(ordered_shaders[i - 1], false, false);
nir_remove_dead_variables(ordered_shaders[i],
- nir_var_shader_out);
+ nir_var_shader_out, NULL);
nir_remove_dead_variables(ordered_shaders[i - 1],
- nir_var_shader_in);
+ nir_var_shader_in, NULL);
bool progress = nir_remove_unused_varyings(ordered_shaders[i],
ordered_shaders[i - 1]);
}
}
+static void
+radv_set_linked_driver_locations(struct radv_pipeline *pipeline, nir_shader **shaders,
+ struct radv_shader_info infos[MESA_SHADER_STAGES])
+{
+ bool has_tess = shaders[MESA_SHADER_TESS_CTRL];
+ bool has_gs = shaders[MESA_SHADER_GEOMETRY];
+
+ if (!has_tess && !has_gs)
+ return;
+
+ unsigned vs_info_idx = MESA_SHADER_VERTEX;
+ unsigned tes_info_idx = MESA_SHADER_TESS_EVAL;
+
+ if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
+ /* These are merged into the next stage */
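+ /* e.g. with tessellation, the hardware VS is merged into the TCS,
+ * so its linked IO counts are stored under MESA_SHADER_TESS_CTRL.
+ */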
+ vs_info_idx = has_tess ? MESA_SHADER_TESS_CTRL : MESA_SHADER_GEOMETRY;
+ tes_info_idx = has_gs ? MESA_SHADER_GEOMETRY : MESA_SHADER_TESS_EVAL;
+ }
+
+ if (has_tess) {
+ nir_linked_io_var_info vs2tcs =
+ nir_assign_linked_io_var_locations(shaders[MESA_SHADER_VERTEX], shaders[MESA_SHADER_TESS_CTRL]);
+ nir_linked_io_var_info tcs2tes =
+ nir_assign_linked_io_var_locations(shaders[MESA_SHADER_TESS_CTRL], shaders[MESA_SHADER_TESS_EVAL]);
+
+ infos[vs_info_idx].vs.num_linked_outputs = vs2tcs.num_linked_io_vars;
+ infos[MESA_SHADER_TESS_CTRL].tcs.num_linked_inputs = vs2tcs.num_linked_io_vars;
+ infos[MESA_SHADER_TESS_CTRL].tcs.num_linked_outputs = tcs2tes.num_linked_io_vars;
+ infos[MESA_SHADER_TESS_CTRL].tcs.num_linked_patch_outputs = tcs2tes.num_linked_patch_io_vars;
+ infos[tes_info_idx].tes.num_linked_inputs = tcs2tes.num_linked_io_vars;
+ infos[tes_info_idx].tes.num_linked_patch_inputs = tcs2tes.num_linked_patch_io_vars;
+
+ if (has_gs) {
+ nir_linked_io_var_info tes2gs =
+ nir_assign_linked_io_var_locations(shaders[MESA_SHADER_TESS_EVAL], shaders[MESA_SHADER_GEOMETRY]);
+
+ infos[tes_info_idx].tes.num_linked_outputs = tes2gs.num_linked_io_vars;
+ infos[MESA_SHADER_GEOMETRY].gs.num_linked_inputs = tes2gs.num_linked_io_vars;
+ }
+ } else if (has_gs) {
+ nir_linked_io_var_info vs2gs =
+ nir_assign_linked_io_var_locations(shaders[MESA_SHADER_VERTEX], shaders[MESA_SHADER_GEOMETRY]);
+
+ infos[vs_info_idx].vs.num_linked_outputs = vs2gs.num_linked_io_vars;
+ infos[MESA_SHADER_GEOMETRY].gs.num_linked_inputs = vs2gs.num_linked_io_vars;
+ }
+}
+
static uint32_t
radv_get_attrib_stride(const VkPipelineVertexInputStateCreateInfo *input_state,
uint32_t attrib_binding)
}
key.col_format = blend->spi_shader_col_format;
- if (pipeline->device->physical_device->rad_info.chip_class < GFX8)
- radv_pipeline_compute_get_int_clamp(pCreateInfo, &key.is_int8, &key.is_int10);
+ key.is_dual_src = blend->mrt0_is_dual_src;
+ if (pipeline->device->physical_device->rad_info.chip_class < GFX8) {
+ key.is_int8 = blend->col_format_is_int8;
+ key.is_int10 = blend->col_format_is_int10;
+ }
if (pipeline->device->physical_device->rad_info.chip_class >= GFX10)
key.topology = pCreateInfo->pInputAssemblyState->topology;
keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
}
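+ /* When NGG isn't used for geometry shaders, fall back to the legacy
+ * GS path by disabling NGG on the stage that feeds the GS.
+ */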
+ if (!device->physical_device->use_ngg_gs) {
+ if (nir[MESA_SHADER_GEOMETRY]) {
+ if (nir[MESA_SHADER_TESS_CTRL])
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.as_ngg = false;
+ else
+ keys[MESA_SHADER_VERTEX].vs_common_out.as_ngg = false;
+ }
+ }
+
gl_shader_stage last_xfb_stage = MESA_SHADER_VERTEX;
for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
keys[MESA_SHADER_FRAGMENT].fs.is_int10 = key->is_int10;
keys[MESA_SHADER_FRAGMENT].fs.log2_ps_iter_samples = key->log2_ps_iter_samples;
keys[MESA_SHADER_FRAGMENT].fs.num_samples = key->num_samples;
+ keys[MESA_SHADER_FRAGMENT].fs.is_dual_src = key->is_dual_src;
if (nir[MESA_SHADER_COMPUTE]) {
keys[MESA_SHADER_COMPUTE].cs.subgroup_size = key->compute_subgroup_size;
radv_nir_shader_info_pass(nir[MESA_SHADER_FRAGMENT],
pipeline->layout,
&keys[MESA_SHADER_FRAGMENT],
- &infos[MESA_SHADER_FRAGMENT]);
+ &infos[MESA_SHADER_FRAGMENT],
+ pipeline->device->physical_device->use_llvm);
/* TODO: These are no longer used as keys; we should refactor this. */
keys[MESA_SHADER_VERTEX].vs_common_out.export_prim_id =
infos[MESA_SHADER_FRAGMENT].ps.layer_input;
keys[MESA_SHADER_VERTEX].vs_common_out.export_clip_dists =
!!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;
+ keys[MESA_SHADER_VERTEX].vs_common_out.export_viewport_index =
+ infos[MESA_SHADER_FRAGMENT].ps.viewport_index_input;
keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_prim_id =
infos[MESA_SHADER_FRAGMENT].ps.prim_id_input;
keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_layer_id =
infos[MESA_SHADER_FRAGMENT].ps.layer_input;
keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_clip_dists =
!!infos[MESA_SHADER_FRAGMENT].ps.num_input_clips_culls;
+ keys[MESA_SHADER_TESS_EVAL].vs_common_out.export_viewport_index =
+ infos[MESA_SHADER_FRAGMENT].ps.viewport_index_input;
/* NGG passthrough mode can't be enabled for vertex shaders
* that export the primitive ID.
filled_stages |= (1 << MESA_SHADER_FRAGMENT);
}
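+ /* Pass which TES inputs are actually read (per-vertex and per-patch)
+ * to the TCS, so stores for TCS outputs that the TES never reads can
+ * be skipped.
+ */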
+ if (nir[MESA_SHADER_TESS_CTRL]) {
+ infos[MESA_SHADER_TESS_CTRL].tcs.tes_inputs_read =
+ nir[MESA_SHADER_TESS_EVAL]->info.inputs_read;
+ infos[MESA_SHADER_TESS_CTRL].tcs.tes_patch_inputs_read =
+ nir[MESA_SHADER_TESS_EVAL]->info.patch_inputs_read;
+ }
+
if (pipeline->device->physical_device->rad_info.chip_class >= GFX9 &&
nir[MESA_SHADER_TESS_CTRL]) {
struct nir_shader *combined_nir[] = {nir[MESA_SHADER_VERTEX], nir[MESA_SHADER_TESS_CTRL]};
for (int i = 0; i < 2; i++) {
radv_nir_shader_info_pass(combined_nir[i],
pipeline->layout, &key,
- &infos[MESA_SHADER_TESS_CTRL]);
+ &infos[MESA_SHADER_TESS_CTRL],
+ pipeline->device->physical_device->use_llvm);
}
keys[MESA_SHADER_TESS_EVAL].tes.num_patches =
radv_nir_shader_info_pass(combined_nir[i],
pipeline->layout,
&keys[pre_stage],
- &infos[MESA_SHADER_GEOMETRY]);
+ &infos[MESA_SHADER_GEOMETRY],
+ pipeline->device->physical_device->use_llvm);
}
filled_stages |= (1 << pre_stage);
radv_nir_shader_info_init(&infos[i]);
radv_nir_shader_info_pass(nir[i], pipeline->layout,
- &keys[i], &infos[i]);
+ &keys[i], &infos[i], pipeline->device->physical_device->use_llvm);
}
for (int i = 0; i < MESA_SHADER_STAGES; i++) {
(cache_hit ? VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT : 0);
}
-void radv_create_shaders(struct radv_pipeline *pipeline,
- struct radv_device *device,
- struct radv_pipeline_cache *cache,
- const struct radv_pipeline_key *key,
- const VkPipelineShaderStageCreateInfo **pStages,
- const VkPipelineCreateFlags flags,
- VkPipelineCreationFeedbackEXT *pipeline_feedback,
- VkPipelineCreationFeedbackEXT **stage_feedbacks)
+VkResult radv_create_shaders(struct radv_pipeline *pipeline,
+ struct radv_device *device,
+ struct radv_pipeline_cache *cache,
+ const struct radv_pipeline_key *key,
+ const VkPipelineShaderStageCreateInfo **pStages,
+ const VkPipelineCreateFlags flags,
+ VkPipelineCreationFeedbackEXT *pipeline_feedback,
+ VkPipelineCreationFeedbackEXT **stage_feedbacks)
{
struct radv_shader_module fs_m = {0};
struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
struct radv_shader_info infos[MESA_SHADER_STAGES] = {0};
unsigned char hash[20], gs_copy_hash[20];
bool keep_executable_info = (flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) || device->keep_shader_info;
+ bool keep_statistic_info = (flags & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR) ||
+ (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS) ||
+ device->keep_shader_info;
radv_start_feedback(pipeline_feedback);
gs_copy_hash[0] ^= 1;
bool found_in_application_cache = true;
- if (modules[MESA_SHADER_GEOMETRY] && !keep_executable_info) {
+ if (modules[MESA_SHADER_GEOMETRY] && !keep_executable_info && !keep_statistic_info) {
struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants,
&found_in_application_cache);
pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
}
- if (!keep_executable_info &&
+ if (!keep_executable_info && !keep_statistic_info &&
radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
&found_in_application_cache) &&
(!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader)) {
radv_stop_feedback(pipeline_feedback, found_in_application_cache);
- return;
+ return VK_SUCCESS;
+ }
+
+ if (flags & VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT) {
+ radv_stop_feedback(pipeline_feedback, found_in_application_cache);
+ return VK_PIPELINE_COMPILE_REQUIRED_EXT;
}
if (!modules[MESA_SHADER_FRAGMENT] && !modules[MESA_SHADER_COMPUTE]) {
if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
radv_link_shaders(pipeline, nir);
+ radv_set_linked_driver_locations(pipeline, nir, infos);
+
for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
if (nir[i]) {
- if (device->physical_device->use_aco) {
+ /* do this again since information such as outputs_read can be out-of-date */
+ nir_shader_gather_info(nir[i], nir_shader_get_entrypoint(nir[i]));
+
+ if (device->physical_device->use_llvm) {
+ NIR_PASS_V(nir[i], nir_lower_bool_to_int32);
+ } else {
NIR_PASS_V(nir[i], nir_lower_non_uniform_access,
nir_lower_non_uniform_ubo_access |
nir_lower_non_uniform_ssbo_access |
nir_lower_non_uniform_texture_access |
nir_lower_non_uniform_image_access);
- } else
- NIR_PASS_V(nir[i], nir_lower_bool_to_int32);
+ }
}
}
radv_nir_shader_info_pass(nir[MESA_SHADER_GEOMETRY],
pipeline->layout, &key,
- &info);
+ &info, pipeline->device->physical_device->use_llvm);
info.wave_size = 64; /* Wave32 not supported. */
info.ballot_bit_size = 64;
pipeline->gs_copy_shader = radv_create_gs_copy_shader(
device, nir[MESA_SHADER_GEOMETRY], &info,
- &gs_copy_binary, keep_executable_info,
+ &gs_copy_binary, keep_executable_info, keep_statistic_info,
keys[MESA_SHADER_GEOMETRY].has_multiview_view_index);
}
- if (!keep_executable_info && pipeline->gs_copy_shader) {
+ if (!keep_executable_info && !keep_statistic_info && pipeline->gs_copy_shader) {
struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
radv_shader_variant_compile(device, modules[MESA_SHADER_FRAGMENT], &nir[MESA_SHADER_FRAGMENT], 1,
pipeline->layout, keys + MESA_SHADER_FRAGMENT,
infos + MESA_SHADER_FRAGMENT,
- keep_executable_info,
+ keep_executable_info, keep_statistic_info,
&binaries[MESA_SHADER_FRAGMENT]);
radv_stop_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT], false);
pipeline->shaders[MESA_SHADER_TESS_CTRL] = radv_shader_variant_compile(device, modules[MESA_SHADER_TESS_CTRL], combined_nir, 2,
pipeline->layout,
&key, &infos[MESA_SHADER_TESS_CTRL], keep_executable_info,
- &binaries[MESA_SHADER_TESS_CTRL]);
+ keep_statistic_info, &binaries[MESA_SHADER_TESS_CTRL]);
radv_stop_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL], false);
}
pipeline->shaders[MESA_SHADER_GEOMETRY] = radv_shader_variant_compile(device, modules[MESA_SHADER_GEOMETRY], combined_nir, 2,
pipeline->layout,
&keys[pre_stage], &infos[MESA_SHADER_GEOMETRY], keep_executable_info,
- &binaries[MESA_SHADER_GEOMETRY]);
+ keep_statistic_info, &binaries[MESA_SHADER_GEOMETRY]);
radv_stop_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY], false);
}
pipeline->shaders[i] = radv_shader_variant_compile(device, modules[i], &nir[i], 1,
pipeline->layout,
- keys + i, infos + i,keep_executable_info,
- &binaries[i]);
+ keys + i, infos + i, keep_executable_info,
+ keep_statistic_info, &binaries[i]);
radv_stop_feedback(stage_feedbacks[i], false);
}
}
- if (!keep_executable_info) {
+ if (!keep_executable_info && !keep_statistic_info) {
radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
binaries);
}
ralloc_free(fs_m.nir);
radv_stop_feedback(pipeline_feedback, false);
+ return VK_SUCCESS;
}
static uint32_t
struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
struct radv_render_pass_attachment *attachment = NULL;
- uint32_t db_depth_control = 0, db_stencil_control = 0;
+ uint32_t db_depth_control = 0;
uint32_t db_render_control = 0, db_render_override2 = 0;
uint32_t db_render_override = 0;
/* From amdvlk: for 4xAA and 8xAA we need to decompress on flush for better performance. */
db_render_override2 |= S_028010_DECOMPRESS_Z_ON_FLUSH(attachment->samples > 2);
+
+ if (pipeline->device->physical_device->rad_info.chip_class >= GFX10_3)
+ db_render_override2 |= S_028010_CENTROID_COMPUTATION_MODE_GFX103(2);
}
if (has_stencil_attachment && vkds && vkds->stencilTestEnable) {
db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
db_depth_control |= S_028800_STENCILFUNC(vkds->front.compareOp);
- db_stencil_control |= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds->front.failOp));
- db_stencil_control |= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds->front.passOp));
- db_stencil_control |= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds->front.depthFailOp));
db_depth_control |= S_028800_STENCILFUNC_BF(vkds->back.compareOp);
- db_stencil_control |= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds->back.failOp));
- db_stencil_control |= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds->back.passOp));
- db_stencil_control |= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds->back.depthFailOp));
}
if (attachment && extra) {
db_render_control |= S_028000_DEPTH_CLEAR_ENABLE(extra->db_depth_clear);
db_render_control |= S_028000_STENCIL_CLEAR_ENABLE(extra->db_stencil_clear);
- db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->db_resummarize);
- db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->db_flush_depth_inplace);
- db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->db_flush_stencil_inplace);
+ db_render_control |= S_028000_RESUMMARIZE_ENABLE(extra->resummarize_enable);
+ db_render_control |= S_028000_DEPTH_COMPRESS_DISABLE(extra->depth_compress_disable);
+ db_render_control |= S_028000_STENCIL_COMPRESS_DISABLE(extra->stencil_compress_disable);
db_render_override2 |= S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(extra->db_depth_disable_expclear);
db_render_override2 |= S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(extra->db_stencil_disable_expclear);
}
db_render_override |= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
}
- radeon_set_context_reg(ctx_cs, R_028800_DB_DEPTH_CONTROL, db_depth_control);
- radeon_set_context_reg(ctx_cs, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);
-
radeon_set_context_reg(ctx_cs, R_028000_DB_RENDER_CONTROL, db_render_control);
radeon_set_context_reg(ctx_cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
radeon_set_context_reg(ctx_cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
+
+ pipeline->graphics.db_depth_control = db_depth_control;
}
static void
S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));
- radeon_set_context_reg(ctx_cs, R_0286D4_SPI_INTERP_CONTROL_0,
- S_0286D4_FLAT_SHADE_ENA(1) |
- S_0286D4_PNT_SPRITE_ENA(1) |
- S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
- S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
- S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
- S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
- S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
-
- radeon_set_context_reg(ctx_cs, R_028BE4_PA_SU_VTX_CNTL,
- S_028BE4_PIX_CENTER(1) | // TODO verify
- S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
- S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));
-
- radeon_set_context_reg(ctx_cs, R_028814_PA_SU_SC_MODE_CNTL,
- S_028814_FACE(vkraster->frontFace) |
- S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
- S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
- S_028814_POLY_MODE(vkraster->polygonMode != VK_POLYGON_MODE_FILL) |
- S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster->polygonMode)) |
- S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster->polygonMode)) |
- S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
- S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
- S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0));
+ pipeline->graphics.pa_su_sc_mode_cntl =
+ S_028814_FACE(vkraster->frontFace) |
+ S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
+ S_028814_CULL_BACK(!!(vkraster->cullMode & VK_CULL_MODE_BACK_BIT)) |
+ S_028814_POLY_MODE(vkraster->polygonMode != VK_POLYGON_MODE_FILL) |
+ S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(vkraster->polygonMode)) |
+ S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(vkraster->polygonMode)) |
+ S_028814_POLY_OFFSET_FRONT_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
+ S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
+ S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0);
+
+ radeon_set_context_reg(ctx_cs, R_028BDC_PA_SC_LINE_CNTL,
+ S_028BDC_DX10_DIAMOND_TEST_ENA(1));
/* Conservative rasterization. */
if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
radeon_set_context_reg(ctx_cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
- radeon_set_context_reg(ctx_cs, R_028BDC_PA_SC_LINE_CNTL, ms->pa_sc_line_cntl);
radeon_set_context_reg(ctx_cs, R_028BE0_PA_SC_AA_CONFIG, ms->pa_sc_aa_config);
/* The exclusion bits can be set to improve rasterization efficiency
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE));
- radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
- S_028818_VTX_W0_FMT(1) |
- S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
- S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
- S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
-
radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
+ S_02881C_BYPASS_PRIM_RATE_COMBINER_GFX103(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
cull_dist_mask << 8 |
clip_dist_mask);
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE));
- radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
- S_028818_VTX_W0_FMT(1) |
- S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
- S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
- S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(misc_vec_ena) |
S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) |
S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) |
+ S_02881C_BYPASS_PRIM_RATE_COMBINER_GFX103(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3) |
cull_dist_mask << 8 |
clip_dist_mask);
*/
radeon_set_context_reg(ctx_cs, R_028838_PA_CL_NGG_CNTL,
S_028838_INDEX_BUF_EDGE_FLAG_ENA(!radv_pipeline_has_tess(pipeline) &&
- !radv_pipeline_has_gs(pipeline)));
+ !radv_pipeline_has_gs(pipeline)) |
+ /* Reuse for NGG. */
+ S_028838_VERTEX_REUSE_DEPTH_GFX103(pipeline->device->physical_device->rad_info.chip_class >= GFX10_3 ? 30 : 0));
ge_cntl = S_03096C_PRIM_GRP_SIZE(ngg_state->max_gsprims) |
S_03096C_VERT_GRP_SIZE(256) | /* 256 = disable vertex grouping */
*
* Requirement: GE_CNTL.VERT_GRP_SIZE = VGT_GS_ONCHIP_CNTL.ES_VERTS_PER_SUBGRP - 5
*/
- if ((pipeline->device->physical_device->rad_info.family == CHIP_NAVI10 ||
- pipeline->device->physical_device->rad_info.family == CHIP_NAVI12 ||
- pipeline->device->physical_device->rad_info.family == CHIP_NAVI14) &&
+ if (pipeline->device->physical_device->rad_info.chip_class == GFX10 &&
!radv_pipeline_has_tess(pipeline) &&
ngg_state->hw_max_esverts != 256) {
ge_cntl &= C_03096C_VERT_GRP_SIZE;
++ps_offset;
}
+ if (ps->info.ps.viewport_index_input) {
+ unsigned vs_offset = outinfo->vs_output_param_offset[VARYING_SLOT_VIEWPORT];
+ if (vs_offset != AC_EXP_PARAM_UNDEFINED)
+ ps_input_cntl[ps_offset] = offset_to_ps_input(vs_offset, true, false, false);
+ else
+ ps_input_cntl[ps_offset] = offset_to_ps_input(AC_EXP_PARAM_DEFAULT_VAL_0000, true, false, false);
+ ++ps_offset;
+ }
+
if (ps->info.ps.has_pcoord) {
unsigned val;
val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20);
const struct radv_pipeline *pipeline,
const struct radv_shader_variant *ps)
{
+ unsigned conservative_z_export = V_02880C_EXPORT_ANY_Z;
unsigned z_order;
if (ps->info.ps.early_fragment_test || !ps->info.ps.writes_memory)
z_order = V_02880C_EARLY_Z_THEN_LATE_Z;
else
z_order = V_02880C_LATE_Z;
+ if (ps->info.ps.depth_layout == FRAG_DEPTH_LAYOUT_GREATER)
+ conservative_z_export = V_02880C_EXPORT_GREATER_THAN_Z;
+ else if (ps->info.ps.depth_layout == FRAG_DEPTH_LAYOUT_LESS)
+ conservative_z_export = V_02880C_EXPORT_LESS_THAN_Z;
+
bool disable_rbplus = device->physical_device->rad_info.has_rbplus &&
!device->physical_device->rad_info.rbplus_allowed;
S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.ps.writes_stencil) |
S_02880C_KILL_ENABLE(!!ps->info.ps.can_discard) |
S_02880C_MASK_EXPORT_ENABLE(mask_export_enable) |
+ S_02880C_CONSERVATIVE_Z_EXPORT(conservative_z_export) |
S_02880C_Z_ORDER(z_order) |
S_02880C_DEPTH_BEFORE_SHADER(ps->info.ps.early_fragment_test) |
S_02880C_PRE_SHADER_DEPTH_COVERAGE_ENABLE(ps->info.ps.post_depth_coverage) |
S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
}
-static uint32_t
-radv_compute_vgt_shader_stages_en(const struct radv_pipeline *pipeline)
+static void
+radv_pipeline_generate_vgt_shader_config(struct radeon_cmdbuf *ctx_cs,
+ const struct radv_pipeline *pipeline)
{
uint32_t stages = 0;
if (radv_pipeline_has_tess(pipeline)) {
S_028B54_VS_W32_EN(vs_size == 32 ? 1 : 0);
}
- return stages;
+ radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, stages);
}
-static uint32_t
-radv_compute_cliprect_rule(const VkGraphicsPipelineCreateInfo *pCreateInfo)
+static void
+radv_pipeline_generate_cliprect_rule(struct radeon_cmdbuf *ctx_cs,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
+ uint32_t cliprect_rule = 0;
- if (!discard_rectangle_info)
- return 0xffff;
-
- unsigned mask = 0;
-
- for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
- /* Interpret i as a bitmask, and then set the bit in the mask if
- * that combination of rectangles in which the pixel is contained
- * should pass the cliprect test. */
- unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);
+ if (!discard_rectangle_info) {
+ cliprect_rule = 0xffff;
+ } else {
+ for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
+ /* Interpret i as a bitmask of the rectangles that
+ * contain the pixel, and set bit i of cliprect_rule
+ * if that combination should pass the cliprect test.
+ */
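+ /* e.g. with two INCLUSIVE rectangles, only combinations where the
+ * pixel is inside at least one of them pass, so every bit i with
+ * (i & 3) != 0 gets set.
+ */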
+ unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);
- if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
- !relevant_subset)
- continue;
+ if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
+ !relevant_subset)
+ continue;
- if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
- relevant_subset)
- continue;
+ if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
+ relevant_subset)
+ continue;
- mask |= 1u << i;
+ cliprect_rule |= 1u << i;
+ }
}
- return mask;
+ radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, cliprect_rule);
}
static void
const struct radv_graphics_pipeline_create_info *extra,
const struct radv_blend_state *blend,
const struct radv_tessellation_state *tess,
- unsigned prim, unsigned gs_out)
+ unsigned gs_out)
{
struct radeon_cmdbuf *ctx_cs = &pipeline->ctx_cs;
struct radeon_cmdbuf *cs = &pipeline->cs;
radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo, blend);
+ radv_pipeline_generate_vgt_shader_config(ctx_cs, pipeline);
+ radv_pipeline_generate_cliprect_rule(ctx_cs, pCreateInfo);
if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline, tess);
- radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));
-
- if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
- radeon_set_uconfig_reg_idx(pipeline->device->physical_device,
- cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
- } else {
- radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
- }
radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);
- radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, radv_compute_cliprect_rule(pCreateInfo));
-
pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);
assert(ctx_cs->cdw <= ctx_cs->max_dw);
static struct radv_ia_multi_vgt_param_helpers
radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
- const struct radv_tessellation_state *tess,
- uint32_t prim)
+ const struct radv_tessellation_state *tess)
{
struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param = {0};
const struct radv_device *device = pipeline->device;
if (SI_GS_PER_ES / ia_multi_vgt_param.primgroup_size >= pipeline->device->gs_table_depth - 3)
ia_multi_vgt_param.partial_es_wave = true;
- ia_multi_vgt_param.wd_switch_on_eop = false;
- if (device->physical_device->rad_info.chip_class >= GFX7) {
- /* WD_SWITCH_ON_EOP has no effect on GPUs with less than
- * 4 shader engines. Set 1 to pass the assertion below.
- * The other cases are hardware requirements. */
- if (device->physical_device->rad_info.max_se < 4 ||
- prim == V_008958_DI_PT_POLYGON ||
- prim == V_008958_DI_PT_LINELOOP ||
- prim == V_008958_DI_PT_TRIFAN ||
- prim == V_008958_DI_PT_TRISTRIP_ADJ ||
- (pipeline->graphics.prim_restart_enable &&
- (device->physical_device->rad_info.family < CHIP_POLARIS10 ||
- (prim != V_008958_DI_PT_POINTLIST &&
- prim != V_008958_DI_PT_LINESTRIP))))
- ia_multi_vgt_param.wd_switch_on_eop = true;
- }
-
ia_multi_vgt_param.ia_switch_on_eoi = false;
if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.ps.prim_id_input)
ia_multi_vgt_param.ia_switch_on_eoi = true;
}
}
- /* Workaround for a VGT hang when strip primitive types are used with
- * primitive restart.
- */
- if (pipeline->graphics.prim_restart_enable &&
- (prim == V_008958_DI_PT_LINESTRIP ||
- prim == V_008958_DI_PT_TRISTRIP ||
- prim == V_008958_DI_PT_LINESTRIP_ADJ ||
- prim == V_008958_DI_PT_TRISTRIP_ADJ)) {
- ia_multi_vgt_param.partial_vs_wave = true;
- }
-
if (radv_pipeline_has_gs(pipeline)) {
/* On these chips there is the possibility of a hang if the
* pipeline uses a GS and partial_vs_wave is not set.
{
const VkPipelineVertexInputStateCreateInfo *vi_info =
pCreateInfo->pVertexInputState;
- struct radv_vertex_elements_info *velems = &pipeline->vertex_elements;
-
- for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
- const VkVertexInputAttributeDescription *desc =
- &vi_info->pVertexAttributeDescriptions[i];
- unsigned loc = desc->location;
- const struct vk_format_description *format_desc;
-
- format_desc = vk_format_description(desc->format);
-
- velems->format_size[loc] = format_desc->block.bits / 8;
- }
for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
const VkVertexInputBindingDescription *desc =
return NULL;
}
-static VkResult
-radv_secure_compile(struct radv_pipeline *pipeline,
- struct radv_device *device,
- const struct radv_pipeline_key *key,
- const VkPipelineShaderStageCreateInfo **pStages,
- const VkPipelineCreateFlags flags,
- unsigned num_stages)
-{
- uint8_t allowed_pipeline_hashes[2][20];
- radv_hash_shaders(allowed_pipeline_hashes[0], pStages,
- pipeline->layout, key, get_hash_flags(device));
-
- /* Generate the GC copy hash */
- memcpy(allowed_pipeline_hashes[1], allowed_pipeline_hashes[0], 20);
- allowed_pipeline_hashes[1][0] ^= 1;
-
- uint8_t allowed_hashes[2][20];
- for (unsigned i = 0; i < 2; ++i) {
- disk_cache_compute_key(device->physical_device->disk_cache,
- allowed_pipeline_hashes[i], 20,
- allowed_hashes[i]);
- }
-
- /* Do an early exit if all cache entries are already there. */
- bool may_need_copy_shader = pStages[MESA_SHADER_GEOMETRY];
- void *main_entry = disk_cache_get(device->physical_device->disk_cache, allowed_hashes[0], NULL);
- void *copy_entry = NULL;
- if (may_need_copy_shader)
- copy_entry = disk_cache_get(device->physical_device->disk_cache, allowed_hashes[1], NULL);
-
- bool has_all_cache_entries = main_entry && (!may_need_copy_shader || copy_entry);
- free(main_entry);
- free(copy_entry);
-
- if(has_all_cache_entries)
- return VK_SUCCESS;
-
- unsigned process = 0;
- uint8_t sc_threads = device->instance->num_sc_threads;
- while (true) {
- mtx_lock(&device->sc_state->secure_compile_mutex);
- if (device->sc_state->secure_compile_thread_counter < sc_threads) {
- device->sc_state->secure_compile_thread_counter++;
- for (unsigned i = 0; i < sc_threads; i++) {
- if (!device->sc_state->secure_compile_processes[i].in_use) {
- device->sc_state->secure_compile_processes[i].in_use = true;
- process = i;
- break;
- }
- }
- mtx_unlock(&device->sc_state->secure_compile_mutex);
- break;
- }
- mtx_unlock(&device->sc_state->secure_compile_mutex);
- }
-
- int fd_secure_input = device->sc_state->secure_compile_processes[process].fd_secure_input;
- int fd_secure_output = device->sc_state->secure_compile_processes[process].fd_secure_output;
-
- /* Fork a copy of the slim untainted secure compile process */
- enum radv_secure_compile_type sc_type = RADV_SC_TYPE_FORK_DEVICE;
- write(fd_secure_input, &sc_type, sizeof(sc_type));
-
- if (!radv_sc_read(fd_secure_output, &sc_type, sizeof(sc_type), true) ||
- sc_type != RADV_SC_TYPE_INIT_SUCCESS)
- return VK_ERROR_DEVICE_LOST;
-
- fd_secure_input = device->sc_state->secure_compile_processes[process].fd_server;
- fd_secure_output = device->sc_state->secure_compile_processes[process].fd_client;
-
- /* Write pipeline / shader module out to secure process via pipe */
- sc_type = RADV_SC_TYPE_COMPILE_PIPELINE;
- write(fd_secure_input, &sc_type, sizeof(sc_type));
-
- /* Write pipeline layout out to secure process */
- struct radv_pipeline_layout *layout = pipeline->layout;
- write(fd_secure_input, layout, sizeof(struct radv_pipeline_layout));
- write(fd_secure_input, &layout->num_sets, sizeof(uint32_t));
- for (uint32_t set = 0; set < layout->num_sets; set++) {
- write(fd_secure_input, &layout->set[set].layout->layout_size, sizeof(uint32_t));
- write(fd_secure_input, layout->set[set].layout, layout->set[set].layout->layout_size);
- }
-
- /* Write pipeline key out to secure process */
- write(fd_secure_input, key, sizeof(struct radv_pipeline_key));
-
- /* Write pipeline create flags out to secure process */
- write(fd_secure_input, &flags, sizeof(VkPipelineCreateFlags));
-
- /* Write stage and shader information out to secure process */
- write(fd_secure_input, &num_stages, sizeof(uint32_t));
- for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
- if (!pStages[i])
- continue;
-
- /* Write stage out to secure process */
- gl_shader_stage stage = ffs(pStages[i]->stage) - 1;
- write(fd_secure_input, &stage, sizeof(gl_shader_stage));
-
- /* Write entry point name out to secure process */
- size_t name_size = strlen(pStages[i]->pName) + 1;
- write(fd_secure_input, &name_size, sizeof(size_t));
- write(fd_secure_input, pStages[i]->pName, name_size);
-
- /* Write shader module out to secure process */
- struct radv_shader_module *module = radv_shader_module_from_handle(pStages[i]->module);
- assert(!module->nir);
- size_t module_size = sizeof(struct radv_shader_module) + module->size;
- write(fd_secure_input, &module_size, sizeof(size_t));
- write(fd_secure_input, module, module_size);
-
- /* Write specialization info out to secure process */
- const VkSpecializationInfo *specInfo = pStages[i]->pSpecializationInfo;
- bool has_spec_info = specInfo ? true : false;
- write(fd_secure_input, &has_spec_info, sizeof(bool));
- if (specInfo) {
- write(fd_secure_input, &specInfo->dataSize, sizeof(size_t));
- write(fd_secure_input, specInfo->pData, specInfo->dataSize);
-
- write(fd_secure_input, &specInfo->mapEntryCount, sizeof(uint32_t));
- for (uint32_t j = 0; j < specInfo->mapEntryCount; j++)
- write(fd_secure_input, &specInfo->pMapEntries[j], sizeof(VkSpecializationMapEntry));
- }
- }
-
- /* Read the data returned from the secure process */
- while (sc_type != RADV_SC_TYPE_COMPILE_PIPELINE_FINISHED) {
- if (!radv_sc_read(fd_secure_output, &sc_type, sizeof(sc_type), true))
- return VK_ERROR_DEVICE_LOST;
-
- if (sc_type == RADV_SC_TYPE_WRITE_DISK_CACHE) {
- assert(device->physical_device->disk_cache);
-
- uint8_t disk_sha1[20];
- if (!radv_sc_read(fd_secure_output, disk_sha1, sizeof(uint8_t) * 20, true))
- return VK_ERROR_DEVICE_LOST;
-
- if (memcmp(disk_sha1, allowed_hashes[0], 20) &&
- memcmp(disk_sha1, allowed_hashes[1], 20))
- return VK_ERROR_DEVICE_LOST;
-
- uint32_t entry_size;
- if (!radv_sc_read(fd_secure_output, &entry_size, sizeof(uint32_t), true))
- return VK_ERROR_DEVICE_LOST;
-
- struct cache_entry *entry = malloc(entry_size);
- if (!radv_sc_read(fd_secure_output, entry, entry_size, true))
- return VK_ERROR_DEVICE_LOST;
-
- disk_cache_put(device->physical_device->disk_cache,
- disk_sha1, entry, entry_size,
- NULL);
-
- free(entry);
- } else if (sc_type == RADV_SC_TYPE_READ_DISK_CACHE) {
- uint8_t disk_sha1[20];
- if (!radv_sc_read(fd_secure_output, disk_sha1, sizeof(uint8_t) * 20, true))
- return VK_ERROR_DEVICE_LOST;
-
- if (memcmp(disk_sha1, allowed_hashes[0], 20) &&
- memcmp(disk_sha1, allowed_hashes[1], 20))
- return VK_ERROR_DEVICE_LOST;
-
- size_t size;
- struct cache_entry *entry = (struct cache_entry *)
- disk_cache_get(device->physical_device->disk_cache,
- disk_sha1, &size);
-
- uint8_t found = entry ? 1 : 0;
- write(fd_secure_input, &found, sizeof(uint8_t));
-
- if (found) {
- write(fd_secure_input, &size, sizeof(size_t));
- write(fd_secure_input, entry, size);
- }
-
- free(entry);
- }
- }
-
- sc_type = RADV_SC_TYPE_DESTROY_DEVICE;
- write(fd_secure_input, &sc_type, sizeof(sc_type));
-
- mtx_lock(&device->sc_state->secure_compile_mutex);
- device->sc_state->secure_compile_thread_counter--;
- device->sc_state->secure_compile_processes[process].in_use = false;
- mtx_unlock(&device->sc_state->secure_compile_mutex);
-
- return VK_SUCCESS;
-}
-
static VkResult
radv_pipeline_init(struct radv_pipeline *pipeline,
struct radv_device *device,
}
struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend, has_view_index);
- if (radv_device_use_secure_compile(device->instance)) {
- return radv_secure_compile(pipeline, device, &key, pStages, pCreateInfo->flags, pCreateInfo->stageCount);
- } else {
- radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);
- }
+
+ result = radv_create_shaders(pipeline, device, cache, &key, pStages,
+ pCreateInfo->flags, pipeline_feedback,
+ stage_feedbacks);
+ if (result != VK_SUCCESS)
+ return result;
pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);
uint32_t gs_out;
- uint32_t prim = si_translate_prim(pCreateInfo->pInputAssemblyState->topology);
- pipeline->graphics.topology = pCreateInfo->pInputAssemblyState->topology;
pipeline->graphics.can_use_guardband = radv_prim_can_use_guardband(pCreateInfo->pInputAssemblyState->topology);
if (radv_pipeline_has_gs(pipeline)) {
gs_out = si_conv_prim_to_gs_out(pCreateInfo->pInputAssemblyState->topology);
}
if (extra && extra->use_rectlist) {
- prim = V_008958_DI_PT_RECTLIST;
gs_out = V_028A6C_OUTPRIM_TYPE_TRISTRIP;
pipeline->graphics.can_use_guardband = true;
if (radv_pipeline_has_ngg(pipeline))
gs_out = V_028A6C_VGT_OUT_RECT_V0;
}
pipeline->graphics.prim_restart_enable = !!pCreateInfo->pInputAssemblyState->primitiveRestartEnable;
- /* prim vertex count will need TESS changes */
- pipeline->graphics.prim_vertex_count = prim_size_table[prim];
- radv_pipeline_init_dynamic_state(pipeline, pCreateInfo);
+ radv_pipeline_init_dynamic_state(pipeline, pCreateInfo, extra);
/* Ensure that some export memory is always allocated, for two reasons:
*
blend.spi_shader_col_format = V_028714_SPI_SHADER_32_R;
}
+ blend.cb_shader_mask = ps->info.ps.cb_shader_mask;
+
+ if (extra &&
+ (extra->custom_blend_mode == V_028808_CB_ELIMINATE_FAST_CLEAR ||
+ extra->custom_blend_mode == V_028808_CB_FMASK_DECOMPRESS ||
+ extra->custom_blend_mode == V_028808_CB_DCC_DECOMPRESS ||
+ extra->custom_blend_mode == V_028808_CB_RESOLVE)) {
+ /* The CB spec states that CB_SHADER_MASK should be set to
+ * enable writes to all four channels of MRT0.
+ */
+ blend.cb_shader_mask = 0xf;
+ }
+
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
if (pipeline->shaders[i]) {
pipeline->need_indirect_descriptor_sets |= pipeline->shaders[i]->info.need_indirect_descriptor_sets;
struct radv_tessellation_state tess = {0};
if (radv_pipeline_has_tess(pipeline)) {
- if (prim == V_008958_DI_PT_PATCH) {
- pipeline->graphics.prim_vertex_count.min = pCreateInfo->pTessellationState->patchControlPoints;
- pipeline->graphics.prim_vertex_count.incr = 1;
- }
+ pipeline->graphics.tess_patch_control_points =
+ pCreateInfo->pTessellationState->patchControlPoints;
tess = calculate_tess_state(pipeline, pCreateInfo);
}
- pipeline->graphics.ia_multi_vgt_param = radv_compute_ia_multi_vgt_param_helpers(pipeline, &tess, prim);
+ pipeline->graphics.ia_multi_vgt_param = radv_compute_ia_multi_vgt_param_helpers(pipeline, &tess);
radv_compute_vertex_input_state(pipeline, pCreateInfo);
pipeline->streamout_shader = radv_pipeline_get_streamout_shader(pipeline);
result = radv_pipeline_scratch_init(device, pipeline);
- radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, prim, gs_out);
+ radv_pipeline_generate_pm4(pipeline, pCreateInfo, extra, &blend, &tess, gs_out);
return result;
}
struct radv_pipeline *pipeline;
VkResult result;
- pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
+ pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pipeline->base,
+ VK_OBJECT_TYPE_PIPELINE);
+
result = radv_pipeline_init(pipeline, device, cache,
pCreateInfo, extra);
if (result != VK_SUCCESS) {
if (r != VK_SUCCESS) {
result = r;
pPipelines[i] = VK_NULL_HANDLE;
+
+ if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT)
+ break;
}
}
+ for (; i < count; ++i)
+ pPipelines[i] = VK_NULL_HANDLE;
+
return result;
}
struct radv_pipeline *pipeline;
VkResult result;
- pipeline = vk_zalloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
+ pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ vk_object_base_init(&device->vk, &pipeline->base,
+ VK_OBJECT_TYPE_PIPELINE);
+
pipeline->device = device;
pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
assert(pipeline->layout);
struct radv_pipeline_key key =
radv_generate_compute_pipeline_key(pipeline, pCreateInfo);
- if (radv_device_use_secure_compile(device->instance)) {
- result = radv_secure_compile(pipeline, device, &key, pStages, pCreateInfo->flags, 1);
- *pPipeline = radv_pipeline_to_handle(pipeline);
-
+ result = radv_create_shaders(pipeline, device, cache, &key, pStages,
+ pCreateInfo->flags, pipeline_feedback,
+ stage_feedbacks);
+ if (result != VK_SUCCESS) {
+ radv_pipeline_destroy(device, pipeline, pAllocator);
return result;
- } else {
- radv_create_shaders(pipeline, device, cache, &key, pStages, pCreateInfo->flags, pipeline_feedback, stage_feedbacks);
}
pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
if (r != VK_SUCCESS) {
result = r;
pPipelines[i] = VK_NULL_HANDLE;
+
+ if (pCreateInfos[i].flags & VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT)
+ break;
}
}
+ for (; i < count; ++i)
+ pPipelines[i] = VK_NULL_HANDLE;
+
return result;
}
}
++s;
+ if (shader->statistics) {
+ for (unsigned i = 0; i < shader->statistics->count; i++) {
+ struct radv_compiler_statistic_info *info = &shader->statistics->infos[i];
+ uint32_t value = shader->statistics->values[i];
+ if (s < end) {
+ desc_copy(s->name, info->name);
+ desc_copy(s->description, info->desc);
+ s->format = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR;
+ s->value.u64 = value;
+ }
+ ++s;
+ }
+ }
+
if (!pStatistics)
*pStatisticCount = s - pStatistics;
else if (s > end) {
/* backend IR */
if (p < end) {
p->isText = true;
- if (pipeline->device->physical_device->use_aco) {
- desc_copy(p->name, "ACO IR");
- desc_copy(p->description, "The ACO IR after some optimizations");
- } else {
+ if (pipeline->device->physical_device->use_llvm) {
desc_copy(p->name, "LLVM IR");
desc_copy(p->description, "The LLVM IR after some optimizations");
+ } else {
+ desc_copy(p->name, "ACO IR");
+ desc_copy(p->description, "The ACO IR after some optimizations");
}
if (radv_copy_representation(p->pData, &p->dataSize, shader->ir_string) != VK_SUCCESS)
result = VK_INCOMPLETE;