struct radv_tessellation_state {
uint32_t ls_hs_config;
- unsigned num_patches;
- unsigned lds_size;
+ unsigned num_lds_blocks;
uint32_t tf_param;
};
return blend;
}
-static uint32_t si_translate_stencil_op(enum VkStencilOp op)
-{
- switch (op) {
- case VK_STENCIL_OP_KEEP:
- return V_02842C_STENCIL_KEEP;
- case VK_STENCIL_OP_ZERO:
- return V_02842C_STENCIL_ZERO;
- case VK_STENCIL_OP_REPLACE:
- return V_02842C_STENCIL_REPLACE_TEST;
- case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
- return V_02842C_STENCIL_ADD_CLAMP;
- case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
- return V_02842C_STENCIL_SUB_CLAMP;
- case VK_STENCIL_OP_INVERT:
- return V_02842C_STENCIL_INVERT;
- case VK_STENCIL_OP_INCREMENT_AND_WRAP:
- return V_02842C_STENCIL_ADD_WRAP;
- case VK_STENCIL_OP_DECREMENT_AND_WRAP:
- return V_02842C_STENCIL_SUB_WRAP;
- default:
- return 0;
- }
-}
-
static uint32_t si_translate_fill(VkPolygonMode func)
{
switch(func) {
radv_order_invariant_stencil_op(state->failOp));
}
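+/* Return true if the pipeline declares one of the extended dynamic
+ * depth/stencil states (VK_EXT_extended_dynamic_state) in pDynamicState.
+ */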
+static bool
+radv_pipeline_has_dynamic_ds_states(const VkGraphicsPipelineCreateInfo *pCreateInfo)
+{
+ VkDynamicState ds_states[] = {
+ VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
+ VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
+ VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT,
+ VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,
+ VK_DYNAMIC_STATE_STENCIL_OP_EXT,
+ };
+
+ if (pCreateInfo->pDynamicState) {
+ uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
+ for (uint32_t i = 0; i < count; i++) {
+ for (uint32_t j = 0; j < ARRAY_SIZE(ds_states); j++) {
+ if (pCreateInfo->pDynamicState->pDynamicStates[i] == ds_states[j])
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
static bool
radv_pipeline_out_of_order_rast(struct radv_pipeline *pipeline,
struct radv_blend_state *blend,
if (colormask && vkblend && vkblend->logicOpEnable)
return false;
+ /* Be conservative if an extended dynamic depth/stencil state is
+ * enabled because the driver can't update out-of-order rasterization
+ * dynamically.
+ */
+ if (radv_pipeline_has_dynamic_ds_states(pCreateInfo))
+ return false;
+
/* Default depth/stencil invariance when no attachment is bound. */
struct radv_dsa_order_invariance dsa_order_invariant = {
.zs = true, .pass_set = true
radv_pipeline_out_of_order_rast(pipeline, blend, pCreateInfo);
}
- ms->pa_sc_line_cntl = S_028BDC_DX10_DIAMOND_TEST_ENA(1);
ms->pa_sc_aa_config = 0;
ms->db_eqaa = S_028804_HIGH_QUALITY_INTERSECTIONS(1) |
S_028804_INCOHERENT_EQAA_READS(1) |
return RADV_DYNAMIC_FRONT_FACE;
case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
return RADV_DYNAMIC_PRIMITIVE_TOPOLOGY;
+ case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:
+ return RADV_DYNAMIC_DEPTH_TEST_ENABLE;
+ case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:
+ return RADV_DYNAMIC_DEPTH_WRITE_ENABLE;
+ case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:
+ return RADV_DYNAMIC_DEPTH_COMPARE_OP;
+ case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:
+ return RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
+ case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:
+ return RADV_DYNAMIC_STENCIL_TEST_ENABLE;
+ case VK_DYNAMIC_STATE_STENCIL_OP_EXT:
+ return RADV_DYNAMIC_STENCIL_OP;
+ case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:
+ return RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;
default:
unreachable("Unhandled dynamic state");
}
/* If rasterization is disabled we do not care about any of the
* dynamic states, since they are all rasterization related only,
- * except primitive topology.
+ * except primitive topology and vertex binding stride.
*/
if (pCreateInfo->pRasterizationState->rasterizerDiscardEnable)
- return RADV_DYNAMIC_PRIMITIVE_TOPOLOGY;
+ return RADV_DYNAMIC_PRIMITIVE_TOPOLOGY |
+ RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;
if (!pCreateInfo->pRasterizationState->depthBiasEnable)
states &= ~RADV_DYNAMIC_DEPTH_BIAS;
dynamic->stencil_reference.back =
pCreateInfo->pDepthStencilState->back.reference;
}
+
+ if (states & RADV_DYNAMIC_DEPTH_TEST_ENABLE) {
+ dynamic->depth_test_enable =
+ pCreateInfo->pDepthStencilState->depthTestEnable;
+ }
+
+ if (states & RADV_DYNAMIC_DEPTH_WRITE_ENABLE) {
+ dynamic->depth_write_enable =
+ pCreateInfo->pDepthStencilState->depthWriteEnable;
+ }
+
+ if (states & RADV_DYNAMIC_DEPTH_COMPARE_OP) {
+ dynamic->depth_compare_op =
+ pCreateInfo->pDepthStencilState->depthCompareOp;
+ }
+
+ if (states & RADV_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE) {
+ dynamic->depth_bounds_test_enable =
+ pCreateInfo->pDepthStencilState->depthBoundsTestEnable;
+ }
+
+ if (states & RADV_DYNAMIC_STENCIL_TEST_ENABLE) {
+ dynamic->stencil_test_enable =
+ pCreateInfo->pDepthStencilState->stencilTestEnable;
+ }
+
+ if (states & RADV_DYNAMIC_STENCIL_OP) {
+ dynamic->stencil_op.front.compare_op =
+ pCreateInfo->pDepthStencilState->front.compareOp;
+ dynamic->stencil_op.front.fail_op =
+ pCreateInfo->pDepthStencilState->front.failOp;
+ dynamic->stencil_op.front.pass_op =
+ pCreateInfo->pDepthStencilState->front.passOp;
+ dynamic->stencil_op.front.depth_fail_op =
+ pCreateInfo->pDepthStencilState->front.depthFailOp;
+
+ dynamic->stencil_op.back.compare_op =
+ pCreateInfo->pDepthStencilState->back.compareOp;
+ dynamic->stencil_op.back.fail_op =
+ pCreateInfo->pDepthStencilState->back.failOp;
+ dynamic->stencil_op.back.pass_op =
+ pCreateInfo->pDepthStencilState->back.passOp;
+ dynamic->stencil_op.back.depth_fail_op =
+ pCreateInfo->pDepthStencilState->back.depthFailOp;
+ }
}
const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
dynamic->line_stipple.pattern = rast_line_info->lineStipplePattern;
}
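+ /* At this point `states` only contains the states that are not listed as
+ * dynamic, so a cleared VERTEX_INPUT_BINDING_STRIDE bit means the vertex
+ * binding stride is dynamic and comes from vkCmdBindVertexBuffers2EXT().
+ */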
+ if (!(states & RADV_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE))
+ pipeline->graphics.uses_dynamic_stride = true;
+
pipeline->dynamic_state.mask = states;
}
pipeline->graphics.gsvs_ring_size = MIN2(gsvs_ring_size, max_size);
}
-static void si_multiwave_lds_size_workaround(struct radv_device *device,
- unsigned *lds_size)
-{
- /* If tessellation is all offchip and on-chip GS isn't used, this
- * workaround is not needed.
- */
- return;
-
- /* SPI barrier management bug:
- * Make sure we have at least 4k of LDS in use to avoid the bug.
- * It applies to workgroup sizes of more than one wavefront.
- */
- if (device->physical_device->rad_info.family == CHIP_BONAIRE ||
- device->physical_device->rad_info.family == CHIP_KABINI)
- *lds_size = MAX2(*lds_size, 8);
-}
-
struct radv_shader_variant *
radv_get_shader(struct radv_pipeline *pipeline,
gl_shader_stage stage)
{
unsigned num_tcs_input_cp;
unsigned num_tcs_output_cp;
- unsigned lds_size;
unsigned num_patches;
struct radv_tessellation_state tess = {0};
num_tcs_output_cp = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.tcs_vertices_out; //TCS VERTICES OUT
num_patches = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
- lds_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.lds_size;
-
- if (pipeline->device->physical_device->rad_info.chip_class >= GFX7) {
- assert(lds_size <= 65536);
- lds_size = align(lds_size, 512) / 512;
- } else {
- assert(lds_size <= 32768);
- lds_size = align(lds_size, 256) / 256;
- }
- si_multiwave_lds_size_workaround(pipeline->device, &lds_size);
-
- tess.lds_size = lds_size;
+ tess.num_lds_blocks = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_lds_blocks;
tess.ls_hs_config = S_028B58_NUM_PATCHES(num_patches) |
S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
- tess.num_patches = num_patches;
struct radv_shader_variant *tes = radv_get_shader(pipeline, MESA_SHADER_TESS_EVAL);
unsigned type = 0, partitioning = 0, topology = 0, distribution_mode = 0;
static struct radv_pipeline_key
radv_generate_graphics_pipeline_key(struct radv_pipeline *pipeline,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct radv_blend_state *blend,
- bool has_view_index)
+ const struct radv_blend_state *blend)
{
+ RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
+ struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
const VkPipelineVertexInputStateCreateInfo *input_state =
pCreateInfo->pVertexInputState;
const VkPipelineVertexInputDivisorStateCreateInfoEXT *divisor_state =
if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
key.optimisations_disabled = 1;
- key.has_multiview_view_index = has_view_index;
+ key.has_multiview_view_index = !!subpass->view_mask;
uint32_t binding_input_rate = 0;
uint32_t instance_rate_divisors[MAX_VERTEX_ATTRIBS];
struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
struct radv_shader_variant *ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
struct radv_render_pass_attachment *attachment = NULL;
- uint32_t db_depth_control = 0, db_stencil_control = 0;
+ uint32_t db_depth_control = 0;
uint32_t db_render_control = 0, db_render_override2 = 0;
uint32_t db_render_override = 0;
if (has_stencil_attachment && vkds && vkds->stencilTestEnable) {
db_depth_control |= S_028800_STENCIL_ENABLE(1) | S_028800_BACKFACE_ENABLE(1);
db_depth_control |= S_028800_STENCILFUNC(vkds->front.compareOp);
- db_stencil_control |= S_02842C_STENCILFAIL(si_translate_stencil_op(vkds->front.failOp));
- db_stencil_control |= S_02842C_STENCILZPASS(si_translate_stencil_op(vkds->front.passOp));
- db_stencil_control |= S_02842C_STENCILZFAIL(si_translate_stencil_op(vkds->front.depthFailOp));
db_depth_control |= S_028800_STENCILFUNC_BF(vkds->back.compareOp);
- db_stencil_control |= S_02842C_STENCILFAIL_BF(si_translate_stencil_op(vkds->back.failOp));
- db_stencil_control |= S_02842C_STENCILZPASS_BF(si_translate_stencil_op(vkds->back.passOp));
- db_stencil_control |= S_02842C_STENCILZFAIL_BF(si_translate_stencil_op(vkds->back.depthFailOp));
}
if (attachment && extra) {
db_render_override |= S_02800C_DISABLE_VIEWPORT_CLAMP(1);
}
- radeon_set_context_reg(ctx_cs, R_028800_DB_DEPTH_CONTROL, db_depth_control);
- radeon_set_context_reg(ctx_cs, R_02842C_DB_STENCIL_CONTROL, db_stencil_control);
-
radeon_set_context_reg(ctx_cs, R_028000_DB_RENDER_CONTROL, db_render_control);
radeon_set_context_reg(ctx_cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override);
radeon_set_context_reg(ctx_cs, R_028010_DB_RENDER_OVERRIDE2, db_render_override2);
+
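+ /* DB_DEPTH_CONTROL is stored in the pipeline instead of being emitted
+ * here, so the command buffer can apply the extended dynamic
+ * depth/stencil states before writing the register.
+ */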
+ pipeline->graphics.db_depth_control = db_depth_control;
}
static void
S_028810_DX_RASTERIZATION_KILL(vkraster->rasterizerDiscardEnable ? 1 : 0) |
S_028810_DX_LINEAR_ATTR_CLIP_ENA(1));
- radeon_set_context_reg(ctx_cs, R_0286D4_SPI_INTERP_CONTROL_0,
- S_0286D4_FLAT_SHADE_ENA(1) |
- S_0286D4_PNT_SPRITE_ENA(1) |
- S_0286D4_PNT_SPRITE_OVRD_X(V_0286D4_SPI_PNT_SPRITE_SEL_S) |
- S_0286D4_PNT_SPRITE_OVRD_Y(V_0286D4_SPI_PNT_SPRITE_SEL_T) |
- S_0286D4_PNT_SPRITE_OVRD_Z(V_0286D4_SPI_PNT_SPRITE_SEL_0) |
- S_0286D4_PNT_SPRITE_OVRD_W(V_0286D4_SPI_PNT_SPRITE_SEL_1) |
- S_0286D4_PNT_SPRITE_TOP_1(0)); /* vulkan is top to bottom - 1.0 at bottom */
-
- radeon_set_context_reg(ctx_cs, R_028BE4_PA_SU_VTX_CNTL,
- S_028BE4_PIX_CENTER(1) | // TODO verify
- S_028BE4_ROUND_MODE(V_028BE4_X_ROUND_TO_EVEN) |
- S_028BE4_QUANT_MODE(V_028BE4_X_16_8_FIXED_POINT_1_256TH));
-
pipeline->graphics.pa_su_sc_mode_cntl =
S_028814_FACE(vkraster->frontFace) |
S_028814_CULL_FRONT(!!(vkraster->cullMode & VK_CULL_MODE_FRONT_BIT)) |
S_028814_POLY_OFFSET_BACK_ENABLE(vkraster->depthBiasEnable ? 1 : 0) |
S_028814_POLY_OFFSET_PARA_ENABLE(vkraster->depthBiasEnable ? 1 : 0);
+ radeon_set_context_reg(ctx_cs, R_028BDC_PA_SC_LINE_CNTL,
+ S_028BDC_DX10_DIAMOND_TEST_ENA(1));
+
/* Conservative rasterization. */
if (mode != VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT) {
struct radv_multisample_state *ms = &pipeline->graphics.ms;
radeon_set_context_reg(ctx_cs, R_028804_DB_EQAA, ms->db_eqaa);
radeon_set_context_reg(ctx_cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);
radeon_set_context_reg(ctx_cs, R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1);
- radeon_set_context_reg(ctx_cs, R_028BDC_PA_SC_LINE_CNTL, ms->pa_sc_line_cntl);
radeon_set_context_reg(ctx_cs, R_028BE0_PA_SC_AA_CONFIG, ms->pa_sc_aa_config);
/* The exclusion bits can be set to improve rasterization efficiency
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE));
- radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
- S_028818_VTX_W0_FMT(1) |
- S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
- S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
- S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
-
radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
radeon_emit(cs, va >> 8);
radeon_emit(cs, S_00B524_MEM_BASE(va >> 40));
- rsrc2 |= S_00B52C_LDS_SIZE(tess->lds_size);
+ rsrc2 |= S_00B52C_LDS_SIZE(tess->num_lds_blocks);
if (pipeline->device->physical_device->rad_info.chip_class == GFX7 &&
pipeline->device->physical_device->rad_info.family != CHIP_HAWAII)
radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, rsrc2);
V_02870C_SPI_SHADER_4COMP :
V_02870C_SPI_SHADER_NONE));
- radeon_set_context_reg(ctx_cs, R_028818_PA_CL_VTE_CNTL,
- S_028818_VTX_W0_FMT(1) |
- S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) |
- S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) |
- S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1));
radeon_set_context_reg(ctx_cs, R_02881C_PA_CL_VS_OUT_CNTL,
S_02881C_USE_VTX_POINT_SIZE(outinfo->writes_pointsize) |
S_02881C_USE_VTX_RENDER_TARGET_INDX(outinfo->writes_layer) |
static void
radv_pipeline_generate_hw_hs(struct radeon_cmdbuf *cs,
struct radv_pipeline *pipeline,
- struct radv_shader_variant *shader,
- const struct radv_tessellation_state *tess)
+ struct radv_shader_variant *shader)
{
uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
if (pipeline->device->physical_device->rad_info.chip_class >= GFX9) {
- unsigned hs_rsrc2 = shader->config.rsrc2;
-
- if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
- hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(tess->lds_size);
- } else {
- hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(tess->lds_size);
- }
-
if (pipeline->device->physical_device->rad_info.chip_class >= GFX10) {
radeon_set_sh_reg_seq(cs, R_00B520_SPI_SHADER_PGM_LO_LS, 2);
radeon_emit(cs, va >> 8);
radeon_set_sh_reg_seq(cs, R_00B428_SPI_SHADER_PGM_RSRC1_HS, 2);
radeon_emit(cs, shader->config.rsrc1);
- radeon_emit(cs, hs_rsrc2);
+ radeon_emit(cs, shader->config.rsrc2);
} else {
radeon_set_sh_reg_seq(cs, R_00B420_SPI_SHADER_PGM_LO_HS, 4);
radeon_emit(cs, va >> 8);
radv_pipeline_generate_hw_vs(ctx_cs, cs, pipeline, tes);
}
- radv_pipeline_generate_hw_hs(cs, pipeline, tcs, tess);
+ radv_pipeline_generate_hw_hs(cs, pipeline, tcs);
radeon_set_context_reg(ctx_cs, R_028B6C_VGT_TF_PARAM,
tess->tf_param);
S_028C58_VTX_REUSE_DEPTH(vtx_reuse_depth));
}
-static uint32_t
-radv_compute_vgt_shader_stages_en(const struct radv_pipeline *pipeline)
+static void
+radv_pipeline_generate_vgt_shader_config(struct radeon_cmdbuf *ctx_cs,
+ const struct radv_pipeline *pipeline)
{
uint32_t stages = 0;
if (radv_pipeline_has_tess(pipeline)) {
S_028B54_VS_W32_EN(vs_size == 32 ? 1 : 0);
}
- return stages;
+ radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, stages);
}
-static uint32_t
-radv_compute_cliprect_rule(const VkGraphicsPipelineCreateInfo *pCreateInfo)
+static void
+radv_pipeline_generate_cliprect_rule(struct radeon_cmdbuf *ctx_cs,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
const VkPipelineDiscardRectangleStateCreateInfoEXT *discard_rectangle_info =
vk_find_struct_const(pCreateInfo->pNext, PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT);
+ uint32_t cliprect_rule = 0;
- if (!discard_rectangle_info)
- return 0xffff;
-
- unsigned mask = 0;
-
- for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
- /* Interpret i as a bitmask, and then set the bit in the mask if
- * that combination of rectangles in which the pixel is contained
- * should pass the cliprect test. */
- unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);
+ if (!discard_rectangle_info) {
+ cliprect_rule = 0xffff;
+ } else {
+ for (unsigned i = 0; i < (1u << MAX_DISCARD_RECTANGLES); ++i) {
+ /* Interpret i as a bitmask of the discard rectangles
+ * that contain a pixel, and set bit i in the rule if a
+ * pixel covered by exactly that combination of
+ * rectangles should pass the cliprect test.
+ */
+ unsigned relevant_subset = i & ((1u << discard_rectangle_info->discardRectangleCount) - 1);
- if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
- !relevant_subset)
- continue;
+ if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT &&
+ !relevant_subset)
+ continue;
- if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
- relevant_subset)
- continue;
+ if (discard_rectangle_info->discardRectangleMode == VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT &&
+ relevant_subset)
+ continue;
- mask |= 1u << i;
+ cliprect_rule |= 1u << i;
+ }
}
- return mask;
+ radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, cliprect_rule);
}
static void
gfx10_pipeline_generate_ge_cntl(struct radeon_cmdbuf *ctx_cs,
- struct radv_pipeline *pipeline,
- const struct radv_tessellation_state *tess)
+ struct radv_pipeline *pipeline)
{
bool break_wave_at_eoi = false;
unsigned primgroup_size;
unsigned vertgroup_size = 256; /* 256 = disable vertex grouping */
if (radv_pipeline_has_tess(pipeline)) {
- primgroup_size = tess->num_patches; /* must be a multiple of NUM_PATCHES */
+ primgroup_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
} else if (radv_pipeline_has_gs(pipeline)) {
const struct gfx9_gs_info *gs_state =
&pipeline->shaders[MESA_SHADER_GEOMETRY]->info.gs_ring_info;
radv_pipeline_generate_ps_inputs(ctx_cs, pipeline);
radv_pipeline_generate_vgt_vertex_reuse(ctx_cs, pipeline);
radv_pipeline_generate_binning_state(ctx_cs, pipeline, pCreateInfo, blend);
+ radv_pipeline_generate_vgt_shader_config(ctx_cs, pipeline);
+ radv_pipeline_generate_cliprect_rule(ctx_cs, pCreateInfo);
if (pipeline->device->physical_device->rad_info.chip_class >= GFX10 && !radv_pipeline_has_ngg(pipeline))
- gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline, tess);
+ gfx10_pipeline_generate_ge_cntl(ctx_cs, pipeline);
- radeon_set_context_reg(ctx_cs, R_028B54_VGT_SHADER_STAGES_EN, radv_compute_vgt_shader_stages_en(pipeline));
radeon_set_context_reg(ctx_cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out);
- radeon_set_context_reg(ctx_cs, R_02820C_PA_SC_CLIPRECT_RULE, radv_compute_cliprect_rule(pCreateInfo));
-
pipeline->ctx_cs_hash = _mesa_hash_data(ctx_cs->buf, ctx_cs->cdw * 4);
assert(ctx_cs->cdw <= ctx_cs->max_dw);
}
static struct radv_ia_multi_vgt_param_helpers
-radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline,
- const struct radv_tessellation_state *tess)
+radv_compute_ia_multi_vgt_param_helpers(struct radv_pipeline *pipeline)
{
struct radv_ia_multi_vgt_param_helpers ia_multi_vgt_param = {0};
const struct radv_device *device = pipeline->device;
if (radv_pipeline_has_tess(pipeline))
- ia_multi_vgt_param.primgroup_size = tess->num_patches;
+ ia_multi_vgt_param.primgroup_size = pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.tcs.num_patches;
else if (radv_pipeline_has_gs(pipeline))
ia_multi_vgt_param.primgroup_size = 64;
else
{
const VkPipelineVertexInputStateCreateInfo *vi_info =
pCreateInfo->pVertexInputState;
- struct radv_vertex_elements_info *velems = &pipeline->vertex_elements;
-
- for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
- const VkVertexInputAttributeDescription *desc =
- &vi_info->pVertexAttributeDescriptions[i];
- unsigned loc = desc->location;
- const struct vk_format_description *format_desc;
-
- format_desc = vk_format_description(desc->format);
-
- velems->format_size[loc] = format_desc->block.bits / 8;
- }
for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
const VkVertexInputBindingDescription *desc =
const struct radv_graphics_pipeline_create_info *extra)
{
VkResult result;
- bool has_view_index = false;
-
- RADV_FROM_HANDLE(radv_render_pass, pass, pCreateInfo->renderPass);
- struct radv_subpass *subpass = pass->subpasses + pCreateInfo->subpass;
- if (subpass->view_mask)
- has_view_index = true;
pipeline->device = device;
pipeline->layout = radv_pipeline_layout_from_handle(pCreateInfo->layout);
stage_feedbacks[stage] = &creation_feedback->pPipelineStageCreationFeedbacks[i];
}
- struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend, has_view_index);
+ struct radv_pipeline_key key = radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend);
result = radv_create_shaders(pipeline, device, cache, &key, pStages,
pCreateInfo->flags, pipeline_feedback,
tess = calculate_tess_state(pipeline, pCreateInfo);
}
- pipeline->graphics.ia_multi_vgt_param = radv_compute_ia_multi_vgt_param_helpers(pipeline, &tess);
+ pipeline->graphics.ia_multi_vgt_param = radv_compute_ia_multi_vgt_param_helpers(pipeline);
radv_compute_vertex_input_state(pipeline, pCreateInfo);
return result;
}
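+/* Emit the compute shader program address and RSRC registers. */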
+static void
+radv_pipeline_generate_hw_cs(struct radeon_cmdbuf *cs,
+ struct radv_pipeline *pipeline)
+{
+ struct radv_shader_variant *shader = pipeline->shaders[MESA_SHADER_COMPUTE];
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
+ struct radv_device *device = pipeline->device;
+
+ radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
+ radeon_emit(cs, va >> 8);
+ radeon_emit(cs, S_00B834_DATA(va >> 40));
+
+ radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
+ radeon_emit(cs, shader->config.rsrc1);
+ radeon_emit(cs, shader->config.rsrc2);
+ if (device->physical_device->rad_info.chip_class >= GFX10) {
+ radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, shader->config.rsrc3);
+ }
+}
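+/* Emit the compute resource limits and the threadgroup dimensions. */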
static void
-radv_compute_generate_pm4(struct radv_pipeline *pipeline)
+radv_pipeline_generate_compute_state(struct radeon_cmdbuf *cs,
+ struct radv_pipeline *pipeline)
{
- struct radv_shader_variant *compute_shader;
+ struct radv_shader_variant *shader = pipeline->shaders[MESA_SHADER_COMPUTE];
struct radv_device *device = pipeline->device;
unsigned threads_per_threadgroup;
unsigned threadgroups_per_cu = 1;
unsigned waves_per_threadgroup;
unsigned max_waves_per_sh = 0;
- uint64_t va;
-
- pipeline->cs.max_dw = device->physical_device->rad_info.chip_class >= GFX10 ? 22 : 20;
- pipeline->cs.buf = malloc(pipeline->cs.max_dw * 4);
-
- compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
- va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
-
- radeon_set_sh_reg_seq(&pipeline->cs, R_00B830_COMPUTE_PGM_LO, 2);
- radeon_emit(&pipeline->cs, va >> 8);
- radeon_emit(&pipeline->cs, S_00B834_DATA(va >> 40));
-
- radeon_set_sh_reg_seq(&pipeline->cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
- radeon_emit(&pipeline->cs, compute_shader->config.rsrc1);
- radeon_emit(&pipeline->cs, compute_shader->config.rsrc2);
- if (device->physical_device->rad_info.chip_class >= GFX10) {
- radeon_set_sh_reg(&pipeline->cs, R_00B8A0_COMPUTE_PGM_RSRC3, compute_shader->config.rsrc3);
- }
/* Calculate best compute resource limits. */
- threads_per_threadgroup = compute_shader->info.cs.block_size[0] *
- compute_shader->info.cs.block_size[1] *
- compute_shader->info.cs.block_size[2];
+ threads_per_threadgroup = shader->info.cs.block_size[0] *
+ shader->info.cs.block_size[1] *
+ shader->info.cs.block_size[2];
waves_per_threadgroup = DIV_ROUND_UP(threads_per_threadgroup,
- compute_shader->info.wave_size);
+ shader->info.wave_size);
if (device->physical_device->rad_info.chip_class >= GFX10 &&
waves_per_threadgroup == 1)
threadgroups_per_cu = 2;
- radeon_set_sh_reg(&pipeline->cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
+ radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
ac_get_compute_resource_limits(&device->physical_device->rad_info,
waves_per_threadgroup,
max_waves_per_sh,
threadgroups_per_cu));
- radeon_set_sh_reg_seq(&pipeline->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
- radeon_emit(&pipeline->cs,
- S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]));
- radeon_emit(&pipeline->cs,
- S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]));
- radeon_emit(&pipeline->cs,
- S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]));
+ radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
+ radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(shader->info.cs.block_size[0]));
+ radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(shader->info.cs.block_size[1]));
+ radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(shader->info.cs.block_size[2]));
+}
+
+static void
+radv_compute_generate_pm4(struct radv_pipeline *pipeline)
+{
+ struct radv_device *device = pipeline->device;
+ struct radeon_cmdbuf *cs = &pipeline->cs;
+
+ cs->max_dw = device->physical_device->rad_info.chip_class >= GFX10 ? 19 : 16;
+ cs->buf = malloc(cs->max_dw * 4);
+
+ radv_pipeline_generate_hw_cs(cs, pipeline);
+ radv_pipeline_generate_compute_state(cs, pipeline);
assert(pipeline->cs.cdw <= pipeline->cs.max_dw);
}