}
static uint32_t
-shader_stage_to_user_data_0(gl_shader_stage stage)
+shader_stage_to_user_data_0(gl_shader_stage stage, bool has_gs)
{
switch (stage) {
case MESA_SHADER_FRAGMENT:
return R_00B030_SPI_SHADER_USER_DATA_PS_0;
case MESA_SHADER_VERTEX:
- return R_00B130_SPI_SHADER_USER_DATA_VS_0;
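+ /* When a geometry shader is bound, the hardware VS stage runs as an ES
+  * (export shader), so the vertex-stage user SGPRs live in the ES register
+  * block; the GS stage has its own SPI_SHADER_USER_DATA_GS registers. */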
+ return has_gs ? R_00B330_SPI_SHADER_USER_DATA_ES_0 : R_00B130_SPI_SHADER_USER_DATA_VS_0;
+ case MESA_SHADER_GEOMETRY:
+ return R_00B230_SPI_SHADER_USER_DATA_GS_0;
case MESA_SHADER_COMPUTE:
return R_00B900_COMPUTE_USER_DATA_0;
default:
int idx, uint64_t va)
{
struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
- uint32_t base_reg = shader_stage_to_user_data_0(stage);
+ uint32_t base_reg = shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline));
if (loc->sgpr_idx == -1)
return;
assert(loc->num_sgprs == 2);
gl_shader_stage stage)
{
struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
- uint32_t base_reg = shader_stage_to_user_data_0(stage);
+ uint32_t base_reg = shader_stage_to_user_data_0(stage, radv_pipeline_has_gs(pipeline));
if (desc_set_loc->sgpr_idx == -1)
return;
idx, set->va,
MESA_SHADER_VERTEX);
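+ /* Also mirror the descriptor set address into the GS user SGPRs when
+  * the pipeline includes a geometry shader. */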
+ if ((stages & VK_SHADER_STAGE_GEOMETRY_BIT) && radv_pipeline_has_gs(pipeline))
+ emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
+ idx, set->va,
+ MESA_SHADER_GEOMETRY);
+
if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
emit_stage_descriptor_set_userdata(cmd_buffer, pipeline,
idx, set->va,
radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT,
AC_UD_PUSH_CONSTANTS, va);
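+ /* The push constant address is likewise needed by the GS stage. */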
+ if ((stages & VK_SHADER_STAGE_GEOMETRY_BIT) && radv_pipeline_has_gs(pipeline))
+ radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_GEOMETRY,
+ AC_UD_PUSH_CONSTANTS, va);
+
if (stages & VK_SHADER_STAGE_COMPUTE_BIT)
radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_COMPUTE,
AC_UD_PUSH_CONSTANTS, va);
struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
AC_UD_VS_BASE_VERTEX_START_INSTANCE);
if (loc->sgpr_idx != -1) {
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4, 2);
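+ /* The base vertex/start instance SGPRs follow the vertex shader to the
+  * ES register block when a geometry shader is enabled, so compute the
+  * base register instead of hardcoding the VS offset. */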
+ uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(cmd_buffer->state.pipeline));
+ radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
radeon_emit(cmd_buffer->cs, firstVertex);
radeon_emit(cmd_buffer->cs, firstInstance);
}
struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
AC_UD_VS_BASE_VERTEX_START_INSTANCE);
if (loc->sgpr_idx != -1) {
- radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4, 2);
+ uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(cmd_buffer->state.pipeline));
+ radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
radeon_emit(cmd_buffer->cs, vertexOffset);
radeon_emit(cmd_buffer->cs, firstInstance);
}
struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
AC_UD_VS_BASE_VERTEX_START_INSTANCE);
+ uint32_t base_reg = shader_stage_to_user_data_0(MESA_SHADER_VERTEX, radv_pipeline_has_gs(cmd_buffer->state.pipeline));
assert(loc->sgpr_idx != -1);
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
radeon_emit(cs, 1);
PKT3_DRAW_INDIRECT_MULTI,
8, false));
radeon_emit(cs, 0);
- radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4) - SI_SH_REG_OFFSET) >> 2);
- radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + (loc->sgpr_idx + 1) * 4) - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, ((base_reg + loc->sgpr_idx * 4) - SI_SH_REG_OFFSET) >> 2);
+ radeon_emit(cs, ((base_reg + (loc->sgpr_idx + 1) * 4) - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, S_2C3_COUNT_INDIRECT_ENABLE(!!count_va)); /* draw_index and count_indirect enable */
radeon_emit(cs, draw_count); /* count */
radeon_emit(cs, count_va); /* count_addr */