RADEON_LLVM_AMDGPU_GS = 88,
RADEON_LLVM_AMDGPU_PS = 89,
RADEON_LLVM_AMDGPU_CS = 90,
+ RADEON_LLVM_AMDGPU_HS = 93,
};
#define CONST_ADDR_SPACE 2
LLVMValueRef descriptor_sets[AC_UD_MAX_SETS];
LLVMValueRef ring_offsets;
LLVMValueRef push_constants;
+ LLVMValueRef view_index;
LLVMValueRef num_work_groups;
LLVMValueRef workgroup_ids;
LLVMValueRef local_invocation_ids;
LLVMValueRef tcs_out_layout;
LLVMValueRef tcs_in_layout;
LLVMValueRef oc_lds;
+ LLVMValueRef merged_wave_info;
LLVMValueRef tess_factor_offset;
LLVMValueRef tcs_patch_id;
LLVMValueRef tcs_rel_ids;
LLVMValueRef gs2vs_offset;
LLVMValueRef gs_wave_id;
LLVMValueRef gs_vtx_offset[6];
- LLVMValueRef gs_prim_id, gs_invocation_id;
LLVMValueRef esgs_ring;
LLVMValueRef gsvs_ring;
LLVMValueRef persp_sample, persp_center, persp_centroid;
LLVMValueRef linear_sample, linear_center, linear_centroid;
- LLVMTypeRef i1;
- LLVMTypeRef i8;
- LLVMTypeRef i16;
- LLVMTypeRef i32;
- LLVMTypeRef i64;
- LLVMTypeRef v2i32;
- LLVMTypeRef v3i32;
- LLVMTypeRef v4i32;
- LLVMTypeRef v8i32;
- LLVMTypeRef f64;
- LLVMTypeRef f32;
- LLVMTypeRef f16;
- LLVMTypeRef v2f32;
- LLVMTypeRef v4f32;
- LLVMTypeRef voidt;
-
- LLVMValueRef i1true;
- LLVMValueRef i1false;
- LLVMValueRef i32zero;
- LLVMValueRef i32one;
- LLVMValueRef f32zero;
- LLVMValueRef f32one;
- LLVMValueRef v4f32empty;
-
- unsigned uniform_md_kind;
- LLVMValueRef empty_md;
gl_shader_stage stage;
- LLVMValueRef lds;
LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
uint64_t input_mask;
unsigned tes_primitive_mode;
uint64_t tess_outputs_written;
uint64_t tess_patch_outputs_written;
+
+ uint32_t tcs_patch_outputs_read;
+ uint64_t tcs_outputs_read;
};
static inline struct nir_to_llvm_context *
return container_of(abi, ctx, abi);
}
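+/* Map a GLSL base type (arrays stripped) to the backend's scalar LLVM type. */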
+static LLVMTypeRef
+nir2llvmtype(struct ac_nir_context *ctx,
+ const struct glsl_type *type)
+{
+ switch (glsl_get_base_type(glsl_without_array(type))) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ return ctx->ac.i32;
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ return ctx->ac.i64;
+ case GLSL_TYPE_DOUBLE:
+ return ctx->ac.f64;
+ case GLSL_TYPE_FLOAT:
+ return ctx->ac.f32;
+ default:
+ assert(!"Unsupported type in nir2llvmtype()");
+ break;
+ }
+ return 0;
+}
+
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
const nir_deref_var *deref,
enum ac_descriptor_type desc_type,
+ const nir_tex_instr *instr,
bool image, bool write);
static unsigned radeon_llvm_reg_index_soa(unsigned index, unsigned chan)
unreachable("illegal slot in get unique index\n");
}
-static unsigned llvm_get_type_size(LLVMTypeRef type)
-{
- LLVMTypeKind kind = LLVMGetTypeKind(type);
-
- switch (kind) {
- case LLVMIntegerTypeKind:
- return LLVMGetIntTypeWidth(type) / 8;
- case LLVMFloatTypeKind:
- return 4;
- case LLVMPointerTypeKind:
- return 8;
- case LLVMVectorTypeKind:
- return LLVMGetVectorSize(type) *
- llvm_get_type_size(LLVMGetElementType(type));
- default:
- assert(0);
- return 0;
- }
-}
-
static void set_llvm_calling_convention(LLVMValueRef func,
gl_shader_stage stage)
{
switch (stage) {
case MESA_SHADER_VERTEX:
- case MESA_SHADER_TESS_CTRL:
case MESA_SHADER_TESS_EVAL:
calling_conv = RADEON_LLVM_AMDGPU_VS;
break;
case MESA_SHADER_GEOMETRY:
calling_conv = RADEON_LLVM_AMDGPU_GS;
break;
+ case MESA_SHADER_TESS_CTRL:
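+		/* Use the HS calling convention with LLVM 5.0+, fall back to the VS convention on older LLVM. */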
+ calling_conv = HAVE_LLVM >= 0x0500 ? RADEON_LLVM_AMDGPU_HS : RADEON_LLVM_AMDGPU_VS;
+ break;
case MESA_SHADER_FRAGMENT:
calling_conv = RADEON_LLVM_AMDGPU_PS;
break;
LLVMTypeRef type, LLVMValueRef *param_ptr)
{
add_argument(info, type, param_ptr);
- info->num_sgprs_used += llvm_get_type_size(type) / 4;
+ info->num_sgprs_used += ac_get_type_size(type) / 4;
info->sgpr_count++;
}
LLVMValueRef *param_ptr)
{
add_sgpr_argument(info, type, param_ptr);
- info->num_user_sgprs_used += llvm_get_type_size(type) / 4;
+ info->num_user_sgprs_used += ac_get_type_size(type) / 4;
info->user_sgpr_count++;
}
LLVMValueRef *param_ptr)
{
add_argument(info, type, param_ptr);
- info->num_vgprs_used += llvm_get_type_size(type) / 4;
+ info->num_vgprs_used += ac_get_type_size(type) / 4;
}
static inline void
CONST_ADDR_SPACE);
}
-static LLVMTypeRef to_integer_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
-{
- if (t == ctx->f16 || t == ctx->i16)
- return ctx->i16;
- else if (t == ctx->f32 || t == ctx->i32)
- return ctx->i32;
- else if (t == ctx->f64 || t == ctx->i64)
- return ctx->i64;
- else
- unreachable("Unhandled integer size");
-}
-
-static LLVMTypeRef to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
-{
- if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
- LLVMTypeRef elem_type = LLVMGetElementType(t);
- return LLVMVectorType(to_integer_type_scalar(ctx, elem_type),
- LLVMGetVectorSize(t));
- }
- return to_integer_type_scalar(ctx, t);
-}
-
-static LLVMValueRef to_integer(struct ac_llvm_context *ctx, LLVMValueRef v)
-{
- LLVMTypeRef type = LLVMTypeOf(v);
- return LLVMBuildBitCast(ctx->builder, v, to_integer_type(ctx, type), "");
-}
-
-static LLVMTypeRef to_float_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
-{
- if (t == ctx->i16 || t == ctx->f16)
- return ctx->f16;
- else if (t == ctx->i32 || t == ctx->f32)
- return ctx->f32;
- else if (t == ctx->i64 || t == ctx->f64)
- return ctx->f64;
- else
- unreachable("Unhandled float size");
-}
-
-static LLVMTypeRef to_float_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
-{
- if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
- LLVMTypeRef elem_type = LLVMGetElementType(t);
- return LLVMVectorType(to_float_type_scalar(ctx, elem_type),
- LLVMGetVectorSize(t));
- }
- return to_float_type_scalar(ctx, t);
-}
-
-static LLVMValueRef to_float(struct ac_llvm_context *ctx, LLVMValueRef v)
-{
- LLVMTypeRef type = LLVMTypeOf(v);
- return LLVMBuildBitCast(ctx->builder, v, to_float_type(ctx, type), "");
-}
-
static int get_elem_bits(struct ac_llvm_context *ctx, LLVMTypeRef type)
{
if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
{
return LLVMBuildMul(ctx->builder,
unpack_param(&ctx->ac, ctx->tcs_out_offsets, 0, 16),
- LLVMConstInt(ctx->i32, 4, false), "");
+ LLVMConstInt(ctx->ac.i32, 4, false), "");
}
static LLVMValueRef
{
return LLVMBuildMul(ctx->builder,
unpack_param(&ctx->ac, ctx->tcs_out_offsets, 16, 16),
- LLVMConstInt(ctx->i32, 4, false), "");
+ LLVMConstInt(ctx->ac.i32, 4, false), "");
}
static LLVMValueRef
ud_info->indirect_offset = indirect_offset;
}
-static void declare_tess_lds(struct nir_to_llvm_context *ctx)
-{
- unsigned lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
- ctx->lds = LLVMBuildIntToPtr(ctx->builder, ctx->i32zero,
- LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), LOCAL_ADDR_SPACE),
- "tess_lds");
-}
-
struct user_sgpr_info {
bool need_ring_offsets;
uint8_t sgpr_count;
}
}
-static void create_function(struct nir_to_llvm_context *ctx)
+static void
+radv_define_common_user_sgprs_phase1(struct nir_to_llvm_context *ctx,
+ gl_shader_stage stage,
+ bool has_previous_stage,
+ gl_shader_stage previous_stage,
+ const struct user_sgpr_info *user_sgpr_info,
+ struct arg_info *args,
+ LLVMValueRef *desc_sets)
{
unsigned num_sets = ctx->options->layout ? ctx->options->layout->num_sets : 0;
- uint8_t user_sgpr_idx;
- struct user_sgpr_info user_sgpr_info;
- struct arg_info args = {};
- LLVMValueRef desc_sets;
-
- allocate_user_sgprs(ctx, &user_sgpr_info);
- if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
- add_user_sgpr_argument(&args, const_array(ctx->v4i32, 16), &ctx->ring_offsets); /* address of rings */
- }
+ unsigned stage_mask = 1 << stage;
+ if (has_previous_stage)
+ stage_mask |= 1 << previous_stage;
/* 1 for each descriptor set */
- if (!user_sgpr_info.indirect_all_descriptor_sets) {
+ if (!user_sgpr_info->indirect_all_descriptor_sets) {
for (unsigned i = 0; i < num_sets; ++i) {
- if (ctx->options->layout->set[i].layout->shader_stages & (1 << ctx->stage)) {
- add_user_sgpr_array_argument(&args, const_array(ctx->i8, 1024 * 1024), &ctx->descriptor_sets[i]);
+ if (ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
+ add_user_sgpr_array_argument(args, const_array(ctx->ac.i8, 1024 * 1024), &ctx->descriptor_sets[i]);
}
}
} else
- add_user_sgpr_array_argument(&args, const_array(const_array(ctx->i8, 1024 * 1024), 32), &desc_sets);
+ add_user_sgpr_array_argument(args, const_array(const_array(ctx->ac.i8, 1024 * 1024), 32), desc_sets);
if (ctx->shader_info->info.needs_push_constants) {
/* 1 for push constants and dynamic descriptors */
- add_user_sgpr_array_argument(&args, const_array(ctx->i8, 1024 * 1024), &ctx->push_constants);
+ add_user_sgpr_array_argument(args, const_array(ctx->ac.i8, 1024 * 1024), &ctx->push_constants);
}
+}
- switch (ctx->stage) {
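+/* Phase 1 declares the shared user SGPR arguments; phase 2 assigns their user data
+ * locations (and loads the indirect descriptor sets) once the argument layout is final. */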
+static void
+radv_define_common_user_sgprs_phase2(struct nir_to_llvm_context *ctx,
+ gl_shader_stage stage,
+ bool has_previous_stage,
+ gl_shader_stage previous_stage,
+ const struct user_sgpr_info *user_sgpr_info,
+ LLVMValueRef desc_sets,
+ uint8_t *user_sgpr_idx)
+{
+ unsigned num_sets = ctx->options->layout ? ctx->options->layout->num_sets : 0;
+ unsigned stage_mask = 1 << stage;
+ if (has_previous_stage)
+ stage_mask |= 1 << previous_stage;
+
+ if (!user_sgpr_info->indirect_all_descriptor_sets) {
+ for (unsigned i = 0; i < num_sets; ++i) {
+ if (ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
+ set_userdata_location(&ctx->shader_info->user_sgprs_locs.descriptor_sets[i], user_sgpr_idx, 2);
+ } else
+ ctx->descriptor_sets[i] = NULL;
+ }
+ } else {
+ uint32_t desc_sgpr_idx = *user_sgpr_idx;
+ set_userdata_location_shader(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS, user_sgpr_idx, 2);
+
+ for (unsigned i = 0; i < num_sets; ++i) {
+ if (ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
+ set_userdata_location_indirect(&ctx->shader_info->user_sgprs_locs.descriptor_sets[i], desc_sgpr_idx, 2, i * 8);
+ ctx->descriptor_sets[i] = ac_build_load_to_sgpr(&ctx->ac, desc_sets, LLVMConstInt(ctx->ac.i32, i, false));
+
+ } else
+ ctx->descriptor_sets[i] = NULL;
+ }
+ ctx->shader_info->need_indirect_descriptor_sets = true;
+ }
+
+ if (ctx->shader_info->info.needs_push_constants) {
+ set_userdata_location_shader(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx, 2);
+ }
+}
+
+static void
+radv_define_vs_user_sgprs_phase1(struct nir_to_llvm_context *ctx,
+ gl_shader_stage stage,
+ bool has_previous_stage,
+ gl_shader_stage previous_stage,
+ struct arg_info *args)
+{
+ if (!ctx->is_gs_copy_shader && (stage == MESA_SHADER_VERTEX || (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
+ if (ctx->shader_info->info.vs.has_vertex_buffers)
+ add_user_sgpr_argument(args, const_array(ctx->ac.v4i32, 16), &ctx->vertex_buffers); /* vertex buffers */
+ add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.base_vertex); // base vertex
+		add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.start_instance); // start instance
+ if (ctx->shader_info->info.vs.needs_draw_id)
+ add_user_sgpr_argument(args, ctx->ac.i32, &ctx->abi.draw_id); // draw id
+ }
+}
+
+static void
+radv_define_vs_user_sgprs_phase2(struct nir_to_llvm_context *ctx,
+ gl_shader_stage stage,
+ bool has_previous_stage,
+ gl_shader_stage previous_stage,
+ uint8_t *user_sgpr_idx)
+{
+ if (!ctx->is_gs_copy_shader && (stage == MESA_SHADER_VERTEX || (has_previous_stage && previous_stage == MESA_SHADER_VERTEX))) {
+ if (ctx->shader_info->info.vs.has_vertex_buffers) {
+ set_userdata_location_shader(ctx, AC_UD_VS_VERTEX_BUFFERS, user_sgpr_idx, 2);
+ }
+ unsigned vs_num = 2;
+ if (ctx->shader_info->info.vs.needs_draw_id)
+ vs_num++;
+
+ set_userdata_location_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, user_sgpr_idx, vs_num);
+ }
+}
+
+
+static void create_function(struct nir_to_llvm_context *ctx,
+ gl_shader_stage stage,
+ bool has_previous_stage,
+ gl_shader_stage previous_stage)
+{
+ uint8_t user_sgpr_idx;
+ struct user_sgpr_info user_sgpr_info;
+ struct arg_info args = {};
+ LLVMValueRef desc_sets;
+
+ allocate_user_sgprs(ctx, &user_sgpr_info);
+
+ if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
+ add_user_sgpr_argument(&args, const_array(ctx->ac.v4i32, 16), &ctx->ring_offsets); /* address of rings */
+ }
+
+ switch (stage) {
case MESA_SHADER_COMPUTE:
+ radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
if (ctx->shader_info->info.cs.grid_components_used)
- add_user_sgpr_argument(&args, LLVMVectorType(ctx->i32, ctx->shader_info->info.cs.grid_components_used), &ctx->num_work_groups); /* grid size */
- add_sgpr_argument(&args, LLVMVectorType(ctx->i32, 3), &ctx->workgroup_ids);
- add_sgpr_argument(&args, ctx->i32, &ctx->tg_size);
- add_vgpr_argument(&args, LLVMVectorType(ctx->i32, 3), &ctx->local_invocation_ids);
+ add_user_sgpr_argument(&args, LLVMVectorType(ctx->ac.i32, ctx->shader_info->info.cs.grid_components_used), &ctx->num_work_groups); /* grid size */
+ add_sgpr_argument(&args, ctx->ac.v3i32, &ctx->workgroup_ids);
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->tg_size);
+ add_vgpr_argument(&args, ctx->ac.v3i32, &ctx->local_invocation_ids);
break;
case MESA_SHADER_VERTEX:
- if (!ctx->is_gs_copy_shader) {
- if (ctx->shader_info->info.vs.has_vertex_buffers)
- add_user_sgpr_argument(&args, const_array(ctx->v4i32, 16), &ctx->vertex_buffers); /* vertex buffers */
- add_user_sgpr_argument(&args, ctx->i32, &ctx->abi.base_vertex); // base vertex
- add_user_sgpr_argument(&args, ctx->i32, &ctx->abi.start_instance);// start instance
- if (ctx->shader_info->info.vs.needs_draw_id)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->abi.draw_id); // draw id
- }
+ radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
+ radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
+ if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.vs.as_es && !ctx->options->key.vs.as_ls && ctx->options->key.has_multiview_view_index))
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
if (ctx->options->key.vs.as_es)
- add_sgpr_argument(&args, ctx->i32, &ctx->es2gs_offset); // es2gs offset
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->es2gs_offset); // es2gs offset
else if (ctx->options->key.vs.as_ls)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->ls_out_layout); // ls out layout
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.vertex_id); // vertex id
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->ls_out_layout); // ls out layout
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.vertex_id); // vertex id
if (!ctx->is_gs_copy_shader) {
- add_vgpr_argument(&args, ctx->i32, &ctx->rel_auto_id); // rel auto id
- add_vgpr_argument(&args, ctx->i32, &ctx->vs_prim_id); // vs prim id
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.instance_id); // instance id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->rel_auto_id); // rel auto id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->vs_prim_id); // vs prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.instance_id); // instance id
}
break;
case MESA_SHADER_TESS_CTRL:
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_offsets); // tcs out offsets
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_out_layout); // tcs out layout
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_in_layout); // tcs in layout
- add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // param oc lds
- add_sgpr_argument(&args, ctx->i32, &ctx->tess_factor_offset); // tess factor offset
- add_vgpr_argument(&args, ctx->i32, &ctx->tcs_patch_id); // patch id
- add_vgpr_argument(&args, ctx->i32, &ctx->tcs_rel_ids); // rel ids;
+ if (has_previous_stage) {
+ // First 6 system regs
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // param oc lds
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->merged_wave_info); // merged wave info
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->tess_factor_offset); // tess factor offset
+
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // scratch offset
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
+
+ radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
+ radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->ls_out_layout); // ls out layout
+
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_offsets); // tcs out offsets
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_layout); // tcs out layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_in_layout); // tcs in layout
+ if (ctx->shader_info->info.needs_multiview_view_index)
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_patch_id); // patch id
+		add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_rel_ids); // rel ids
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.vertex_id); // vertex id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->rel_auto_id); // rel auto id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->vs_prim_id); // vs prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.instance_id); // instance id
+ } else {
+ radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_offsets); // tcs out offsets
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_out_layout); // tcs out layout
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_in_layout); // tcs in layout
+ if (ctx->shader_info->info.needs_multiview_view_index)
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // param oc lds
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->tess_factor_offset); // tess factor offset
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_patch_id); // patch id
+			add_vgpr_argument(&args, ctx->ac.i32, &ctx->tcs_rel_ids); // rel ids
+ }
break;
case MESA_SHADER_TESS_EVAL:
- add_user_sgpr_argument(&args, ctx->i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+ radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+ if (ctx->shader_info->info.needs_multiview_view_index || (!ctx->options->key.tes.as_es && ctx->options->key.has_multiview_view_index))
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
if (ctx->options->key.tes.as_es) {
- add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // OC LDS
- add_sgpr_argument(&args, ctx->i32, NULL); //
- add_sgpr_argument(&args, ctx->i32, &ctx->es2gs_offset); // es2gs offset
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // OC LDS
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); //
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->es2gs_offset); // es2gs offset
} else {
- add_sgpr_argument(&args, ctx->i32, NULL); //
- add_sgpr_argument(&args, ctx->i32, &ctx->oc_lds); // OC LDS
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); //
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // OC LDS
}
- add_vgpr_argument(&args, ctx->f32, &ctx->tes_u); // tes_u
- add_vgpr_argument(&args, ctx->f32, &ctx->tes_v); // tes_v
- add_vgpr_argument(&args, ctx->i32, &ctx->tes_rel_patch_id); // tes rel patch id
- add_vgpr_argument(&args, ctx->i32, &ctx->tes_patch_id); // tes patch id
+ add_vgpr_argument(&args, ctx->ac.f32, &ctx->tes_u); // tes_u
+ add_vgpr_argument(&args, ctx->ac.f32, &ctx->tes_v); // tes_v
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_rel_patch_id); // tes rel patch id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_patch_id); // tes patch id
break;
case MESA_SHADER_GEOMETRY:
- add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_ring_stride); // gsvs stride
- add_user_sgpr_argument(&args, ctx->i32, &ctx->gsvs_num_entries); // gsvs num entires
- add_sgpr_argument(&args, ctx->i32, &ctx->gs2vs_offset); // gs2vs offset
- add_sgpr_argument(&args, ctx->i32, &ctx->gs_wave_id); // wave id
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[0]); // vtx0
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[1]); // vtx1
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_prim_id); // prim id
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[2]);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[3]);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[4]);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_vtx_offset[5]);
- add_vgpr_argument(&args, ctx->i32, &ctx->gs_invocation_id);
+ if (has_previous_stage) {
+ // First 6 system regs
+			add_sgpr_argument(&args, ctx->ac.i32, &ctx->gs2vs_offset); // gs2vs offset
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->merged_wave_info); // merged wave info
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->oc_lds); // param oc lds
+
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // scratch offset
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
+ add_sgpr_argument(&args, ctx->ac.i32, NULL); // unknown
+
+ radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
+ if (previous_stage == MESA_SHADER_TESS_EVAL)
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->tcs_offchip_layout); // tcs offchip layout
+ else
+ radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_ring_stride); // gsvs stride
+			add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_num_entries); // gsvs num entries
+ if (ctx->shader_info->info.needs_multiview_view_index)
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+
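+			/* For the merged ES+GS path the per-vertex offsets arrive packed two per VGPR
+			 * (vtx0/1, vtx2/3, vtx4/5), unlike the six separate VGPRs used in the non-merged case below. */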
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[0]); // vtx01
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[2]); // vtx23
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.gs_prim_id); // prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.gs_invocation_id);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[4]);
+
+ if (previous_stage == MESA_SHADER_VERTEX) {
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.vertex_id); // vertex id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->rel_auto_id); // rel auto id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->vs_prim_id); // vs prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.instance_id); // instance id
+ } else {
+ add_vgpr_argument(&args, ctx->ac.f32, &ctx->tes_u); // tes_u
+ add_vgpr_argument(&args, ctx->ac.f32, &ctx->tes_v); // tes_v
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_rel_patch_id); // tes rel patch id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->tes_patch_id); // tes patch id
+ }
+ } else {
+ radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
+ radv_define_vs_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &args);
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_ring_stride); // gsvs stride
+			add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->gsvs_num_entries); // gsvs num entries
+ if (ctx->shader_info->info.needs_multiview_view_index)
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->view_index);
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->gs2vs_offset); // gs2vs offset
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->gs_wave_id); // wave id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[0]); // vtx0
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[1]); // vtx1
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.gs_prim_id); // prim id
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[2]);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[3]);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[4]);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->gs_vtx_offset[5]);
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.gs_invocation_id);
+ }
break;
case MESA_SHADER_FRAGMENT:
+ radv_define_common_user_sgprs_phase1(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, &args, &desc_sets);
if (ctx->shader_info->info.ps.needs_sample_positions)
- add_user_sgpr_argument(&args, ctx->i32, &ctx->sample_pos_offset); /* sample position offset */
- add_sgpr_argument(&args, ctx->i32, &ctx->prim_mask); /* prim mask */
- add_vgpr_argument(&args, ctx->v2i32, &ctx->persp_sample); /* persp sample */
- add_vgpr_argument(&args, ctx->v2i32, &ctx->persp_center); /* persp center */
- add_vgpr_argument(&args, ctx->v2i32, &ctx->persp_centroid); /* persp centroid */
- add_vgpr_argument(&args, ctx->v3i32, NULL); /* persp pull model */
- add_vgpr_argument(&args, ctx->v2i32, &ctx->linear_sample); /* linear sample */
- add_vgpr_argument(&args, ctx->v2i32, &ctx->linear_center); /* linear center */
- add_vgpr_argument(&args, ctx->v2i32, &ctx->linear_centroid); /* linear centroid */
- add_vgpr_argument(&args, ctx->f32, NULL); /* line stipple tex */
- add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[0]); /* pos x float */
- add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[1]); /* pos y float */
- add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[2]); /* pos z float */
- add_vgpr_argument(&args, ctx->f32, &ctx->abi.frag_pos[3]); /* pos w float */
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.front_face); /* front face */
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.ancillary); /* ancillary */
- add_vgpr_argument(&args, ctx->i32, &ctx->abi.sample_coverage); /* sample coverage */
- add_vgpr_argument(&args, ctx->i32, NULL); /* fixed pt */
+ add_user_sgpr_argument(&args, ctx->ac.i32, &ctx->sample_pos_offset); /* sample position offset */
+ add_sgpr_argument(&args, ctx->ac.i32, &ctx->prim_mask); /* prim mask */
+ add_vgpr_argument(&args, ctx->ac.v2i32, &ctx->persp_sample); /* persp sample */
+ add_vgpr_argument(&args, ctx->ac.v2i32, &ctx->persp_center); /* persp center */
+ add_vgpr_argument(&args, ctx->ac.v2i32, &ctx->persp_centroid); /* persp centroid */
+ add_vgpr_argument(&args, ctx->ac.v3i32, NULL); /* persp pull model */
+ add_vgpr_argument(&args, ctx->ac.v2i32, &ctx->linear_sample); /* linear sample */
+ add_vgpr_argument(&args, ctx->ac.v2i32, &ctx->linear_center); /* linear center */
+ add_vgpr_argument(&args, ctx->ac.v2i32, &ctx->linear_centroid); /* linear centroid */
+ add_vgpr_argument(&args, ctx->ac.f32, NULL); /* line stipple tex */
+ add_vgpr_argument(&args, ctx->ac.f32, &ctx->abi.frag_pos[0]); /* pos x float */
+ add_vgpr_argument(&args, ctx->ac.f32, &ctx->abi.frag_pos[1]); /* pos y float */
+ add_vgpr_argument(&args, ctx->ac.f32, &ctx->abi.frag_pos[2]); /* pos z float */
+ add_vgpr_argument(&args, ctx->ac.f32, &ctx->abi.frag_pos[3]); /* pos w float */
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.front_face); /* front face */
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.ancillary); /* ancillary */
+ add_vgpr_argument(&args, ctx->ac.i32, &ctx->abi.sample_coverage); /* sample coverage */
+ add_vgpr_argument(&args, ctx->ac.i32, NULL); /* fixed pt */
break;
default:
unreachable("Shader stage not implemented");
ctx->context, ctx->module, ctx->builder, NULL, 0, &args,
ctx->max_workgroup_size,
ctx->options->unsafe_math);
- set_llvm_calling_convention(ctx->main_function, ctx->stage);
+ set_llvm_calling_convention(ctx->main_function, stage);
ctx->shader_info->num_input_vgprs = 0;
- ctx->shader_info->num_input_sgprs = ctx->shader_info->num_user_sgprs =
- ctx->options->supports_spill ? 2 : 0;
+ ctx->shader_info->num_input_sgprs = ctx->options->supports_spill ? 2 : 0;
- ctx->shader_info->num_user_sgprs += args.num_user_sgprs_used;
ctx->shader_info->num_input_sgprs += args.num_sgprs_used;
if (ctx->stage != MESA_SHADER_FRAGMENT)
set_userdata_location_shader(ctx, AC_UD_SCRATCH_RING_OFFSETS, &user_sgpr_idx, 2);
if (ctx->options->supports_spill) {
ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
- LLVMPointerType(ctx->i8, CONST_ADDR_SPACE),
+ LLVMPointerType(ctx->ac.i8, CONST_ADDR_SPACE),
NULL, 0, AC_FUNC_ATTR_READNONE);
ctx->ring_offsets = LLVMBuildBitCast(ctx->builder, ctx->ring_offsets,
- const_array(ctx->v4i32, 16), "");
+ const_array(ctx->ac.v4i32, 16), "");
}
}
+
+	/* For merged shaders the user SGPRs start at 8, with 8 system SGPRs in front (including
+	 * the rw_buffers at s0/s1). With user SGPR0 = s8, let's restart the count from 0. */
+ if (has_previous_stage)
+ user_sgpr_idx = 0;
- if (!user_sgpr_info.indirect_all_descriptor_sets) {
- for (unsigned i = 0; i < num_sets; ++i) {
- if (ctx->options->layout->set[i].layout->shader_stages & (1 << ctx->stage)) {
- set_userdata_location(&ctx->shader_info->user_sgprs_locs.descriptor_sets[i], &user_sgpr_idx, 2);
- } else
- ctx->descriptor_sets[i] = NULL;
- }
- } else {
- uint32_t desc_sgpr_idx = user_sgpr_idx;
- set_userdata_location_shader(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS, &user_sgpr_idx, 2);
-
- for (unsigned i = 0; i < num_sets; ++i) {
- if (ctx->options->layout->set[i].layout->shader_stages & (1 << ctx->stage)) {
- set_userdata_location_indirect(&ctx->shader_info->user_sgprs_locs.descriptor_sets[i], desc_sgpr_idx, 2, i * 8);
- ctx->descriptor_sets[i] = ac_build_indexed_load_const(&ctx->ac, desc_sets, LLVMConstInt(ctx->i32, i, false));
-
- } else
- ctx->descriptor_sets[i] = NULL;
- }
- ctx->shader_info->need_indirect_descriptor_sets = true;
- }
-
- if (ctx->shader_info->info.needs_push_constants) {
- set_userdata_location_shader(ctx, AC_UD_PUSH_CONSTANTS, &user_sgpr_idx, 2);
- }
+ radv_define_common_user_sgprs_phase2(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_info, desc_sets, &user_sgpr_idx);
- switch (ctx->stage) {
+ switch (stage) {
case MESA_SHADER_COMPUTE:
if (ctx->shader_info->info.cs.grid_components_used) {
set_userdata_location_shader(ctx, AC_UD_CS_GRID_SIZE, &user_sgpr_idx, ctx->shader_info->info.cs.grid_components_used);
}
break;
case MESA_SHADER_VERTEX:
- if (!ctx->is_gs_copy_shader) {
- if (ctx->shader_info->info.vs.has_vertex_buffers) {
- set_userdata_location_shader(ctx, AC_UD_VS_VERTEX_BUFFERS, &user_sgpr_idx, 2);
- }
- unsigned vs_num = 2;
- if (ctx->shader_info->info.vs.needs_draw_id)
- vs_num++;
-
- set_userdata_location_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, &user_sgpr_idx, vs_num);
- }
+ radv_define_vs_user_sgprs_phase2(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_idx);
+ if (ctx->view_index)
+ set_userdata_location_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
if (ctx->options->key.vs.as_ls) {
set_userdata_location_shader(ctx, AC_UD_VS_LS_TCS_IN_LAYOUT, &user_sgpr_idx, 1);
}
if (ctx->options->key.vs.as_ls)
- declare_tess_lds(ctx);
+ ac_declare_lds_as_pointer(&ctx->ac);
break;
case MESA_SHADER_TESS_CTRL:
+ radv_define_vs_user_sgprs_phase2(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_idx);
+ if (has_previous_stage)
+ set_userdata_location_shader(ctx, AC_UD_VS_LS_TCS_IN_LAYOUT, &user_sgpr_idx, 1);
set_userdata_location_shader(ctx, AC_UD_TCS_OFFCHIP_LAYOUT, &user_sgpr_idx, 4);
- declare_tess_lds(ctx);
+ if (ctx->view_index)
+ set_userdata_location_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
+ ac_declare_lds_as_pointer(&ctx->ac);
break;
case MESA_SHADER_TESS_EVAL:
set_userdata_location_shader(ctx, AC_UD_TES_OFFCHIP_LAYOUT, &user_sgpr_idx, 1);
+ if (ctx->view_index)
+ set_userdata_location_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
break;
case MESA_SHADER_GEOMETRY:
+ if (has_previous_stage) {
+ if (previous_stage == MESA_SHADER_VERTEX)
+ radv_define_vs_user_sgprs_phase2(ctx, stage, has_previous_stage, previous_stage, &user_sgpr_idx);
+ else
+ set_userdata_location_shader(ctx, AC_UD_TES_OFFCHIP_LAYOUT, &user_sgpr_idx, 1);
+ }
set_userdata_location_shader(ctx, AC_UD_GS_VS_RING_STRIDE_ENTRIES, &user_sgpr_idx, 2);
+ if (ctx->view_index)
+ set_userdata_location_shader(ctx, AC_UD_VIEW_INDEX, &user_sgpr_idx, 1);
+ if (has_previous_stage)
+ ac_declare_lds_as_pointer(&ctx->ac);
break;
case MESA_SHADER_FRAGMENT:
if (ctx->shader_info->info.ps.needs_sample_positions) {
default:
unreachable("Shader stage not implemented");
}
-}
-
-static void setup_types(struct nir_to_llvm_context *ctx)
-{
- LLVMValueRef args[4];
-
- ctx->voidt = LLVMVoidTypeInContext(ctx->context);
- ctx->i1 = LLVMIntTypeInContext(ctx->context, 1);
- ctx->i8 = LLVMIntTypeInContext(ctx->context, 8);
- ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
- ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
- ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
- ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
- ctx->v3i32 = LLVMVectorType(ctx->i32, 3);
- ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
- ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
- ctx->f32 = LLVMFloatTypeInContext(ctx->context);
- ctx->f16 = LLVMHalfTypeInContext(ctx->context);
- ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
- ctx->v2f32 = LLVMVectorType(ctx->f32, 2);
- ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
-
- ctx->i1false = LLVMConstInt(ctx->i1, 0, false);
- ctx->i1true = LLVMConstInt(ctx->i1, 1, false);
- ctx->i32zero = LLVMConstInt(ctx->i32, 0, false);
- ctx->i32one = LLVMConstInt(ctx->i32, 1, false);
- ctx->f32zero = LLVMConstReal(ctx->f32, 0.0);
- ctx->f32one = LLVMConstReal(ctx->f32, 1.0);
- args[0] = ctx->f32zero;
- args[1] = ctx->f32zero;
- args[2] = ctx->f32zero;
- args[3] = ctx->f32one;
- ctx->v4f32empty = LLVMConstVector(args, 4);
-
- ctx->uniform_md_kind =
- LLVMGetMDKindIDInContext(ctx->context, "amdgpu.uniform", 14);
- ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
-
- args[0] = LLVMConstReal(ctx->f32, 2.5);
+ ctx->shader_info->num_user_sgprs = user_sgpr_idx;
}
static int get_llvm_num_components(LLVMValueRef value)
{
int count = get_llvm_num_components(value);
- assert(index < count);
if (count == 1)
return value;
LLVMValueRef src1)
{
LLVMValueRef result;
- src0 = to_float(ctx, src0);
- src1 = to_float(ctx, src1);
+ src0 = ac_to_float(ctx, src0);
+ src1 = ac_to_float(ctx, src1);
result = LLVMBuildFCmp(ctx->builder, pred, src0, src1, "");
return LLVMBuildSelect(ctx->builder, result,
LLVMConstInt(ctx->i32, 0xFFFFFFFF, false),
{
char name[64];
LLVMValueRef params[] = {
- to_float(ctx, src0),
+ ac_to_float(ctx, src0),
};
MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
{
char name[64];
LLVMValueRef params[] = {
- to_float(ctx, src0),
- to_float(ctx, src1),
+ ac_to_float(ctx, src0),
+ ac_to_float(ctx, src1),
};
MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
{
char name[64];
LLVMValueRef params[] = {
- to_float(ctx, src0),
- to_float(ctx, src1),
- to_float(ctx, src2),
+ ac_to_float(ctx, src0),
+ ac_to_float(ctx, src1),
+ ac_to_float(ctx, src2),
};
MAYBE_UNUSED const int length = snprintf(name, sizeof(name), "%s.f%d", intrin,
return LLVMBuildSelect(ctx->builder, v, src1, src2, "");
}
-static LLVMValueRef emit_find_lsb(struct ac_llvm_context *ctx,
- LLVMValueRef src0)
-{
- LLVMValueRef params[2] = {
- src0,
-
- /* The value of 1 means that ffs(x=0) = undef, so LLVM won't
- * add special code to check for x=0. The reason is that
- * the LLVM behavior for x=0 is different from what we
- * need here.
- *
- * The hardware already implements the correct behavior.
- */
- LLVMConstInt(ctx->i1, 1, false),
- };
-
- LLVMValueRef lsb = ac_build_intrinsic(ctx, "llvm.cttz.i32", ctx->i32,
- params, 2,
- AC_FUNC_ATTR_READNONE);
-
- /* TODO: We need an intrinsic to skip this conditional. */
- /* Check for zero: */
- return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder,
- LLVMIntEQ, src0,
- ctx->i32_0, ""),
- LLVMConstInt(ctx->i32, -1, 0), lsb, "");
-}
-
-static LLVMValueRef emit_ifind_msb(struct ac_llvm_context *ctx,
- LLVMValueRef src0)
-{
- return ac_build_imsb(ctx, src0, ctx->i32);
-}
-
-static LLVMValueRef emit_ufind_msb(struct ac_llvm_context *ctx,
- LLVMValueRef src0)
-{
- return ac_build_umsb(ctx, src0, ctx->i32);
-}
-
static LLVMValueRef emit_minmax_int(struct ac_llvm_context *ctx,
LLVMIntPredicate pred,
LLVMValueRef src0, LLVMValueRef src1)
LLVMValueRef src0)
{
const char *intr = "llvm.floor.f32";
- LLVMValueRef fsrc0 = to_float(ctx, src0);
+ LLVMValueRef fsrc0 = ac_to_float(ctx, src0);
LLVMValueRef params[] = {
fsrc0,
};
static LLVMValueRef emit_f2b(struct ac_llvm_context *ctx,
LLVMValueRef src0)
{
- src0 = to_float(ctx, src0);
+ src0 = ac_to_float(ctx, src0);
return LLVMBuildSExt(ctx->builder,
LLVMBuildFCmp(ctx->builder, LLVMRealUNE, src0, ctx->f32_0, ""),
ctx->i32, "");
LLVMValueRef src0)
{
LLVMValueRef result;
- LLVMValueRef cond;
+ LLVMValueRef cond = NULL;
- src0 = to_float(&ctx->ac, src0);
- result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");
+ src0 = ac_to_float(&ctx->ac, src0);
+ result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->ac.f16, "");
if (ctx->options->chip_class >= VI) {
LLVMValueRef args[2];
/* Check if the result is a denormal - and flush to 0 if so. */
args[0] = result;
- args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
- cond = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
+ args[1] = LLVMConstInt(ctx->ac.i32, N_SUBNORMAL | P_SUBNORMAL, false);
+ cond = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.class.f16", ctx->ac.i1, args, 2, AC_FUNC_ATTR_READNONE);
}
/* need to convert back up to f32 */
- result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");
+ result = LLVMBuildFPExt(ctx->builder, result, ctx->ac.f32, "");
if (ctx->options->chip_class >= VI)
- result = LLVMBuildSelect(ctx->builder, cond, ctx->f32zero, result, "");
+ result = LLVMBuildSelect(ctx->builder, cond, ctx->ac.f32_0, result, "");
else {
/* for SI/CIK */
/* 0x38800000 is smallest half float value (2^-14) in 32-bit float,
*/
LLVMValueRef temp, cond2;
temp = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
- ctx->f32, result);
+ ctx->ac.f32, result);
cond = LLVMBuildFCmp(ctx->builder, LLVMRealUGT,
- LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->i32, 0x38800000, false), ctx->f32, ""),
+ LLVMBuildBitCast(ctx->builder, LLVMConstInt(ctx->ac.i32, 0x38800000, false), ctx->ac.f32, ""),
temp, "");
cond2 = LLVMBuildFCmp(ctx->builder, LLVMRealUNE,
- temp, ctx->f32zero, "");
+ temp, ctx->ac.f32_0, "");
cond = LLVMBuildAnd(ctx->builder, cond, cond2, "");
- result = LLVMBuildSelect(ctx->builder, cond, ctx->f32zero, result, "");
+ result = LLVMBuildSelect(ctx->builder, cond, ctx->ac.f32_0, result, "");
}
return result;
}
int i;
LLVMValueRef comp[2];
- src0 = to_float(ctx, src0);
+ src0 = ac_to_float(ctx, src0);
comp[0] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_0, "");
comp[1] = LLVMBuildExtractElement(ctx->builder, src0, ctx->i32_1, "");
for (i = 0; i < 2; i++) {
temps[i] = LLVMBuildFPExt(ctx->builder, val, ctx->f32, "");
}
- LLVMTypeRef v2f32 = LLVMVectorType(ctx->f32, 2);
- result = LLVMBuildInsertElement(ctx->builder, LLVMGetUndef(v2f32), temps[0],
+ result = LLVMBuildInsertElement(ctx->builder, LLVMGetUndef(ctx->v2f32), temps[0],
ctx->i32_0, "");
result = LLVMBuildInsertElement(ctx->builder, result, temps[1],
ctx->i32_1, "");
unsigned mask;
int idx;
LLVMValueRef result;
- bool has_ds_bpermute = ctx->abi->chip_class >= VI;
if (op == nir_op_fddx_fine || op == nir_op_fddx)
mask = AC_TID_MASK_LEFT;
else
idx = 2;
- result = ac_build_ddxy(&ctx->ac, has_ds_bpermute,
- mask, idx,
- src0);
+ result = ac_build_ddxy(&ctx->ac, mask, idx, src0);
return result;
}
result = src[0];
break;
case nir_op_fneg:
- src[0] = to_float(&ctx->ac, src[0]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
result = LLVMBuildFNeg(ctx->ac.builder, src[0], "");
break;
case nir_op_ineg:
result = LLVMBuildAdd(ctx->ac.builder, src[0], src[1], "");
break;
case nir_op_fadd:
- src[0] = to_float(&ctx->ac, src[0]);
- src[1] = to_float(&ctx->ac, src[1]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
+ src[1] = ac_to_float(&ctx->ac, src[1]);
result = LLVMBuildFAdd(ctx->ac.builder, src[0], src[1], "");
break;
case nir_op_fsub:
- src[0] = to_float(&ctx->ac, src[0]);
- src[1] = to_float(&ctx->ac, src[1]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
+ src[1] = ac_to_float(&ctx->ac, src[1]);
result = LLVMBuildFSub(ctx->ac.builder, src[0], src[1], "");
break;
case nir_op_isub:
result = LLVMBuildURem(ctx->ac.builder, src[0], src[1], "");
break;
case nir_op_fmod:
- src[0] = to_float(&ctx->ac, src[0]);
- src[1] = to_float(&ctx->ac, src[1]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
+ src[1] = ac_to_float(&ctx->ac, src[1]);
result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
- to_float_type(&ctx->ac, def_type), result);
+ ac_to_float_type(&ctx->ac, def_type), result);
result = LLVMBuildFMul(ctx->ac.builder, src[1] , result, "");
result = LLVMBuildFSub(ctx->ac.builder, src[0], result, "");
break;
case nir_op_frem:
- src[0] = to_float(&ctx->ac, src[0]);
- src[1] = to_float(&ctx->ac, src[1]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
+ src[1] = ac_to_float(&ctx->ac, src[1]);
result = LLVMBuildFRem(ctx->ac.builder, src[0], src[1], "");
break;
case nir_op_irem:
result = LLVMBuildUDiv(ctx->ac.builder, src[0], src[1], "");
break;
case nir_op_fmul:
- src[0] = to_float(&ctx->ac, src[0]);
- src[1] = to_float(&ctx->ac, src[1]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
+ src[1] = ac_to_float(&ctx->ac, src[1]);
result = LLVMBuildFMul(ctx->ac.builder, src[0], src[1], "");
break;
case nir_op_fdiv:
- src[0] = to_float(&ctx->ac, src[0]);
- src[1] = to_float(&ctx->ac, src[1]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
+ src[1] = ac_to_float(&ctx->ac, src[1]);
result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
break;
case nir_op_frcp:
- src[0] = to_float(&ctx->ac, src[0]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
result = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1, src[0]);
break;
case nir_op_iand:
break;
case nir_op_fabs:
result = emit_intrin_1f_param(&ctx->ac, "llvm.fabs",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_iabs:
result = emit_iabs(&ctx->ac, src[0]);
result = emit_isign(&ctx->ac, src[0]);
break;
case nir_op_fsign:
- src[0] = to_float(&ctx->ac, src[0]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
result = emit_fsign(&ctx->ac, src[0]);
break;
case nir_op_ffloor:
result = emit_intrin_1f_param(&ctx->ac, "llvm.floor",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_ftrunc:
result = emit_intrin_1f_param(&ctx->ac, "llvm.trunc",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_fceil:
result = emit_intrin_1f_param(&ctx->ac, "llvm.ceil",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_fround_even:
result = emit_intrin_1f_param(&ctx->ac, "llvm.rint",
- to_float_type(&ctx->ac, def_type),src[0]);
+ ac_to_float_type(&ctx->ac, def_type),src[0]);
break;
case nir_op_ffract:
result = emit_ffract(&ctx->ac, src[0]);
break;
case nir_op_fsin:
result = emit_intrin_1f_param(&ctx->ac, "llvm.sin",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_fcos:
result = emit_intrin_1f_param(&ctx->ac, "llvm.cos",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_fsqrt:
result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_fexp2:
result = emit_intrin_1f_param(&ctx->ac, "llvm.exp2",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_flog2:
result = emit_intrin_1f_param(&ctx->ac, "llvm.log2",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
break;
case nir_op_frsq:
result = emit_intrin_1f_param(&ctx->ac, "llvm.sqrt",
- to_float_type(&ctx->ac, def_type), src[0]);
+ ac_to_float_type(&ctx->ac, def_type), src[0]);
result = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1, result);
break;
case nir_op_fpow:
result = emit_intrin_2f_param(&ctx->ac, "llvm.pow",
- to_float_type(&ctx->ac, def_type), src[0], src[1]);
+ ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
break;
case nir_op_fmax:
result = emit_intrin_2f_param(&ctx->ac, "llvm.maxnum",
- to_float_type(&ctx->ac, def_type), src[0], src[1]);
+ ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
if (instr->dest.dest.ssa.bit_size == 32)
result = emit_intrin_1f_param(&ctx->ac, "llvm.canonicalize",
- to_float_type(&ctx->ac, def_type),
+ ac_to_float_type(&ctx->ac, def_type),
result);
break;
case nir_op_fmin:
result = emit_intrin_2f_param(&ctx->ac, "llvm.minnum",
- to_float_type(&ctx->ac, def_type), src[0], src[1]);
+ ac_to_float_type(&ctx->ac, def_type), src[0], src[1]);
if (instr->dest.dest.ssa.bit_size == 32)
result = emit_intrin_1f_param(&ctx->ac, "llvm.canonicalize",
- to_float_type(&ctx->ac, def_type),
+ ac_to_float_type(&ctx->ac, def_type),
result);
break;
case nir_op_ffma:
- result = emit_intrin_3f_param(&ctx->ac, "llvm.fma",
- to_float_type(&ctx->ac, def_type), src[0], src[1], src[2]);
+ result = emit_intrin_3f_param(&ctx->ac, "llvm.fmuladd",
+ ac_to_float_type(&ctx->ac, def_type), src[0], src[1], src[2]);
break;
case nir_op_ibitfield_extract:
result = emit_bitfield_extract(&ctx->ac, true, src);
case nir_op_vec3:
case nir_op_vec4:
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
- src[i] = to_integer(&ctx->ac, src[i]);
+ src[i] = ac_to_integer(&ctx->ac, src[i]);
result = ac_build_gather_values(&ctx->ac, src, num_components);
break;
case nir_op_f2i32:
case nir_op_f2i64:
- src[0] = to_float(&ctx->ac, src[0]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
result = LLVMBuildFPToSI(ctx->ac.builder, src[0], def_type, "");
break;
case nir_op_f2u32:
case nir_op_f2u64:
- src[0] = to_float(&ctx->ac, src[0]);
+ src[0] = ac_to_float(&ctx->ac, src[0]);
result = LLVMBuildFPToUI(ctx->ac.builder, src[0], def_type, "");
break;
case nir_op_i2f32:
case nir_op_i2f64:
- result = LLVMBuildSIToFP(ctx->ac.builder, src[0], to_float_type(&ctx->ac, def_type), "");
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
+ result = LLVMBuildSIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
break;
case nir_op_u2f32:
case nir_op_u2f64:
- result = LLVMBuildUIToFP(ctx->ac.builder, src[0], to_float_type(&ctx->ac, def_type), "");
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
+ result = LLVMBuildUIToFP(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
break;
case nir_op_f2f64:
- result = LLVMBuildFPExt(ctx->ac.builder, src[0], to_float_type(&ctx->ac, def_type), "");
+ result = LLVMBuildFPExt(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
break;
case nir_op_f2f32:
- result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], to_float_type(&ctx->ac, def_type), "");
+ result = LLVMBuildFPTrunc(ctx->ac.builder, src[0], ac_to_float_type(&ctx->ac, def_type), "");
break;
case nir_op_u2u32:
case nir_op_u2u64:
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
if (get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < get_elem_bits(&ctx->ac, def_type))
result = LLVMBuildZExt(ctx->ac.builder, src[0], def_type, "");
else
break;
case nir_op_i2i32:
case nir_op_i2i64:
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
if (get_elem_bits(&ctx->ac, LLVMTypeOf(src[0])) < get_elem_bits(&ctx->ac, def_type))
result = LLVMBuildSExt(ctx->ac.builder, src[0], def_type, "");
else
result = emit_bcsel(&ctx->ac, src[0], src[1], src[2]);
break;
case nir_op_find_lsb:
- result = emit_find_lsb(&ctx->ac, src[0]);
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
+ result = ac_find_lsb(&ctx->ac, ctx->ac.i32, src[0]);
break;
case nir_op_ufind_msb:
- result = emit_ufind_msb(&ctx->ac, src[0]);
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
+ result = ac_build_umsb(&ctx->ac, src[0], ctx->ac.i32);
break;
case nir_op_ifind_msb:
- result = emit_ifind_msb(&ctx->ac, src[0]);
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
+ result = ac_build_imsb(&ctx->ac, src[0], ctx->ac.i32);
break;
case nir_op_uadd_carry:
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
+ src[1] = ac_to_integer(&ctx->ac, src[1]);
result = emit_uint_carry(&ctx->ac, "llvm.uadd.with.overflow.i32", src[0], src[1]);
break;
case nir_op_usub_borrow:
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
+ src[1] = ac_to_integer(&ctx->ac, src[1]);
result = emit_uint_carry(&ctx->ac, "llvm.usub.with.overflow.i32", src[0], src[1]);
break;
case nir_op_b2f:
result = emit_b2i(&ctx->ac, src[0]);
break;
case nir_op_i2b:
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
result = emit_i2b(&ctx->ac, src[0]);
break;
case nir_op_fquantize2f16:
result = emit_f2f16(ctx->nctx, src[0]);
break;
case nir_op_umul_high:
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
+ src[1] = ac_to_integer(&ctx->ac, src[1]);
result = emit_umul_high(&ctx->ac, src[0], src[1]);
break;
case nir_op_imul_high:
+ src[0] = ac_to_integer(&ctx->ac, src[0]);
+ src[1] = ac_to_integer(&ctx->ac, src[1]);
result = emit_imul_high(&ctx->ac, src[0], src[1]);
break;
case nir_op_pack_half_2x16:
case nir_op_unpack_64_2x32_split_x: {
assert(instr->src[0].src.ssa->num_components == 1);
LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
- LLVMVectorType(ctx->ac.i32, 2),
+ ctx->ac.v2i32,
"");
result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
ctx->ac.i32_0, "");
case nir_op_unpack_64_2x32_split_y: {
assert(instr->src[0].src.ssa->num_components == 1);
LLVMValueRef tmp = LLVMBuildBitCast(ctx->ac.builder, src[0],
- LLVMVectorType(ctx->ac.i32, 2),
+ ctx->ac.v2i32,
"");
result = LLVMBuildExtractElement(ctx->ac.builder, tmp,
ctx->ac.i32_1, "");
}
case nir_op_pack_64_2x32_split: {
- LLVMValueRef tmp = LLVMGetUndef(LLVMVectorType(ctx->ac.i32, 2));
+ LLVMValueRef tmp = LLVMGetUndef(ctx->ac.v2i32);
tmp = LLVMBuildInsertElement(ctx->ac.builder, tmp,
src[0], ctx->ac.i32_0, "");
tmp = LLVMBuildInsertElement(ctx->ac.builder, tmp,
if (result) {
assert(instr->dest.dest.is_ssa);
- result = to_integer(&ctx->ac, result);
+ result = ac_to_integer(&ctx->ac, result);
_mesa_hash_table_insert(ctx->defs, &instr->dest.dest.ssa,
result);
}
LLVMConstInt(ctx->ac.i32, 2, false), "");
/* VI only */
- if (ctx->abi->chip_class >= VI && in_elements) {
+ if (ctx->ac.chip_class == VI && in_elements) {
/* On VI, the descriptor contains the size in bytes,
* but TXQ must return the size in elements.
* The stride is always non-zero for resources using TXQ.
enum glsl_base_type stype = glsl_get_sampler_result_type(instr->texture->var->type);
LLVMValueRef coord = args->addr;
LLVMValueRef half_texel[2];
- LLVMValueRef compare_cube_wa;
+ LLVMValueRef compare_cube_wa = NULL;
LLVMValueRef result;
int c;
unsigned coord_vgpr_index = (unsigned)args->offset + (unsigned)args->compare;
break;
}
- if (instr->op == nir_texop_tg4 && ctx->abi->chip_class <= VI) {
+ if (instr->op == nir_texop_tg4 && ctx->ac.chip_class <= VI) {
enum glsl_base_type stype = glsl_get_sampler_result_type(instr->texture->var->type);
if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
return radv_lower_gather4_integer(&ctx->ac, args, instr);
layout->binding[binding].dynamic_offset_offset;
desc_ptr = ctx->push_constants;
base_offset = pipeline_layout->push_constant_size + 16 * idx;
- stride = LLVMConstInt(ctx->i32, 16, false);
+ stride = LLVMConstInt(ctx->ac.i32, 16, false);
} else
- stride = LLVMConstInt(ctx->i32, layout->binding[binding].size, false);
+ stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);
- offset = LLVMConstInt(ctx->i32, base_offset, false);
+ offset = LLVMConstInt(ctx->ac.i32, base_offset, false);
index = LLVMBuildMul(ctx->builder, index, stride, "");
offset = LLVMBuildAdd(ctx->builder, offset, index, "");
desc_ptr = ac_build_gep0(&ctx->ac, desc_ptr, offset);
- desc_ptr = cast_ptr(ctx, desc_ptr, ctx->v4i32);
- LLVMSetMetadata(desc_ptr, ctx->uniform_md_kind, ctx->empty_md);
+ desc_ptr = cast_ptr(ctx, desc_ptr, ctx->ac.v4i32);
+ LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);
return LLVMBuildLoad(ctx->builder, desc_ptr, "");
}
{
LLVMValueRef ptr, addr;
- addr = LLVMConstInt(ctx->i32, nir_intrinsic_base(instr), 0);
+ addr = LLVMConstInt(ctx->ac.i32, nir_intrinsic_base(instr), 0);
addr = LLVMBuildAdd(ctx->builder, addr, get_src(ctx->nir, instr->src[0]), "");
ptr = ac_build_gep0(&ctx->ac, ctx->push_constants, addr);
unsigned writemask = nir_intrinsic_write_mask(instr);
LLVMValueRef base_data, base_offset;
LLVMValueRef params[6];
- LLVMValueRef i1false = LLVMConstInt(ctx->ac.i1, 0, false);
params[1] = ctx->abi->load_ssbo(ctx->abi,
get_src(ctx, instr->src[1]), true);
params[2] = LLVMConstInt(ctx->ac.i32, 0, false); /* vindex */
- params[4] = i1false; /* glc */
- params[5] = i1false; /* slc */
+ params[4] = ctx->ac.i1false; /* glc */
+ params[5] = ctx->ac.i1false; /* slc */
if (components_32bit > 1)
data_type = LLVMVectorType(ctx->ac.f32, components_32bit);
- base_data = to_float(&ctx->ac, src_data);
+ base_data = ac_to_float(&ctx->ac, src_data);
base_data = trim_vector(&ctx->ac, base_data, instr->num_components);
base_data = LLVMBuildBitCast(ctx->ac.builder, base_data,
data_type, "");
store_name = "llvm.amdgcn.buffer.store.v4f32";
data = base_data;
} else if (count == 2) {
- LLVMTypeRef v2f32 = LLVMVectorType(ctx->ac.f32, 2);
-
tmp = LLVMBuildExtractElement(ctx->ac.builder,
base_data, LLVMConstInt(ctx->ac.i32, start, false), "");
- data = LLVMBuildInsertElement(ctx->ac.builder, LLVMGetUndef(v2f32), tmp,
+ data = LLVMBuildInsertElement(ctx->ac.builder, LLVMGetUndef(ctx->ac.v2f32), tmp,
ctx->ac.i32_0, "");
tmp = LLVMBuildExtractElement(ctx->ac.builder,
else
unreachable("unhandled number of components");
- LLVMValueRef i1false = LLVMConstInt(ctx->ac.i1, 0, false);
LLVMValueRef params[] = {
ctx->abi->load_ssbo(ctx->abi,
get_src(ctx, instr->src[0]),
false),
LLVMConstInt(ctx->ac.i32, 0, false),
offset,
- i1false,
- i1false,
+ ctx->ac.i1false,
+ ctx->ac.i1false,
};
results[i] = ac_build_intrinsic(&ctx->ac, load_name, data_type, params, 5, 0);
}
+ assume(results[0]);
LLVMValueRef ret = results[0];
if (num_components > 4 || num_components == 3) {
LLVMValueRef masks[] = {
}
- ret = ac_build_gather_values(&ctx->ac, results, instr->num_components);
+ ret = ac_build_gather_values(&ctx->ac, results, num_components);
return LLVMBuildBitCast(ctx->ac.builder, ret,
get_def_type(ctx, &instr->dest.ssa), "");
}
*indir_out = offset;
}
-static LLVMValueRef
-lds_load(struct nir_to_llvm_context *ctx,
- LLVMValueRef dw_addr)
-{
- LLVMValueRef value;
- value = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
- return value;
-}
-
-static void
-lds_store(struct nir_to_llvm_context *ctx,
- LLVMValueRef dw_addr, LLVMValueRef value)
-{
- value = LLVMBuildBitCast(ctx->builder, value, ctx->i32, "");
- ac_build_indexed_store(&ctx->ac, ctx->lds,
- dw_addr, value);
-}
/* The offchip buffer layout for TCS->TES is
*
total_vertices = LLVMBuildMul(ctx->builder, vertices_per_patch,
num_patches, "");
- constant16 = LLVMConstInt(ctx->i32, 16, false);
+ constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
if (vertex_index) {
base_addr = LLVMBuildMul(ctx->builder, rel_patch_id,
vertices_per_patch, "");
LLVMValueRef param_index;
if (indir_index)
- param_index = LLVMBuildAdd(ctx->builder, LLVMConstInt(ctx->i32, param, false),
+ param_index = LLVMBuildAdd(ctx->builder, LLVMConstInt(ctx->ac.i32, param, false),
indir_index, "");
else {
if (const_index && !is_compact)
param += const_index;
- param_index = LLVMConstInt(ctx->i32, param, false);
+ param_index = LLVMConstInt(ctx->ac.i32, param, false);
}
return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}
if (indir_index)
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
LLVMBuildMul(ctx->builder, indir_index,
- LLVMConstInt(ctx->i32, 4, false), ""), "");
+ LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
else if (const_index && !compact_const_index)
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
- LLVMConstInt(ctx->i32, const_index, false), "");
+ LLVMConstInt(ctx->ac.i32, const_index, false), "");
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
- LLVMConstInt(ctx->i32, param * 4, false), "");
+ LLVMConstInt(ctx->ac.i32, param * 4, false), "");
if (const_index && compact_const_index)
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
- LLVMConstInt(ctx->i32, const_index, false), "");
+ LLVMConstInt(ctx->ac.i32, const_index, false), "");
return dw_addr;
}
dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
indir_index);
- for (unsigned i = 0; i < instr->num_components; i++) {
- value[i] = lds_load(ctx, dw_addr);
+ unsigned comp = instr->variables[0]->var->data.location_frac;
+ for (unsigned i = 0; i < instr->num_components + comp; i++) {
+ value[i] = ac_lds_load(&ctx->ac, dw_addr);
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
- ctx->i32one, "");
+ ctx->ac.i32_1, "");
}
- result = ac_build_gather_values(&ctx->ac, value, instr->num_components);
+ result = ac_build_varying_gather_values(&ctx->ac, value, instr->num_components, comp);
result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx->nir, &instr->dest.ssa), "");
return result;
}
load_tcs_output(struct nir_to_llvm_context *ctx,
nir_intrinsic_instr *instr)
{
- LLVMValueRef dw_addr, stride;
+ LLVMValueRef dw_addr;
+ LLVMValueRef stride = NULL;
LLVMValueRef value[4], result;
LLVMValueRef vertex_index = NULL;
LLVMValueRef indir_index = NULL;
dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
indir_index);
- for (unsigned i = 0; i < instr->num_components; i++) {
- value[i] = lds_load(ctx, dw_addr);
+ unsigned comp = instr->variables[0]->var->data.location_frac;
+ for (unsigned i = comp; i < instr->num_components + comp; i++) {
+ value[i] = ac_lds_load(&ctx->ac, dw_addr);
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
- ctx->i32one, "");
+ ctx->ac.i32_1, "");
}
- result = ac_build_gather_values(&ctx->ac, value, instr->num_components);
+ result = ac_build_varying_gather_values(&ctx->ac, value, instr->num_components, comp);
result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx->nir, &instr->dest.ssa), "");
return result;
}
LLVMValueRef src,
unsigned writemask)
{
- LLVMValueRef stride, dw_addr;
+ LLVMValueRef dw_addr;
+ LLVMValueRef stride = NULL;
LLVMValueRef buf_addr = NULL;
LLVMValueRef vertex_index = NULL;
LLVMValueRef indir_index = NULL;
unsigned const_index = 0;
unsigned param;
+ const unsigned comp = instr->variables[0]->var->data.location_frac;
const bool per_vertex = nir_is_per_vertex_io(instr->variables[0]->var, ctx->stage);
const bool is_compact = instr->variables[0]->var->data.compact;
+ bool store_lds = true;
+ if (instr->variables[0]->var->data.patch) {
+ if (!(ctx->tcs_patch_outputs_read & (1U << instr->variables[0]->var->data.location)))
+ store_lds = false;
+ } else {
+ if (!(ctx->tcs_outputs_read & (1ULL << instr->variables[0]->var->data.location)))
+ store_lds = false;
+ }
get_deref_offset(ctx->nir, instr->variables[0],
false, NULL, per_vertex ? &vertex_index : NULL,
&const_index, &indir_index);
buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
vertex_index, indir_index);
+ bool is_tess_factor = false;
+ if (instr->variables[0]->var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
+ instr->variables[0]->var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
+ is_tess_factor = true;
+
unsigned base = is_compact ? const_index : 0;
for (unsigned chan = 0; chan < 8; chan++) {
- bool is_tess_factor = false;
if (!(writemask & (1 << chan)))
continue;
- LLVMValueRef value = llvm_extract_elem(&ctx->ac, src, chan);
+ LLVMValueRef value = llvm_extract_elem(&ctx->ac, src, chan - comp);
- lds_store(ctx, dw_addr, value);
-
- if (instr->variables[0]->var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
- instr->variables[0]->var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
- is_tess_factor = true;
+ if (store_lds || is_tess_factor)
+ ac_lds_store(&ctx->ac, dw_addr, value);
if (!is_tess_factor && writemask != 0xF)
ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
4 * (base + chan), 1, 0, true, false);
dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
- ctx->i32one, "");
+ ctx->ac.i32_1, "");
}
if (writemask == 0xF) {
const_index -= 3;
param++;
}
+
+ unsigned comp = instr->variables[0]->var->data.location_frac;
buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
is_compact, vertex_index, indir_index);
+ LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, comp * 4, false);
+ buf_addr = LLVMBuildAdd(ctx->builder, buf_addr, comp_offset, "");
+
result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, instr->num_components, NULL,
buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
result = trim_vector(&ctx->ac, result, instr->num_components);
vtx_offset_param = vertex_index;
assert(vtx_offset_param < 6);
vtx_offset = LLVMBuildMul(ctx->builder, ctx->gs_vtx_offset[vtx_offset_param],
- LLVMConstInt(ctx->i32, 4, false), "");
+ LLVMConstInt(ctx->ac.i32, 4, false), "");
param = shader_io_get_unique_index(instr->variables[0]->var->data.location);
- for (unsigned i = 0; i < instr->num_components; i++) {
-
- args[0] = ctx->esgs_ring;
- args[1] = vtx_offset;
- args[2] = LLVMConstInt(ctx->i32, (param * 4 + i + const_index) * 256, false);
- args[3] = ctx->i32zero;
- args[4] = ctx->i32one; /* OFFEN */
- args[5] = ctx->i32zero; /* IDXEN */
- args[6] = ctx->i32one; /* GLC */
- args[7] = ctx->i32zero; /* SLC */
- args[8] = ctx->i32zero; /* TFE */
- value[i] = ac_build_intrinsic(&ctx->ac, "llvm.SI.buffer.load.dword.i32.i32",
- ctx->i32, args, 9,
- AC_FUNC_ATTR_READONLY |
- AC_FUNC_ATTR_LEGACY);
+ unsigned comp = instr->variables[0]->var->data.location_frac;
+ for (unsigned i = comp; i < instr->num_components + comp; i++) {
+ if (ctx->ac.chip_class >= GFX9) {
+ LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
+ dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
+ LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
+ value[i] = ac_lds_load(&ctx->ac, dw_addr);
+ } else {
+ args[0] = ctx->esgs_ring;
+ args[1] = vtx_offset;
+ args[2] = LLVMConstInt(ctx->ac.i32, (param * 4 + i + const_index) * 256, false);
+ args[3] = ctx->ac.i32_0;
+ args[4] = ctx->ac.i32_1; /* OFFEN */
+ args[5] = ctx->ac.i32_0; /* IDXEN */
+ args[6] = ctx->ac.i32_1; /* GLC */
+ args[7] = ctx->ac.i32_0; /* SLC */
+ args[8] = ctx->ac.i32_0; /* TFE */
+
+ value[i] = ac_build_intrinsic(&ctx->ac, "llvm.SI.buffer.load.dword.i32.i32",
+ ctx->ac.i32, args, 9,
+ AC_FUNC_ATTR_READONLY |
+ AC_FUNC_ATTR_LEGACY);
+ }
}
- result = ac_build_gather_values(&ctx->ac, value, instr->num_components);
+ result = ac_build_varying_gather_values(&ctx->ac, value, instr->num_components, comp);
return result;
}
LLVMValueRef values[8];
int idx = instr->variables[0]->var->data.driver_location;
int ve = instr->dest.ssa.num_components;
+ unsigned comp = instr->variables[0]->var->data.location_frac;
LLVMValueRef indir_index;
LLVMValueRef ret;
unsigned const_index;
if (ctx->stage == MESA_SHADER_GEOMETRY) {
return load_gs_input(ctx->nctx, instr);
}
- for (unsigned chan = 0; chan < ve; chan++) {
+
+ for (unsigned chan = comp; chan < ve + comp; chan++) {
if (indir_index) {
unsigned count = glsl_count_attribute_slots(
instr->variables[0]->var->type,
case nir_var_shader_out:
if (ctx->stage == MESA_SHADER_TESS_CTRL)
return load_tcs_output(ctx->nctx, instr);
- for (unsigned chan = 0; chan < ve; chan++) {
+
+ for (unsigned chan = comp; chan < ve + comp; chan++) {
if (indir_index) {
unsigned count = glsl_count_attribute_slots(
instr->variables[0]->var->type, false);
default:
unreachable("unhandled variable mode");
}
- ret = ac_build_gather_values(&ctx->ac, values, ve);
+ ret = ac_build_varying_gather_values(&ctx->ac, values, ve, comp);
return LLVMBuildBitCast(ctx->ac.builder, ret, get_def_type(ctx, &instr->dest.ssa), "");
}
{
LLVMValueRef temp_ptr, value;
int idx = instr->variables[0]->var->data.driver_location;
- LLVMValueRef src = to_float(&ctx->ac, get_src(ctx, instr->src[0]));
- int writemask = instr->const_index[0];
+ unsigned comp = instr->variables[0]->var->data.location_frac;
+ LLVMValueRef src = ac_to_float(&ctx->ac, get_src(ctx, instr->src[0]));
+ int writemask = instr->const_index[0] << comp;
LLVMValueRef indir_index;
unsigned const_index;
get_deref_offset(ctx, instr->variables[0], false,
if (!(writemask & (1 << chan)))
continue;
- value = llvm_extract_elem(&ctx->ac, src, chan);
+ value = llvm_extract_elem(&ctx->ac, src, chan - comp);
if (instr->variables[0]->var->data.compact)
stride = 1;
res = ac_build_image_opcode(ctx, &args);
- res = to_integer(ctx, res);
+ res = ac_to_integer(ctx, res);
LLVMValueRef four = LLVMConstInt(ctx->i32, 4, false);
LLVMValueRef F = LLVMConstInt(ctx->i32, 0xf, false);
int count;
enum glsl_sampler_dim dim = glsl_get_sampler_dim(type);
+ bool is_array = glsl_sampler_type_is_array(type);
bool add_frag_pos = (dim == GLSL_SAMPLER_DIM_SUBPASS ||
dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
bool is_ms = (dim == GLSL_SAMPLER_DIM_MS ||
dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
-
- count = image_type_to_components_count(dim,
- glsl_sampler_type_is_array(type));
+ bool gfx9_1d = ctx->ac.chip_class >= GFX9 && dim == GLSL_SAMPLER_DIM_1D;
+ count = image_type_to_components_count(dim, is_array);
if (is_ms) {
LLVMValueRef fmask_load_address[3];
fmask_load_address[0] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
fmask_load_address[1] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[1], "");
- if (glsl_sampler_type_is_array(type))
+ if (is_array)
fmask_load_address[2] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[2], "");
else
fmask_load_address[2] = NULL;
LLVMBuildAdd(ctx->ac.builder, fmask_load_address[chan],
LLVMBuildFPToUI(ctx->ac.builder, ctx->abi->frag_pos[chan],
ctx->ac.i32, ""), "");
+ fmask_load_address[2] = ac_to_integer(&ctx->ac, ctx->abi->inputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)]);
}
sample_index = adjust_sample_index_using_fmask(&ctx->ac,
fmask_load_address[0],
fmask_load_address[1],
fmask_load_address[2],
sample_index,
- get_sampler_desc(ctx, instr->variables[0], AC_DESC_FMASK, true, false));
+ get_sampler_desc(ctx, instr->variables[0], AC_DESC_FMASK, NULL, true, false));
}
- if (count == 1) {
+ if (count == 1 && !gfx9_1d) {
if (instr->src[0].ssa->num_components)
res = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[0], "");
else
if (is_ms)
count--;
for (chan = 0; chan < count; ++chan) {
- coords[chan] = LLVMBuildExtractElement(ctx->ac.builder, src0, masks[chan], "");
+ coords[chan] = llvm_extract_elem(&ctx->ac, src0, chan);
}
-
if (add_frag_pos) {
- for (chan = 0; chan < count; ++chan)
+ for (chan = 0; chan < 2; ++chan)
coords[chan] = LLVMBuildAdd(ctx->ac.builder, coords[chan], LLVMBuildFPToUI(ctx->ac.builder, ctx->abi->frag_pos[chan],
ctx->ac.i32, ""), "");
+ coords[2] = ac_to_integer(&ctx->ac, ctx->abi->inputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)]);
+ count++;
+ }
+
+ if (gfx9_1d) {
+ if (is_array) {
+ coords[2] = coords[1];
+ coords[1] = ctx->ac.i32_0;
+ } else
+ coords[1] = ctx->ac.i32_0;
+ count++;
}
+
if (is_ms) {
coords[count] = sample_index;
count++;
char intrinsic_name[64];
const nir_variable *var = instr->variables[0]->var;
const struct glsl_type *type = var->type;
- LLVMValueRef i1false = LLVMConstInt(ctx->ac.i1, 0, false);
- LLVMValueRef i1true = LLVMConstInt(ctx->ac.i1, 1, false);
if(instr->variables[0]->deref.child)
type = instr->variables[0]->deref.child->type;
type = glsl_without_array(type);
if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
- params[0] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_BUFFER, true, false);
+ params[0] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_BUFFER, NULL, true, false);
params[1] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[0]),
ctx->ac.i32_0, ""); /* vindex */
params[2] = ctx->ac.i32_0; /* voffset */
- params[3] = i1false; /* glc */
- params[4] = i1false; /* slc */
+ params[3] = ctx->ac.i1false; /* glc */
+ params[4] = ctx->ac.i1false; /* slc */
res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.load.format.v4f32", ctx->ac.v4f32,
params, 5, 0);
res = trim_vector(&ctx->ac, res, instr->dest.ssa.num_components);
- res = to_integer(&ctx->ac, res);
+ res = ac_to_integer(&ctx->ac, res);
} else {
bool is_da = glsl_sampler_type_is_array(type) ||
- glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE;
- LLVMValueRef da = is_da ? i1true : i1false;
- LLVMValueRef glc = i1false;
- LLVMValueRef slc = i1false;
+ glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE ||
+ glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_SUBPASS ||
+ glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_SUBPASS_MS;
+ LLVMValueRef da = is_da ? ctx->ac.i1true : ctx->ac.i1false;
+ LLVMValueRef glc = ctx->ac.i1false;
+ LLVMValueRef slc = ctx->ac.i1false;
params[0] = get_image_coords(ctx, instr);
- params[1] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE, true, false);
+ params[1] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE, NULL, true, false);
params[2] = LLVMConstInt(ctx->ac.i32, 15, false); /* dmask */
if (HAVE_LLVM <= 0x0309) {
- params[3] = i1false; /* r128 */
+ params[3] = ctx->ac.i1false; /* r128 */
params[4] = da;
params[5] = glc;
params[6] = slc;
} else {
- LLVMValueRef lwe = i1false;
+ LLVMValueRef lwe = ctx->ac.i1false;
params[3] = glc;
params[4] = slc;
params[5] = lwe;
res = ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.v4f32,
params, 7, AC_FUNC_ATTR_READONLY);
}
- return to_integer(&ctx->ac, res);
+ return ac_to_integer(&ctx->ac, res);
}
static void visit_image_store(struct ac_nir_context *ctx,
char intrinsic_name[64];
const nir_variable *var = instr->variables[0]->var;
const struct glsl_type *type = glsl_without_array(var->type);
- LLVMValueRef i1false = LLVMConstInt(ctx->ac.i1, 0, false);
- LLVMValueRef i1true = LLVMConstInt(ctx->ac.i1, 1, false);
- LLVMValueRef glc = i1false;
- bool force_glc = ctx->abi->chip_class == SI;
+ LLVMValueRef glc = ctx->ac.i1false;
+ bool force_glc = ctx->ac.chip_class == SI;
if (force_glc)
- glc = i1true;
+ glc = ctx->ac.i1true;
if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
- params[0] = to_float(&ctx->ac, get_src(ctx, instr->src[2])); /* data */
- params[1] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_BUFFER, true, true);
+ params[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[2])); /* data */
+ params[1] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_BUFFER, NULL, true, true);
params[2] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[0]),
ctx->ac.i32_0, ""); /* vindex */
params[3] = ctx->ac.i32_0; /* voffset */
params[4] = glc; /* glc */
- params[5] = i1false; /* slc */
+ params[5] = ctx->ac.i1false; /* slc */
ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.store.format.v4f32", ctx->ac.voidt,
params, 6, 0);
} else {
bool is_da = glsl_sampler_type_is_array(type) ||
glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE;
- LLVMValueRef da = is_da ? i1true : i1false;
- LLVMValueRef slc = i1false;
+ LLVMValueRef da = is_da ? ctx->ac.i1true : ctx->ac.i1false;
+ LLVMValueRef slc = ctx->ac.i1false;
- params[0] = to_float(&ctx->ac, get_src(ctx, instr->src[2]));
+ params[0] = ac_to_float(&ctx->ac, get_src(ctx, instr->src[2]));
params[1] = get_image_coords(ctx, instr); /* coords */
- params[2] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE, true, true);
+ params[2] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE, NULL, true, true);
params[3] = LLVMConstInt(ctx->ac.i32, 15, false); /* dmask */
if (HAVE_LLVM <= 0x0309) {
- params[4] = i1false; /* r128 */
+ params[4] = ctx->ac.i1false; /* r128 */
params[5] = da;
params[6] = glc;
params[7] = slc;
} else {
- LLVMValueRef lwe = i1false;
+ LLVMValueRef lwe = ctx->ac.i1false;
params[4] = glc;
params[5] = slc;
params[6] = lwe;
static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx,
const nir_intrinsic_instr *instr)
{
- LLVMValueRef params[6];
+ LLVMValueRef params[7];
int param_count = 0;
const nir_variable *var = instr->variables[0]->var;
const char *atomic_name;
char intrinsic_name[41];
const struct glsl_type *type = glsl_without_array(var->type);
- LLVMValueRef i1false = LLVMConstInt(ctx->ac.i1, 0, false);
- LLVMValueRef i1true = LLVMConstInt(ctx->ac.i1, 1, false);
MAYBE_UNUSED int length;
+ bool is_unsigned = glsl_get_sampler_result_type(type) == GLSL_TYPE_UINT;
+
switch (instr->intrinsic) {
case nir_intrinsic_image_atomic_add:
atomic_name = "add";
break;
case nir_intrinsic_image_atomic_min:
- atomic_name = "smin";
+ atomic_name = is_unsigned ? "umin" : "smin";
break;
case nir_intrinsic_image_atomic_max:
- atomic_name = "smax";
+ atomic_name = is_unsigned ? "umax" : "smax";
break;
case nir_intrinsic_image_atomic_and:
atomic_name = "and";
if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF) {
params[param_count++] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_BUFFER,
- true, true);
+ NULL, true, true);
params[param_count++] = LLVMBuildExtractElement(ctx->ac.builder, get_src(ctx, instr->src[0]),
ctx->ac.i32_0, ""); /* vindex */
params[param_count++] = ctx->ac.i32_0; /* voffset */
- params[param_count++] = i1false; /* slc */
+ params[param_count++] = ctx->ac.i1false; /* slc */
length = snprintf(intrinsic_name, sizeof(intrinsic_name),
"llvm.amdgcn.buffer.atomic.%s", atomic_name);
LLVMValueRef coords = params[param_count++] = get_image_coords(ctx, instr);
params[param_count++] = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE,
- true, true);
- params[param_count++] = i1false; /* r128 */
- params[param_count++] = da ? i1true : i1false; /* da */
- params[param_count++] = i1false; /* slc */
+ NULL, true, true);
+ params[param_count++] = ctx->ac.i1false; /* r128 */
+ params[param_count++] = da ? ctx->ac.i1true : ctx->ac.i1false; /* da */
+ params[param_count++] = ctx->ac.i1false; /* slc */
build_int_type_name(LLVMTypeOf(coords),
coords_type, sizeof(coords_type));
if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF)
return get_buffer_size(ctx,
get_sampler_desc(ctx, instr->variables[0],
- AC_DESC_BUFFER, true, false), true);
+ AC_DESC_BUFFER, NULL, true, false), true);
struct ac_image_args args = { 0 };
args.da = da;
args.dmask = 0xf;
- args.resource = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE, true, false);
+ args.resource = get_sampler_desc(ctx, instr->variables[0], AC_DESC_IMAGE, NULL, true, false);
args.opcode = ac_image_get_resinfo;
args.addr = ctx->ac.i32_0;
res = ac_build_image_opcode(&ctx->ac, &args);
+ LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
+
if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE &&
glsl_sampler_type_is_array(type)) {
- LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
LLVMValueRef six = LLVMConstInt(ctx->ac.i32, 6, false);
LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
res = LLVMBuildInsertElement(ctx->ac.builder, res, z, two, "");
}
+ if (ctx->ac.chip_class >= GFX9 &&
+ glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_1D &&
+ glsl_sampler_type_is_array(type)) {
+ LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, res, two, "");
+ res = LLVMBuildInsertElement(ctx->ac.builder, res, layers,
+ ctx->ac.i32_1, "");
+
+ }
return res;
}
unsigned simm16)
{
LLVMValueRef args[1] = {
- LLVMConstInt(ctx->i32, simm16, false),
+ LLVMConstInt(ctx->ac.i32, simm16, false),
};
ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.waitcnt",
- ctx->voidt, args, 1, 0);
+ ctx->ac.voidt, args, 1, 0);
+}
+
+static void emit_membar(struct nir_to_llvm_context *ctx,
+ const nir_intrinsic_instr *instr)
+{
+ unsigned waitcnt = NOOP_WAITCNT;
+
+ switch (instr->intrinsic) {
+ case nir_intrinsic_memory_barrier:
+ case nir_intrinsic_group_memory_barrier:
+ waitcnt &= VM_CNT & LGKM_CNT;
+ break;
+ case nir_intrinsic_memory_barrier_atomic_counter:
+ case nir_intrinsic_memory_barrier_buffer:
+ case nir_intrinsic_memory_barrier_image:
+ waitcnt &= VM_CNT;
+ break;
+ case nir_intrinsic_memory_barrier_shared:
+ waitcnt &= LGKM_CNT;
+ break;
+ default:
+ break;
+ }
+ if (waitcnt != NOOP_WAITCNT)
+ emit_waitcnt(ctx, waitcnt);
}
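/* A sketch of how the masks above combine, assuming the usual Mesa
 * definitions NOOP_WAITCNT 0xf7f, VM_CNT 0xf70 and LGKM_CNT 0x07f:
 * clearing a counter field of the s_waitcnt immediate to zero means
 * "wait until that counter drains".
 *
 *   waitcnt  = NOOP_WAITCNT;        // 0xf7f: every counter left at max, no wait
 *   waitcnt &= VM_CNT & LGKM_CNT;   // 0x070: vmcnt(0) and lgkmcnt(0)
 *   emit_waitcnt(ctx, waitcnt);     // emits s_waitcnt vmcnt(0) lgkmcnt(0)
 */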
static void emit_barrier(struct nir_to_llvm_context *ctx)
return;
}
ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.barrier",
- ctx->voidt, NULL, 0, AC_FUNC_ATTR_CONVERGENT);
+ ctx->ac.voidt, NULL, 0, AC_FUNC_ATTR_CONVERGENT);
}
static void emit_discard_if(struct ac_nir_context *ctx,
{
LLVMValueRef cond;
- cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntNE,
+ cond = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
get_src(ctx, instr->src[0]),
ctx->ac.i32_0, "");
-
- cond = LLVMBuildSelect(ctx->ac.builder, cond,
- LLVMConstReal(ctx->ac.f32, -1.0f),
- ctx->ac.f32_0, "");
- ac_build_kill(&ctx->ac, cond);
+ ac_build_kill_if_false(&ctx->ac, cond);
}
static LLVMValueRef
LLVMValueRef result;
LLVMValueRef thread_id = ac_get_thread_id(&ctx->ac);
result = LLVMBuildAnd(ctx->builder, ctx->tg_size,
- LLVMConstInt(ctx->i32, 0xfc0, false), "");
+ LLVMConstInt(ctx->ac.i32, 0xfc0, false), "");
return LLVMBuildAdd(ctx->builder, result, thread_id, "");
}
return NULL;
}
- result = LLVMBuildAtomicRMW(ctx->builder, op, ptr, to_integer(&ctx->ac, src),
+ result = LLVMBuildAtomicRMW(ctx->builder, op, ptr, ac_to_integer(&ctx->ac, src),
LLVMAtomicOrderingSequentiallyConsistent,
false);
}
LLVMValueRef sample_id)
{
LLVMValueRef result;
- LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_PS_SAMPLE_POSITIONS, false));
+ LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false));
ptr = LLVMBuildBitCast(ctx->builder, ptr,
- const_array(ctx->v2f32, 64), "");
+ const_array(ctx->ac.v2f32, 64), "");
sample_id = LLVMBuildAdd(ctx->builder, sample_id, ctx->sample_pos_offset, "");
- result = ac_build_indexed_load(&ctx->ac, ptr, sample_id, false);
+ result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);
return result;
}
static LLVMValueRef visit_interp(struct nir_to_llvm_context *ctx,
const nir_intrinsic_instr *instr)
{
- LLVMValueRef result[2];
+ LLVMValueRef result[4];
LLVMValueRef interp_param, attr_number;
unsigned location;
unsigned chan;
- LLVMValueRef src_c0, src_c1;
- LLVMValueRef src0;
+ LLVMValueRef src_c0 = NULL;
+ LLVMValueRef src_c1 = NULL;
+ LLVMValueRef src0 = NULL;
int input_index = instr->variables[0]->var->data.location - VARYING_SLOT_VAR0;
switch (instr->intrinsic) {
case nir_intrinsic_interp_var_at_centroid:
}
if (instr->intrinsic == nir_intrinsic_interp_var_at_offset) {
- src_c0 = to_float(&ctx->ac, LLVMBuildExtractElement(ctx->builder, src0, ctx->i32zero, ""));
- src_c1 = to_float(&ctx->ac, LLVMBuildExtractElement(ctx->builder, src0, ctx->i32one, ""));
+ src_c0 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->builder, src0, ctx->ac.i32_0, ""));
+ src_c1 = ac_to_float(&ctx->ac, LLVMBuildExtractElement(ctx->builder, src0, ctx->ac.i32_1, ""));
} else if (instr->intrinsic == nir_intrinsic_interp_var_at_sample) {
LLVMValueRef sample_position;
- LLVMValueRef halfval = LLVMConstReal(ctx->f32, 0.5f);
+ LLVMValueRef halfval = LLVMConstReal(ctx->ac.f32, 0.5f);
/* fetch sample ID */
sample_position = load_sample_position(ctx, src0);
- src_c0 = LLVMBuildExtractElement(ctx->builder, sample_position, ctx->i32zero, "");
+ src_c0 = LLVMBuildExtractElement(ctx->builder, sample_position, ctx->ac.i32_0, "");
src_c0 = LLVMBuildFSub(ctx->builder, src_c0, halfval, "");
- src_c1 = LLVMBuildExtractElement(ctx->builder, sample_position, ctx->i32one, "");
+ src_c1 = LLVMBuildExtractElement(ctx->builder, sample_position, ctx->ac.i32_1, "");
src_c1 = LLVMBuildFSub(ctx->builder, src_c1, halfval, "");
}
interp_param = lookup_interp_param(ctx, instr->variables[0]->var->data.interpolation, location);
- attr_number = LLVMConstInt(ctx->i32, input_index, false);
+ attr_number = LLVMConstInt(ctx->ac.i32, input_index, false);
- if (location == INTERP_SAMPLE || location == INTERP_CENTER) {
+ if (location == INTERP_CENTER) {
LLVMValueRef ij_out[2];
LLVMValueRef ddxy_out = emit_ddxy_interp(ctx->nir, interp_param);
* interp_param.J = ddy * offset/sample.y + temp1;
*/
for (unsigned i = 0; i < 2; i++) {
- LLVMValueRef ix_ll = LLVMConstInt(ctx->i32, i, false);
- LLVMValueRef iy_ll = LLVMConstInt(ctx->i32, i + 2, false);
+ LLVMValueRef ix_ll = LLVMConstInt(ctx->ac.i32, i, false);
+ LLVMValueRef iy_ll = LLVMConstInt(ctx->ac.i32, i + 2, false);
LLVMValueRef ddx_el = LLVMBuildExtractElement(ctx->builder,
ddxy_out, ix_ll, "");
LLVMValueRef ddy_el = LLVMBuildExtractElement(ctx->builder,
LLVMValueRef temp1, temp2;
interp_el = LLVMBuildBitCast(ctx->builder, interp_el,
- ctx->f32, "");
+ ctx->ac.f32, "");
temp1 = LLVMBuildFMul(ctx->builder, ddx_el, src_c0, "");
temp1 = LLVMBuildFAdd(ctx->builder, temp1, interp_el, "");
temp2 = LLVMBuildFAdd(ctx->builder, temp2, temp1, "");
ij_out[i] = LLVMBuildBitCast(ctx->builder,
- temp2, ctx->i32, "");
+ temp2, ctx->ac.i32, "");
}
interp_param = ac_build_gather_values(&ctx->ac, ij_out, 2);
}
- for (chan = 0; chan < 2; chan++) {
- LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
+ for (chan = 0; chan < 4; chan++) {
+ LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
if (interp_param) {
interp_param = LLVMBuildBitCast(ctx->builder,
- interp_param, LLVMVectorType(ctx->f32, 2), "");
+ interp_param, ctx->ac.v2f32, "");
LLVMValueRef i = LLVMBuildExtractElement(
- ctx->builder, interp_param, ctx->i32zero, "");
+ ctx->builder, interp_param, ctx->ac.i32_0, "");
LLVMValueRef j = LLVMBuildExtractElement(
- ctx->builder, interp_param, ctx->i32one, "");
+ ctx->builder, interp_param, ctx->ac.i32_1, "");
result[chan] = ac_build_fs_interp(&ctx->ac,
llvm_chan, attr_number,
ctx->prim_mask, i, j);
} else {
result[chan] = ac_build_fs_interp_mov(&ctx->ac,
- LLVMConstInt(ctx->i32, 2, false),
+ LLVMConstInt(ctx->ac.i32, 2, false),
llvm_chan, attr_number,
ctx->prim_mask);
}
}
- return ac_build_gather_values(&ctx->ac, result, 2);
+ return ac_build_varying_gather_values(&ctx->ac, result, instr->num_components,
+ instr->variables[0]->var->data.location_frac);
}
static void
-visit_emit_vertex(struct nir_to_llvm_context *ctx,
- const nir_intrinsic_instr *instr)
+visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
LLVMValueRef gs_next_vertex;
- LLVMValueRef can_emit, kill;
+ LLVMValueRef can_emit;
int idx;
+ struct nir_to_llvm_context *ctx = nir_to_llvm_context_from_abi(abi);
- assert(instr->const_index[0] == 0);
/* Write vertex attribute values to GSVS ring */
gs_next_vertex = LLVMBuildLoad(ctx->builder,
ctx->gs_next_vertex,
* effects other than emitting vertices.
*/
can_emit = LLVMBuildICmp(ctx->builder, LLVMIntULT, gs_next_vertex,
- LLVMConstInt(ctx->i32, ctx->gs_max_out_vertices, false), "");
-
- kill = LLVMBuildSelect(ctx->builder, can_emit,
- LLVMConstReal(ctx->f32, 1.0f),
- LLVMConstReal(ctx->f32, -1.0f), "");
- ac_build_kill(&ctx->ac, kill);
+ LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
+ ac_build_kill_if_false(&ctx->ac, can_emit);
/* loop num outputs */
idx = 0;
for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
- LLVMValueRef *out_ptr = &ctx->nir->outputs[i * 4];
+ LLVMValueRef *out_ptr = &addrs[i * 4];
int length = 4;
int slot = idx;
int slot_inc = 1;
for (unsigned j = 0; j < length; j++) {
LLVMValueRef out_val = LLVMBuildLoad(ctx->builder,
out_ptr[j], "");
- LLVMValueRef voffset = LLVMConstInt(ctx->i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
+ LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
voffset = LLVMBuildAdd(ctx->builder, voffset, gs_next_vertex, "");
- voffset = LLVMBuildMul(ctx->builder, voffset, LLVMConstInt(ctx->i32, 4, false), "");
+ voffset = LLVMBuildMul(ctx->builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");
- out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->i32, "");
+ out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->ac.i32, "");
ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
out_val, 1,
}
gs_next_vertex = LLVMBuildAdd(ctx->builder, gs_next_vertex,
- ctx->i32one, "");
+ ctx->ac.i32_1, "");
LLVMBuildStore(ctx->builder, gs_next_vertex, ctx->gs_next_vertex);
ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
LLVMValueRef coord[4] = {
ctx->tes_u,
ctx->tes_v,
- ctx->f32zero,
- ctx->f32zero,
+ ctx->ac.f32_0,
+ ctx->ac.f32_0,
};
if (ctx->tes_primitive_mode == GL_TRIANGLES)
- coord[2] = LLVMBuildFSub(ctx->builder, ctx->f32one,
+ coord[2] = LLVMBuildFSub(ctx->builder, ctx->ac.f32_1,
LLVMBuildFAdd(ctx->builder, coord[0], coord[1], ""), "");
LLVMValueRef result = ac_build_gather_values(&ctx->ac, coord, instr->num_components);
case nir_intrinsic_load_draw_id:
result = ctx->abi->draw_id;
break;
+ case nir_intrinsic_load_view_index:
+ result = ctx->nctx->view_index ? ctx->nctx->view_index : ctx->ac.i32_0;
+ break;
case nir_intrinsic_load_invocation_id:
if (ctx->stage == MESA_SHADER_TESS_CTRL)
result = unpack_param(&ctx->ac, ctx->nctx->tcs_rel_ids, 8, 5);
else
- result = ctx->nctx->gs_invocation_id;
+ result = ctx->abi->gs_invocation_id;
break;
case nir_intrinsic_load_primitive_id:
if (ctx->stage == MESA_SHADER_GEOMETRY) {
- ctx->nctx->shader_info->gs.uses_prim_id = true;
- result = ctx->nctx->gs_prim_id;
+ if (ctx->nctx)
+ ctx->nctx->shader_info->gs.uses_prim_id = true;
+ result = ctx->abi->gs_prim_id;
} else if (ctx->stage == MESA_SHADER_TESS_CTRL) {
ctx->nctx->shader_info->tcs.uses_prim_id = true;
result = ctx->nctx->tcs_patch_id;
emit_discard_if(ctx, instr);
break;
case nir_intrinsic_memory_barrier:
- emit_waitcnt(ctx->nctx, VM_CNT);
+ case nir_intrinsic_group_memory_barrier:
+ case nir_intrinsic_memory_barrier_atomic_counter:
+ case nir_intrinsic_memory_barrier_buffer:
+ case nir_intrinsic_memory_barrier_image:
+ case nir_intrinsic_memory_barrier_shared:
+ emit_membar(ctx->nctx, instr);
break;
case nir_intrinsic_barrier:
emit_barrier(ctx->nctx);
result = visit_interp(ctx->nctx, instr);
break;
case nir_intrinsic_emit_vertex:
- visit_emit_vertex(ctx->nctx, instr);
+ assert(instr->const_index[0] == 0);
+ ctx->abi->emit_vertex(ctx->abi, 0, ctx->outputs);
break;
case nir_intrinsic_end_primitive:
visit_end_primitive(ctx->nctx, instr);
switch (desc_type) {
case AC_DESC_IMAGE:
- type = ctx->v8i32;
+ type = ctx->ac.v8i32;
type_size = 32;
break;
case AC_DESC_FMASK:
- type = ctx->v8i32;
+ type = ctx->ac.v8i32;
offset += 32;
type_size = 32;
break;
case AC_DESC_SAMPLER:
- type = ctx->v4i32;
+ type = ctx->ac.v4i32;
if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
offset += 64;
type_size = 16;
break;
case AC_DESC_BUFFER:
- type = ctx->v4i32;
+ type = ctx->ac.v4i32;
type_size = 16;
break;
default:
const uint32_t *samplers = radv_immutable_samplers(layout, binding);
LLVMValueRef constants[] = {
- LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 0], 0),
- LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 1], 0),
- LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 2], 0),
- LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 3], 0),
+ LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
+ LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
+ LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
+ LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
};
return ac_build_gather_values(&ctx->ac, constants, 4);
}
assert(stride % type_size == 0);
if (!index)
- index = ctx->i32zero;
+ index = ctx->ac.i32_0;
- index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->i32, stride / type_size, 0), "");
+ index = LLVMBuildMul(builder, index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");
- list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->i32, offset, 0));
+ list = ac_build_gep0(&ctx->ac, list, LLVMConstInt(ctx->ac.i32, offset, 0));
list = LLVMBuildPointerCast(builder, list, const_array(type, 0), "");
- return ac_build_indexed_load_const(&ctx->ac, list, index);
+ return ac_build_load_to_sgpr(&ctx->ac, list, index);
}
static LLVMValueRef get_sampler_desc(struct ac_nir_context *ctx,
const nir_deref_var *deref,
enum ac_descriptor_type desc_type,
+ const nir_tex_instr *tex_instr,
bool image, bool write)
{
LLVMValueRef index = NULL;
unsigned constant_index = 0;
- const nir_deref *tail = &deref->deref;
+ unsigned descriptor_set;
+ unsigned base_index;
- while (tail->child) {
- const nir_deref_array *child = nir_deref_as_array(tail->child);
- unsigned array_size = glsl_get_aoa_size(tail->child->type);
+ if (!deref) {
+ assert(tex_instr && !image);
+ descriptor_set = 0;
+ base_index = tex_instr->sampler_index;
+ } else {
+ const nir_deref *tail = &deref->deref;
+ while (tail->child) {
+ const nir_deref_array *child = nir_deref_as_array(tail->child);
+ unsigned array_size = glsl_get_aoa_size(tail->child->type);
- if (!array_size)
- array_size = 1;
+ if (!array_size)
+ array_size = 1;
- assert(child->deref_array_type != nir_deref_array_type_wildcard);
+ assert(child->deref_array_type != nir_deref_array_type_wildcard);
- if (child->deref_array_type == nir_deref_array_type_indirect) {
- LLVMValueRef indirect = get_src(ctx, child->indirect);
+ if (child->deref_array_type == nir_deref_array_type_indirect) {
+ LLVMValueRef indirect = get_src(ctx, child->indirect);
- indirect = LLVMBuildMul(ctx->ac.builder, indirect,
- LLVMConstInt(ctx->ac.i32, array_size, false), "");
+ indirect = LLVMBuildMul(ctx->ac.builder, indirect,
+ LLVMConstInt(ctx->ac.i32, array_size, false), "");
- if (!index)
- index = indirect;
- else
- index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
- }
+ if (!index)
+ index = indirect;
+ else
+ index = LLVMBuildAdd(ctx->ac.builder, index, indirect, "");
+ }
- constant_index += child->base_offset * array_size;
+ constant_index += child->base_offset * array_size;
- tail = &child->deref;
+ tail = &child->deref;
+ }
+ descriptor_set = deref->var->data.descriptor_set;
+ base_index = deref->var->data.binding;
}
return ctx->abi->load_sampler_desc(ctx->abi,
- deref->var->data.descriptor_set,
- deref->var->data.binding,
+ descriptor_set,
+ base_index,
constant_index, index,
desc_type, image, write);
}
LLVMBuilderRef builder = ctx->ac.builder;
LLVMValueRef img7, samp0;
- if (ctx->abi->chip_class >= VI)
+ if (ctx->ac.chip_class >= VI)
return samp;
img7 = LLVMBuildExtractElement(builder, res,
LLVMValueRef *fmask_ptr)
{
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
- *res_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_BUFFER, false, false);
+ *res_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_BUFFER, instr, false, false);
else
- *res_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_IMAGE, false, false);
+ *res_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_IMAGE, instr, false, false);
if (samp_ptr) {
if (instr->sampler)
- *samp_ptr = get_sampler_desc(ctx, instr->sampler, AC_DESC_SAMPLER, false, false);
+ *samp_ptr = get_sampler_desc(ctx, instr->sampler, AC_DESC_SAMPLER, instr, false, false);
else
- *samp_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_SAMPLER, false, false);
+ *samp_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_SAMPLER, instr, false, false);
if (instr->sampler_dim < GLSL_SAMPLER_DIM_RECT)
*samp_ptr = sici_fix_sampler_aniso(ctx, *res_ptr, *samp_ptr);
}
if (fmask_ptr && !instr->sampler && (instr->op == nir_texop_txf_ms ||
instr->op == nir_texop_samples_identical))
- *fmask_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_FMASK, false, false);
+ *fmask_ptr = get_sampler_desc(ctx, instr->texture, AC_DESC_FMASK, instr, false, false);
}
static LLVMValueRef apply_round_slice(struct ac_llvm_context *ctx,
LLVMValueRef coord)
{
- coord = to_float(ctx, coord);
+ coord = ac_to_float(ctx, coord);
coord = ac_build_intrinsic(ctx, "llvm.rint.f32", ctx->f32, &coord, 1, 0);
- coord = to_integer(ctx, coord);
+ coord = ac_to_integer(ctx, coord);
return coord;
}
/* Pack depth comparison value */
if (instr->is_shadow && comparator) {
- LLVMValueRef z = to_float(&ctx->ac,
- llvm_extract_elem(&ctx->ac, comparator, 0));
+ LLVMValueRef z = ac_to_float(&ctx->ac,
+ llvm_extract_elem(&ctx->ac, comparator, 0));
- /* TC-compatible HTILE promotes Z16 and Z24 to Z32_FLOAT,
+ /* TC-compatible HTILE on radeonsi promotes Z16 and Z24 to Z32_FLOAT,
* so the depth comparison value isn't clamped for Z16 and
* Z24 anymore. Do it manually here.
*
* It's unnecessary if the original texture format was
* Z32_FLOAT, but we don't know that here.
*/
- if (ctx->abi->chip_class == VI)
+ if (ctx->ac.chip_class == VI && ctx->abi->clamp_shadow_reference)
z = ac_build_clamp(&ctx->ac, z);
address[count++] = z;
/* pack derivatives */
if (ddx || ddy) {
+ int num_src_deriv_channels, num_dest_deriv_channels;
switch (instr->sampler_dim) {
case GLSL_SAMPLER_DIM_3D:
case GLSL_SAMPLER_DIM_CUBE:
num_deriv_comp = 3;
+ num_src_deriv_channels = 3;
+ num_dest_deriv_channels = 3;
break;
case GLSL_SAMPLER_DIM_2D:
default:
+ num_src_deriv_channels = 2;
+ num_dest_deriv_channels = 2;
num_deriv_comp = 2;
break;
case GLSL_SAMPLER_DIM_1D:
- num_deriv_comp = 1;
+ num_src_deriv_channels = 1;
+ if (ctx->ac.chip_class >= GFX9) {
+ num_dest_deriv_channels = 2;
+ num_deriv_comp = 2;
+ } else {
+ num_dest_deriv_channels = 1;
+ num_deriv_comp = 1;
+ }
break;
}
- for (unsigned i = 0; i < num_deriv_comp; i++) {
- derivs[i] = to_float(&ctx->ac, llvm_extract_elem(&ctx->ac, ddx, i));
- derivs[num_deriv_comp + i] = to_float(&ctx->ac, llvm_extract_elem(&ctx->ac, ddy, i));
+ for (unsigned i = 0; i < num_src_deriv_channels; i++) {
+ derivs[i] = ac_to_float(&ctx->ac, llvm_extract_elem(&ctx->ac, ddx, i));
+ derivs[num_dest_deriv_channels + i] = ac_to_float(&ctx->ac, llvm_extract_elem(&ctx->ac, ddy, i));
+ }
+ for (unsigned i = num_src_deriv_channels; i < num_dest_deriv_channels; i++) {
+ derivs[i] = ctx->ac.f32_0;
+ derivs[num_dest_deriv_channels + i] = ctx->ac.f32_0;
}
}
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && coord) {
- if (instr->is_array && instr->op != nir_texop_lod)
- coords[3] = apply_round_slice(&ctx->ac, coords[3]);
for (chan = 0; chan < instr->coord_components; chan++)
- coords[chan] = to_float(&ctx->ac, coords[chan]);
+ coords[chan] = ac_to_float(&ctx->ac, coords[chan]);
if (instr->coord_components == 3)
coords[3] = LLVMGetUndef(ctx->ac.f32);
ac_prepare_cube_coords(&ctx->ac,
instr->op == nir_texop_txd, instr->is_array,
- coords, derivs);
+ instr->op == nir_texop_lod, coords, derivs);
if (num_deriv_comp)
num_deriv_comp--;
}
}
address[count++] = coords[2];
}
+
+ if (ctx->ac.chip_class >= GFX9) {
+ LLVMValueRef filler;
+ if (instr->op == nir_texop_txf)
+ filler = ctx->ac.i32_0;
+ else
+ filler = LLVMConstReal(ctx->ac.f32, 0.5);
+
+ if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D) {
+ /* nir_texop_lod is excluded here because it does not take a slice,
+ * even with array textures. */
+ if (instr->is_array && instr->op != nir_texop_lod) {
+ address[count] = address[count - 1];
+ address[count - 1] = filler;
+ count++;
+ } else
+ address[count++] = filler;
+ }
+ }
}
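/* The net effect of the GFX9 1D handling above, sketched for the coordinate
 * part of the address vector: GFX9 addresses 1D resources as 2D, so a y
 * filler is inserted and, for arrays, the slice moves up one slot:
 *
 *   non-array:  { x }        ->  { x, filler }
 *   array:      { x, slice } ->  { x, filler, slice }
 *
 * with filler = integer 0 for nir_texop_txf and 0.5 for filtered ops.
 */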
/* Pack LOD */
LLVMValueRef z = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
z = LLVMBuildSDiv(ctx->ac.builder, z, six, "");
result = LLVMBuildInsertElement(ctx->ac.builder, result, z, two, "");
+ } else if (ctx->ac.chip_class >= GFX9 &&
+ instr->op == nir_texop_txs &&
+ instr->sampler_dim == GLSL_SAMPLER_DIM_1D &&
+ instr->is_array) {
+ LLVMValueRef two = LLVMConstInt(ctx->ac.i32, 2, false);
+ LLVMValueRef layers = LLVMBuildExtractElement(ctx->ac.builder, result, two, "");
+ result = LLVMBuildInsertElement(ctx->ac.builder, result, layers,
+ ctx->ac.i32_1, "");
} else if (instr->dest.ssa.num_components != 4)
result = trim_vector(&ctx->ac, result, instr->dest.ssa.num_components);
write_result:
if (result) {
assert(instr->dest.is_ssa);
- result = to_integer(&ctx->ac, result);
+ result = ac_to_integer(&ctx->ac, result);
_mesa_hash_table_insert(ctx->defs, &instr->dest.ssa, result);
}
}
ctx->abi.base_vertex, "");
for (unsigned i = 0; i < attrib_count; ++i, ++idx) {
- t_offset = LLVMConstInt(ctx->i32, index + i, false);
+ t_offset = LLVMConstInt(ctx->ac.i32, index + i, false);
- t_list = ac_build_indexed_load_const(&ctx->ac, t_list_ptr, t_offset);
+ t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);
input = ac_build_buffer_load_format(&ctx->ac, t_list,
buffer_index,
- LLVMConstInt(ctx->i32, 0, false),
+ LLVMConstInt(ctx->ac.i32, 0, false),
true);
for (unsigned chan = 0; chan < 4; chan++) {
- LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
+ LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
ctx->inputs[radeon_llvm_reg_index_soa(idx, chan)] =
- to_integer(&ctx->ac, LLVMBuildExtractElement(ctx->builder,
+ ac_to_integer(&ctx->ac, LLVMBuildExtractElement(ctx->builder,
input, llvm_chan, ""));
}
}
LLVMValueRef i, j;
bool interp = interp_param != NULL;
- attr_number = LLVMConstInt(ctx->i32, attr, false);
+ attr_number = LLVMConstInt(ctx->ac.i32, attr, false);
/* fs.constant returns the param from the middle vertex, so it's not
* really useful for flat shading. It's meant to be used for custom
*/
if (interp) {
interp_param = LLVMBuildBitCast(ctx->builder, interp_param,
- LLVMVectorType(ctx->f32, 2), "");
+ ctx->ac.v2f32, "");
i = LLVMBuildExtractElement(ctx->builder, interp_param,
- ctx->i32zero, "");
+ ctx->ac.i32_0, "");
j = LLVMBuildExtractElement(ctx->builder, interp_param,
- ctx->i32one, "");
+ ctx->ac.i32_1, "");
}
for (chan = 0; chan < 4; chan++) {
- LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
+ LLVMValueRef llvm_chan = LLVMConstInt(ctx->ac.i32, chan, false);
if (interp) {
result[chan] = ac_build_fs_interp(&ctx->ac,
prim_mask, i, j);
} else {
result[chan] = ac_build_fs_interp_mov(&ctx->ac,
- LLVMConstInt(ctx->i32, 2, false),
+ LLVMConstInt(ctx->ac.i32, 2, false),
llvm_chan,
attr_number,
prim_mask);
}
static void
-handle_shader_input_decl(struct nir_to_llvm_context *ctx,
- struct nir_variable *variable)
-{
- switch (ctx->stage) {
- case MESA_SHADER_VERTEX:
+handle_vs_inputs(struct nir_to_llvm_context *ctx,
+ struct nir_shader *nir) {
+ nir_foreach_variable(variable, &nir->inputs)
handle_vs_input_decl(ctx, variable);
- break;
- case MESA_SHADER_FRAGMENT:
- handle_fs_input_decl(ctx, variable);
- break;
- default:
- break;
+}
+
+static void
+prepare_interp_optimize(struct nir_to_llvm_context *ctx,
+ struct nir_shader *nir)
+{
+ if (!ctx->options->key.fs.multisample)
+ return;
+
+ bool uses_center = false;
+ bool uses_centroid = false;
+ nir_foreach_variable(variable, &nir->inputs) {
+ if (glsl_get_base_type(glsl_without_array(variable->type)) != GLSL_TYPE_FLOAT ||
+ variable->data.sample)
+ continue;
+
+ if (variable->data.centroid)
+ uses_centroid = true;
+ else
+ uses_center = true;
}
+ if (uses_center && uses_centroid) {
+ LLVMValueRef sel = LLVMBuildICmp(ctx->builder, LLVMIntSLT, ctx->prim_mask, ctx->ac.i32_0, "");
+ ctx->persp_centroid = LLVMBuildSelect(ctx->builder, sel, ctx->persp_center, ctx->persp_centroid, "");
+ ctx->linear_centroid = LLVMBuildSelect(ctx->builder, sel, ctx->linear_center, ctx->linear_centroid, "");
+ }
}
static void
-handle_fs_inputs_pre(struct nir_to_llvm_context *ctx,
- struct nir_shader *nir)
+handle_fs_inputs(struct nir_to_llvm_context *ctx,
+ struct nir_shader *nir)
{
+ prepare_interp_optimize(ctx, nir);
+
+ nir_foreach_variable(variable, &nir->inputs)
+ handle_fs_input_decl(ctx, variable);
+
unsigned index = 0;
+
+ if (ctx->shader_info->info.ps.uses_input_attachments ||
+ ctx->shader_info->info.needs_multiview_view_index)
+ ctx->input_mask |= 1ull << VARYING_SLOT_LAYER;
+
for (unsigned i = 0; i < RADEON_LLVM_MAX_INPUTS; ++i) {
LLVMValueRef interp_param;
LLVMValueRef *inputs = ctx->inputs +radeon_llvm_reg_index_soa(i, 0);
for(int i = 0; i < 3; ++i)
inputs[i] = ctx->abi.frag_pos[i];
- inputs[3] = ac_build_fdiv(&ctx->ac, ctx->f32one,
+ inputs[3] = ac_build_fdiv(&ctx->ac, ctx->ac.f32_1,
ctx->abi.frag_pos[3]);
}
}
if (ctx->input_mask & (1 << VARYING_SLOT_LAYER))
ctx->shader_info->fs.layer_input = true;
ctx->shader_info->fs.input_mask = ctx->input_mask >> VARYING_SLOT_VAR0;
+
+ if (ctx->shader_info->info.needs_multiview_view_index)
+ ctx->view_index = ctx->inputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
}
static LLVMValueRef
static void
scan_shader_output_decl(struct nir_to_llvm_context *ctx,
- struct nir_variable *variable)
+ struct nir_variable *variable,
+ struct nir_shader *shader,
+ gl_shader_stage stage)
{
int idx = variable->data.location + variable->data.index;
unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
variable->data.driver_location = idx * 4;
/* tess ctrl has its own load/store paths for outputs */
- if (ctx->stage == MESA_SHADER_TESS_CTRL)
+ if (stage == MESA_SHADER_TESS_CTRL)
return;
mask_attribs = ((1ull << attrib_count) - 1) << idx;
- if (ctx->stage == MESA_SHADER_VERTEX ||
- ctx->stage == MESA_SHADER_TESS_EVAL ||
- ctx->stage == MESA_SHADER_GEOMETRY) {
+ if (stage == MESA_SHADER_VERTEX ||
+ stage == MESA_SHADER_TESS_EVAL ||
+ stage == MESA_SHADER_GEOMETRY) {
if (idx == VARYING_SLOT_CLIP_DIST0) {
- int length = ctx->num_output_clips + ctx->num_output_culls;
- if (ctx->stage == MESA_SHADER_VERTEX) {
- ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << ctx->num_output_clips) - 1;
- ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << ctx->num_output_culls) - 1;
+ int length = shader->info.clip_distance_array_size +
+ shader->info.cull_distance_array_size;
+ if (stage == MESA_SHADER_VERTEX) {
+ ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
+ ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
}
- if (ctx->stage == MESA_SHADER_TESS_EVAL) {
- ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << ctx->num_output_clips) - 1;
- ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << ctx->num_output_culls) - 1;
+ if (stage == MESA_SHADER_TESS_EVAL) {
+ ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << shader->info.clip_distance_array_size) - 1;
+ ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << shader->info.cull_distance_array_size) - 1;
}
if (length > 4)
case GLSL_TYPE_UINT:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_SUBROUTINE:
- return ctx->i32;
+ return ctx->ac.i32;
case GLSL_TYPE_FLOAT: /* TODO handle mediump */
- return ctx->f32;
+ return ctx->ac.f32;
case GLSL_TYPE_INT64:
case GLSL_TYPE_UINT64:
- return ctx->i64;
+ return ctx->ac.i64;
case GLSL_TYPE_DOUBLE:
- return ctx->f64;
+ return ctx->ac.f64;
default:
unreachable("unknown GLSL type");
}
static LLVMValueRef
emit_float_saturate(struct ac_llvm_context *ctx, LLVMValueRef v, float lo, float hi)
{
- v = to_float(ctx, v);
+ v = ac_to_float(ctx, v);
v = emit_intrin_2f_param(ctx, "llvm.maxnum", ctx->f32, v, LLVMConstReal(ctx->f32, lo));
return emit_intrin_2f_param(ctx, "llvm.minnum", ctx->f32, v, LLVMConstReal(ctx->f32, hi));
}
static LLVMValueRef emit_pack_int16(struct nir_to_llvm_context *ctx,
LLVMValueRef src0, LLVMValueRef src1)
{
- LLVMValueRef const16 = LLVMConstInt(ctx->i32, 16, false);
+ LLVMValueRef const16 = LLVMConstInt(ctx->ac.i32, 16, false);
LLVMValueRef comp[2];
- comp[0] = LLVMBuildAnd(ctx->builder, src0, LLVMConstInt(ctx-> i32, 65535, 0), "");
- comp[1] = LLVMBuildAnd(ctx->builder, src1, LLVMConstInt(ctx-> i32, 65535, 0), "");
+ comp[0] = LLVMBuildAnd(ctx->builder, src0, LLVMConstInt(ctx->ac.i32, 65535, 0), "");
+ comp[1] = LLVMBuildAnd(ctx->builder, src1, LLVMConstInt(ctx->ac.i32, 65535, 0), "");
comp[1] = LLVMBuildShl(ctx->builder, comp[1], const16, "");
return LLVMBuildOr(ctx->builder, comp[0], comp[1], "");
}
args->target = target;
args->compr = false;
- args->out[0] = LLVMGetUndef(ctx->f32);
- args->out[1] = LLVMGetUndef(ctx->f32);
- args->out[2] = LLVMGetUndef(ctx->f32);
- args->out[3] = LLVMGetUndef(ctx->f32);
+ args->out[0] = LLVMGetUndef(ctx->ac.f32);
+ args->out[1] = LLVMGetUndef(ctx->ac.f32);
+ args->out[2] = LLVMGetUndef(ctx->ac.f32);
+ args->out[3] = LLVMGetUndef(ctx->ac.f32);
if (!values)
return;
for (unsigned chan = 0; chan < 4; chan++) {
val[chan] = ac_build_clamp(&ctx->ac, values[chan]);
val[chan] = LLVMBuildFMul(ctx->builder, val[chan],
- LLVMConstReal(ctx->f32, 65535), "");
+ LLVMConstReal(ctx->ac.f32, 65535), "");
val[chan] = LLVMBuildFAdd(ctx->builder, val[chan],
- LLVMConstReal(ctx->f32, 0.5), "");
+ LLVMConstReal(ctx->ac.f32, 0.5), "");
val[chan] = LLVMBuildFPToUI(ctx->builder, val[chan],
- ctx->i32, "");
+ ctx->ac.i32, "");
}
args->compr = 1;
for (unsigned chan = 0; chan < 4; chan++) {
val[chan] = emit_float_saturate(&ctx->ac, values[chan], -1, 1);
val[chan] = LLVMBuildFMul(ctx->builder, val[chan],
- LLVMConstReal(ctx->f32, 32767), "");
+ LLVMConstReal(ctx->ac.f32, 32767), "");
/* If positive, add 0.5, else add -0.5. */
val[chan] = LLVMBuildFAdd(ctx->builder, val[chan],
LLVMBuildSelect(ctx->builder,
LLVMBuildFCmp(ctx->builder, LLVMRealOGE,
- val[chan], ctx->f32zero, ""),
- LLVMConstReal(ctx->f32, 0.5),
- LLVMConstReal(ctx->f32, -0.5), ""), "");
- val[chan] = LLVMBuildFPToSI(ctx->builder, val[chan], ctx->i32, "");
+ val[chan], ctx->ac.f32_0, ""),
+ LLVMConstReal(ctx->ac.f32, 0.5),
+ LLVMConstReal(ctx->ac.f32, -0.5), ""), "");
+ val[chan] = LLVMBuildFPToSI(ctx->builder, val[chan], ctx->ac.i32, "");
}
args->compr = 1;
break;
case V_028714_SPI_SHADER_UINT16_ABGR: {
- LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
+ LLVMValueRef max_rgb = LLVMConstInt(ctx->ac.i32,
is_int8 ? 255 : is_int10 ? 1023 : 65535, 0);
- LLVMValueRef max_alpha = !is_int10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
+ LLVMValueRef max_alpha = !is_int10 ? max_rgb : LLVMConstInt(ctx->ac.i32, 3, 0);
for (unsigned chan = 0; chan < 4; chan++) {
- val[chan] = to_integer(&ctx->ac, values[chan]);
+ val[chan] = ac_to_integer(&ctx->ac, values[chan]);
val[chan] = emit_minmax_int(&ctx->ac, LLVMIntULT, val[chan], chan == 3 ? max_alpha : max_rgb);
}
}
case V_028714_SPI_SHADER_SINT16_ABGR: {
- LLVMValueRef max_rgb = LLVMConstInt(ctx->i32,
+ LLVMValueRef max_rgb = LLVMConstInt(ctx->ac.i32,
is_int8 ? 127 : is_int10 ? 511 : 32767, 0);
- LLVMValueRef min_rgb = LLVMConstInt(ctx->i32,
+ LLVMValueRef min_rgb = LLVMConstInt(ctx->ac.i32,
is_int8 ? -128 : is_int10 ? -512 : -32768, 0);
- LLVMValueRef max_alpha = !is_int10 ? max_rgb : ctx->i32one;
- LLVMValueRef min_alpha = !is_int10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
+ LLVMValueRef max_alpha = !is_int10 ? max_rgb : ctx->ac.i32_1;
+ LLVMValueRef min_alpha = !is_int10 ? min_rgb : LLVMConstInt(ctx->ac.i32, -2, 0);
/* Clamp. */
for (unsigned chan = 0; chan < 4; chan++) {
- val[chan] = to_integer(&ctx->ac, values[chan]);
+ val[chan] = ac_to_integer(&ctx->ac, values[chan]);
val[chan] = emit_minmax_int(&ctx->ac, LLVMIntSLT, val[chan], chan == 3 ? max_alpha : max_rgb);
val[chan] = emit_minmax_int(&ctx->ac, LLVMIntSGT, val[chan], chan == 3 ? min_alpha : min_rgb);
}
memcpy(&args->out[0], values, sizeof(values[0]) * 4);
for (unsigned i = 0; i < 4; ++i)
- args->out[i] = to_float(&ctx->ac, args->out[i]);
+ args->out[i] = ac_to_float(&ctx->ac, args->out[i]);
}
static void
LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
int i;
+ if (ctx->options->key.has_multiview_view_index) {
+ LLVMValueRef* tmp_out = &ctx->nir->outputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, 0)];
+ if(!*tmp_out) {
+ for(unsigned i = 0; i < 4; ++i)
+ ctx->nir->outputs[radeon_llvm_reg_index_soa(VARYING_SLOT_LAYER, i)] =
+ si_build_alloca_undef(&ctx->ac, ctx->ac.f32, "");
+ }
+
+ LLVMBuildStore(ctx->builder, ac_to_float(&ctx->ac, ctx->view_index), *tmp_out);
+ ctx->output_mask |= 1ull << VARYING_SLOT_LAYER;
+ }
+
memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
sizeof(outinfo->vs_output_param_offset));
i = VARYING_SLOT_CLIP_DIST0;
for (j = 0; j < ctx->num_output_clips + ctx->num_output_culls; j++)
- slots[j] = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
+ slots[j] = ac_to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
for (i = ctx->num_output_clips + ctx->num_output_culls; i < 8; i++)
- slots[i] = LLVMGetUndef(ctx->f32);
+ slots[i] = LLVMGetUndef(ctx->ac.f32);
if (ctx->num_output_clips + ctx->num_output_culls > 4) {
target = V_008DFC_SQ_EXP_POS + 3;
}
- LLVMValueRef pos_values[4] = {ctx->f32zero, ctx->f32zero, ctx->f32zero, ctx->f32one};
+ LLVMValueRef pos_values[4] = {ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_0, ctx->ac.f32_1};
if (ctx->output_mask & (1ull << VARYING_SLOT_POS)) {
for (unsigned j = 0; j < 4; j++)
pos_values[j] = LLVMBuildLoad(ctx->builder,
ctx->nir->outputs[radeon_llvm_reg_index_soa(VARYING_SLOT_VIEWPORT, 0)], "");
}
- uint32_t mask = ((outinfo->writes_pointsize == true ? 1 : 0) |
- (outinfo->writes_layer == true ? 4 : 0) |
- (outinfo->writes_viewport_index == true ? 8 : 0));
- if (mask) {
- pos_args[1].enabled_channels = mask;
+ if (outinfo->writes_pointsize ||
+ outinfo->writes_layer ||
+ outinfo->writes_viewport_index) {
+ pos_args[1].enabled_channels = ((outinfo->writes_pointsize == true ? 1 : 0) |
+ (outinfo->writes_layer == true ? 4 : 0));
pos_args[1].valid_mask = 0;
pos_args[1].done = 0;
pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
pos_args[1].compr = 0;
- pos_args[1].out[0] = ctx->f32zero; /* X */
- pos_args[1].out[1] = ctx->f32zero; /* Y */
- pos_args[1].out[2] = ctx->f32zero; /* Z */
- pos_args[1].out[3] = ctx->f32zero; /* W */
+ pos_args[1].out[0] = ctx->ac.f32_0; /* X */
+ pos_args[1].out[1] = ctx->ac.f32_0; /* Y */
+ pos_args[1].out[2] = ctx->ac.f32_0; /* Z */
+ pos_args[1].out[3] = ctx->ac.f32_0; /* W */
if (outinfo->writes_pointsize == true)
pos_args[1].out[0] = psize_value;
if (outinfo->writes_layer == true)
pos_args[1].out[2] = layer_value;
- if (outinfo->writes_viewport_index == true)
- pos_args[1].out[3] = viewport_index_value;
+ if (outinfo->writes_viewport_index == true) {
+ if (ctx->options->chip_class >= GFX9) {
+ /* GFX9 has the layer in out.z[10:0] and the viewport
+ * index in out.z[19:16].
+ */
+ LLVMValueRef v = viewport_index_value;
+ v = ac_to_integer(&ctx->ac, v);
+ v = LLVMBuildShl(ctx->builder, v,
+ LLVMConstInt(ctx->ac.i32, 16, false),
+ "");
+ v = LLVMBuildOr(ctx->builder, v,
+ ac_to_integer(&ctx->ac, pos_args[1].out[2]), "");
+
+ pos_args[1].out[2] = ac_to_float(&ctx->ac, v);
+ pos_args[1].enabled_channels |= 1 << 2;
+ } else {
+ pos_args[1].out[3] = viewport_index_value;
+ pos_args[1].enabled_channels |= 1 << 3;
+ }
+ }
}
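/* A sketch of the GFX9 packing done above: layer and viewport index share
 * the Z channel of the second position export,
 *
 *   pos1.z = (viewport_index << 16) | layer;   // layer in [10:0], index in [19:16]
 *
 * while pre-GFX9 parts keep the viewport index in the W channel.
 */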
for (i = 0; i < 4; i++) {
if (pos_args[i].out[0])
continue;
for (unsigned j = 0; j < 4; j++)
- values[j] = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
- ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
+ values[j] = ac_to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
+ ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
if (i == VARYING_SLOT_LAYER) {
target = V_008DFC_SQ_EXP_PARAM + param_count;
ctx->shader_info->vs.vgpr_comp_cnt = MAX2(2,
ctx->shader_info->vs.vgpr_comp_cnt);
for (unsigned j = 1; j < 4; j++)
- values[j] = ctx->f32zero;
+ values[j] = ctx->ac.f32_0;
si_llvm_init_export_args(ctx, values, target, &args);
ac_build_export(&ctx->ac, &args);
outinfo->export_prim_id = true;
{
int j;
uint64_t max_output_written = 0;
+ LLVMValueRef lds_base = NULL;
+
for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
- LLVMValueRef *out_ptr = &ctx->nir->outputs[i * 4];
int param_index;
int length = 4;
param_index = shader_io_get_unique_index(i);
max_output_written = MAX2(param_index + (length > 4), max_output_written);
+ }
+
+ outinfo->esgs_itemsize = (max_output_written + 1) * 16;
+
+ if (ctx->ac.chip_class >= GFX9) {
+ unsigned itemsize_dw = outinfo->esgs_itemsize / 4;
+ LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
+ LLVMValueRef wave_idx = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
+ LLVMConstInt(ctx->ac.i32, 24, false),
+ LLVMConstInt(ctx->ac.i32, 4, false), false);
+ vertex_idx = LLVMBuildOr(ctx->ac.builder, vertex_idx,
+ LLVMBuildMul(ctx->ac.builder, wave_idx,
+ LLVMConstInt(ctx->ac.i32, 64, false), ""), "");
+ lds_base = LLVMBuildMul(ctx->ac.builder, vertex_idx,
+ LLVMConstInt(ctx->ac.i32, itemsize_dw, 0), "");
+ }
+
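/* A sketch of the LDS addressing set up above for the merged GFX9 ES/GS
 * stage: each ES vertex owns one esgs_itemsize slot, indexed by its
 * position within the merged wave group,
 *
 *   vertex_idx = wave_idx * 64 + thread_id          // wave_idx from merged_wave_info[27:24]
 *   lds_base   = vertex_idx * (esgs_itemsize / 4)   // base address in dwords
 *
 * and each output dword is then stored at lds_base + param_index * 4 + j.
 */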
+ for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
+ LLVMValueRef dw_addr;
+ LLVMValueRef *out_ptr = &ctx->nir->outputs[i * 4];
+ int param_index;
+ int length = 4;
+
+ if (!(ctx->output_mask & (1ull << i)))
+ continue;
+
+ if (i == VARYING_SLOT_CLIP_DIST0)
+ length = ctx->num_output_clips + ctx->num_output_culls;
+
+ param_index = shader_io_get_unique_index(i);
+ if (lds_base) {
+ dw_addr = LLVMBuildAdd(ctx->builder, lds_base,
+ LLVMConstInt(ctx->ac.i32, param_index * 4, false),
+ "");
+ }
for (j = 0; j < length; j++) {
LLVMValueRef out_val = LLVMBuildLoad(ctx->builder, out_ptr[j], "");
- out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->i32, "");
-
- ac_build_buffer_store_dword(&ctx->ac,
- ctx->esgs_ring,
- out_val, 1,
- NULL, ctx->es2gs_offset,
- (4 * param_index + j) * 4,
- 1, 1, true, true);
+ out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->ac.i32, "");
+
+ if (ctx->ac.chip_class >= GFX9) {
+ ac_lds_store(&ctx->ac, dw_addr,
+ LLVMBuildLoad(ctx->builder, out_ptr[j], ""));
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr, ctx->ac.i32_1, "");
+ } else {
+ ac_build_buffer_store_dword(&ctx->ac,
+ ctx->esgs_ring,
+ out_val, 1,
+ NULL, ctx->es2gs_offset,
+ (4 * param_index + j) * 4,
+ 1, 1, true, true);
+ }
}
}
- outinfo->esgs_itemsize = (max_output_written + 1) * 16;
}
static void
if (length > 4)
mark_tess_output(ctx, false, param + 1);
LLVMValueRef dw_addr = LLVMBuildAdd(ctx->builder, base_dw_addr,
- LLVMConstInt(ctx->i32, param * 4, false),
+ LLVMConstInt(ctx->ac.i32, param * 4, false),
"");
for (unsigned j = 0; j < length; j++) {
- lds_store(ctx, dw_addr,
- LLVMBuildLoad(ctx->builder, out_ptr[j], ""));
- dw_addr = LLVMBuildAdd(ctx->builder, dw_addr, ctx->i32one, "");
+ ac_lds_store(&ctx->ac, dw_addr,
+ LLVMBuildLoad(ctx->builder, out_ptr[j], ""));
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr, ctx->ac.i32_1, "");
}
}
}
ac_nir_build_if(&if_ctx, ctx,
LLVMBuildICmp(ctx->builder, LLVMIntEQ,
- invocation_id, ctx->i32zero, ""));
+ invocation_id, ctx->ac.i32_0, ""));
tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
mark_tess_output(ctx, true, tess_outer_index);
lds_base = get_tcs_out_current_patch_data_offset(ctx);
lds_inner = LLVMBuildAdd(ctx->builder, lds_base,
- LLVMConstInt(ctx->i32, tess_inner_index * 4, false), "");
+ LLVMConstInt(ctx->ac.i32, tess_inner_index * 4, false), "");
lds_outer = LLVMBuildAdd(ctx->builder, lds_base,
- LLVMConstInt(ctx->i32, tess_outer_index * 4, false), "");
+ LLVMConstInt(ctx->ac.i32, tess_outer_index * 4, false), "");
for (i = 0; i < 4; i++) {
- inner[i] = LLVMGetUndef(ctx->i32);
- outer[i] = LLVMGetUndef(ctx->i32);
+ inner[i] = LLVMGetUndef(ctx->ac.i32);
+ outer[i] = LLVMGetUndef(ctx->ac.i32);
}
// LINES reversal
if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
- outer[0] = out[1] = lds_load(ctx, lds_outer);
+ outer[0] = out[1] = ac_lds_load(&ctx->ac, lds_outer);
lds_outer = LLVMBuildAdd(ctx->builder, lds_outer,
- LLVMConstInt(ctx->i32, 1, false), "");
- outer[1] = out[0] = lds_load(ctx, lds_outer);
+ LLVMConstInt(ctx->ac.i32, 1, false), "");
+ outer[1] = out[0] = ac_lds_load(&ctx->ac, lds_outer);
} else {
for (i = 0; i < outer_comps; i++) {
outer[i] = out[i] =
- lds_load(ctx, lds_outer);
+ ac_lds_load(&ctx->ac, lds_outer);
lds_outer = LLVMBuildAdd(ctx->builder, lds_outer,
- LLVMConstInt(ctx->i32, 1, false), "");
+ LLVMConstInt(ctx->ac.i32, 1, false), "");
}
for (i = 0; i < inner_comps; i++) {
inner[i] = out[outer_comps+i] =
- lds_load(ctx, lds_inner);
+ ac_lds_load(&ctx->ac, lds_inner);
lds_inner = LLVMBuildAdd(ctx->builder, lds_inner,
- LLVMConstInt(ctx->i32, 1, false), "");
+ LLVMConstInt(ctx->ac.i32, 1, false), "");
}
}
buffer = ctx->hs_ring_tess_factor;
tf_base = ctx->tess_factor_offset;
byteoffset = LLVMBuildMul(ctx->builder, rel_patch_id,
- LLVMConstInt(ctx->i32, 4 * stride, false), "");
+ LLVMConstInt(ctx->ac.i32, 4 * stride, false), "");
+ unsigned tf_offset = 0;
+
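+	/* SI-VI require a dynamic HS control word at offset 0 of the tess factor ring, which pushes the factors back by 4 bytes; GFX9 does not. */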
+ if (ctx->options->chip_class <= VI) {
+ ac_nir_build_if(&inner_if_ctx, ctx,
+ LLVMBuildICmp(ctx->builder, LLVMIntEQ,
+ rel_patch_id, ctx->ac.i32_0, ""));
- ac_nir_build_if(&inner_if_ctx, ctx,
- LLVMBuildICmp(ctx->builder, LLVMIntEQ,
- rel_patch_id, ctx->i32zero, ""));
+ /* Store the dynamic HS control word. */
+ ac_build_buffer_store_dword(&ctx->ac, buffer,
+ LLVMConstInt(ctx->ac.i32, 0x80000000, false),
+ 1, ctx->ac.i32_0, tf_base,
+ 0, 1, 0, true, false);
+ tf_offset += 4;
- /* Store the dynamic HS control word. */
- ac_build_buffer_store_dword(&ctx->ac, buffer,
- LLVMConstInt(ctx->i32, 0x80000000, false),
- 1, ctx->i32zero, tf_base,
- 0, 1, 0, true, false);
- ac_nir_build_endif(&inner_if_ctx);
+ ac_nir_build_endif(&inner_if_ctx);
+ }
/* Store the tessellation factors. */
ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
MIN2(stride, 4), byteoffset, tf_base,
- 4, 1, 0, true, false);
+ tf_offset, 1, 0, true, false);
if (vec1)
ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
stride - 4, byteoffset, tf_base,
- 20, 1, 0, true, false);
+ 16 + tf_offset, 1, 0, true, false);
- //TODO store to offchip for TES to read - only if TES reads them
- if (1) {
+	/* Store to offchip for TES to read - only if TES reads them. */
+ if (ctx->options->key.tcs.tes_reads_tess_factors) {
LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
LLVMValueRef tf_inner_offset;
unsigned param_outer, param_inner;
param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
- LLVMConstInt(ctx->i32, param_outer, 0));
+ LLVMConstInt(ctx->ac.i32, param_outer, 0));
outer_vec = ac_build_gather_values(&ctx->ac, outer,
util_next_power_of_two(outer_comps));
if (inner_comps) {
param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
- LLVMConstInt(ctx->i32, param_inner, 0));
+ LLVMConstInt(ctx->ac.i32, param_inner, 0));
inner_vec = inner_comps == 1 ? inner[0] :
ac_build_gather_values(&ctx->ac, inner, inner_comps);
args.target = V_008DFC_SQ_EXP_MRTZ;
args.compr = false;
- args.out[0] = LLVMGetUndef(ctx->f32); /* R, depth */
- args.out[1] = LLVMGetUndef(ctx->f32); /* G, stencil test val[0:7], stencil op val[8:15] */
- args.out[2] = LLVMGetUndef(ctx->f32); /* B, sample mask */
- args.out[3] = LLVMGetUndef(ctx->f32); /* A, alpha to mask */
+ args.out[0] = LLVMGetUndef(ctx->ac.f32); /* R, depth */
+ args.out[1] = LLVMGetUndef(ctx->ac.f32); /* G, stencil test val[0:7], stencil op val[8:15] */
+ args.out[2] = LLVMGetUndef(ctx->ac.f32); /* B, sample mask */
+ args.out[3] = LLVMGetUndef(ctx->ac.f32); /* A, alpha to mask */
if (depth) {
args.out[0] = depth;
if (i == FRAG_RESULT_DEPTH) {
ctx->shader_info->fs.writes_z = true;
- depth = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
+ depth = ac_to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
ctx->nir->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
} else if (i == FRAG_RESULT_STENCIL) {
ctx->shader_info->fs.writes_stencil = true;
- stencil = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
+ stencil = ac_to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
ctx->nir->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
} else if (i == FRAG_RESULT_SAMPLE_MASK) {
ctx->shader_info->fs.writes_sample_mask = true;
- samplemask = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
+ samplemask = ac_to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
ctx->nir->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
} else {
bool last = false;
for (unsigned j = 0; j < 4; j++)
- values[j] = to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
+ values[j] = ac_to_float(&ctx->ac, LLVMBuildLoad(ctx->builder,
ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
if (!ctx->shader_info->fs.writes_z && !ctx->shader_info->fs.writes_stencil && !ctx->shader_info->fs.writes_sample_mask)
{
if ((ctx->stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_es) ||
(ctx->stage == MESA_SHADER_TESS_EVAL && ctx->options->key.tes.as_es)) {
- ctx->esgs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_ESGS_VS, false));
+ ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_VS, false));
}
if (ctx->is_gs_copy_shader) {
- ctx->gsvs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_GSVS_VS, false));
+ ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
}
if (ctx->stage == MESA_SHADER_GEOMETRY) {
LLVMValueRef tmp;
- ctx->esgs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_ESGS_GS, false));
- ctx->gsvs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_GSVS_GS, false));
+ ctx->esgs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_ESGS_GS, false));
+ ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
- ctx->gsvs_ring = LLVMBuildBitCast(ctx->builder, ctx->gsvs_ring, ctx->v4i32, "");
+ ctx->gsvs_ring = LLVMBuildBitCast(ctx->builder, ctx->gsvs_ring, ctx->ac.v4i32, "");
- ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->i32, 2, false), "");
- tmp = LLVMBuildExtractElement(ctx->builder, ctx->gsvs_ring, ctx->i32one, "");
+ ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, ctx->gsvs_num_entries, LLVMConstInt(ctx->ac.i32, 2, false), "");
+ tmp = LLVMBuildExtractElement(ctx->builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
tmp = LLVMBuildOr(ctx->builder, tmp, ctx->gsvs_ring_stride, "");
- ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, tmp, ctx->i32one, "");
+ ctx->gsvs_ring = LLVMBuildInsertElement(ctx->builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
}
if (ctx->stage == MESA_SHADER_TESS_CTRL ||
ctx->stage == MESA_SHADER_TESS_EVAL) {
- ctx->hs_ring_tess_offchip = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_HS_TESS_OFFCHIP, false));
- ctx->hs_ring_tess_factor = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_HS_TESS_FACTOR, false));
+ ctx->hs_ring_tess_offchip = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_OFFCHIP, false));
+ ctx->hs_ring_tess_factor = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_HS_TESS_FACTOR, false));
}
}
ac_nir_get_max_workgroup_size(enum chip_class chip_class,
const struct nir_shader *nir)
{
- switch (nir->stage) {
+ switch (nir->info.stage) {
case MESA_SHADER_TESS_CTRL:
return chip_class >= CIK ? 128 : 64;
case MESA_SHADER_GEOMETRY:
- return 64;
+ return chip_class >= GFX9 ? 128 : 64;
case MESA_SHADER_COMPUTE:
break;
default:
return max_workgroup_size;
}
+/* Fix up for the HW not emitting the TCS regs if there are no HS threads. */
+static void ac_nir_fixup_ls_hs_input_vgprs(struct nir_to_llvm_context *ctx)
+{
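+	/* merged_wave_info[8:16) is the HS thread count; if it is zero the HW skipped the TCS VGPRs, so the LS inputs show up in the slots declared for the TCS values. */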
+ LLVMValueRef count = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
+ LLVMConstInt(ctx->ac.i32, 8, false),
+ LLVMConstInt(ctx->ac.i32, 8, false), false);
+ LLVMValueRef hs_empty = LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, count,
+ LLVMConstInt(ctx->ac.i32, 0, false), "");
+ ctx->abi.instance_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->rel_auto_id, ctx->abi.instance_id, "");
+ ctx->vs_prim_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->abi.vertex_id, ctx->vs_prim_id, "");
+ ctx->rel_auto_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->tcs_rel_ids, ctx->rel_auto_id, "");
+ ctx->abi.vertex_id = LLVMBuildSelect(ctx->ac.builder, hs_empty, ctx->tcs_patch_id, ctx->abi.vertex_id, "");
+}
+
+static void prepare_gs_input_vgprs(struct nir_to_llvm_context *ctx)
+{
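+	/* GFX9 packs two 16-bit ES vertex offsets into each input VGPR; unpack them into six separate values. */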
+	for (int i = 5; i >= 0; --i) {
+ ctx->gs_vtx_offset[i] = ac_build_bfe(&ctx->ac, ctx->gs_vtx_offset[i & ~1],
+ LLVMConstInt(ctx->ac.i32, (i & 1) * 16, false),
+ LLVMConstInt(ctx->ac.i32, 16, false), false);
+ }
+
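+	/* The GS wave id is packed into merged_wave_info[16:24). */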
+ ctx->gs_wave_id = ac_build_bfe(&ctx->ac, ctx->merged_wave_info,
+ LLVMConstInt(ctx->ac.i32, 16, false),
+ LLVMConstInt(ctx->ac.i32, 8, false), false);
+}
+
void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
struct nir_shader *nir, struct nir_to_llvm_context *nctx)
{
if (nctx)
nctx->nir = &ctx;
- ctx.stage = nir->stage;
+ ctx.stage = nir->info.stage;
ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
setup_locals(&ctx, func);
- if (nir->stage == MESA_SHADER_COMPUTE)
+ if (nir->info.stage == MESA_SHADER_COMPUTE)
setup_shared(&ctx, nir);
visit_cf_list(&ctx, &func->impl->body);
static
LLVMModuleRef ac_translate_nir_to_llvm(LLVMTargetMachineRef tm,
- struct nir_shader *nir,
+ struct nir_shader *const *shaders,
+ int shader_count,
struct ac_shader_variant_info *shader_info,
const struct ac_nir_compiler_options *options)
{
ctx.context = LLVMContextCreate();
ctx.module = LLVMModuleCreateWithNameInContext("shader", ctx.context);
- ac_llvm_context_init(&ctx.ac, ctx.context);
+ ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class);
ctx.ac.module = ctx.module;
-
- memset(shader_info, 0, sizeof(*shader_info));
-
- ac_nir_shader_info_pass(nir, options, &shader_info->info);
-
LLVMSetTarget(ctx.module, options->supports_spill ? "amdgcn-mesa-mesa3d" : "amdgcn--");
LLVMTargetDataRef data_layout = LLVMCreateTargetDataLayout(tm);
LLVMDisposeTargetData(data_layout);
LLVMDisposeMessage(data_layout_str);
- setup_types(&ctx);
-
ctx.builder = LLVMCreateBuilderInContext(ctx.context);
ctx.ac.builder = ctx.builder;
- ctx.stage = nir->stage;
- ctx.max_workgroup_size = ac_nir_get_max_workgroup_size(ctx.options->chip_class, nir);
+
+ memset(shader_info, 0, sizeof(*shader_info));
+
+	for (int i = 0; i < shader_count; ++i)
+ ac_nir_shader_info_pass(shaders[i], options, &shader_info->info);
for (i = 0; i < AC_UD_MAX_SETS; i++)
shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
for (i = 0; i < AC_UD_MAX_UD; i++)
shader_info->user_sgprs_locs.shader_data[i].sgpr_idx = -1;
- create_function(&ctx);
+ ctx.max_workgroup_size = 0;
+ for (int i = 0; i < shader_count; ++i) {
+ ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
+ ac_nir_get_max_workgroup_size(ctx.options->chip_class,
+ shaders[i]));
+ }
- if (nir->stage == MESA_SHADER_GEOMETRY) {
- ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.i32, "gs_next_vertex");
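+	/* Build the function for the last hardware stage; for merged shaders the previous stage's arguments are appended as well. */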
+ create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
+ shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);
- ctx.gs_max_out_vertices = nir->info.gs.vertices_out;
- } else if (nir->stage == MESA_SHADER_TESS_EVAL) {
- ctx.tes_primitive_mode = nir->info.tess.primitive_mode;
- } else if (nir->stage == MESA_SHADER_VERTEX) {
- if (shader_info->info.vs.needs_instance_id) {
- ctx.shader_info->vs.vgpr_comp_cnt =
- MAX2(3, ctx.shader_info->vs.vgpr_comp_cnt);
+ ctx.abi.inputs = &ctx.inputs[0];
+ ctx.abi.emit_outputs = handle_shader_outputs_post;
+ ctx.abi.emit_vertex = visit_emit_vertex;
+ ctx.abi.load_ssbo = radv_load_ssbo;
+ ctx.abi.load_sampler_desc = radv_get_sampler_desc;
+ ctx.abi.clamp_shadow_reference = false;
+
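+	/* Merged shaders start with a full exec mask; each stage below branches on its own thread count. */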
+ if (shader_count >= 2)
+ ac_init_exec_full_mask(&ctx.ac);
+
+ if (ctx.ac.chip_class == GFX9 &&
+ shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
+ ac_nir_fixup_ls_hs_input_vgprs(&ctx);
+
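+	/* Emit every shader of the (possibly merged) pipeline into the same LLVM function. */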
+	for (int i = 0; i < shader_count; ++i) {
+ ctx.stage = shaders[i]->info.stage;
+ ctx.output_mask = 0;
+ ctx.tess_outputs_written = 0;
+ ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
+ ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;
+
+ if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
+ ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
+
+ ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
+ } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
+ ctx.tcs_outputs_read = shaders[i]->info.outputs_read;
+ ctx.tcs_patch_outputs_read = shaders[i]->info.patch_outputs_read;
+ } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
+ ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
+ } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
+ if (shader_info->info.vs.needs_instance_id) {
+ ctx.shader_info->vs.vgpr_comp_cnt =
+ MAX2(3, ctx.shader_info->vs.vgpr_comp_cnt);
+ }
+ } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
+ shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
}
- } else if (nir->stage == MESA_SHADER_FRAGMENT) {
- shader_info->fs.can_discard = nir->info.fs.uses_discard;
- }
- ac_setup_rings(&ctx);
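+		/* In a merged shader, wait for the previous stage to finish before starting the next one. */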
+ if (i)
+ emit_barrier(&ctx);
- ctx.num_output_clips = nir->info.clip_distance_array_size;
- ctx.num_output_culls = nir->info.cull_distance_array_size;
+ ac_setup_rings(&ctx);
- nir_foreach_variable(variable, &nir->inputs)
- handle_shader_input_decl(&ctx, variable);
+ LLVMBasicBlockRef merge_block;
+ if (shader_count >= 2) {
+ LLVMValueRef fn = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
+ LLVMBasicBlockRef then_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
+ merge_block = LLVMAppendBasicBlockInContext(ctx.ac.context, fn, "");
- if (nir->stage == MESA_SHADER_FRAGMENT)
- handle_fs_inputs_pre(&ctx, nir);
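+			/* Only the first merged_wave_info[8*i : 8*i+8) lanes of the wave belong to stage i; skip the rest. */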
+ LLVMValueRef count = ac_build_bfe(&ctx.ac, ctx.merged_wave_info,
+ LLVMConstInt(ctx.ac.i32, 8 * i, false),
+ LLVMConstInt(ctx.ac.i32, 8, false), false);
+ LLVMValueRef thread_id = ac_get_thread_id(&ctx.ac);
+ LLVMValueRef cond = LLVMBuildICmp(ctx.ac.builder, LLVMIntULT,
+ thread_id, count, "");
+ LLVMBuildCondBr(ctx.ac.builder, cond, then_block, merge_block);
- ctx.abi.chip_class = options->chip_class;
- ctx.abi.inputs = &ctx.inputs[0];
- ctx.abi.emit_outputs = handle_shader_outputs_post;
- ctx.abi.load_ssbo = radv_load_ssbo;
- ctx.abi.load_sampler_desc = radv_get_sampler_desc;
+ LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
+ }
- nir_foreach_variable(variable, &nir->outputs)
- scan_shader_output_decl(&ctx, variable);
+ if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
+ handle_fs_inputs(&ctx, shaders[i]);
+		else if (shaders[i]->info.stage == MESA_SHADER_VERTEX)
+ handle_vs_inputs(&ctx, shaders[i]);
+		else if (shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
+ prepare_gs_input_vgprs(&ctx);
+
+ nir_foreach_variable(variable, &shaders[i]->outputs)
+ scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);
- ac_nir_translate(&ctx.ac, &ctx.abi, nir, &ctx);
+ ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i], &ctx);
+
+ if (shader_count >= 2) {
+ LLVMBuildBr(ctx.ac.builder, merge_block);
+ LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
+ }
+
+ if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
+ unsigned addclip = shaders[i]->info.clip_distance_array_size +
+ shaders[i]->info.cull_distance_array_size > 4;
+ shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
+ shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
+ shaders[i]->info.gs.vertices_out;
+ } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
+ shader_info->tcs.outputs_written = ctx.tess_outputs_written;
+ shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
+ } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
+ shader_info->vs.outputs_written = ctx.tess_outputs_written;
+ }
+ }
LLVMBuildRetVoid(ctx.builder);
ac_llvm_finalize_module(&ctx);
- ac_nir_eliminate_const_vs_outputs(&ctx);
-
- if (nir->stage == MESA_SHADER_GEOMETRY) {
- unsigned addclip = ctx.num_output_clips + ctx.num_output_culls > 4;
- shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
- shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
- nir->info.gs.vertices_out;
- } else if (nir->stage == MESA_SHADER_TESS_CTRL) {
- shader_info->tcs.outputs_written = ctx.tess_outputs_written;
- shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
- } else if (nir->stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
- shader_info->vs.outputs_written = ctx.tess_outputs_written;
- }
+ if (shader_count == 1)
+ ac_nir_eliminate_const_vs_outputs(&ctx);
return ctx.module;
}
shader_info->num_input_sgprs + 3);
}
+static void
+ac_fill_shader_info(struct ac_shader_variant_info *shader_info, struct nir_shader *nir, const struct ac_nir_compiler_options *options)
+{
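+	/* Copy per-stage info out of the NIR shader; called once per shader of a merged pipeline. */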
+ switch (nir->info.stage) {
+ case MESA_SHADER_COMPUTE:
+ for (int i = 0; i < 3; ++i)
+ shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
+ break;
+ case MESA_SHADER_FRAGMENT:
+ shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
+ break;
+ case MESA_SHADER_GEOMETRY:
+ shader_info->gs.vertices_in = nir->info.gs.vertices_in;
+ shader_info->gs.vertices_out = nir->info.gs.vertices_out;
+ shader_info->gs.output_prim = nir->info.gs.output_primitive;
+ shader_info->gs.invocations = nir->info.gs.invocations;
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
+ shader_info->tes.spacing = nir->info.tess.spacing;
+ shader_info->tes.ccw = nir->info.tess.ccw;
+ shader_info->tes.point_mode = nir->info.tess.point_mode;
+ shader_info->tes.as_es = options->key.tes.as_es;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
+ break;
+ case MESA_SHADER_VERTEX:
+ shader_info->vs.as_es = options->key.vs.as_es;
+ shader_info->vs.as_ls = options->key.vs.as_ls;
+ /* in LS mode we need at least 1, invocation id needs 3, handled elsewhere */
+ if (options->key.vs.as_ls)
+ shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
+ break;
+ default:
+ break;
+ }
+}
+
void ac_compile_nir_shader(LLVMTargetMachineRef tm,
struct ac_shader_binary *binary,
struct ac_shader_config *config,
struct ac_shader_variant_info *shader_info,
- struct nir_shader *nir,
+ struct nir_shader *const *nir,
+ int nir_count,
const struct ac_nir_compiler_options *options,
bool dump_shader)
{
- LLVMModuleRef llvm_module = ac_translate_nir_to_llvm(tm, nir, shader_info,
+ LLVMModuleRef llvm_module = ac_translate_nir_to_llvm(tm, nir, nir_count, shader_info,
options);
- ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info, nir->stage, dump_shader, options->supports_spill);
- switch (nir->stage) {
- case MESA_SHADER_COMPUTE:
- for (int i = 0; i < 3; ++i)
- shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
- break;
- case MESA_SHADER_FRAGMENT:
- shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
- break;
- case MESA_SHADER_GEOMETRY:
- shader_info->gs.vertices_in = nir->info.gs.vertices_in;
- shader_info->gs.vertices_out = nir->info.gs.vertices_out;
- shader_info->gs.output_prim = nir->info.gs.output_primitive;
- shader_info->gs.invocations = nir->info.gs.invocations;
- break;
- case MESA_SHADER_TESS_EVAL:
- shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
- shader_info->tes.spacing = nir->info.tess.spacing;
- shader_info->tes.ccw = nir->info.tess.ccw;
- shader_info->tes.point_mode = nir->info.tess.point_mode;
- shader_info->tes.as_es = options->key.tes.as_es;
- break;
- case MESA_SHADER_TESS_CTRL:
- shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
- break;
- case MESA_SHADER_VERTEX:
- shader_info->vs.as_es = options->key.vs.as_es;
- shader_info->vs.as_ls = options->key.vs.as_ls;
- /* in LS mode we need at least 1, invocation id needs 3, handled elsewhere */
- if (options->key.vs.as_ls)
- shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
- break;
- default:
- break;
- }
+ ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info, nir[0]->info.stage, dump_shader, options->supports_spill);
+ for (int i = 0; i < nir_count; ++i)
+ ac_fill_shader_info(shader_info, nir[i], options);
}
static void
{
LLVMValueRef args[9];
args[0] = ctx->gsvs_ring;
- args[1] = LLVMBuildMul(ctx->builder, ctx->abi.vertex_id, LLVMConstInt(ctx->i32, 4, false), "");
- args[3] = ctx->i32zero;
- args[4] = ctx->i32one; /* OFFEN */
- args[5] = ctx->i32zero; /* IDXEN */
- args[6] = ctx->i32one; /* GLC */
- args[7] = ctx->i32one; /* SLC */
- args[8] = ctx->i32zero; /* TFE */
+ args[1] = LLVMBuildMul(ctx->builder, ctx->abi.vertex_id, LLVMConstInt(ctx->ac.i32, 4, false), "");
+ args[3] = ctx->ac.i32_0;
+ args[4] = ctx->ac.i32_1; /* OFFEN */
+ args[5] = ctx->ac.i32_0; /* IDXEN */
+ args[6] = ctx->ac.i32_1; /* GLC */
+ args[7] = ctx->ac.i32_1; /* SLC */
+ args[8] = ctx->ac.i32_0; /* TFE */
int idx = 0;
for (unsigned j = 0; j < length; j++) {
LLVMValueRef value;
- args[2] = LLVMConstInt(ctx->i32,
+ args[2] = LLVMConstInt(ctx->ac.i32,
(slot * 4 + j) *
ctx->gs_max_out_vertices * 16 * 4, false);
value = ac_build_intrinsic(&ctx->ac,
"llvm.SI.buffer.load.dword.i32.i32",
- ctx->i32, args, 9,
+ ctx->ac.i32, args, 9,
AC_FUNC_ATTR_READONLY |
AC_FUNC_ATTR_LEGACY);
LLVMBuildStore(ctx->builder,
- to_float(&ctx->ac, value), ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)]);
+ ac_to_float(&ctx->ac, value), ctx->nir->outputs[radeon_llvm_reg_index_soa(i, j)]);
}
idx += slot_inc;
}
ctx.options = options;
ctx.shader_info = shader_info;
- ac_llvm_context_init(&ctx.ac, ctx.context);
+ ac_llvm_context_init(&ctx.ac, ctx.context, options->chip_class);
ctx.ac.module = ctx.module;
ctx.is_gs_copy_shader = true;
LLVMSetTarget(ctx.module, "amdgcn--");
- setup_types(&ctx);
ctx.builder = LLVMCreateBuilderInContext(ctx.context);
ctx.ac.builder = ctx.builder;
ctx.stage = MESA_SHADER_VERTEX;
- create_function(&ctx);
+ create_function(&ctx, MESA_SHADER_VERTEX, false, MESA_SHADER_VERTEX);
ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
ac_setup_rings(&ctx);
ctx.nir = &nir_ctx;
nir_foreach_variable(variable, &geom_shader->outputs) {
- scan_shader_output_decl(&ctx, variable);
+ scan_shader_output_decl(&ctx, variable, geom_shader, MESA_SHADER_VERTEX);
handle_shader_output_decl(&nir_ctx, geom_shader, variable);
}