#include "../vulkan/radv_descriptor_set.h"
#include "util/bitscan.h"
#include <llvm-c/Transforms/Scalar.h>
+#include "ac_shader_info.h"
+#include "ac_exp_param.h"
enum radeon_llvm_calling_convention {
RADEON_LLVM_AMDGPU_VS = 87,
struct ac_llvm_context ac;
const struct ac_nir_compiler_options *options;
struct ac_shader_variant_info *shader_info;
-
+ unsigned max_workgroup_size;
LLVMContextRef context;
LLVMModuleRef module;
LLVMBuilderRef builder;
LLVMValueRef rel_auto_id;
LLVMValueRef vs_prim_id;
LLVMValueRef instance_id;
-
+ LLVMValueRef ls_out_layout;
LLVMValueRef es2gs_offset;
+ LLVMValueRef tcs_offchip_layout;
+ LLVMValueRef tcs_out_offsets;
+ LLVMValueRef tcs_out_layout;
+ LLVMValueRef tcs_in_layout;
+ LLVMValueRef oc_lds;
+ LLVMValueRef tess_factor_offset;
+ LLVMValueRef tcs_patch_id;
+ LLVMValueRef tcs_rel_ids;
+ LLVMValueRef tes_rel_patch_id;
+ LLVMValueRef tes_patch_id;
+ LLVMValueRef tes_u;
+ LLVMValueRef tes_v;
+
LLVMValueRef gsvs_ring_stride;
LLVMValueRef gsvs_num_entries;
LLVMValueRef gs2vs_offset;
LLVMValueRef esgs_ring;
LLVMValueRef gsvs_ring;
+ LLVMValueRef hs_ring_tess_offchip;
+ LLVMValueRef hs_ring_tess_factor;
LLVMValueRef prim_mask;
- LLVMValueRef sample_positions;
+ LLVMValueRef sample_pos_offset;
LLVMValueRef persp_sample, persp_center, persp_centroid;
LLVMValueRef linear_sample, linear_center, linear_centroid;
LLVMValueRef front_face;
LLVMTypeRef v16i8;
LLVMTypeRef voidt;
+ LLVMValueRef i1true;
+ LLVMValueRef i1false;
LLVMValueRef i32zero;
LLVMValueRef i32one;
LLVMValueRef f32zero;
int num_locals;
LLVMValueRef *locals;
bool has_ddxy;
- uint8_t num_input_clips;
- uint8_t num_input_culls;
uint8_t num_output_clips;
uint8_t num_output_culls;
bool is_gs_copy_shader;
LLVMValueRef gs_next_vertex;
unsigned gs_max_out_vertices;
-};
-struct ac_tex_info {
- LLVMValueRef args[12];
- int arg_count;
- LLVMTypeRef dst_type;
- bool has_offset;
+ unsigned tes_primitive_mode;
+ uint64_t tess_outputs_written;
+ uint64_t tess_patch_outputs_written;
};
static LLVMValueRef get_sampler_desc(struct nir_to_llvm_context *ctx,
static unsigned shader_io_get_unique_index(gl_varying_slot slot)
{
+ /* handle patch indices separately */
+ if (slot == VARYING_SLOT_TESS_LEVEL_OUTER)
+ return 0;
+ if (slot == VARYING_SLOT_TESS_LEVEL_INNER)
+ return 1;
+ if (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_TESS_MAX)
+ return 2 + (slot - VARYING_SLOT_PATCH0);
+
if (slot == VARYING_SLOT_POS)
return 0;
if (slot == VARYING_SLOT_PSIZ)
return 1;
- if (slot == VARYING_SLOT_CLIP_DIST0 ||
- slot == VARYING_SLOT_CULL_DIST0)
+ if (slot == VARYING_SLOT_CLIP_DIST0)
return 2;
- if (slot == VARYING_SLOT_CLIP_DIST1 ||
- slot == VARYING_SLOT_CULL_DIST1)
- return 3;
+ /* index 3 is reserved for the second vec4 of the clip distances */
if (slot >= VARYING_SLOT_VAR0 && slot <= VARYING_SLOT_VAR31)
return 4 + (slot - VARYING_SLOT_VAR0);
unreachable("illegal slot in get unique index\n");
LLVMBuilderRef builder, LLVMTypeRef *return_types,
unsigned num_return_elems, LLVMTypeRef *param_types,
unsigned param_count, unsigned array_params_mask,
- unsigned sgpr_params, bool unsafe_math)
+ unsigned sgpr_params, unsigned max_workgroup_size,
+ bool unsafe_math)
{
LLVMTypeRef main_function_type, ret_type;
LLVMBasicBlockRef main_function_body;
for (unsigned i = 0; i < sgpr_params; ++i) {
if (array_params_mask & (1 << i)) {
LLVMValueRef P = LLVMGetParam(main_function, i);
- ac_add_function_attr(main_function, i + 1, AC_FUNC_ATTR_BYVAL);
+ ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_BYVAL);
ac_add_attr_dereferenceable(P, UINT64_MAX);
}
else {
- ac_add_function_attr(main_function, i + 1, AC_FUNC_ATTR_INREG);
+ ac_add_function_attr(ctx, main_function, i + 1, AC_FUNC_ATTR_INREG);
}
}
+ if (max_workgroup_size) {
+ ac_llvm_add_target_dep_function_attr(main_function,
+ "amdgpu-max-work-group-size",
+ max_workgroup_size);
+ }
if (unsafe_math) {
/* These were copied from some LLVM test. */
LLVMAddTargetDependentFunctionAttr(main_function,
LLVMValueRef ptr;
int addr_space;
- offset = LLVMConstInt(ctx->i32, idx, false);
+ offset = LLVMConstInt(ctx->i32, idx * 16, false);
ptr = ctx->shared_memory;
ptr = LLVMBuildGEP(ctx->builder, ptr, &offset, 1, "");
return value;
}
+static LLVMValueRef get_rel_patch_id(struct nir_to_llvm_context *ctx)
+{
+ switch (ctx->stage) {
+ case MESA_SHADER_TESS_CTRL:
+ return unpack_param(ctx, ctx->tcs_rel_ids, 0, 8);
+ case MESA_SHADER_TESS_EVAL:
+ return ctx->tes_rel_patch_id;
+ default:
+ unreachable("Illegal stage");
+ }
+}
+
+/* Tessellation shaders pass outputs to the next shader using LDS.
+ *
+ * LS outputs = TCS inputs
+ * TCS outputs = TES inputs
+ *
+ * The LDS layout is:
+ * - TCS inputs for patch 0
+ * - TCS inputs for patch 1
+ * - TCS inputs for patch 2 = get_tcs_in_current_patch_offset (if RelPatchID==2)
+ * - ...
+ * - TCS outputs for patch 0 = get_tcs_out_patch0_offset
+ * - Per-patch TCS outputs for patch 0 = get_tcs_out_patch0_patch_data_offset
+ * - TCS outputs for patch 1
+ * - Per-patch TCS outputs for patch 1
+ * - TCS outputs for patch 2 = get_tcs_out_current_patch_offset (if RelPatchID==2)
+ * - Per-patch TCS outputs for patch 2 = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
+ * - ...
+ *
+ * All three shaders VS(LS), TCS, TES share the same LDS space.
+ */
+static LLVMValueRef
+get_tcs_in_patch_stride(struct nir_to_llvm_context *ctx)
+{
+ if (ctx->stage == MESA_SHADER_VERTEX)
+ return unpack_param(ctx, ctx->ls_out_layout, 0, 13);
+ else if (ctx->stage == MESA_SHADER_TESS_CTRL)
+ return unpack_param(ctx, ctx->tcs_in_layout, 0, 13);
+ else {
+ assert(0);
+ return NULL;
+ }
+}
+
+static LLVMValueRef
+get_tcs_out_patch_stride(struct nir_to_llvm_context *ctx)
+{
+ return unpack_param(ctx, ctx->tcs_out_layout, 0, 13);
+}
+
+static LLVMValueRef
+get_tcs_out_patch0_offset(struct nir_to_llvm_context *ctx)
+{
+ return LLVMBuildMul(ctx->builder,
+ unpack_param(ctx, ctx->tcs_out_offsets, 0, 16),
+ LLVMConstInt(ctx->i32, 4, false), "");
+}
+
+static LLVMValueRef
+get_tcs_out_patch0_patch_data_offset(struct nir_to_llvm_context *ctx)
+{
+ return LLVMBuildMul(ctx->builder,
+ unpack_param(ctx, ctx->tcs_out_offsets, 16, 16),
+ LLVMConstInt(ctx->i32, 4, false), "");
+}
+
+static LLVMValueRef
+get_tcs_in_current_patch_offset(struct nir_to_llvm_context *ctx)
+{
+ LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
+ LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
+
+ return LLVMBuildMul(ctx->builder, patch_stride, rel_patch_id, "");
+}
+
+static LLVMValueRef
+get_tcs_out_current_patch_offset(struct nir_to_llvm_context *ctx)
+{
+ LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
+ LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
+ LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
+
+ return LLVMBuildAdd(ctx->builder, patch0_offset,
+ LLVMBuildMul(ctx->builder, patch_stride,
+ rel_patch_id, ""),
+ "");
+}
+
+static LLVMValueRef
+get_tcs_out_current_patch_data_offset(struct nir_to_llvm_context *ctx)
+{
+ LLVMValueRef patch0_patch_data_offset =
+ get_tcs_out_patch0_patch_data_offset(ctx);
+ LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
+ LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
+
+ return LLVMBuildAdd(ctx->builder, patch0_patch_data_offset,
+ LLVMBuildMul(ctx->builder, patch_stride,
+ rel_patch_id, ""),
+ "");
+}
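+
+/* A worked example of the helpers above, with illustrative (not real)
+ * values: assuming tcs_out_offsets packs patch0_offset/4 in bits [0:15]
+ * and patch0_patch_data_offset/4 in bits [16:31], and tcs_out_layout
+ * packs the per-patch output size in bits [0:12] (all in dwords), then
+ * with a packed patch0 offset of 0x100, a patch stride of 0x40 dwords
+ * and RelPatchID = 2:
+ *
+ *   get_tcs_out_patch0_offset()        = 0x100 * 4        = 0x400 dwords
+ *   get_tcs_out_current_patch_offset() = 0x400 + 2 * 0x40 = 0x480 dwords
+ */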
+
static void set_userdata_location(struct ac_userdata_info *ud_info, uint8_t sgpr_idx, uint8_t num_sgprs)
{
ud_info->sgpr_idx = sgpr_idx;
set_userdata_location(&ctx->shader_info->user_sgprs_locs.shader_data[idx], sgpr_idx, num_sgprs);
}
-#if 0
+
static void set_userdata_location_indirect(struct ac_userdata_info *ud_info, uint8_t sgpr_idx, uint8_t num_sgprs,
uint32_t indirect_offset)
{
ud_info->indirect = true;
ud_info->indirect_offset = indirect_offset;
}
-#endif
+
+static void declare_tess_lds(struct nir_to_llvm_context *ctx)
+{
+ unsigned lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
+ ctx->lds = LLVMBuildIntToPtr(ctx->builder, ctx->i32zero,
+ LLVMPointerType(LLVMArrayType(ctx->i32, lds_size / 4), LOCAL_ADDR_SPACE),
+ "tess_lds");
+}
+
+struct user_sgpr_info {
+ bool need_ring_offsets;
+ uint8_t sgpr_count;
+ bool indirect_all_descriptor_sets;
+};
+
+static void allocate_user_sgprs(struct nir_to_llvm_context *ctx,
+ struct user_sgpr_info *user_sgpr_info)
+{
+ memset(user_sgpr_info, 0, sizeof(struct user_sgpr_info));
+
+ /* until we sort out scratch/global buffers, always assign ring offsets for gs/vs/es */
+ if (ctx->stage == MESA_SHADER_GEOMETRY ||
+ ctx->stage == MESA_SHADER_VERTEX ||
+ ctx->stage == MESA_SHADER_TESS_CTRL ||
+ ctx->stage == MESA_SHADER_TESS_EVAL ||
+ ctx->is_gs_copy_shader)
+ user_sgpr_info->need_ring_offsets = true;
+
+ if (ctx->stage == MESA_SHADER_FRAGMENT &&
+ ctx->shader_info->info.ps.needs_sample_positions)
+ user_sgpr_info->need_ring_offsets = true;
+
+ /* 2 user sgprs will nearly always be allocated for scratch/rings */
+ if (ctx->options->supports_spill || user_sgpr_info->need_ring_offsets) {
+ user_sgpr_info->sgpr_count += 2;
+ }
+
+ switch (ctx->stage) {
+ case MESA_SHADER_COMPUTE:
+ user_sgpr_info->sgpr_count += ctx->shader_info->info.cs.grid_components_used;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ user_sgpr_info->sgpr_count += ctx->shader_info->info.ps.needs_sample_positions;
+ break;
+ case MESA_SHADER_VERTEX:
+ if (!ctx->is_gs_copy_shader) {
+ user_sgpr_info->sgpr_count += ctx->shader_info->info.vs.has_vertex_buffers ? 2 : 0;
+ if (ctx->shader_info->info.vs.needs_draw_id) {
+ user_sgpr_info->sgpr_count += 3;
+ } else {
+ user_sgpr_info->sgpr_count += 2;
+ }
+ }
+ if (ctx->options->key.vs.as_ls)
+ user_sgpr_info->sgpr_count++;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ user_sgpr_info->sgpr_count += 4;
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ user_sgpr_info->sgpr_count += 1;
+ break;
+ case MESA_SHADER_GEOMETRY:
+ user_sgpr_info->sgpr_count += 2;
+ break;
+ default:
+ break;
+ }
+
+ if (ctx->shader_info->info.needs_push_constants)
+ user_sgpr_info->sgpr_count += 2;
+
+ uint32_t remaining_sgprs = 16 - user_sgpr_info->sgpr_count;
+ if (remaining_sgprs / 2 < util_bitcount(ctx->shader_info->info.desc_set_used_mask)) {
+ user_sgpr_info->sgpr_count += 2;
+ user_sgpr_info->indirect_all_descriptor_sets = true;
+ } else {
+ user_sgpr_info->sgpr_count += util_bitcount(ctx->shader_info->info.desc_set_used_mask) * 2;
+ }
+}
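+
+/* A sketch of the budget above, with illustrative numbers: 16 user SGPRs
+ * are assumed available, and each directly bound descriptor set costs one
+ * 64-bit pointer (2 SGPRs). E.g. a vertex shader with ring offsets (2),
+ * vertex buffers (2), base vertex/start instance (2) and push constants
+ * (2) has 16 - 8 = 8 SGPRs left, enough for 4 direct sets; a fifth used
+ * set flips indirect_all_descriptor_sets, and all sets are then reached
+ * through a single 2-SGPR pointer to an array of set pointers.
+ */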
static void create_function(struct nir_to_llvm_context *ctx)
{
unsigned i;
unsigned num_sets = ctx->options->layout ? ctx->options->layout->num_sets : 0;
unsigned user_sgpr_idx;
- bool need_push_constants;
- bool need_ring_offsets = false;
+ struct user_sgpr_info user_sgpr_info;
- /* until we sort out scratch/global buffers always assign ring offsets for gs/vs/es */
- if (ctx->stage == MESA_SHADER_GEOMETRY ||
- ctx->stage == MESA_SHADER_VERTEX ||
- ctx->is_gs_copy_shader)
- need_ring_offsets = true;
-
- need_push_constants = true;
- if (!ctx->options->layout)
- need_push_constants = false;
- else if (!ctx->options->layout->push_constant_size &&
- !ctx->options->layout->dynamic_offset_count)
- need_push_constants = false;
-
- if (need_ring_offsets && !ctx->options->supports_spill) {
- arg_types[arg_idx++] = const_array(ctx->v16i8, 8); /* address of rings */
+ allocate_user_sgprs(ctx, &user_sgpr_info);
+ if (user_sgpr_info.need_ring_offsets && !ctx->options->supports_spill) {
+ arg_types[arg_idx++] = const_array(ctx->v16i8, 16); /* address of rings */
}
/* 1 for each descriptor set */
- for (unsigned i = 0; i < num_sets; ++i) {
- if (ctx->options->layout->set[i].layout->shader_stages & (1 << ctx->stage)) {
- array_params_mask |= (1 << arg_idx);
- arg_types[arg_idx++] = const_array(ctx->i8, 1024 * 1024);
+ if (!user_sgpr_info.indirect_all_descriptor_sets) {
+ for (unsigned i = 0; i < num_sets; ++i) {
+ if (ctx->options->layout->set[i].layout->shader_stages & (1 << ctx->stage)) {
+ array_params_mask |= (1 << arg_idx);
+ arg_types[arg_idx++] = const_array(ctx->i8, 1024 * 1024);
+ }
}
+ } else {
+ array_params_mask |= (1 << arg_idx);
+ arg_types[arg_idx++] = const_array(const_array(ctx->i8, 1024 * 1024), 32);
}
- if (need_push_constants) {
+ if (ctx->shader_info->info.needs_push_constants) {
/* 1 for push constants and dynamic descriptors */
array_params_mask |= (1 << arg_idx);
arg_types[arg_idx++] = const_array(ctx->i8, 1024 * 1024);
switch (ctx->stage) {
case MESA_SHADER_COMPUTE:
- arg_types[arg_idx++] = LLVMVectorType(ctx->i32, 3); /* grid size */
+ if (ctx->shader_info->info.cs.grid_components_used)
+ arg_types[arg_idx++] = LLVMVectorType(ctx->i32, ctx->shader_info->info.cs.grid_components_used); /* grid size */
user_sgpr_count = arg_idx;
arg_types[arg_idx++] = LLVMVectorType(ctx->i32, 3);
arg_types[arg_idx++] = ctx->i32;
break;
case MESA_SHADER_VERTEX:
if (!ctx->is_gs_copy_shader) {
- arg_types[arg_idx++] = const_array(ctx->v16i8, 16); /* vertex buffers */
+ if (ctx->shader_info->info.vs.has_vertex_buffers)
+ arg_types[arg_idx++] = const_array(ctx->v16i8, 16); /* vertex buffers */
arg_types[arg_idx++] = ctx->i32; // base vertex
arg_types[arg_idx++] = ctx->i32; // start instance
- arg_types[arg_idx++] = ctx->i32; // draw index
+ if (ctx->shader_info->info.vs.needs_draw_id)
+ arg_types[arg_idx++] = ctx->i32; // draw index
}
user_sgpr_count = arg_idx;
if (ctx->options->key.vs.as_es)
arg_types[arg_idx++] = ctx->i32; //es2gs offset
+ else if (ctx->options->key.vs.as_ls) {
+ arg_types[arg_idx++] = ctx->i32; //ls out layout
+ user_sgpr_count++;
+ }
sgpr_count = arg_idx;
arg_types[arg_idx++] = ctx->i32; // vertex id
if (!ctx->is_gs_copy_shader) {
arg_types[arg_idx++] = ctx->i32; // instance id
}
break;
+ case MESA_SHADER_TESS_CTRL:
+ arg_types[arg_idx++] = ctx->i32; // tcs offchip layout
+ arg_types[arg_idx++] = ctx->i32; // tcs out offsets
+ arg_types[arg_idx++] = ctx->i32; // tcs out layout
+ arg_types[arg_idx++] = ctx->i32; // tcs in layout
+ user_sgpr_count = arg_idx;
+ arg_types[arg_idx++] = ctx->i32; // param oc lds
+ arg_types[arg_idx++] = ctx->i32; // tess factor offset
+ sgpr_count = arg_idx;
+ arg_types[arg_idx++] = ctx->i32; // patch id
+ arg_types[arg_idx++] = ctx->i32; // rel ids;
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ arg_types[arg_idx++] = ctx->i32; // tcs offchip layout
+ user_sgpr_count = arg_idx;
+ if (ctx->options->key.tes.as_es) {
+ arg_types[arg_idx++] = ctx->i32; // OC LDS
+ arg_types[arg_idx++] = ctx->i32; // unused
+ arg_types[arg_idx++] = ctx->i32; // es2gs offset
+ } else {
+ arg_types[arg_idx++] = ctx->i32; // unused
+ arg_types[arg_idx++] = ctx->i32; // OC LDS
+ }
+ sgpr_count = arg_idx;
+ arg_types[arg_idx++] = ctx->f32; // tes_u
+ arg_types[arg_idx++] = ctx->f32; // tes_v
+ arg_types[arg_idx++] = ctx->i32; // tes rel patch id
+ arg_types[arg_idx++] = ctx->i32; // tes patch id
+ break;
case MESA_SHADER_GEOMETRY:
arg_types[arg_idx++] = ctx->i32; // gsvs stride
arg_types[arg_idx++] = ctx->i32; // gsvs num entries
arg_types[arg_idx++] = ctx->i32; // GS instance id
break;
case MESA_SHADER_FRAGMENT:
- arg_types[arg_idx++] = const_array(ctx->f32, 32); /* sample positions */
+ if (ctx->shader_info->info.ps.needs_sample_positions)
+ arg_types[arg_idx++] = ctx->i32; /* sample position offset */
user_sgpr_count = arg_idx;
arg_types[arg_idx++] = ctx->i32; /* prim mask */
sgpr_count = arg_idx;
ctx->main_function = create_llvm_function(
ctx->context, ctx->module, ctx->builder, NULL, 0, arg_types,
- arg_idx, array_params_mask, sgpr_count, ctx->options->unsafe_math);
+ arg_idx, array_params_mask, sgpr_count, ctx->max_workgroup_size,
+ ctx->options->unsafe_math);
set_llvm_calling_convention(ctx->main_function, ctx->stage);
ctx->shader_info->num_input_sgprs = 0;
arg_idx = 0;
user_sgpr_idx = 0;
- if (ctx->options->supports_spill || need_ring_offsets) {
+ if (ctx->options->supports_spill || user_sgpr_info.need_ring_offsets) {
set_userdata_location_shader(ctx, AC_UD_SCRATCH_RING_OFFSETS, user_sgpr_idx, 2);
user_sgpr_idx += 2;
if (ctx->options->supports_spill) {
- ctx->ring_offsets = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
- LLVMPointerType(ctx->i8, CONST_ADDR_SPACE),
- NULL, 0, AC_FUNC_ATTR_READNONE);
+ ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
+ LLVMPointerType(ctx->i8, CONST_ADDR_SPACE),
+ NULL, 0, AC_FUNC_ATTR_READNONE);
ctx->ring_offsets = LLVMBuildBitCast(ctx->builder, ctx->ring_offsets,
- const_array(ctx->v16i8, 8), "");
+ const_array(ctx->v16i8, 16), "");
} else
ctx->ring_offsets = LLVMGetParam(ctx->main_function, arg_idx++);
}
- for (unsigned i = 0; i < num_sets; ++i) {
- if (ctx->options->layout->set[i].layout->shader_stages & (1 << ctx->stage)) {
- set_userdata_location(&ctx->shader_info->user_sgprs_locs.descriptor_sets[i], user_sgpr_idx, 2);
- user_sgpr_idx += 2;
- ctx->descriptor_sets[i] =
- LLVMGetParam(ctx->main_function, arg_idx++);
- } else
- ctx->descriptor_sets[i] = NULL;
+ if (!user_sgpr_info.indirect_all_descriptor_sets) {
+ for (unsigned i = 0; i < num_sets; ++i) {
+ if (ctx->options->layout->set[i].layout->shader_stages & (1 << ctx->stage)) {
+ set_userdata_location(&ctx->shader_info->user_sgprs_locs.descriptor_sets[i], user_sgpr_idx, 2);
+ user_sgpr_idx += 2;
+ ctx->descriptor_sets[i] =
+ LLVMGetParam(ctx->main_function, arg_idx++);
+ } else
+ ctx->descriptor_sets[i] = NULL;
+ }
+ } else {
+ uint32_t desc_sgpr_idx = user_sgpr_idx;
+ LLVMValueRef desc_sets = LLVMGetParam(ctx->main_function, arg_idx++);
+ set_userdata_location_shader(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS, user_sgpr_idx, 2);
+ user_sgpr_idx += 2;
+
+ for (unsigned i = 0; i < num_sets; ++i) {
+ if (ctx->options->layout->set[i].layout->shader_stages & (1 << ctx->stage)) {
+ set_userdata_location_indirect(&ctx->shader_info->user_sgprs_locs.descriptor_sets[i], desc_sgpr_idx, 2, i * 8);
+ ctx->descriptor_sets[i] = ac_build_indexed_load_const(&ctx->ac, desc_sets, LLVMConstInt(ctx->i32, i, false));
+
+ } else
+ ctx->descriptor_sets[i] = NULL;
+ }
+ ctx->shader_info->need_indirect_descriptor_sets = true;
}
- if (need_push_constants) {
+ if (ctx->shader_info->info.needs_push_constants) {
ctx->push_constants = LLVMGetParam(ctx->main_function, arg_idx++);
set_userdata_location_shader(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx, 2);
user_sgpr_idx += 2;
switch (ctx->stage) {
case MESA_SHADER_COMPUTE:
- set_userdata_location_shader(ctx, AC_UD_CS_GRID_SIZE, user_sgpr_idx, 3);
- user_sgpr_idx += 3;
- ctx->num_work_groups =
- LLVMGetParam(ctx->main_function, arg_idx++);
+ if (ctx->shader_info->info.cs.grid_components_used) {
+ set_userdata_location_shader(ctx, AC_UD_CS_GRID_SIZE, user_sgpr_idx, ctx->shader_info->info.cs.grid_components_used);
+ user_sgpr_idx += ctx->shader_info->info.cs.grid_components_used;
+ ctx->num_work_groups =
+ LLVMGetParam(ctx->main_function, arg_idx++);
+ }
ctx->workgroup_ids =
LLVMGetParam(ctx->main_function, arg_idx++);
ctx->tg_size =
break;
case MESA_SHADER_VERTEX:
if (!ctx->is_gs_copy_shader) {
- set_userdata_location_shader(ctx, AC_UD_VS_VERTEX_BUFFERS, user_sgpr_idx, 2);
- user_sgpr_idx += 2;
- ctx->vertex_buffers = LLVMGetParam(ctx->main_function, arg_idx++);
- set_userdata_location_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, user_sgpr_idx, 3);
- user_sgpr_idx += 3;
+ if (ctx->shader_info->info.vs.has_vertex_buffers) {
+ set_userdata_location_shader(ctx, AC_UD_VS_VERTEX_BUFFERS, user_sgpr_idx, 2);
+ user_sgpr_idx += 2;
+ ctx->vertex_buffers = LLVMGetParam(ctx->main_function, arg_idx++);
+ }
+ unsigned vs_num = 2;
+ if (ctx->shader_info->info.vs.needs_draw_id)
+ vs_num++;
+
+ set_userdata_location_shader(ctx, AC_UD_VS_BASE_VERTEX_START_INSTANCE, user_sgpr_idx, vs_num);
+ user_sgpr_idx += vs_num;
+
ctx->base_vertex = LLVMGetParam(ctx->main_function, arg_idx++);
ctx->start_instance = LLVMGetParam(ctx->main_function, arg_idx++);
- ctx->draw_index = LLVMGetParam(ctx->main_function, arg_idx++);
+ if (ctx->shader_info->info.vs.needs_draw_id)
+ ctx->draw_index = LLVMGetParam(ctx->main_function, arg_idx++);
}
if (ctx->options->key.vs.as_es)
ctx->es2gs_offset = LLVMGetParam(ctx->main_function, arg_idx++);
+ else if (ctx->options->key.vs.as_ls) {
+ set_userdata_location_shader(ctx, AC_UD_VS_LS_TCS_IN_LAYOUT, user_sgpr_idx, 1);
+ user_sgpr_idx += 1;
+ ctx->ls_out_layout = LLVMGetParam(ctx->main_function, arg_idx++);
+ }
ctx->vertex_id = LLVMGetParam(ctx->main_function, arg_idx++);
if (!ctx->is_gs_copy_shader) {
ctx->rel_auto_id = LLVMGetParam(ctx->main_function, arg_idx++);
ctx->vs_prim_id = LLVMGetParam(ctx->main_function, arg_idx++);
ctx->instance_id = LLVMGetParam(ctx->main_function, arg_idx++);
}
+ if (ctx->options->key.vs.as_ls)
+ declare_tess_lds(ctx);
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ set_userdata_location_shader(ctx, AC_UD_TCS_OFFCHIP_LAYOUT, user_sgpr_idx, 4);
+ user_sgpr_idx += 4;
+ ctx->tcs_offchip_layout = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->tcs_out_offsets = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->tcs_out_layout = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->tcs_in_layout = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->oc_lds = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->tess_factor_offset = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->tcs_patch_id = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->tcs_rel_ids = LLVMGetParam(ctx->main_function, arg_idx++);
+
+ declare_tess_lds(ctx);
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ set_userdata_location_shader(ctx, AC_UD_TES_OFFCHIP_LAYOUT, user_sgpr_idx, 1);
+ user_sgpr_idx += 1;
+ ctx->tcs_offchip_layout = LLVMGetParam(ctx->main_function, arg_idx++);
+ if (ctx->options->key.tes.as_es) {
+ ctx->oc_lds = LLVMGetParam(ctx->main_function, arg_idx++);
+ arg_idx++; // unused
+ ctx->es2gs_offset = LLVMGetParam(ctx->main_function, arg_idx++);
+ } else {
+ arg_idx++; // unused
+ ctx->oc_lds = LLVMGetParam(ctx->main_function, arg_idx++);
+ }
+ ctx->tes_u = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->tes_v = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->tes_rel_patch_id = LLVMGetParam(ctx->main_function, arg_idx++);
+ ctx->tes_patch_id = LLVMGetParam(ctx->main_function, arg_idx++);
break;
case MESA_SHADER_GEOMETRY:
set_userdata_location_shader(ctx, AC_UD_GS_VS_RING_STRIDE_ENTRIES, user_sgpr_idx, 2);
ctx->gs_invocation_id = LLVMGetParam(ctx->main_function, arg_idx++);
break;
case MESA_SHADER_FRAGMENT:
- set_userdata_location_shader(ctx, AC_UD_PS_SAMPLE_POS, user_sgpr_idx, 2);
- user_sgpr_idx += 2;
- ctx->sample_positions = LLVMGetParam(ctx->main_function, arg_idx++);
+ if (ctx->shader_info->info.ps.needs_sample_positions) {
+ set_userdata_location_shader(ctx, AC_UD_PS_SAMPLE_POS_OFFSET, user_sgpr_idx, 1);
+ user_sgpr_idx += 1;
+ ctx->sample_pos_offset = LLVMGetParam(ctx->main_function, arg_idx++);
+ }
ctx->prim_mask = LLVMGetParam(ctx->main_function, arg_idx++);
ctx->persp_sample = LLVMGetParam(ctx->main_function, arg_idx++);
ctx->persp_center = LLVMGetParam(ctx->main_function, arg_idx++);
ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
ctx->v16i8 = LLVMVectorType(ctx->i8, 16);
+ ctx->i1false = LLVMConstInt(ctx->i1, 0, false);
+ ctx->i1true = LLVMConstInt(ctx->i1, 1, false);
ctx->i32zero = LLVMConstInt(ctx->i32, 0, false);
ctx->i32one = LLVMConstInt(ctx->i32, 1, false);
ctx->f32zero = LLVMConstReal(ctx->f32, 0.0);
};
sprintf(name, "%s.f%d", intrin, get_elem_bits(ctx, result_type));
- return ac_emit_llvm_intrinsic(&ctx->ac, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
+ return ac_build_intrinsic(&ctx->ac, name, result_type, params, 1, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_intrin_2f_param(struct nir_to_llvm_context *ctx,
};
sprintf(name, "%s.f%d", intrin, get_elem_bits(ctx, result_type));
- return ac_emit_llvm_intrinsic(&ctx->ac, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
+ return ac_build_intrinsic(&ctx->ac, name, result_type, params, 2, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_intrin_3f_param(struct nir_to_llvm_context *ctx,
};
sprintf(name, "%s.f%d", intrin, get_elem_bits(ctx, result_type));
- return ac_emit_llvm_intrinsic(&ctx->ac, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
+ return ac_build_intrinsic(&ctx->ac, name, result_type, params, 3, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_bcsel(struct nir_to_llvm_context *ctx,
*/
LLVMConstInt(ctx->i32, 1, false),
};
- return ac_emit_llvm_intrinsic(&ctx->ac, "llvm.cttz.i32", ctx->i32, params, 2, AC_FUNC_ATTR_READNONE);
+ return ac_build_intrinsic(&ctx->ac, "llvm.cttz.i32", ctx->i32, params, 2, AC_FUNC_ATTR_READNONE);
}
static LLVMValueRef emit_ifind_msb(struct nir_to_llvm_context *ctx,
LLVMValueRef src0)
{
- return ac_emit_imsb(&ctx->ac, src0, ctx->i32);
+ return ac_build_imsb(&ctx->ac, src0, ctx->i32);
}
static LLVMValueRef emit_ufind_msb(struct nir_to_llvm_context *ctx,
LLVMValueRef src0)
{
- return ac_emit_umsb(&ctx->ac, src0, ctx->i32);
+ return ac_build_umsb(&ctx->ac, src0, ctx->i32);
}
static LLVMValueRef emit_minmax_int(struct nir_to_llvm_context *ctx,
LLVMValueRef params[] = {
fsrc0,
};
- LLVMValueRef floor = ac_emit_llvm_intrinsic(&ctx->ac, intr,
- ctx->f32, params, 1,
- AC_FUNC_ATTR_READNONE);
+ LLVMValueRef floor = ac_build_intrinsic(&ctx->ac, intr,
+ ctx->f32, params, 1,
+ AC_FUNC_ATTR_READNONE);
return LLVMBuildFSub(ctx->builder, fsrc0, floor, "");
}
ret_type = LLVMStructTypeInContext(ctx->context, types,
2, true);
- res = ac_emit_llvm_intrinsic(&ctx->ac, intrin, ret_type,
- params, 2, AC_FUNC_ATTR_READNONE);
+ res = ac_build_intrinsic(&ctx->ac, intrin, ret_type,
+ params, 2, AC_FUNC_ATTR_READNONE);
res = LLVMBuildExtractValue(ctx->builder, res, 1, "");
res = LLVMBuildZExt(ctx->builder, res, ctx->i32, "");
return LLVMBuildAnd(ctx->builder, src0, LLVMBuildBitCast(ctx->builder, LLVMConstReal(ctx->f32, 1.0), ctx->i32, ""), "");
}
+static LLVMValueRef emit_f2f16(struct nir_to_llvm_context *ctx,
+ LLVMValueRef src0)
+{
+ LLVMValueRef result;
+ LLVMValueRef cond;
+
+ src0 = to_float(ctx, src0);
+ result = LLVMBuildFPTrunc(ctx->builder, src0, ctx->f16, "");
+
+ /* TODO SI/CIK options here */
+ if (ctx->options->chip_class >= VI) {
+ LLVMValueRef args[2];
+ /* Check if the result is a denormal - and flush to 0 if so. */
+ args[0] = result;
+ args[1] = LLVMConstInt(ctx->i32, N_SUBNORMAL | P_SUBNORMAL, false);
+ cond = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.class.f16", ctx->i1, args, 2, AC_FUNC_ATTR_READNONE);
+ }
+
+ /* need to convert back up to f32 */
+ result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");
+
+ if (ctx->options->chip_class >= VI)
+ result = LLVMBuildSelect(ctx->builder, cond, ctx->f32zero, result, "");
+
+ return result;
+}
+
static LLVMValueRef emit_umul_high(struct nir_to_llvm_context *ctx,
LLVMValueRef src0, LLVMValueRef src1)
{
}
static LLVMValueRef emit_bitfield_extract(struct nir_to_llvm_context *ctx,
- const char *intrin,
+ bool is_signed,
LLVMValueRef srcs[3])
{
LLVMValueRef result;
LLVMValueRef icond = LLVMBuildICmp(ctx->builder, LLVMIntEQ, srcs[2], LLVMConstInt(ctx->i32, 32, false), "");
- result = ac_emit_llvm_intrinsic(&ctx->ac, intrin, ctx->i32, srcs, 3, AC_FUNC_ATTR_READNONE);
+ result = ac_build_bfe(&ctx->ac, srcs[0], srcs[1], srcs[2], is_signed);
result = LLVMBuildSelect(ctx->builder, icond, srcs[0], result, "");
return result;
}
else
idx = 2;
- result = ac_emit_ddxy(&ctx->ac, ctx->has_ds_bpermute,
+ result = ac_build_ddxy(&ctx->ac, ctx->has_ds_bpermute,
mask, idx, ctx->lds,
src0);
return result;
case nir_op_fmod:
src[0] = to_float(ctx, src[0]);
src[1] = to_float(ctx, src[1]);
- result = ac_emit_fdiv(&ctx->ac, src[0], src[1]);
+ result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
result = emit_intrin_1f_param(ctx, "llvm.floor",
to_float_type(ctx, def_type), result);
result = LLVMBuildFMul(ctx->builder, src[1] , result, "");
case nir_op_fdiv:
src[0] = to_float(ctx, src[0]);
src[1] = to_float(ctx, src[1]);
- result = ac_emit_fdiv(&ctx->ac, src[0], src[1]);
+ result = ac_build_fdiv(&ctx->ac, src[0], src[1]);
break;
case nir_op_frcp:
src[0] = to_float(ctx, src[0]);
- result = ac_emit_fdiv(&ctx->ac, ctx->f32one, src[0]);
+ result = ac_build_fdiv(&ctx->ac, ctx->f32one, src[0]);
break;
case nir_op_iand:
result = LLVMBuildAnd(ctx->builder, src[0], src[1], "");
case nir_op_frsq:
result = emit_intrin_1f_param(ctx, "llvm.sqrt",
to_float_type(ctx, def_type), src[0]);
- result = ac_emit_fdiv(&ctx->ac, ctx->f32one, result);
+ result = ac_build_fdiv(&ctx->ac, ctx->f32one, result);
break;
case nir_op_fpow:
result = emit_intrin_2f_param(ctx, "llvm.pow",
case nir_op_fmax:
result = emit_intrin_2f_param(ctx, "llvm.maxnum",
to_float_type(ctx, def_type), src[0], src[1]);
+ if (instr->dest.dest.ssa.bit_size == 32)
+ result = emit_intrin_1f_param(ctx, "llvm.canonicalize",
+ to_float_type(ctx, def_type),
+ result);
break;
case nir_op_fmin:
result = emit_intrin_2f_param(ctx, "llvm.minnum",
to_float_type(ctx, def_type), src[0], src[1]);
+ if (instr->dest.dest.ssa.bit_size == 32)
+ result = emit_intrin_1f_param(ctx, "llvm.canonicalize",
+ to_float_type(ctx, def_type),
+ result);
break;
case nir_op_ffma:
result = emit_intrin_3f_param(ctx, "llvm.fma",
to_float_type(ctx, def_type), src[0], src[1], src[2]);
break;
case nir_op_ibitfield_extract:
- result = emit_bitfield_extract(ctx, "llvm.AMDGPU.bfe.i32", src);
+ result = emit_bitfield_extract(ctx, true, src);
break;
case nir_op_ubitfield_extract:
- result = emit_bitfield_extract(ctx, "llvm.AMDGPU.bfe.u32", src);
+ result = emit_bitfield_extract(ctx, false, src);
break;
case nir_op_bitfield_insert:
result = emit_bitfield_insert(ctx, src[0], src[1], src[2], src[3]);
break;
case nir_op_bitfield_reverse:
- result = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.bitreverse.i32", ctx->i32, src, 1, AC_FUNC_ATTR_READNONE);
+ result = ac_build_intrinsic(&ctx->ac, "llvm.bitreverse.i32", ctx->i32, src, 1, AC_FUNC_ATTR_READNONE);
break;
case nir_op_bit_count:
- result = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.ctpop.i32", ctx->i32, src, 1, AC_FUNC_ATTR_READNONE);
+ result = ac_build_intrinsic(&ctx->ac, "llvm.ctpop.i32", ctx->i32, src, 1, AC_FUNC_ATTR_READNONE);
break;
case nir_op_vec2:
case nir_op_vec3:
src[i] = to_integer(ctx, src[i]);
result = ac_build_gather_values(&ctx->ac, src, num_components);
break;
- case nir_op_d2i:
- case nir_op_f2i:
+ case nir_op_f2i32:
+ case nir_op_f2i64:
src[0] = to_float(ctx, src[0]);
result = LLVMBuildFPToSI(ctx->builder, src[0], def_type, "");
break;
- case nir_op_d2u:
- case nir_op_f2u:
+ case nir_op_f2u32:
+ case nir_op_f2u64:
src[0] = to_float(ctx, src[0]);
result = LLVMBuildFPToUI(ctx->builder, src[0], def_type, "");
break;
- case nir_op_i2d:
- case nir_op_i2f:
+ case nir_op_i2f32:
+ case nir_op_i2f64:
result = LLVMBuildSIToFP(ctx->builder, src[0], to_float_type(ctx, def_type), "");
break;
- case nir_op_u2d:
- case nir_op_u2f:
+ case nir_op_u2f32:
+ case nir_op_u2f64:
result = LLVMBuildUIToFP(ctx->builder, src[0], to_float_type(ctx, def_type), "");
break;
- case nir_op_f2d:
+ case nir_op_f2f64:
result = LLVMBuildFPExt(ctx->builder, src[0], to_float_type(ctx, def_type), "");
break;
- case nir_op_d2f:
+ case nir_op_f2f32:
result = LLVMBuildFPTrunc(ctx->builder, src[0], to_float_type(ctx, def_type), "");
break;
+ case nir_op_u2u32:
+ case nir_op_u2u64:
+ if (get_elem_bits(ctx, LLVMTypeOf(src[0])) < get_elem_bits(ctx, def_type))
+ result = LLVMBuildZExt(ctx->builder, src[0], def_type, "");
+ else
+ result = LLVMBuildTrunc(ctx->builder, src[0], def_type, "");
+ break;
+ case nir_op_i2i32:
+ case nir_op_i2i64:
+ if (get_elem_bits(ctx, LLVMTypeOf(src[0])) < get_elem_bits(ctx, def_type))
+ result = LLVMBuildSExt(ctx->builder, src[0], def_type, "");
+ else
+ result = LLVMBuildTrunc(ctx->builder, src[0], def_type, "");
+ break;
case nir_op_bcsel:
result = emit_bcsel(ctx, src[0], src[1], src[2]);
break;
result = emit_b2f(ctx, src[0]);
break;
case nir_op_fquantize2f16:
- src[0] = to_float(ctx, src[0]);
- result = LLVMBuildFPTrunc(ctx->builder, src[0], ctx->f16, "");
- /* need to convert back up to f32 */
- result = LLVMBuildFPExt(ctx->builder, result, ctx->f32, "");
+ result = emit_f2f16(ctx, src[0]);
break;
case nir_op_umul_high:
result = emit_umul_high(ctx, src[0], src[1]);
}
static LLVMValueRef radv_lower_gather4_integer(struct nir_to_llvm_context *ctx,
- struct ac_tex_info *tinfo,
- nir_tex_instr *instr,
- const char *intr_name,
- unsigned coord_vgpr_index)
+ struct ac_image_args *args,
+ nir_tex_instr *instr)
{
- LLVMValueRef coord = tinfo->args[0];
+ enum glsl_base_type stype = glsl_get_sampler_result_type(instr->texture->var->type);
+ LLVMValueRef coord = args->addr;
LLVMValueRef half_texel[2];
+ LLVMValueRef compare_cube_wa;
+ LLVMValueRef result;
int c;
+ unsigned coord_vgpr_index = (unsigned)args->offset + (unsigned)args->compare;
//TODO Rect
{
- LLVMValueRef txq_args[10];
- int txq_arg_count = 0;
- LLVMValueRef size;
- bool da = instr->is_array || instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE;
- txq_args[txq_arg_count++] = LLVMConstInt(ctx->i32, 0, false);
- txq_args[txq_arg_count++] = tinfo->args[1];
- txq_args[txq_arg_count++] = LLVMConstInt(ctx->i32, 0xf, 0); /* dmask */
- txq_args[txq_arg_count++] = LLVMConstInt(ctx->i32, 0, 0); /* unorm */
- txq_args[txq_arg_count++] = LLVMConstInt(ctx->i32, 0, 0); /* r128 */
- txq_args[txq_arg_count++] = LLVMConstInt(ctx->i32, da ? 1 : 0, 0);
- txq_args[txq_arg_count++] = LLVMConstInt(ctx->i32, 0, 0); /* glc */
- txq_args[txq_arg_count++] = LLVMConstInt(ctx->i32, 0, 0); /* slc */
- txq_args[txq_arg_count++] = LLVMConstInt(ctx->i32, 0, 0); /* tfe */
- txq_args[txq_arg_count++] = LLVMConstInt(ctx->i32, 0, 0); /* lwe */
- size = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.SI.getresinfo.i32", ctx->v4i32,
- txq_args, txq_arg_count,
- AC_FUNC_ATTR_READNONE);
+ struct ac_image_args txq_args = { 0 };
+
+ txq_args.da = instr->is_array || instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE;
+ txq_args.opcode = ac_image_get_resinfo;
+ txq_args.dmask = 0xf;
+ txq_args.addr = ctx->i32zero;
+ txq_args.resource = args->resource;
+ LLVMValueRef size = ac_build_image_opcode(&ctx->ac, &txq_args);
for (c = 0; c < 2; c++) {
half_texel[c] = LLVMBuildExtractElement(ctx->builder, size,
LLVMConstInt(ctx->i32, c, false), "");
half_texel[c] = LLVMBuildUIToFP(ctx->builder, half_texel[c], ctx->f32, "");
- half_texel[c] = ac_emit_fdiv(&ctx->ac, ctx->f32one, half_texel[c]);
+ half_texel[c] = ac_build_fdiv(&ctx->ac, ctx->f32one, half_texel[c]);
half_texel[c] = LLVMBuildFMul(ctx->builder, half_texel[c],
LLVMConstReal(ctx->f32, -0.5), "");
}
}
+ LLVMValueRef orig_coords = args->addr;
+
for (c = 0; c < 2; c++) {
LLVMValueRef tmp;
LLVMValueRef index = LLVMConstInt(ctx->i32, coord_vgpr_index + c, 0);
coord = LLVMBuildInsertElement(ctx->builder, coord, tmp, index, "");
}
- tinfo->args[0] = coord;
- return ac_emit_llvm_intrinsic(&ctx->ac, intr_name, tinfo->dst_type, tinfo->args, tinfo->arg_count,
- AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_NOUNWIND);
+ /*
+ * Apparently cube maps have an issue with integer types that the
+ * half-texel coordinate workaround above doesn't solve, so if the
+ * format is 8_8_8_8 and the type is integer, apply an alternate
+ * workaround: sample using a scaled type and convert the result.
+ * This is taken from amdgpu-pro shaders.
+ */
+ /* NOTE: this produces some ugly code compared to amdgpu-pro;
+ * LLVM ends up dumping SGPRs into VGPRs to deal with the compare/select,
+ * and then reads them back. -pro generates two selects:
+ * one s_cmp for the descriptor rewriting,
+ * one v_cmp for the coordinate and result changes.
+ */
+ if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
+ LLVMValueRef tmp, tmp2;
+
+ /* workaround 8/8/8/8 uint/sint cube gather bug */
+ /* first detect it then change to a scaled read and f2i */
+ tmp = LLVMBuildExtractElement(ctx->builder, args->resource, ctx->i32one, "");
+ tmp2 = tmp;
+
+ /* extract the DATA_FORMAT */
+ tmp = ac_build_bfe(&ctx->ac, tmp, LLVMConstInt(ctx->i32, 20, false),
+ LLVMConstInt(ctx->i32, 6, false), false);
+
+ /* is the DATA_FORMAT == 8_8_8_8 */
+ compare_cube_wa = LLVMBuildICmp(ctx->builder, LLVMIntEQ, tmp, LLVMConstInt(ctx->i32, V_008F14_IMG_DATA_FORMAT_8_8_8_8, false), "");
+
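+ /* The magic numbers below are NUM_FORMAT values shifted into bits
+ * [26:29] of the resource dword: USCALED (2) << 26 = 0x8000000,
+ * UINT (4) << 26 = 0x10000000, SSCALED (3) << 26 = 0xc000000 and
+ * SINT (5) << 26 = 0x14000000.
+ */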
+ if (stype == GLSL_TYPE_UINT)
+ /* Create a NUM FORMAT - 0x2 or 0x4 - USCALED or UINT */
+ tmp = LLVMBuildSelect(ctx->builder, compare_cube_wa, LLVMConstInt(ctx->i32, 0x8000000, false),
+ LLVMConstInt(ctx->i32, 0x10000000, false), "");
+ else
+ /* Create a NUM FORMAT - 0x3 or 0x5 - SSCALED or SINT */
+ tmp = LLVMBuildSelect(ctx->builder, compare_cube_wa, LLVMConstInt(ctx->i32, 0xc000000, false),
+ LLVMConstInt(ctx->i32, 0x14000000, false), "");
+
+ /* replace the NUM FORMAT in the descriptor */
+ tmp2 = LLVMBuildAnd(ctx->builder, tmp2, LLVMConstInt(ctx->i32, C_008F14_NUM_FORMAT_GFX6, false), "");
+ tmp2 = LLVMBuildOr(ctx->builder, tmp2, tmp, "");
+
+ args->resource = LLVMBuildInsertElement(ctx->builder, args->resource, tmp2, ctx->i32one, "");
+
+ /* don't modify the coordinates for this case */
+ coord = LLVMBuildSelect(ctx->builder, compare_cube_wa, orig_coords, coord, "");
+ }
+ args->addr = coord;
+ result = ac_build_image_opcode(&ctx->ac, args);
+
+ if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
+ LLVMValueRef tmp, tmp2;
+
+ /* if the cube workaround is in place, f2i the result. */
+ for (c = 0; c < 4; c++) {
+ tmp = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, c, false), "");
+ if (stype == GLSL_TYPE_UINT)
+ tmp2 = LLVMBuildFPToUI(ctx->builder, tmp, ctx->i32, "");
+ else
+ tmp2 = LLVMBuildFPToSI(ctx->builder, tmp, ctx->i32, "");
+ tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->i32, "");
+ tmp2 = LLVMBuildBitCast(ctx->builder, tmp2, ctx->i32, "");
+ tmp = LLVMBuildSelect(ctx->builder, compare_cube_wa, tmp2, tmp, "");
+ tmp = LLVMBuildBitCast(ctx->builder, tmp, ctx->f32, "");
+ result = LLVMBuildInsertElement(ctx->builder, result, tmp, LLVMConstInt(ctx->i32, c, false), "");
+ }
+ }
+ return result;
}
static LLVMValueRef build_tex_intrinsic(struct nir_to_llvm_context *ctx,
nir_tex_instr *instr,
- struct ac_tex_info *tinfo)
-{
- const char *name = "llvm.SI.image.sample";
- const char *infix = "";
- char intr_name[127];
- char type[64];
- bool is_shadow = instr->is_shadow;
- bool has_offset = tinfo->has_offset;
+ bool lod_is_zero,
+ struct ac_image_args *args)
+{
+ if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
+ return ac_build_buffer_load_format(&ctx->ac,
+ args->resource,
+ args->addr,
+ LLVMConstInt(ctx->i32, 0, false),
+ true);
+ }
+
+ args->opcode = ac_image_sample;
+ args->compare = instr->is_shadow;
+
switch (instr->op) {
case nir_texop_txf:
case nir_texop_txf_ms:
case nir_texop_samples_identical:
- name = instr->sampler_dim == GLSL_SAMPLER_DIM_MS ? "llvm.SI.image.load" :
- instr->sampler_dim == GLSL_SAMPLER_DIM_BUF ? "llvm.SI.vs.load.input" :
- "llvm.SI.image.load.mip";
- is_shadow = false;
- has_offset = false;
+ args->opcode = instr->sampler_dim == GLSL_SAMPLER_DIM_MS ? ac_image_load : ac_image_load_mip;
+ args->compare = false;
+ args->offset = false;
break;
case nir_texop_txb:
- infix = ".b";
+ args->bias = true;
break;
case nir_texop_txl:
- infix = ".l";
+ if (lod_is_zero)
+ args->level_zero = true;
+ else
+ args->lod = true;
break;
case nir_texop_txs:
- name = "llvm.SI.getresinfo";
- break;
case nir_texop_query_levels:
- name = "llvm.SI.getresinfo";
+ args->opcode = ac_image_get_resinfo;
break;
case nir_texop_tex:
if (ctx->stage != MESA_SHADER_FRAGMENT)
- infix = ".lz";
+ args->level_zero = true;
break;
case nir_texop_txd:
- infix = ".d";
+ args->deriv = true;
break;
case nir_texop_tg4:
- name = "llvm.SI.gather4";
- infix = ".lz";
+ args->opcode = ac_image_gather4;
+ args->level_zero = true;
break;
case nir_texop_lod:
- name = "llvm.SI.getlod";
- is_shadow = false;
- has_offset = false;
+ args->opcode = ac_image_get_lod;
+ args->compare = false;
+ args->offset = false;
break;
default:
break;
}
- build_int_type_name(LLVMTypeOf(tinfo->args[0]), type, sizeof(type));
- sprintf(intr_name, "%s%s%s%s.%s", name, is_shadow ? ".c" : "", infix,
- has_offset ? ".o" : "", type);
-
if (instr->op == nir_texop_tg4) {
enum glsl_base_type stype = glsl_get_sampler_result_type(instr->texture->var->type);
if (stype == GLSL_TYPE_UINT || stype == GLSL_TYPE_INT) {
- return radv_lower_gather4_integer(ctx, tinfo, instr, intr_name,
- (int)has_offset + (int)is_shadow);
+ return radv_lower_gather4_integer(ctx, args, instr);
}
}
- return ac_emit_llvm_intrinsic(&ctx->ac, intr_name, tinfo->dst_type, tinfo->args, tinfo->arg_count,
- AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_NOUNWIND);
-
+ return ac_build_image_opcode(&ctx->ac, args);
}
static LLVMValueRef visit_vulkan_resource_index(struct nir_to_llvm_context *ctx,
unsigned desc_set = nir_intrinsic_desc_set(instr);
unsigned binding = nir_intrinsic_binding(instr);
LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
- struct radv_descriptor_set_layout *layout = ctx->options->layout->set[desc_set].layout;
+ struct radv_pipeline_layout *pipeline_layout = ctx->options->layout;
+ struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
unsigned base_offset = layout->binding[binding].offset;
LLVMValueRef offset, stride;
if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
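+ /* Dynamic descriptors live after the push constants: descriptor i is
+ * a 16-byte entry at push_constant_size + 16 * i, where i is the set's
+ * dynamic_offset_start plus the binding's dynamic_offset_offset. */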
+ unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
+ layout->binding[binding].dynamic_offset_offset;
desc_ptr = ctx->push_constants;
- base_offset = ctx->options->layout->push_constant_size;
- base_offset += 16 * layout->binding[binding].dynamic_offset_offset;
+ base_offset = pipeline_layout->push_constant_size + 16 * idx;
stride = LLVMConstInt(ctx->i32, 16, false);
} else
stride = LLVMConstInt(ctx->i32, layout->binding[binding].size, false);
params[1] = get_src(ctx, instr->src[1]);
params[2] = LLVMConstInt(ctx->i32, 0, false); /* vindex */
- params[4] = LLVMConstInt(ctx->i1, 0, false); /* glc */
- params[5] = LLVMConstInt(ctx->i1, 0, false); /* slc */
+ params[4] = ctx->i1false; /* glc */
+ params[5] = ctx->i1false; /* slc */
if (components_32bit > 1)
data_type = LLVMVectorType(ctx->f32, components_32bit);
}
params[0] = data;
params[3] = offset;
- ac_emit_llvm_intrinsic(&ctx->ac, store_name,
- ctx->voidt, params, 6, 0);
+ ac_build_intrinsic(&ctx->ac, store_name,
+ ctx->voidt, params, 6, 0);
}
}
params[arg_count++] = get_src(ctx, instr->src[0]);
params[arg_count++] = LLVMConstInt(ctx->i32, 0, false); /* vindex */
params[arg_count++] = get_src(ctx, instr->src[1]); /* voffset */
- params[arg_count++] = LLVMConstInt(ctx->i1, 0, false); /* slc */
+ params[arg_count++] = ctx->i1false; /* slc */
switch (instr->intrinsic) {
case nir_intrinsic_ssbo_atomic_add:
abort();
}
- return ac_emit_llvm_intrinsic(&ctx->ac, name, ctx->i32, params, arg_count, 0);
+ return ac_build_intrinsic(&ctx->ac, name, ctx->i32, params, arg_count, 0);
}
static LLVMValueRef visit_load_buffer(struct nir_to_llvm_context *ctx,
get_src(ctx, instr->src[0]),
LLVMConstInt(ctx->i32, 0, false),
offset,
- LLVMConstInt(ctx->i1, 0, false),
- LLVMConstInt(ctx->i1, 0, false),
+ ctx->i1false,
+ ctx->i1false,
};
- results[i] = ac_emit_llvm_intrinsic(&ctx->ac, load_name, data_type, params, 5, 0);
+ results[i] = ac_build_intrinsic(&ctx->ac, load_name, data_type, params, 5, 0);
}
LLVMBuildAdd(ctx->builder, LLVMConstInt(ctx->i32, 4 * i, 0),
offset, "")
};
- results[i] = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.SI.load.const", ctx->f32,
- params, 2, AC_FUNC_ATTR_READNONE);
+ results[i] = ac_build_intrinsic(&ctx->ac, "llvm.SI.load.const", ctx->f32,
+ params, 2,
+ AC_FUNC_ATTR_READNONE |
+ AC_FUNC_ATTR_LEGACY);
}
}
static void
-radv_get_deref_offset(struct nir_to_llvm_context *ctx, nir_deref *tail,
+radv_get_deref_offset(struct nir_to_llvm_context *ctx, nir_deref_var *deref,
bool vs_in, unsigned *vertex_index_out,
+ LLVMValueRef *vertex_index_ref,
unsigned *const_out, LLVMValueRef *indir_out)
{
unsigned const_offset = 0;
+ nir_deref *tail = &deref->deref;
LLVMValueRef offset = NULL;
- if (vertex_index_out != NULL) {
+ if (vertex_index_out != NULL || vertex_index_ref != NULL) {
tail = tail->child;
nir_deref_array *deref_array = nir_deref_as_array(tail);
- *vertex_index_out = deref_array->base_offset;
+ if (vertex_index_out)
+ *vertex_index_out = deref_array->base_offset;
+
+ if (vertex_index_ref) {
+ LLVMValueRef vtx = LLVMConstInt(ctx->i32, deref_array->base_offset, false);
+ if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+ vtx = LLVMBuildAdd(ctx->builder, vtx, get_src(ctx, deref_array->indirect), "");
+ }
+ *vertex_index_ref = vtx;
+ }
+ }
+
+ if (deref->var->data.compact) {
+ assert(tail->child->deref_type == nir_deref_type_array);
+ assert(glsl_type_is_scalar(glsl_without_array(deref->var->type)));
+ nir_deref_array *deref_array = nir_deref_as_array(tail->child);
+ /* We always lower indirect dereferences for "compact" array vars. */
+ assert(deref_array->deref_array_type == nir_deref_array_type_direct);
+
+ const_offset = deref_array->base_offset;
+ goto out;
}
while (tail->child != NULL) {
unreachable("unsupported deref type");
}
-
+out:
if (const_offset && offset)
offset = LLVMBuildAdd(ctx->builder, offset,
LLVMConstInt(ctx->i32, const_offset, 0),
*indir_out = offset;
}
+static LLVMValueRef
+lds_load(struct nir_to_llvm_context *ctx,
+ LLVMValueRef dw_addr)
+{
+ LLVMValueRef value;
+ value = ac_build_indexed_load(&ctx->ac, ctx->lds, dw_addr, false);
+ return value;
+}
+
+static void
+lds_store(struct nir_to_llvm_context *ctx,
+ LLVMValueRef dw_addr, LLVMValueRef value)
+{
+ value = LLVMBuildBitCast(ctx->builder, value, ctx->i32, "");
+ ac_build_indexed_store(&ctx->ac, ctx->lds,
+ dw_addr, value);
+}
+
+/* The offchip buffer layout for TCS->TES is
+ *
+ * - attribute 0 of patch 0 vertex 0
+ * - attribute 0 of patch 0 vertex 1
+ * - attribute 0 of patch 0 vertex 2
+ * ...
+ * - attribute 0 of patch 1 vertex 0
+ * - attribute 0 of patch 1 vertex 1
+ * ...
+ * - attribute 1 of patch 0 vertex 0
+ * - attribute 1 of patch 0 vertex 1
+ * ...
+ * - per patch attribute 0 of patch 0
+ * - per patch attribute 0 of patch 1
+ * ...
+ *
+ * Note that every attribute has 4 components.
+ */
+static LLVMValueRef get_tcs_tes_buffer_address(struct nir_to_llvm_context *ctx,
+ LLVMValueRef vertex_index,
+ LLVMValueRef param_index)
+{
+ LLVMValueRef base_addr, vertices_per_patch, num_patches, total_vertices;
+ LLVMValueRef param_stride, constant16;
+ LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
+
+ vertices_per_patch = unpack_param(ctx, ctx->tcs_offchip_layout, 9, 6);
+ num_patches = unpack_param(ctx, ctx->tcs_offchip_layout, 0, 9);
+ total_vertices = LLVMBuildMul(ctx->builder, vertices_per_patch,
+ num_patches, "");
+
+ constant16 = LLVMConstInt(ctx->i32, 16, false);
+ if (vertex_index) {
+ base_addr = LLVMBuildMul(ctx->builder, rel_patch_id,
+ vertices_per_patch, "");
+
+ base_addr = LLVMBuildAdd(ctx->builder, base_addr,
+ vertex_index, "");
+
+ param_stride = total_vertices;
+ } else {
+ base_addr = rel_patch_id;
+ param_stride = num_patches;
+ }
+
+ base_addr = LLVMBuildAdd(ctx->builder, base_addr,
+ LLVMBuildMul(ctx->builder, param_index,
+ param_stride, ""), "");
+
+ base_addr = LLVMBuildMul(ctx->builder, base_addr, constant16, "");
+
+ if (!vertex_index) {
+ LLVMValueRef patch_data_offset =
+ unpack_param(ctx, ctx->tcs_offchip_layout, 16, 16);
+
+ base_addr = LLVMBuildAdd(ctx->builder, base_addr,
+ patch_data_offset, "");
+ }
+ return base_addr;
+}
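+
+/* A worked example of get_tcs_tes_buffer_address (illustrative values):
+ * with num_patches = 8 (bits [0:8] of tcs_offchip_layout) and
+ * vertices_per_patch = 3 (bits [9:14]), a per-vertex attribute with
+ * param_index = 2, rel_patch_id = 1 and vertex_index = 0 lands at
+ *
+ *   ((1 * 3 + 0) + 2 * (3 * 8)) * 16 = 816 bytes
+ *
+ * i.e. attributes are attribute-major across all vertices of all patches,
+ * one vec4 (16 bytes) per attribute, matching the layout comment above.
+ */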
+
+static LLVMValueRef get_tcs_tes_buffer_address_params(struct nir_to_llvm_context *ctx,
+ unsigned param,
+ unsigned const_index,
+ bool is_compact,
+ LLVMValueRef vertex_index,
+ LLVMValueRef indir_index)
+{
+ LLVMValueRef param_index;
+
+ if (indir_index)
+ param_index = LLVMBuildAdd(ctx->builder, LLVMConstInt(ctx->i32, param, false),
+ indir_index, "");
+ else {
+ if (const_index && !is_compact)
+ param += const_index;
+ param_index = LLVMConstInt(ctx->i32, param, false);
+ }
+ return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
+}
+
+static void
+mark_tess_output(struct nir_to_llvm_context *ctx,
+ bool is_patch, uint32_t param)
+
+{
+ if (is_patch) {
+ ctx->tess_patch_outputs_written |= (1ull << param);
+ } else
+ ctx->tess_outputs_written |= (1ull << param);
+}
+
+static LLVMValueRef
+get_dw_address(struct nir_to_llvm_context *ctx,
+ LLVMValueRef dw_addr,
+ unsigned param,
+ unsigned const_index,
+ bool compact_const_index,
+ LLVMValueRef vertex_index,
+ LLVMValueRef stride,
+ LLVMValueRef indir_index)
+
+{
+
+ if (vertex_index) {
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
+ LLVMBuildMul(ctx->builder,
+ vertex_index,
+ stride, ""), "");
+ }
+
+ if (indir_index)
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
+ LLVMBuildMul(ctx->builder, indir_index,
+ LLVMConstInt(ctx->i32, 4, false), ""), "");
+ else if (const_index && !compact_const_index)
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
+ LLVMConstInt(ctx->i32, const_index, false), "");
+
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
+ LLVMConstInt(ctx->i32, param * 4, false), "");
+
+ if (const_index && compact_const_index)
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
+ LLVMConstInt(ctx->i32, const_index, false), "");
+ return dw_addr;
+}
+
+static LLVMValueRef
+load_tcs_input(struct nir_to_llvm_context *ctx,
+ nir_intrinsic_instr *instr)
+{
+ LLVMValueRef dw_addr, stride;
+ unsigned const_index;
+ LLVMValueRef vertex_index;
+ LLVMValueRef indir_index;
+ unsigned param;
+ LLVMValueRef value[4], result;
+ const bool per_vertex = nir_is_per_vertex_io(instr->variables[0]->var, ctx->stage);
+ const bool is_compact = instr->variables[0]->var->data.compact;
+ param = shader_io_get_unique_index(instr->variables[0]->var->data.location);
+ radv_get_deref_offset(ctx, instr->variables[0],
+ false, NULL, per_vertex ? &vertex_index : NULL,
+ &const_index, &indir_index);
+
+ stride = unpack_param(ctx, ctx->tcs_in_layout, 13, 8);
+ dw_addr = get_tcs_in_current_patch_offset(ctx);
+ dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
+ indir_index);
+
+ for (unsigned i = 0; i < instr->num_components; i++) {
+ value[i] = lds_load(ctx, dw_addr);
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
+ ctx->i32one, "");
+ }
+ result = ac_build_gather_values(&ctx->ac, value, instr->num_components);
+ result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx, &instr->dest.ssa), "");
+ return result;
+}
+
+static LLVMValueRef
+load_tcs_output(struct nir_to_llvm_context *ctx,
+ nir_intrinsic_instr *instr)
+{
+ LLVMValueRef dw_addr, stride;
+ LLVMValueRef value[4], result;
+ LLVMValueRef vertex_index = NULL;
+ LLVMValueRef indir_index = NULL;
+ unsigned const_index = 0;
+ unsigned param;
+ const bool per_vertex = nir_is_per_vertex_io(instr->variables[0]->var, ctx->stage);
+ const bool is_compact = instr->variables[0]->var->data.compact;
+ param = shader_io_get_unique_index(instr->variables[0]->var->data.location);
+ radv_get_deref_offset(ctx, instr->variables[0],
+ false, NULL, per_vertex ? &vertex_index : NULL,
+ &const_index, &indir_index);
+
+ if (!instr->variables[0]->var->data.patch) {
+ stride = unpack_param(ctx, ctx->tcs_out_layout, 13, 8);
+ dw_addr = get_tcs_out_current_patch_offset(ctx);
+ } else {
+ dw_addr = get_tcs_out_current_patch_data_offset(ctx);
+ }
+
+ dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
+ indir_index);
+
+ for (unsigned i = 0; i < instr->num_components; i++) {
+ value[i] = lds_load(ctx, dw_addr);
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
+ ctx->i32one, "");
+ }
+ result = ac_build_gather_values(&ctx->ac, value, instr->num_components);
+ result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx, &instr->dest.ssa), "");
+ return result;
+}
+
+static void
+store_tcs_output(struct nir_to_llvm_context *ctx,
+ nir_intrinsic_instr *instr,
+ LLVMValueRef src,
+ unsigned writemask)
+{
+ LLVMValueRef stride, dw_addr;
+ LLVMValueRef buf_addr = NULL;
+ LLVMValueRef vertex_index = NULL;
+ LLVMValueRef indir_index = NULL;
+ unsigned const_index = 0;
+ unsigned param;
+ const bool per_vertex = nir_is_per_vertex_io(instr->variables[0]->var, ctx->stage);
+ const bool is_compact = instr->variables[0]->var->data.compact;
+
+ radv_get_deref_offset(ctx, instr->variables[0],
+ false, NULL, per_vertex ? &vertex_index : NULL,
+ &const_index, &indir_index);
+
+ param = shader_io_get_unique_index(instr->variables[0]->var->data.location);
+ if (instr->variables[0]->var->data.location == VARYING_SLOT_CLIP_DIST0 &&
+ is_compact && const_index > 3) {
+ const_index -= 3;
+ param++;
+ }
+
+ if (!instr->variables[0]->var->data.patch) {
+ stride = unpack_param(ctx, ctx->tcs_out_layout, 13, 8);
+ dw_addr = get_tcs_out_current_patch_offset(ctx);
+ } else {
+ dw_addr = get_tcs_out_current_patch_data_offset(ctx);
+ }
+
+ mark_tess_output(ctx, instr->variables[0]->var->data.patch, param);
+
+ dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
+ indir_index);
+ buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
+ vertex_index, indir_index);
+
+ unsigned base = is_compact ? const_index : 0;
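+ /* Keep every written channel in LDS, and mirror all non-tess-factor
+ * outputs to the offchip buffer for the TES; tess factors stay in LDS
+ * (presumably read back when the tess factor ring is written). When
+ * the writemask covers a full vec4, a single 4-dword buffer store is
+ * emitted after the loop instead of per-channel stores.
+ */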
+ for (unsigned chan = 0; chan < 8; chan++) {
+ bool is_tess_factor = false;
+ if (!(writemask & (1 << chan)))
+ continue;
+ LLVMValueRef value = llvm_extract_elem(ctx, src, chan);
+
+ lds_store(ctx, dw_addr, value);
+
+ if (instr->variables[0]->var->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
+ instr->variables[0]->var->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)
+ is_tess_factor = true;
+
+ if (!is_tess_factor && writemask != 0xF)
+ ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
+ buf_addr, ctx->oc_lds,
+ 4 * (base + chan), 1, 0, true, false);
+
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr,
+ ctx->i32one, "");
+ }
+
+ if (writemask == 0xF) {
+ ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
+ buf_addr, ctx->oc_lds,
+ (base * 4), 1, 0, true, false);
+ }
+}
+
+static LLVMValueRef
+load_tes_input(struct nir_to_llvm_context *ctx,
+ nir_intrinsic_instr *instr)
+{
+ LLVMValueRef buf_addr;
+ LLVMValueRef result;
+ LLVMValueRef vertex_index = NULL;
+ LLVMValueRef indir_index = NULL;
+ unsigned const_index = 0;
+ unsigned param;
+ const bool per_vertex = nir_is_per_vertex_io(instr->variables[0]->var, ctx->stage);
+ const bool is_compact = instr->variables[0]->var->data.compact;
+
+ radv_get_deref_offset(ctx, instr->variables[0],
+ false, NULL, per_vertex ? &vertex_index : NULL,
+ &const_index, &indir_index);
+ param = shader_io_get_unique_index(instr->variables[0]->var->data.location);
+ if (instr->variables[0]->var->data.location == VARYING_SLOT_CLIP_DIST0 &&
+ is_compact && const_index > 3) {
+ const_index -= 3;
+ param++;
+ }
+ buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
+ is_compact, vertex_index, indir_index);
+
+ result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, instr->num_components, NULL,
+ buf_addr, ctx->oc_lds, is_compact ? (4 * const_index) : 0, 1, 0, true, false);
+ result = trim_vector(ctx, result, instr->num_components);
+ result = LLVMBuildBitCast(ctx->builder, result, get_def_type(ctx, &instr->dest.ssa), "");
+ return result;
+}
+
static LLVMValueRef
load_gs_input(struct nir_to_llvm_context *ctx,
nir_intrinsic_instr *instr)
unsigned param, vtx_offset_param;
LLVMValueRef value[4], result;
unsigned vertex_index;
- unsigned cull_offset = 0;
- radv_get_deref_offset(ctx, &instr->variables[0]->deref,
- false, &vertex_index,
+ radv_get_deref_offset(ctx, instr->variables[0],
+ false, &vertex_index, NULL,
&const_index, &indir_index);
vtx_offset_param = vertex_index;
assert(vtx_offset_param < 6);
LLVMConstInt(ctx->i32, 4, false), "");
param = shader_io_get_unique_index(instr->variables[0]->var->data.location);
- if (instr->variables[0]->var->data.location == VARYING_SLOT_CULL_DIST0)
- cull_offset += ctx->num_input_clips;
for (unsigned i = 0; i < instr->num_components; i++) {
args[0] = ctx->esgs_ring;
args[1] = vtx_offset;
- args[2] = LLVMConstInt(ctx->i32, (param * 4 + i + const_index + cull_offset) * 256, false);
+ args[2] = LLVMConstInt(ctx->i32, (param * 4 + i + const_index) * 256, false);
args[3] = ctx->i32zero;
args[4] = ctx->i32one; /* OFFEN */
args[5] = ctx->i32zero; /* IDXEN */
args[7] = ctx->i32zero; /* SLC */
args[8] = ctx->i32zero; /* TFE */
- value[i] = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.SI.buffer.load.dword.i32.i32",
- ctx->i32, args, 9, AC_FUNC_ATTR_READONLY);
+ value[i] = ac_build_intrinsic(&ctx->ac, "llvm.SI.buffer.load.dword.i32.i32",
+ ctx->i32, args, 9,
+ AC_FUNC_ATTR_READONLY |
+ AC_FUNC_ATTR_LEGACY);
}
result = ac_build_gather_values(&ctx->ac, value, instr->num_components);
unsigned const_index;
bool vs_in = ctx->stage == MESA_SHADER_VERTEX &&
instr->variables[0]->var->data.mode == nir_var_shader_in;
- radv_get_deref_offset(ctx, &instr->variables[0]->deref, vs_in, NULL,
+ radv_get_deref_offset(ctx, instr->variables[0], vs_in, NULL, NULL,
&const_index, &indir_index);
if (instr->dest.ssa.bit_size == 64)
switch (instr->variables[0]->var->data.mode) {
case nir_var_shader_in:
+ if (ctx->stage == MESA_SHADER_TESS_CTRL)
+ return load_tcs_input(ctx, instr);
+ if (ctx->stage == MESA_SHADER_TESS_EVAL)
+ return load_tes_input(ctx, instr);
if (ctx->stage == MESA_SHADER_GEOMETRY) {
return load_gs_input(ctx, instr);
}
}
break;
case nir_var_shader_out:
+ if (ctx->stage == MESA_SHADER_TESS_CTRL)
+ return load_tcs_output(ctx, instr);
for (unsigned chan = 0; chan < ve; chan++) {
if (indir_index) {
unsigned count = glsl_count_attribute_slots(
int writemask = instr->const_index[0];
LLVMValueRef indir_index;
unsigned const_index;
- radv_get_deref_offset(ctx, &instr->variables[0]->deref, false,
- NULL, &const_index, &indir_index);
+ radv_get_deref_offset(ctx, instr->variables[0], false,
+ NULL, NULL, &const_index, &indir_index);
if (get_elem_bits(ctx, LLVMTypeOf(src)) == 64) {
int old_writemask = writemask;
switch (instr->variables[0]->var->data.mode) {
case nir_var_shader_out:
+
+ if (ctx->stage == MESA_SHADER_TESS_CTRL) {
+ store_tcs_output(ctx, instr, src, writemask);
+ return;
+ }
+
for (unsigned chan = 0; chan < 8; chan++) {
int stride = 4;
if (!(writemask & (1 << chan)))
value = llvm_extract_elem(ctx, src, chan);
- if (instr->variables[0]->var->data.location == VARYING_SLOT_CLIP_DIST0 ||
- instr->variables[0]->var->data.location == VARYING_SLOT_CULL_DIST0)
+ if (instr->variables[0]->var->data.compact)
stride = 1;
if (indir_index) {
unsigned count = glsl_count_attribute_slots(
default:
break;
}
- return 0;
-}
+ return 0;
+}
+
+
+
+/* Adjust the sample index according to FMASK.
+ *
+ * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
+ * which is the identity mapping. Each nibble says which physical sample
+ * should be fetched to get that sample.
+ *
+ * For example, 0x11111100 means there are only 2 samples stored and
+ * the second sample covers 3/4 of the pixel. When reading samples 0
+ * and 1, return physical sample 0 (determined by the first two 0s
+ * in FMASK), otherwise return physical sample 1.
+ *
+ * The sample index should be adjusted as follows:
+ * sample_index = (fmask >> (sample_index * 4)) & 0xF;
+ */
+static LLVMValueRef adjust_sample_index_using_fmask(struct nir_to_llvm_context *ctx,
+ LLVMValueRef coord_x, LLVMValueRef coord_y,
+ LLVMValueRef coord_z,
+ LLVMValueRef sample_index,
+ LLVMValueRef fmask_desc_ptr)
+{
+ LLVMValueRef fmask_load_address[4];
+ LLVMValueRef res;
+
+ fmask_load_address[0] = coord_x;
+ fmask_load_address[1] = coord_y;
+ if (coord_z) {
+ fmask_load_address[2] = coord_z;
+ fmask_load_address[3] = LLVMGetUndef(ctx->i32);
+ }
+
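+ /* Fetch the FMASK dword for this pixel (plus the layer for arrays). */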
+ struct ac_image_args args = {0};
+
+ args.opcode = ac_image_load;
+ args.da = coord_z ? true : false;
+ args.resource = fmask_desc_ptr;
+ args.dmask = 0xf;
+ args.addr = ac_build_gather_values(&ctx->ac, fmask_load_address, coord_z ? 4 : 2);
+
+ res = ac_build_image_opcode(&ctx->ac, &args);
+
+ res = to_integer(ctx, res);
+ LLVMValueRef four = LLVMConstInt(ctx->i32, 4, false);
+ LLVMValueRef F = LLVMConstInt(ctx->i32, 0xf, false);
+
+ LLVMValueRef fmask = LLVMBuildExtractElement(ctx->builder,
+ res,
+ ctx->i32zero, "");
+ LLVMValueRef sample_index4 =
+ LLVMBuildMul(ctx->builder, sample_index, four, "");
+ LLVMValueRef shifted_fmask =
+ LLVMBuildLShr(ctx->builder, fmask, sample_index4, "");
+ LLVMValueRef final_sample =
+ LLVMBuildAnd(ctx->builder, shifted_fmask, F, "");
-static void build_type_name_for_intr(
- LLVMTypeRef type,
- char *buf, unsigned bufsize)
-{
- LLVMTypeRef elem_type = type;
-
- assert(bufsize >= 8);
-
- if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
- int ret = snprintf(buf, bufsize, "v%u",
- LLVMGetVectorSize(type));
- if (ret < 0) {
- char *type_name = LLVMPrintTypeToString(type);
- fprintf(stderr, "Error building type name for: %s\n",
- type_name);
- return;
- }
- elem_type = LLVMGetElementType(type);
- buf += ret;
- bufsize -= ret;
- }
- switch (LLVMGetTypeKind(elem_type)) {
- default: break;
- case LLVMIntegerTypeKind:
- snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
- break;
- case LLVMFloatTypeKind:
- snprintf(buf, bufsize, "f32");
- break;
- case LLVMDoubleTypeKind:
- snprintf(buf, bufsize, "f64");
- break;
- }
-}
-
-static void get_image_intr_name(const char *base_name,
- LLVMTypeRef data_type,
- LLVMTypeRef coords_type,
- LLVMTypeRef rsrc_type,
- char *out_name, unsigned out_len)
-{
- char coords_type_name[8];
-
- build_type_name_for_intr(coords_type, coords_type_name,
- sizeof(coords_type_name));
-
- if (HAVE_LLVM <= 0x0309) {
- snprintf(out_name, out_len, "%s.%s", base_name, coords_type_name);
- } else {
- char data_type_name[8];
- char rsrc_type_name[8];
-
- build_type_name_for_intr(data_type, data_type_name,
- sizeof(data_type_name));
- build_type_name_for_intr(rsrc_type, rsrc_type_name,
- sizeof(rsrc_type_name));
- snprintf(out_name, out_len, "%s.%s.%s.%s", base_name,
- data_type_name, coords_type_name, rsrc_type_name);
- }
+ /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
+ * resource descriptor is 0 (invalid).
+ */
+ LLVMValueRef fmask_desc =
+ LLVMBuildBitCast(ctx->builder, fmask_desc_ptr,
+ ctx->v8i32, "");
+
+ LLVMValueRef fmask_word1 =
+ LLVMBuildExtractElement(ctx->builder, fmask_desc,
+ ctx->i32one, "");
+
+ LLVMValueRef word1_is_nonzero =
+ LLVMBuildICmp(ctx->builder, LLVMIntNE,
+ fmask_word1, ctx->i32zero, "");
+
+ /* Replace the MSAA sample index. */
+ sample_index =
+ LLVMBuildSelect(ctx->builder, word1_is_nonzero,
+ final_sample, sample_index, "");
+ return sample_index;
}
static LLVMValueRef get_image_coords(struct nir_to_llvm_context *ctx,
glsl_sampler_type_is_array(type));
if (is_ms) {
- LLVMValueRef fmask_load_address[4];
- LLVMValueRef params[7];
- LLVMValueRef glc = LLVMConstInt(ctx->i1, 0, false);
- LLVMValueRef slc = LLVMConstInt(ctx->i1, 0, false);
- LLVMValueRef da = ctx->i32zero;
- char intrinsic_name[64];
+ LLVMValueRef fmask_load_address[3];
int chan;
+
fmask_load_address[0] = LLVMBuildExtractElement(ctx->builder, src0, masks[0], "");
fmask_load_address[1] = LLVMBuildExtractElement(ctx->builder, src0, masks[1], "");
- fmask_load_address[2] = LLVMGetUndef(ctx->i32);
- fmask_load_address[3] = LLVMGetUndef(ctx->i32);
+ if (glsl_sampler_type_is_array(type))
+ fmask_load_address[2] = LLVMBuildExtractElement(ctx->builder, src0, masks[2], "");
+ else
+ fmask_load_address[2] = NULL;
if (add_frag_pos) {
for (chan = 0; chan < 2; ++chan)
fmask_load_address[chan] = LLVMBuildAdd(ctx->builder, fmask_load_address[chan], LLVMBuildFPToUI(ctx->builder, ctx->frag_pos[chan], ctx->i32, ""), "");
}
- params[0] = ac_build_gather_values(&ctx->ac, fmask_load_address, 4);
- params[1] = get_sampler_desc(ctx, instr->variables[0], DESC_FMASK);
- params[2] = LLVMConstInt(ctx->i32, 15, false); /* dmask */
- LLVMValueRef lwe = LLVMConstInt(ctx->i1, 0, false);
- params[3] = glc;
- params[4] = slc;
- params[5] = lwe;
- params[6] = da;
-
- get_image_intr_name("llvm.amdgcn.image.load",
- ctx->v4f32, /* vdata */
- LLVMTypeOf(params[0]), /* coords */
- LLVMTypeOf(params[1]), /* rsrc */
- intrinsic_name, sizeof(intrinsic_name));
-
- res = ac_emit_llvm_intrinsic(&ctx->ac, intrinsic_name, ctx->v4f32,
- params, 7, AC_FUNC_ATTR_READONLY);
-
- res = to_integer(ctx, res);
- LLVMValueRef four = LLVMConstInt(ctx->i32, 4, false);
- LLVMValueRef F = LLVMConstInt(ctx->i32, 0xf, false);
-
- LLVMValueRef fmask = LLVMBuildExtractElement(ctx->builder,
- res,
- ctx->i32zero, "");
-
- LLVMValueRef sample_index4 =
- LLVMBuildMul(ctx->builder, sample_index, four, "");
- LLVMValueRef shifted_fmask =
- LLVMBuildLShr(ctx->builder, fmask, sample_index4, "");
- LLVMValueRef final_sample =
- LLVMBuildAnd(ctx->builder, shifted_fmask, F, "");
-
- /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
- * resource descriptor is 0 (invalid),
- */
- LLVMValueRef fmask_desc =
- LLVMBuildBitCast(ctx->builder, params[1],
- ctx->v8i32, "");
-
- LLVMValueRef fmask_word1 =
- LLVMBuildExtractElement(ctx->builder, fmask_desc,
- ctx->i32one, "");
-
- LLVMValueRef word1_is_nonzero =
- LLVMBuildICmp(ctx->builder, LLVMIntNE,
- fmask_word1, ctx->i32zero, "");
-
- /* Replace the MSAA sample index. */
- sample_index =
- LLVMBuildSelect(ctx->builder, word1_is_nonzero,
- final_sample, sample_index, "");
+ sample_index = adjust_sample_index_using_fmask(ctx,
+ fmask_load_address[0],
+ fmask_load_address[1],
+ fmask_load_address[2],
+ sample_index,
+ get_sampler_desc(ctx, instr->variables[0], DESC_FMASK));
}
if (count == 1) {
if (instr->src[0].ssa->num_components)
params[1] = LLVMBuildExtractElement(ctx->builder, get_src(ctx, instr->src[0]),
LLVMConstInt(ctx->i32, 0, false), ""); /* vindex */
params[2] = LLVMConstInt(ctx->i32, 0, false); /* voffset */
- params[3] = LLVMConstInt(ctx->i1, 0, false); /* glc */
- params[4] = LLVMConstInt(ctx->i1, 0, false); /* slc */
- res = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.load.format.v4f32", ctx->v4f32,
- params, 5, 0);
+ params[3] = ctx->i1false; /* glc */
+ params[4] = ctx->i1false; /* slc */
+ res = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.load.format.v4f32", ctx->v4f32,
+ params, 5, 0);
res = trim_vector(ctx, res, instr->dest.ssa.num_components);
res = to_integer(ctx, res);
} else {
bool is_da = glsl_sampler_type_is_array(type) ||
glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE;
- LLVMValueRef da = is_da ? ctx->i32one : ctx->i32zero;
- LLVMValueRef glc = LLVMConstInt(ctx->i1, 0, false);
- LLVMValueRef slc = LLVMConstInt(ctx->i1, 0, false);
+ LLVMValueRef da = is_da ? ctx->i1true : ctx->i1false;
+ LLVMValueRef glc = ctx->i1false;
+ LLVMValueRef slc = ctx->i1false;
params[0] = get_image_coords(ctx, instr);
params[1] = get_sampler_desc(ctx, instr->variables[0], DESC_IMAGE);
params[2] = LLVMConstInt(ctx->i32, 15, false); /* dmask */
if (HAVE_LLVM <= 0x0309) {
- params[3] = LLVMConstInt(ctx->i1, 0, false); /* r128 */
+ params[3] = ctx->i1false; /* r128 */
params[4] = da;
params[5] = glc;
params[6] = slc;
} else {
- LLVMValueRef lwe = LLVMConstInt(ctx->i1, 0, false);
+ LLVMValueRef lwe = ctx->i1false;
params[3] = glc;
params[4] = slc;
params[5] = lwe;
params[6] = da;
}
- get_image_intr_name("llvm.amdgcn.image.load",
- ctx->v4f32, /* vdata */
- LLVMTypeOf(params[0]), /* coords */
- LLVMTypeOf(params[1]), /* rsrc */
- intrinsic_name, sizeof(intrinsic_name));
+ ac_get_image_intr_name("llvm.amdgcn.image.load",
+ ctx->v4f32, /* vdata */
+ LLVMTypeOf(params[0]), /* coords */
+ LLVMTypeOf(params[1]), /* rsrc */
+ intrinsic_name, sizeof(intrinsic_name));
- res = ac_emit_llvm_intrinsic(&ctx->ac, intrinsic_name, ctx->v4f32,
- params, 7, AC_FUNC_ATTR_READONLY);
+ res = ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->v4f32,
+ params, 7, AC_FUNC_ATTR_READONLY);
}
return to_integer(ctx, res);
}
LLVMValueRef params[8];
char intrinsic_name[64];
const nir_variable *var = instr->variables[0]->var;
- LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
- LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
const struct glsl_type *type = glsl_without_array(var->type);
if (ctx->stage == MESA_SHADER_FRAGMENT)
params[2] = LLVMBuildExtractElement(ctx->builder, get_src(ctx, instr->src[0]),
LLVMConstInt(ctx->i32, 0, false), ""); /* vindex */
params[3] = LLVMConstInt(ctx->i32, 0, false); /* voffset */
- params[4] = i1false; /* glc */
- params[5] = i1false; /* slc */
- ac_emit_llvm_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.store.format.v4f32", ctx->voidt,
- params, 6, 0);
+ params[4] = ctx->i1false; /* glc */
+ params[5] = ctx->i1false; /* slc */
+ ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.buffer.store.format.v4f32", ctx->voidt,
+ params, 6, 0);
} else {
bool is_da = glsl_sampler_type_is_array(type) ||
glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE;
- LLVMValueRef da = is_da ? i1true : i1false;
- LLVMValueRef glc = i1false;
- LLVMValueRef slc = i1false;
+ LLVMValueRef da = is_da ? ctx->i1true : ctx->i1false;
+ LLVMValueRef glc = ctx->i1false;
+ LLVMValueRef slc = ctx->i1false;
params[0] = to_float(ctx, get_src(ctx, instr->src[2]));
params[1] = get_image_coords(ctx, instr); /* coords */
params[2] = get_sampler_desc(ctx, instr->variables[0], DESC_IMAGE);
params[3] = LLVMConstInt(ctx->i32, 15, false); /* dmask */
if (HAVE_LLVM <= 0x0309) {
- params[4] = i1false; /* r128 */
+ params[4] = ctx->i1false; /* r128 */
params[5] = da;
params[6] = glc;
params[7] = slc;
} else {
- LLVMValueRef lwe = i1false;
+ LLVMValueRef lwe = ctx->i1false;
params[4] = glc;
params[5] = slc;
params[6] = lwe;
params[7] = da;
}
- get_image_intr_name("llvm.amdgcn.image.store",
- LLVMTypeOf(params[0]), /* vdata */
- LLVMTypeOf(params[1]), /* coords */
- LLVMTypeOf(params[2]), /* rsrc */
- intrinsic_name, sizeof(intrinsic_name));
+ ac_get_image_intr_name("llvm.amdgcn.image.store",
+ LLVMTypeOf(params[0]), /* vdata */
+ LLVMTypeOf(params[1]), /* coords */
+ LLVMTypeOf(params[2]), /* rsrc */
+ intrinsic_name, sizeof(intrinsic_name));
- ac_emit_llvm_intrinsic(&ctx->ac, intrinsic_name, ctx->voidt,
- params, 8, 0);
+ ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->voidt,
+ params, 8, 0);
}
}
LLVMValueRef params[6];
int param_count = 0;
const nir_variable *var = instr->variables[0]->var;
- LLVMValueRef i1false = LLVMConstInt(ctx->i1, 0, 0);
- LLVMValueRef i1true = LLVMConstInt(ctx->i1, 1, 0);
+
const char *base_name = "llvm.amdgcn.image.atomic";
const char *atomic_name;
LLVMValueRef coords;
coords = params[param_count++] = LLVMBuildExtractElement(ctx->builder, get_src(ctx, instr->src[0]),
LLVMConstInt(ctx->i32, 0, false), ""); /* vindex */
params[param_count++] = ctx->i32zero; /* voffset */
- params[param_count++] = i1false; /* glc */
- params[param_count++] = i1false; /* slc */
+ params[param_count++] = ctx->i1false; /* glc */
+ params[param_count++] = ctx->i1false; /* slc */
} else {
bool da = glsl_sampler_type_is_array(type) ||
glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE;
coords = params[param_count++] = get_image_coords(ctx, instr);
params[param_count++] = get_sampler_desc(ctx, instr->variables[0], DESC_IMAGE);
- params[param_count++] = i1false; /* r128 */
- params[param_count++] = da ? i1true : i1false; /* da */
- params[param_count++] = i1false; /* slc */
+ params[param_count++] = ctx->i1false; /* r128 */
+ params[param_count++] = da ? ctx->i1true : ctx->i1false; /* da */
+ params[param_count++] = ctx->i1false; /* slc */
}
switch (instr->intrinsic) {
snprintf(intrinsic_name, sizeof(intrinsic_name),
"%s.%s.%s", base_name, atomic_name, coords_type);
- return ac_emit_llvm_intrinsic(&ctx->ac, intrinsic_name, ctx->i32, params, param_count, 0);
+ return ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->i32, params, param_count, 0);
}
static LLVMValueRef visit_image_size(struct nir_to_llvm_context *ctx,
nir_intrinsic_instr *instr)
{
LLVMValueRef res;
- LLVMValueRef params[10];
const nir_variable *var = instr->variables[0]->var;
const struct glsl_type *type = instr->variables[0]->var->type;
bool da = glsl_sampler_type_is_array(var->type) ||
if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_BUF)
return get_buffer_size(ctx, get_sampler_desc(ctx, instr->variables[0], DESC_BUFFER), true);
- params[0] = ctx->i32zero;
- params[1] = get_sampler_desc(ctx, instr->variables[0], DESC_IMAGE);
- params[2] = LLVMConstInt(ctx->i32, 15, false);
- params[3] = ctx->i32zero;
- params[4] = ctx->i32zero;
- params[5] = da ? ctx->i32one : ctx->i32zero;
- params[6] = ctx->i32zero;
- params[7] = ctx->i32zero;
- params[8] = ctx->i32zero;
- params[9] = ctx->i32zero;
-
- res = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.SI.getresinfo.i32", ctx->v4i32,
- params, 10, AC_FUNC_ATTR_READNONE);
+
+ struct ac_image_args args = { 0 };
+
+ args.da = da;
+ args.dmask = 0xf;
+ args.resource = get_sampler_desc(ctx, instr->variables[0], DESC_IMAGE);
+ args.opcode = ac_image_get_resinfo;
+ args.addr = ctx->i32zero;
+
+ res = ac_build_image_opcode(&ctx->ac, &args);
if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_CUBE &&
glsl_sampler_type_is_array(type)) {
return res;
}
-static void emit_waitcnt(struct nir_to_llvm_context *ctx)
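+/* s_waitcnt simm16 encoding: vmcnt in bits [3:0], expcnt in bits [6:4],
+ * lgkmcnt in bits [12:8]; a zeroed field waits for that counter to drain. */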
+#define NOOP_WAITCNT 0xf7f
+#define LGKM_CNT 0x07f
+#define VM_CNT 0xf70
+
+static void emit_waitcnt(struct nir_to_llvm_context *ctx,
+ unsigned simm16)
{
LLVMValueRef args[1] = {
- LLVMConstInt(ctx->i32, 0xf70, false),
+ LLVMConstInt(ctx->i32, simm16, false),
};
- ac_emit_llvm_intrinsic(&ctx->ac, "llvm.amdgcn.s.waitcnt",
- ctx->voidt, args, 1, 0);
+ ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.waitcnt",
+ ctx->voidt, args, 1, 0);
}
static void emit_barrier(struct nir_to_llvm_context *ctx)
{
- // TODO tess
- ac_emit_llvm_intrinsic(&ctx->ac, "llvm.amdgcn.s.barrier",
- ctx->voidt, NULL, 0, 0);
+ /* SI only (thanks to a hw bug workaround):
+ * The real barrier instruction isn't needed, because an entire patch
+ * always fits into a single wave.
+ */
+ if (ctx->options->chip_class == SI &&
+ ctx->stage == MESA_SHADER_TESS_CTRL) {
+ emit_waitcnt(ctx, LGKM_CNT & VM_CNT);
+ return;
+ }
+ ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.s.barrier",
+ ctx->voidt, NULL, 0, AC_FUNC_ATTR_CONVERGENT);
}
static void emit_discard_if(struct nir_to_llvm_context *ctx,
cond = LLVMBuildSelect(ctx->builder, cond,
LLVMConstReal(ctx->f32, -1.0f),
ctx->f32zero, "");
- ac_emit_llvm_intrinsic(&ctx->ac, "llvm.AMDGPU.kill",
- ctx->voidt,
- &cond, 1, 0);
+ ac_build_kill(&ctx->ac, cond);
}
static LLVMValueRef
static LLVMValueRef load_sample_position(struct nir_to_llvm_context *ctx,
LLVMValueRef sample_id)
{
- /* offset = sample_id * 8 (8 = 2 floats containing samplepos.xy) */
- LLVMValueRef offset0 = LLVMBuildMul(ctx->builder, sample_id, LLVMConstInt(ctx->i32, 8, false), "");
- LLVMValueRef offset1 = LLVMBuildAdd(ctx->builder, offset0, LLVMConstInt(ctx->i32, 4, false), "");
- LLVMValueRef result[2];
+ LLVMValueRef result;
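+ /* The sample positions live in the ring descriptor buffer at the
+ * RING_PS_SAMPLE_POSITIONS slot, viewed as 64 vec2s. */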
+ LLVMValueRef ptr = ac_build_gep0(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_PS_SAMPLE_POSITIONS, false));
- result[0] = ac_build_indexed_load_const(&ctx->ac, ctx->sample_positions, offset0);
- result[1] = ac_build_indexed_load_const(&ctx->ac, ctx->sample_positions, offset1);
+ ptr = LLVMBuildBitCast(ctx->builder, ptr,
+ const_array(ctx->v2f32, 64), "");
- return ac_build_gather_values(&ctx->ac, result, 2);
+ sample_id = LLVMBuildAdd(ctx->builder, sample_id, ctx->sample_pos_offset, "");
+ result = ac_build_indexed_load(&ctx->ac, ptr, sample_id, false);
+
+ return result;
}
static LLVMValueRef load_sample_pos(struct nir_to_llvm_context *ctx)
location = INTERP_CENTROID;
break;
case nir_intrinsic_interp_var_at_sample:
- location = INTERP_SAMPLE;
- src0 = get_src(ctx, instr->src[0]);
- break;
case nir_intrinsic_interp_var_at_offset:
location = INTERP_CENTER;
src0 = get_src(ctx, instr->src[0]);
+ break;
default:
break;
}
LLVMValueRef gs_next_vertex;
LLVMValueRef can_emit, kill;
int idx;
- int clip_cull_slot = -1;
+
assert(instr->const_index[0] == 0);
/* Write vertex attribute values to GSVS ring */
gs_next_vertex = LLVMBuildLoad(ctx->builder,
kill = LLVMBuildSelect(ctx->builder, can_emit,
LLVMConstReal(ctx->f32, 1.0f),
LLVMConstReal(ctx->f32, -1.0f), "");
- ac_emit_llvm_intrinsic(&ctx->ac, "llvm.AMDGPU.kill",
- ctx->voidt, &kill, 1, 0);
+ ac_build_kill(&ctx->ac, kill);
/* loop num outputs */
idx = 0;
for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
LLVMValueRef *out_ptr = &ctx->outputs[i * 4];
int length = 4;
- int start = 0;
int slot = idx;
int slot_inc = 1;
if (!(ctx->output_mask & (1ull << i)))
continue;
- if (i == VARYING_SLOT_CLIP_DIST1 ||
- i == VARYING_SLOT_CULL_DIST1)
- continue;
-
- if (i == VARYING_SLOT_CLIP_DIST0 ||
- i == VARYING_SLOT_CULL_DIST0) {
+ if (i == VARYING_SLOT_CLIP_DIST0) {
/* pack clip and cull into a single set of slots */
- if (clip_cull_slot == -1) {
- clip_cull_slot = idx;
- if (ctx->num_output_clips + ctx->num_output_culls > 4)
- slot_inc = 2;
- } else {
- slot = clip_cull_slot;
- slot_inc = 0;
- }
- if (i == VARYING_SLOT_CLIP_DIST0)
- length = ctx->num_output_clips;
- if (i == VARYING_SLOT_CULL_DIST0) {
- start = ctx->num_output_clips;
- length = ctx->num_output_culls;
- }
+ length = ctx->num_output_clips + ctx->num_output_culls;
+ if (length > 4)
+ slot_inc = 2;
}
for (unsigned j = 0; j < length; j++) {
LLVMValueRef out_val = LLVMBuildLoad(ctx->builder,
out_ptr[j], "");
- LLVMValueRef voffset = LLVMConstInt(ctx->i32, (slot * 4 + j + start) * ctx->gs_max_out_vertices, false);
+ LLVMValueRef voffset = LLVMConstInt(ctx->i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
voffset = LLVMBuildAdd(ctx->builder, voffset, gs_next_vertex, "");
voffset = LLVMBuildMul(ctx->builder, voffset, LLVMConstInt(ctx->i32, 4, false), "");
out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->i32, "");
- ac_build_tbuffer_store(&ctx->ac, ctx->gsvs_ring,
- out_val, 1,
- voffset, ctx->gs2vs_offset, 0,
- V_008F0C_BUF_DATA_FORMAT_32,
- V_008F0C_BUF_NUM_FORMAT_UINT,
- 1, 0, 1, 1, 0);
+ ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
+ out_val, 1,
+ voffset, ctx->gs2vs_offset, 0,
+ 1, 1, true, true);
}
idx += slot_inc;
}
ctx->i32one, "");
LLVMBuildStore(ctx->builder, gs_next_vertex, ctx->gs_next_vertex);
- ac_emit_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
+ ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
}
static void
visit_end_primitive(struct nir_to_llvm_context *ctx,
nir_intrinsic_instr *instr)
{
- ac_emit_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
+ ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
+}
+
+static LLVMValueRef
+visit_load_tess_coord(struct nir_to_llvm_context *ctx,
+ nir_intrinsic_instr *instr)
+{
+ LLVMValueRef coord[4] = {
+ ctx->tes_u,
+ ctx->tes_v,
+ ctx->f32zero,
+ ctx->f32zero,
+ };
+
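+ /* For triangles the third barycentric coordinate is 1 - u - v. */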
+ if (ctx->tes_primitive_mode == GL_TRIANGLES)
+ coord[2] = LLVMBuildFSub(ctx->builder, ctx->f32one,
+ LLVMBuildFAdd(ctx->builder, coord[0], coord[1], ""), "");
+
+ LLVMValueRef result = ac_build_gather_values(&ctx->ac, coord, instr->num_components);
+ return LLVMBuildBitCast(ctx->builder, result,
+ get_def_type(ctx, &instr->dest.ssa), "");
}
static void visit_intrinsic(struct nir_to_llvm_context *ctx,
result = ctx->draw_index;
break;
case nir_intrinsic_load_invocation_id:
- result = ctx->gs_invocation_id;
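+ /* The TCS invocation id lives in bits [12:8] of tcs_rel_ids. */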
+ if (ctx->stage == MESA_SHADER_TESS_CTRL)
+ result = unpack_param(ctx, ctx->tcs_rel_ids, 8, 5);
+ else
+ result = ctx->gs_invocation_id;
break;
case nir_intrinsic_load_primitive_id:
if (ctx->stage == MESA_SHADER_GEOMETRY)
result = ctx->gs_prim_id;
+ else if (ctx->stage == MESA_SHADER_TESS_CTRL)
+ result = ctx->tcs_patch_id;
+ else if (ctx->stage == MESA_SHADER_TESS_EVAL)
+ result = ctx->tes_patch_id;
else
fprintf(stderr, "Unknown primitive id intrinsic: %d", ctx->stage);
break;
break;
case nir_intrinsic_discard:
ctx->shader_info->fs.can_discard = true;
- ac_emit_llvm_intrinsic(&ctx->ac, "llvm.AMDGPU.kilp",
- ctx->voidt,
- NULL, 0, 0);
+ ac_build_intrinsic(&ctx->ac, "llvm.AMDGPU.kilp",
+ ctx->voidt,
+ NULL, 0, AC_FUNC_ATTR_LEGACY);
break;
case nir_intrinsic_discard_if:
emit_discard_if(ctx, instr);
break;
case nir_intrinsic_memory_barrier:
- emit_waitcnt(ctx);
+ emit_waitcnt(ctx, VM_CNT);
break;
case nir_intrinsic_barrier:
emit_barrier(ctx);
case nir_intrinsic_end_primitive:
visit_end_primitive(ctx, instr);
break;
+ case nir_intrinsic_load_tess_coord:
+ result = visit_load_tess_coord(ctx, instr);
+ break;
+ case nir_intrinsic_load_patch_vertices_in:
+ result = LLVMConstInt(ctx->i32, ctx->options->key.tcs.input_vertices, false);
+ break;
default:
fprintf(stderr, "Unknown intrinsic: ");
nir_print_instr(&instr->instr, stderr);
LLVMBuilderRef builder = ctx->builder;
LLVMTypeRef type;
LLVMValueRef index = NULL;
+ unsigned constant_index = 0;
assert(deref->var->data.binding < layout->binding_count);
if (child->deref_array_type == nir_deref_array_type_indirect) {
index = get_src(ctx, child->indirect);
}
+
+ constant_index = child->base_offset;
+ }
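+ /* Immutable samplers are baked into the pipeline layout, so build the
+ * descriptor from constants instead of loading it. */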
+ if (desc_type == DESC_SAMPLER && binding->immutable_samplers_offset &&
+ (!index || binding->immutable_samplers_equal)) {
+ if (binding->immutable_samplers_equal)
+ constant_index = 0;
+
+ const uint32_t *samplers = radv_immutable_samplers(layout, binding);
+
+ LLVMValueRef constants[] = {
+ LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 0], 0),
+ LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 1], 0),
+ LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 2], 0),
+ LLVMConstInt(ctx->i32, samplers[constant_index * 4 + 3], 0),
+ };
+ return ac_build_gather_values(&ctx->ac, constants, 4);
}
assert(stride % type_size == 0);
}
static void set_tex_fetch_args(struct nir_to_llvm_context *ctx,
- struct ac_tex_info *tinfo,
+ struct ac_image_args *args,
nir_tex_instr *instr,
nir_texop op,
LLVMValueRef res_ptr, LLVMValueRef samp_ptr,
LLVMValueRef *param, unsigned count,
unsigned dmask)
{
- int num_args;
unsigned is_rect = 0;
bool da = instr->is_array || instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE;
param[count++] = LLVMGetUndef(ctx->i32);
if (count > 1)
- tinfo->args[0] = ac_build_gather_values(&ctx->ac, param, count);
+ args->addr = ac_build_gather_values(&ctx->ac, param, count);
else
- tinfo->args[0] = param[0];
+ args->addr = param[0];
- tinfo->args[1] = res_ptr;
- num_args = 2;
-
- if (op == nir_texop_txf ||
- op == nir_texop_txf_ms ||
- op == nir_texop_query_levels ||
- op == nir_texop_texture_samples ||
- op == nir_texop_txs)
- tinfo->dst_type = ctx->v4i32;
- else {
- tinfo->dst_type = ctx->v4f32;
- tinfo->args[num_args++] = samp_ptr;
- }
+ args->resource = res_ptr;
+ args->sampler = samp_ptr;
if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF && op == nir_texop_txf) {
- tinfo->args[0] = res_ptr;
- tinfo->args[1] = LLVMConstInt(ctx->i32, 0, false);
- tinfo->args[2] = param[0];
- tinfo->arg_count = 3;
+ args->addr = param[0];
return;
}
- tinfo->args[num_args++] = LLVMConstInt(ctx->i32, dmask, 0);
- tinfo->args[num_args++] = LLVMConstInt(ctx->i32, is_rect, 0); /* unorm */
- tinfo->args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* r128 */
- tinfo->args[num_args++] = LLVMConstInt(ctx->i32, da ? 1 : 0, 0);
- tinfo->args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* glc */
- tinfo->args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* slc */
- tinfo->args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* tfe */
- tinfo->args[num_args++] = LLVMConstInt(ctx->i32, 0, 0); /* lwe */
-
- tinfo->arg_count = num_args;
+ args->dmask = dmask;
+ args->unorm = is_rect;
+ args->da = da;
}
/* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
LLVMValueRef coord)
{
coord = to_float(ctx, coord);
- coord = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.rint.f32", ctx->f32, &coord, 1, 0);
+ coord = ac_build_intrinsic(&ctx->ac, "llvm.rint.f32", ctx->f32, &coord, 1, 0);
coord = to_integer(ctx, coord);
return coord;
}
static void visit_tex(struct nir_to_llvm_context *ctx, nir_tex_instr *instr)
{
LLVMValueRef result = NULL;
- struct ac_tex_info tinfo = { 0 };
+ struct ac_image_args args = { 0 };
unsigned dmask = 0xf;
LLVMValueRef address[16];
LLVMValueRef coords[5];
LLVMValueRef derivs[6];
unsigned chan, count = 0;
unsigned const_src = 0, num_deriv_comp = 0;
-
+ bool lod_is_zero = false;
tex_fetch_ptrs(ctx, instr, &res_ptr, &samp_ptr, &fmask_ptr);
for (unsigned i = 0; i < instr->num_srcs; i++) {
case nir_tex_src_bias:
bias = get_src(ctx, instr->src[i].src);
break;
- case nir_tex_src_lod:
+ case nir_tex_src_lod: {
+ nir_const_value *val = nir_src_as_const_value(instr->src[i].src);
+
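+ /* A constant LOD of 0 lets build_tex_intrinsic pick the cheaper _lz variants. */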
+ if (val && val->i32[0] == 0)
+ lod_is_zero = true;
lod = get_src(ctx, instr->src[i].src);
break;
+ }
case nir_tex_src_ms_index:
sample_index = get_src(ctx, instr->src[i].src);
break;
}
if (instr->op == nir_texop_txs && instr->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
- result = get_buffer_size(ctx, res_ptr, false);
+ result = get_buffer_size(ctx, res_ptr, true);
goto write_result;
}
for (chan = 0; chan < 3; ++chan)
offset[chan] = ctx->i32zero;
- tinfo.has_offset = true;
+ args.offset = true;
for (chan = 0; chan < get_llvm_num_components(offsets); chan++) {
offset[chan] = llvm_extract_elem(ctx, offsets, chan);
offset[chan] = LLVMBuildAnd(ctx->builder, offset[chan],
}
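+ /* Pack all ddx components first, then all ddy components. */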
for (unsigned i = 0; i < num_deriv_comp; i++) {
- derivs[i * 2] = to_float(ctx, llvm_extract_elem(ctx, ddx, i));
- derivs[i * 2 + 1] = to_float(ctx, llvm_extract_elem(ctx, ddy, i));
+ derivs[i] = to_float(ctx, llvm_extract_elem(ctx, ddx, i));
+ derivs[num_deriv_comp + i] = to_float(ctx, llvm_extract_elem(ctx, ddy, i));
}
}
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE && coord) {
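+ /* Round the cube array slice in coords[3] to the nearest integer. */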
+ if (instr->is_array && instr->op != nir_texop_lod)
+ coords[3] = apply_round_slice(ctx, coords[3]);
for (chan = 0; chan < instr->coord_components; chan++)
coords[chan] = to_float(ctx, coords[chan]);
if (instr->coord_components == 3)
}
if (instr->coord_components > 2) {
/* This seems like a bit of a hack - but it passes Vulkan CTS with it */
- if (instr->sampler_dim != GLSL_SAMPLER_DIM_3D && instr->op != nir_texop_txf) {
+ if (instr->sampler_dim != GLSL_SAMPLER_DIM_3D &&
+ instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE &&
+ instr->op != nir_texop_txf) {
coords[2] = apply_round_slice(ctx, coords[2]);
}
address[count++] = coords[2];
}
/* Pack LOD */
- if ((instr->op == nir_texop_txl || instr->op == nir_texop_txf) && lod) {
+ if (lod && ((instr->op == nir_texop_txl && !lod_is_zero) ||
+ instr->op == nir_texop_txf)) {
address[count++] = lod;
} else if (instr->op == nir_texop_txf_ms && sample_index) {
address[count++] = sample_index;
if (instr->op == nir_texop_samples_identical) {
LLVMValueRef txf_address[4];
- struct ac_tex_info txf_info = { 0 };
+ struct ac_image_args txf_args = { 0 };
unsigned txf_count = count;
memcpy(txf_address, address, sizeof(txf_address));
txf_address[2] = ctx->i32zero;
txf_address[3] = ctx->i32zero;
- set_tex_fetch_args(ctx, &txf_info, instr, nir_texop_txf,
+ set_tex_fetch_args(ctx, &txf_args, instr, nir_texop_txf,
fmask_ptr, NULL,
txf_address, txf_count, 0xf);
- result = build_tex_intrinsic(ctx, instr, &txf_info);
+ result = build_tex_intrinsic(ctx, instr, false, &txf_args);
result = LLVMBuildExtractElement(ctx->builder, result, ctx->i32zero, "");
result = emit_int_cmp(ctx, LLVMIntEQ, result, ctx->i32zero);
goto write_result;
}
- /* Adjust the sample index according to FMASK.
- *
- * For uncompressed MSAA surfaces, FMASK should return 0x76543210,
- * which is the identity mapping. Each nibble says which physical sample
- * should be fetched to get that sample.
- *
- * For example, 0x11111100 means there are only 2 samples stored and
- * the second sample covers 3/4 of the pixel. When reading samples 0
- * and 1, return physical sample 0 (determined by the first two 0s
- * in FMASK), otherwise return physical sample 1.
- *
- * The sample index should be adjusted as follows:
- * sample_index = (fmask >> (sample_index * 4)) & 0xF;
- */
if (instr->sampler_dim == GLSL_SAMPLER_DIM_MS &&
instr->op != nir_texop_txs) {
- LLVMValueRef txf_address[4];
- struct ac_tex_info txf_info = { 0 };
- unsigned txf_count = count;
- memcpy(txf_address, address, sizeof(txf_address));
-
- if (!instr->is_array)
- txf_address[2] = ctx->i32zero;
- txf_address[3] = ctx->i32zero;
-
- set_tex_fetch_args(ctx, &txf_info, instr, nir_texop_txf,
- fmask_ptr, NULL,
- txf_address, txf_count, 0xf);
-
- result = build_tex_intrinsic(ctx, instr, &txf_info);
- LLVMValueRef four = LLVMConstInt(ctx->i32, 4, false);
- LLVMValueRef F = LLVMConstInt(ctx->i32, 0xf, false);
-
- LLVMValueRef fmask = LLVMBuildExtractElement(ctx->builder,
- result,
- ctx->i32zero, "");
-
unsigned sample_chan = instr->is_array ? 3 : 2;
-
- LLVMValueRef sample_index4 =
- LLVMBuildMul(ctx->builder, address[sample_chan], four, "");
- LLVMValueRef shifted_fmask =
- LLVMBuildLShr(ctx->builder, fmask, sample_index4, "");
- LLVMValueRef final_sample =
- LLVMBuildAnd(ctx->builder, shifted_fmask, F, "");
-
- /* Don't rewrite the sample index if WORD1.DATA_FORMAT of the FMASK
- * resource descriptor is 0 (invalid),
- */
- LLVMValueRef fmask_desc =
- LLVMBuildBitCast(ctx->builder, fmask_ptr,
- ctx->v8i32, "");
-
- LLVMValueRef fmask_word1 =
- LLVMBuildExtractElement(ctx->builder, fmask_desc,
- ctx->i32one, "");
-
- LLVMValueRef word1_is_nonzero =
- LLVMBuildICmp(ctx->builder, LLVMIntNE,
- fmask_word1, ctx->i32zero, "");
-
- /* Replace the MSAA sample index. */
- address[sample_chan] =
- LLVMBuildSelect(ctx->builder, word1_is_nonzero,
- final_sample, address[sample_chan], "");
+ address[sample_chan] = adjust_sample_index_using_fmask(ctx,
+ address[0],
+ address[1],
+ instr->is_array ? address[2] : NULL,
+ address[sample_chan],
+ fmask_ptr);
}
if (offsets && instr->op == nir_texop_txf) {
else
dmask = 1 << instr->component;
}
- set_tex_fetch_args(ctx, &tinfo, instr, instr->op,
+ set_tex_fetch_args(ctx, &args, instr, instr->op,
res_ptr, samp_ptr, address, count, dmask);
- result = build_tex_intrinsic(ctx, instr, &tinfo);
+ result = build_tex_intrinsic(ctx, instr, lod_is_zero, &args);
if (instr->op == nir_texop_query_levels)
result = LLVMBuildExtractElement(ctx->builder, result, LLVMConstInt(ctx->i32, 3, false), "");
LLVMValueRef t_list_ptr = ctx->vertex_buffers;
LLVMValueRef t_offset;
LLVMValueRef t_list;
- LLVMValueRef args[3];
LLVMValueRef input;
LLVMValueRef buffer_index;
int index = variable->data.location - VERT_ATTRIB_GENERIC0;
t_offset = LLVMConstInt(ctx->i32, index + i, false);
t_list = ac_build_indexed_load_const(&ctx->ac, t_list_ptr, t_offset);
- args[0] = t_list;
- args[1] = LLVMConstInt(ctx->i32, 0, false);
- args[2] = buffer_index;
- input = ac_emit_llvm_intrinsic(&ctx->ac,
- "llvm.SI.vs.load.input", ctx->v4f32, args, 3,
- AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_NOUNWIND);
+
+ input = ac_build_buffer_load_format(&ctx->ac, t_list,
+ buffer_index,
+ LLVMConstInt(ctx->i32, 0, false),
+ true);
for (unsigned chan = 0; chan < 4; chan++) {
LLVMValueRef llvm_chan = LLVMConstInt(ctx->i32, chan, false);
}
}
-static void
-handle_gs_input_decl(struct nir_to_llvm_context *ctx,
- struct nir_variable *variable)
-{
- int idx = variable->data.location;
-
- if (idx == VARYING_SLOT_CLIP_DIST0 ||
- idx == VARYING_SLOT_CULL_DIST0) {
- int length = glsl_get_length(glsl_get_array_element(variable->type));
- if (idx == VARYING_SLOT_CLIP_DIST0)
- ctx->num_input_clips = length;
- else
- ctx->num_input_culls = length;
- }
-}
-
static void interp_fs_input(struct nir_to_llvm_context *ctx,
unsigned attr,
LLVMValueRef interp_param,
case MESA_SHADER_FRAGMENT:
handle_fs_input_decl(ctx, variable);
break;
- case MESA_SHADER_GEOMETRY:
- handle_gs_input_decl(ctx, variable);
- break;
default:
break;
}
for(int i = 0; i < 3; ++i)
inputs[i] = ctx->frag_pos[i];
- inputs[3] = ac_emit_fdiv(&ctx->ac, ctx->f32one, ctx->frag_pos[3]);
+ inputs[3] = ac_build_fdiv(&ctx->ac, ctx->f32one, ctx->frag_pos[3]);
}
}
ctx->shader_info->fs.num_interp = index;
{
int idx = variable->data.location + variable->data.index;
unsigned attrib_count = glsl_count_attribute_slots(variable->type, false);
-
+ uint64_t mask_attribs;
variable->data.driver_location = idx * 4;
+ /* tess ctrl has its own load/store paths for outputs */
+ if (ctx->stage == MESA_SHADER_TESS_CTRL)
+ return;
+
+ mask_attribs = ((1ull << attrib_count) - 1) << idx;
if (ctx->stage == MESA_SHADER_VERTEX ||
+ ctx->stage == MESA_SHADER_TESS_EVAL ||
ctx->stage == MESA_SHADER_GEOMETRY) {
- if (idx == VARYING_SLOT_CLIP_DIST0 ||
- idx == VARYING_SLOT_CULL_DIST0) {
- int length = glsl_get_length(variable->type);
- if (idx == VARYING_SLOT_CLIP_DIST0) {
- if (ctx->stage == MESA_SHADER_VERTEX)
- ctx->shader_info->vs.clip_dist_mask = (1 << length) - 1;
- ctx->num_output_clips = length;
- } else if (idx == VARYING_SLOT_CULL_DIST0) {
- if (ctx->stage == MESA_SHADER_VERTEX)
- ctx->shader_info->vs.cull_dist_mask = (1 << length) - 1;
- ctx->num_output_culls = length;
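+ /* Clip and cull distances are packed together into at most two
+ * vec4 slots starting at CLIP_DIST0. */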
+ if (idx == VARYING_SLOT_CLIP_DIST0) {
+ int length = ctx->num_output_clips + ctx->num_output_culls;
+ if (ctx->stage == MESA_SHADER_VERTEX) {
+ ctx->shader_info->vs.outinfo.clip_dist_mask = (1 << ctx->num_output_clips) - 1;
+ ctx->shader_info->vs.outinfo.cull_dist_mask = (1 << ctx->num_output_culls) - 1;
}
+ if (ctx->stage == MESA_SHADER_TESS_EVAL) {
+ ctx->shader_info->tes.outinfo.clip_dist_mask = (1 << ctx->num_output_clips) - 1;
+ ctx->shader_info->tes.outinfo.cull_dist_mask = (1 << ctx->num_output_culls) - 1;
+ }
+
if (length > 4)
attrib_count = 2;
else
attrib_count = 1;
+ mask_attribs = 1ull << idx;
}
}
si_build_alloca_undef(ctx, ctx->f32, "");
}
}
- ctx->output_mask |= ((1ull << attrib_count) - 1) << idx;
+ ctx->output_mask |= mask_attribs;
}
static void
si_llvm_init_export_args(struct nir_to_llvm_context *ctx,
LLVMValueRef *values,
unsigned target,
- LLVMValueRef *args)
+ struct ac_export_args *args)
{
/* Default is 0xf. Adjusted below depending on the format. */
- args[0] = LLVMConstInt(ctx->i32, target != V_008DFC_SQ_EXP_NULL ? 0xf : 0, false);
+ args->enabled_channels = 0xf;
+
/* Specify whether the EXEC mask represents the valid mask */
- args[1] = LLVMConstInt(ctx->i32, 0, false);
+ args->valid_mask = 0;
/* Specify whether this is the last export */
- args[2] = LLVMConstInt(ctx->i32, 0, false);
+ args->done = 0;
+
/* Specify the target we are exporting */
- args[3] = LLVMConstInt(ctx->i32, target, false);
+ args->target = target;
- args[4] = LLVMConstInt(ctx->i32, 0, false); /* COMPR flag */
- args[5] = LLVMGetUndef(ctx->f32);
- args[6] = LLVMGetUndef(ctx->f32);
- args[7] = LLVMGetUndef(ctx->f32);
- args[8] = LLVMGetUndef(ctx->f32);
+ args->compr = false;
+ args->out[0] = LLVMGetUndef(ctx->f32);
+ args->out[1] = LLVMGetUndef(ctx->f32);
+ args->out[2] = LLVMGetUndef(ctx->f32);
+ args->out[3] = LLVMGetUndef(ctx->f32);
if (!values)
return;
switch(col_format) {
case V_028714_SPI_SHADER_ZERO:
- args[0] = LLVMConstInt(ctx->i32, 0x0, 0);
- args[3] = LLVMConstInt(ctx->i32, V_008DFC_SQ_EXP_NULL, 0);
+ args->enabled_channels = 0; /* writemask */
+ args->target = V_008DFC_SQ_EXP_NULL;
break;
case V_028714_SPI_SHADER_32_R:
- args[0] = LLVMConstInt(ctx->i32, 0x1, 0);
- args[5] = values[0];
+ args->enabled_channels = 1;
+ args->out[0] = values[0];
break;
case V_028714_SPI_SHADER_32_GR:
- args[0] = LLVMConstInt(ctx->i32, 0x3, 0);
- args[5] = values[0];
- args[6] = values[1];
+ args->enabled_channels = 0x3;
+ args->out[0] = values[0];
+ args->out[1] = values[1];
break;
case V_028714_SPI_SHADER_32_AR:
- args[0] = LLVMConstInt(ctx->i32, 0x9, 0);
- args[5] = values[0];
- args[8] = values[3];
+ args->enabled_channels = 0x9;
+ args->out[0] = values[0];
+ args->out[3] = values[3];
break;
case V_028714_SPI_SHADER_FP16_ABGR:
- args[4] = ctx->i32one;
+ args->compr = 1;
for (unsigned chan = 0; chan < 2; chan++) {
LLVMValueRef pack_args[2] = {
};
LLVMValueRef packed;
- packed = ac_emit_llvm_intrinsic(&ctx->ac, "llvm.SI.packf16",
- ctx->i32, pack_args, 2,
- AC_FUNC_ATTR_READNONE);
- args[chan + 5] = packed;
+ packed = ac_build_cvt_pkrtz_f16(&ctx->ac, pack_args);
+ args->out[chan] = packed;
}
break;
case V_028714_SPI_SHADER_UNORM16_ABGR:
for (unsigned chan = 0; chan < 4; chan++) {
- val[chan] = emit_float_saturate(ctx, values[chan], 0, 1);
+ val[chan] = ac_build_clamp(&ctx->ac, values[chan]);
val[chan] = LLVMBuildFMul(ctx->builder, val[chan],
LLVMConstReal(ctx->f32, 65535), "");
val[chan] = LLVMBuildFAdd(ctx->builder, val[chan],
ctx->i32, "");
}
- args[4] = ctx->i32one;
- args[5] = emit_pack_int16(ctx, val[0], val[1]);
- args[6] = emit_pack_int16(ctx, val[2], val[3]);
+ args->compr = 1;
+ args->out[0] = emit_pack_int16(ctx, val[0], val[1]);
+ args->out[1] = emit_pack_int16(ctx, val[2], val[3]);
break;
case V_028714_SPI_SHADER_SNORM16_ABGR:
val[chan] = LLVMBuildFPToSI(ctx->builder, val[chan], ctx->i32, "");
}
- args[4] = ctx->i32one;
- args[5] = emit_pack_int16(ctx, val[0], val[1]);
- args[6] = emit_pack_int16(ctx, val[2], val[3]);
+ args->compr = 1;
+ args->out[0] = emit_pack_int16(ctx, val[0], val[1]);
+ args->out[1] = emit_pack_int16(ctx, val[2], val[3]);
break;
case V_028714_SPI_SHADER_UINT16_ABGR: {
val[chan] = emit_minmax_int(ctx, LLVMIntULT, val[chan], max);
}
- args[4] = ctx->i32one;
- args[5] = emit_pack_int16(ctx, val[0], val[1]);
- args[6] = emit_pack_int16(ctx, val[2], val[3]);
+ args->compr = 1;
+ args->out[0] = emit_pack_int16(ctx, val[0], val[1]);
+ args->out[1] = emit_pack_int16(ctx, val[2], val[3]);
break;
}
val[chan] = emit_minmax_int(ctx, LLVMIntSGT, val[chan], min);
}
- args[4] = ctx->i32one;
- args[5] = emit_pack_int16(ctx, val[0], val[1]);
- args[6] = emit_pack_int16(ctx, val[2], val[3]);
+ args->compr = 1;
+ args->out[0] = emit_pack_int16(ctx, val[0], val[1]);
+ args->out[1] = emit_pack_int16(ctx, val[2], val[3]);
break;
}
default:
case V_028714_SPI_SHADER_32_ABGR:
- memcpy(&args[5], values, sizeof(values[0]) * 4);
+ memcpy(&args->out[0], values, sizeof(values[0]) * 4);
break;
}
} else
- memcpy(&args[5], values, sizeof(values[0]) * 4);
+ memcpy(&args->out[0], values, sizeof(values[0]) * 4);
- for (unsigned i = 5; i < 9; ++i)
- args[i] = to_float(ctx, args[i]);
+ for (unsigned i = 0; i < 4; ++i)
+ args->out[i] = to_float(ctx, args->out[i]);
}
static void
-handle_vs_outputs_post(struct nir_to_llvm_context *ctx)
+handle_vs_outputs_post(struct nir_to_llvm_context *ctx,
+ struct ac_vs_output_info *outinfo)
{
uint32_t param_count = 0;
unsigned target;
unsigned pos_idx, num_pos_exports = 0;
- LLVMValueRef args[9];
- LLVMValueRef pos_args[4][9] = { { 0 } };
+ struct ac_export_args args, pos_args[4] = {};
LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_index_value = NULL;
int i;
- const uint64_t clip_mask = ctx->output_mask & ((1ull << VARYING_SLOT_CLIP_DIST0) |
- (1ull << VARYING_SLOT_CLIP_DIST1) |
- (1ull << VARYING_SLOT_CULL_DIST0) |
- (1ull << VARYING_SLOT_CULL_DIST1));
-
- ctx->shader_info->vs.prim_id_output = 0xffffffff;
- ctx->shader_info->vs.layer_output = 0xffffffff;
- if (clip_mask) {
+
+ memset(outinfo->vs_output_param_offset, AC_EXP_PARAM_UNDEFINED,
+ sizeof(outinfo->vs_output_param_offset));
+
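+ /* Clip/cull distances are exported through the POS+2 (and POS+3) export slots. */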
+ if (ctx->output_mask & (1ull << VARYING_SLOT_CLIP_DIST0)) {
LLVMValueRef slots[8];
unsigned j;
- if (ctx->shader_info->vs.cull_dist_mask)
- ctx->shader_info->vs.cull_dist_mask <<= ctx->num_output_clips;
+ if (outinfo->cull_dist_mask)
+ outinfo->cull_dist_mask <<= ctx->num_output_clips;
i = VARYING_SLOT_CLIP_DIST0;
- for (j = 0; j < ctx->num_output_clips; j++)
+ for (j = 0; j < ctx->num_output_clips + ctx->num_output_culls; j++)
slots[j] = to_float(ctx, LLVMBuildLoad(ctx->builder,
ctx->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
- i = VARYING_SLOT_CULL_DIST0;
- for (j = 0; j < ctx->num_output_culls; j++)
- slots[ctx->num_output_clips + j] = to_float(ctx, LLVMBuildLoad(ctx->builder,
- ctx->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
for (i = ctx->num_output_clips + ctx->num_output_culls; i < 8; i++)
slots[i] = LLVMGetUndef(ctx->f32);
if (ctx->num_output_clips + ctx->num_output_culls > 4) {
target = V_008DFC_SQ_EXP_POS + 3;
- si_llvm_init_export_args(ctx, &slots[4], target, args);
- memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
- args, sizeof(args));
+ si_llvm_init_export_args(ctx, &slots[4], target, &args);
+ memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
+ &args, sizeof(args));
}
target = V_008DFC_SQ_EXP_POS + 2;
- si_llvm_init_export_args(ctx, &slots[0], target, args);
- memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
- args, sizeof(args));
+ si_llvm_init_export_args(ctx, &slots[0], target, &args);
+ memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
+ &args, sizeof(args));
}
if (i == VARYING_SLOT_POS) {
target = V_008DFC_SQ_EXP_POS;
- } else if (i == VARYING_SLOT_CLIP_DIST0 ||
- i == VARYING_SLOT_CLIP_DIST1 ||
- i == VARYING_SLOT_CULL_DIST0 ||
- i == VARYING_SLOT_CULL_DIST1) {
+ } else if (i == VARYING_SLOT_CLIP_DIST0) {
continue;
} else if (i == VARYING_SLOT_PSIZ) {
- ctx->shader_info->vs.writes_pointsize = true;
+ outinfo->writes_pointsize = true;
psize_value = values[0];
continue;
} else if (i == VARYING_SLOT_LAYER) {
- ctx->shader_info->vs.writes_layer = true;
+ outinfo->writes_layer = true;
layer_value = values[0];
- ctx->shader_info->vs.layer_output = param_count;
target = V_008DFC_SQ_EXP_PARAM + param_count;
+ outinfo->vs_output_param_offset[VARYING_SLOT_LAYER] = param_count;
param_count++;
} else if (i == VARYING_SLOT_VIEWPORT) {
- ctx->shader_info->vs.writes_viewport_index = true;
+ outinfo->writes_viewport_index = true;
viewport_index_value = values[0];
continue;
} else if (i == VARYING_SLOT_PRIMITIVE_ID) {
- ctx->shader_info->vs.prim_id_output = param_count;
target = V_008DFC_SQ_EXP_PARAM + param_count;
+ outinfo->vs_output_param_offset[VARYING_SLOT_PRIMITIVE_ID] = param_count;
param_count++;
} else if (i >= VARYING_SLOT_VAR0) {
- ctx->shader_info->vs.export_mask |= 1u << (i - VARYING_SLOT_VAR0);
+ outinfo->export_mask |= 1u << (i - VARYING_SLOT_VAR0);
target = V_008DFC_SQ_EXP_PARAM + param_count;
+ outinfo->vs_output_param_offset[i] = param_count;
param_count++;
}
- si_llvm_init_export_args(ctx, values, target, args);
+ si_llvm_init_export_args(ctx, values, target, &args);
if (target >= V_008DFC_SQ_EXP_POS &&
target <= (V_008DFC_SQ_EXP_POS + 3)) {
- memcpy(pos_args[target - V_008DFC_SQ_EXP_POS],
- args, sizeof(args));
+ memcpy(&pos_args[target - V_008DFC_SQ_EXP_POS],
+ &args, sizeof(args));
} else {
- ac_emit_llvm_intrinsic(&ctx->ac,
- "llvm.SI.export",
- ctx->voidt,
- args, 9, 0);
+ ac_build_export(&ctx->ac, &args);
}
}
/* We need to add the position output manually if it's missing. */
- if (!pos_args[0][0]) {
- pos_args[0][0] = LLVMConstInt(ctx->i32, 0xf, false);
- pos_args[0][1] = ctx->i32zero; /* EXEC mask */
- pos_args[0][2] = ctx->i32zero; /* last export? */
- pos_args[0][3] = LLVMConstInt(ctx->i32, V_008DFC_SQ_EXP_POS, false);
- pos_args[0][4] = ctx->i32zero; /* COMPR flag */
- pos_args[0][5] = ctx->f32zero; /* X */
- pos_args[0][6] = ctx->f32zero; /* Y */
- pos_args[0][7] = ctx->f32zero; /* Z */
- pos_args[0][8] = ctx->f32one; /* W */
- }
-
- uint32_t mask = ((ctx->shader_info->vs.writes_pointsize == true ? 1 : 0) |
- (ctx->shader_info->vs.writes_layer == true ? 4 : 0) |
- (ctx->shader_info->vs.writes_viewport_index == true ? 8 : 0));
+ if (!pos_args[0].out[0]) {
+ pos_args[0].enabled_channels = 0xf;
+ pos_args[0].valid_mask = 0;
+ pos_args[0].done = 0;
+ pos_args[0].target = V_008DFC_SQ_EXP_POS;
+ pos_args[0].compr = 0;
+ pos_args[0].out[0] = ctx->f32zero; /* X */
+ pos_args[0].out[1] = ctx->f32zero; /* Y */
+ pos_args[0].out[2] = ctx->f32zero; /* Z */
+ pos_args[0].out[3] = ctx->f32one; /* W */
+ }
+
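+ /* The auxiliary position export packs psize in X, layer in Z and the
+ * viewport index in W. */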
+ uint32_t mask = ((outinfo->writes_pointsize == true ? 1 : 0) |
+ (outinfo->writes_layer == true ? 4 : 0) |
+ (outinfo->writes_viewport_index == true ? 8 : 0));
if (mask) {
- pos_args[1][0] = LLVMConstInt(ctx->i32, mask, false); /* writemask */
- pos_args[1][1] = ctx->i32zero; /* EXEC mask */
- pos_args[1][2] = ctx->i32zero; /* last export? */
- pos_args[1][3] = LLVMConstInt(ctx->i32, V_008DFC_SQ_EXP_POS + 1, false);
- pos_args[1][4] = ctx->i32zero; /* COMPR flag */
- pos_args[1][5] = ctx->f32zero; /* X */
- pos_args[1][6] = ctx->f32zero; /* Y */
- pos_args[1][7] = ctx->f32zero; /* Z */
- pos_args[1][8] = ctx->f32zero; /* W */
-
- if (ctx->shader_info->vs.writes_pointsize == true)
- pos_args[1][5] = psize_value;
- if (ctx->shader_info->vs.writes_layer == true)
- pos_args[1][7] = layer_value;
- if (ctx->shader_info->vs.writes_viewport_index == true)
- pos_args[1][8] = viewport_index_value;
+ pos_args[1].enabled_channels = mask;
+ pos_args[1].valid_mask = 0;
+ pos_args[1].done = 0;
+ pos_args[1].target = V_008DFC_SQ_EXP_POS + 1;
+ pos_args[1].compr = 0;
+ pos_args[1].out[0] = ctx->f32zero; /* X */
+ pos_args[1].out[1] = ctx->f32zero; /* Y */
+ pos_args[1].out[2] = ctx->f32zero; /* Z */
+ pos_args[1].out[3] = ctx->f32zero; /* W */
+
+ if (outinfo->writes_pointsize == true)
+ pos_args[1].out[0] = psize_value;
+ if (outinfo->writes_layer == true)
+ pos_args[1].out[2] = layer_value;
+ if (outinfo->writes_viewport_index == true)
+ pos_args[1].out[3] = viewport_index_value;
}
for (i = 0; i < 4; i++) {
- if (pos_args[i][0])
+ if (pos_args[i].out[0])
num_pos_exports++;
}
pos_idx = 0;
for (i = 0; i < 4; i++) {
- if (!pos_args[i][0])
+ if (!pos_args[i].out[0])
continue;
/* Specify the target we are exporting */
- pos_args[i][3] = LLVMConstInt(ctx->i32, V_008DFC_SQ_EXP_POS + pos_idx++, false);
+ pos_args[i].target = V_008DFC_SQ_EXP_POS + pos_idx++;
if (pos_idx == num_pos_exports)
- pos_args[i][2] = ctx->i32one;
- ac_emit_llvm_intrinsic(&ctx->ac,
- "llvm.SI.export",
- ctx->voidt,
- pos_args[i], 9, 0);
+ pos_args[i].done = 1;
+ ac_build_export(&ctx->ac, &pos_args[i]);
}
- ctx->shader_info->vs.pos_exports = num_pos_exports;
- ctx->shader_info->vs.param_exports = param_count;
+ outinfo->pos_exports = num_pos_exports;
+ outinfo->param_exports = param_count;
}
static void
-handle_es_outputs_post(struct nir_to_llvm_context *ctx)
+handle_es_outputs_post(struct nir_to_llvm_context *ctx,
+ struct ac_es_output_info *outinfo)
{
int j;
uint64_t max_output_written = 0;
LLVMValueRef *out_ptr = &ctx->outputs[i * 4];
int param_index;
int length = 4;
- int start = 0;
+
if (!(ctx->output_mask & (1ull << i)))
continue;
- if (i == VARYING_SLOT_CLIP_DIST0) {
- length = ctx->num_output_clips;
- } else if (i == VARYING_SLOT_CULL_DIST0) {
- start = ctx->num_output_clips;
- length = ctx->num_output_culls;
- }
+ if (i == VARYING_SLOT_CLIP_DIST0)
+ length = ctx->num_output_clips + ctx->num_output_culls;
+
param_index = shader_io_get_unique_index(i);
- if (param_index > max_output_written)
- max_output_written = param_index;
+ max_output_written = MAX2(param_index + (length > 4), max_output_written);
for (j = 0; j < length; j++) {
LLVMValueRef out_val = LLVMBuildLoad(ctx->builder, out_ptr[j], "");
out_val = LLVMBuildBitCast(ctx->builder, out_val, ctx->i32, "");
- ac_build_tbuffer_store(&ctx->ac,
+ ac_build_buffer_store_dword(&ctx->ac,
ctx->esgs_ring,
out_val, 1,
- LLVMGetUndef(ctx->i32), ctx->es2gs_offset,
- (4 * param_index + j + start) * 4,
- V_008F0C_BUF_DATA_FORMAT_32,
- V_008F0C_BUF_NUM_FORMAT_UINT,
- 0, 0, 1, 1, 0);
+ NULL, ctx->es2gs_offset,
+ (4 * param_index + j) * 4,
+ 1, 1, true, true);
+ }
+ }
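+ /* Each output slot takes a vec4 (16 bytes) per vertex in the ESGS ring. */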
+ outinfo->esgs_itemsize = (max_output_written + 1) * 16;
+}
+
+static void
+handle_ls_outputs_post(struct nir_to_llvm_context *ctx)
+{
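+ /* Store LS outputs to LDS at vertex_id * vertex_dw_stride so the HS can read them. */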
+ LLVMValueRef vertex_id = ctx->rel_auto_id;
+ LLVMValueRef vertex_dw_stride = unpack_param(ctx, ctx->ls_out_layout, 13, 8);
+ LLVMValueRef base_dw_addr = LLVMBuildMul(ctx->builder, vertex_id,
+ vertex_dw_stride, "");
+
+ for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
+ LLVMValueRef *out_ptr = &ctx->outputs[i * 4];
+ int length = 4;
+
+ if (!(ctx->output_mask & (1ull << i)))
+ continue;
+
+ if (i == VARYING_SLOT_CLIP_DIST0)
+ length = ctx->num_output_clips + ctx->num_output_culls;
+ int param = shader_io_get_unique_index(i);
+ mark_tess_output(ctx, false, param);
+ if (length > 4)
+ mark_tess_output(ctx, false, param + 1);
+ LLVMValueRef dw_addr = LLVMBuildAdd(ctx->builder, base_dw_addr,
+ LLVMConstInt(ctx->i32, param * 4, false),
+ "");
+ for (unsigned j = 0; j < length; j++) {
+ lds_store(ctx, dw_addr,
+ LLVMBuildLoad(ctx->builder, out_ptr[j], ""));
+ dw_addr = LLVMBuildAdd(ctx->builder, dw_addr, ctx->i32one, "");
+ }
+ }
+}
+
+struct ac_build_if_state
+{
+ struct nir_to_llvm_context *ctx;
+ LLVMValueRef condition;
+ LLVMBasicBlockRef entry_block;
+ LLVMBasicBlockRef true_block;
+ LLVMBasicBlockRef false_block;
+ LLVMBasicBlockRef merge_block;
+};
+
+static LLVMBasicBlockRef
+ac_build_insert_new_block(struct nir_to_llvm_context *ctx, const char *name)
+{
+ LLVMBasicBlockRef current_block;
+ LLVMBasicBlockRef next_block;
+ LLVMBasicBlockRef new_block;
+
+ /* get current basic block */
+ current_block = LLVMGetInsertBlock(ctx->builder);
+
+ /* check if there's another block after this one */
+ next_block = LLVMGetNextBasicBlock(current_block);
+ if (next_block) {
+ /* insert the new block before the next block */
+ new_block = LLVMInsertBasicBlockInContext(ctx->context, next_block, name);
+ }
+ else {
+ /* append new block after current block */
+ LLVMValueRef function = LLVMGetBasicBlockParent(current_block);
+ new_block = LLVMAppendBasicBlockInContext(ctx->context, function, name);
+ }
+ return new_block;
+}
+
+static void
+ac_nir_build_if(struct ac_build_if_state *ifthen,
+ struct nir_to_llvm_context *ctx,
+ LLVMValueRef condition)
+{
+ LLVMBasicBlockRef block = LLVMGetInsertBlock(ctx->builder);
+
+ memset(ifthen, 0, sizeof *ifthen);
+ ifthen->ctx = ctx;
+ ifthen->condition = condition;
+ ifthen->entry_block = block;
+
+ /* create endif/merge basic block for the phi functions */
+ ifthen->merge_block = ac_build_insert_new_block(ctx, "endif-block");
+
+ /* create/insert true_block before merge_block */
+ ifthen->true_block =
+ LLVMInsertBasicBlockInContext(ctx->context,
+ ifthen->merge_block,
+ "if-true-block");
+
+ /* successive code goes into the true block */
+ LLVMPositionBuilderAtEnd(ctx->builder, ifthen->true_block);
+}
+
+/**
+ * End a conditional.
+ */
+static void
+ac_nir_build_endif(struct ac_build_if_state *ifthen)
+{
+ LLVMBuilderRef builder = ifthen->ctx->builder;
+
+ /* Insert branch to the merge block from current block */
+ LLVMBuildBr(builder, ifthen->merge_block);
+
+ /*
+ * Now patch in the various branch instructions.
+ */
+
+ /* Insert the conditional branch instruction at the end of entry_block */
+ LLVMPositionBuilderAtEnd(builder, ifthen->entry_block);
+ if (ifthen->false_block) {
+ /* we have an else clause */
+ LLVMBuildCondBr(builder, ifthen->condition,
+ ifthen->true_block, ifthen->false_block);
+ }
+ else {
+ /* no else clause */
+ LLVMBuildCondBr(builder, ifthen->condition,
+ ifthen->true_block, ifthen->merge_block);
+ }
+
+ /* Resume building code at end of the ifthen->merge_block */
+ LLVMPositionBuilderAtEnd(builder, ifthen->merge_block);
+}
+
+static void
+write_tess_factors(struct nir_to_llvm_context *ctx)
+{
+ unsigned stride, outer_comps, inner_comps;
+ struct ac_build_if_state if_ctx, inner_if_ctx;
+ LLVMValueRef invocation_id = unpack_param(ctx, ctx->tcs_rel_ids, 8, 5);
+ LLVMValueRef rel_patch_id = unpack_param(ctx, ctx->tcs_rel_ids, 0, 8);
+ unsigned tess_inner_index, tess_outer_index;
+ LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
+ LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
+ int i;
+ emit_barrier(ctx);
+
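+ /* stride = tess factor dwords per patch (outer_comps + inner_comps). */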
+ switch (ctx->options->key.tcs.primitive_mode) {
+ case GL_ISOLINES:
+ stride = 2;
+ outer_comps = 2;
+ inner_comps = 0;
+ break;
+ case GL_TRIANGLES:
+ stride = 4;
+ outer_comps = 3;
+ inner_comps = 1;
+ break;
+ case GL_QUADS:
+ stride = 6;
+ outer_comps = 4;
+ inner_comps = 2;
+ break;
+ default:
+ return;
+ }
+
+ ac_nir_build_if(&if_ctx, ctx,
+ LLVMBuildICmp(ctx->builder, LLVMIntEQ,
+ invocation_id, ctx->i32zero, ""));
+
+ tess_inner_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
+ tess_outer_index = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
+
+ mark_tess_output(ctx, true, tess_inner_index);
+ mark_tess_output(ctx, true, tess_outer_index);
+ lds_base = get_tcs_out_current_patch_data_offset(ctx);
+ lds_inner = LLVMBuildAdd(ctx->builder, lds_base,
+ LLVMConstInt(ctx->i32, tess_inner_index * 4, false), "");
+ lds_outer = LLVMBuildAdd(ctx->builder, lds_base,
+ LLVMConstInt(ctx->i32, tess_outer_index * 4, false), "");
+
+ for (i = 0; i < 4; i++) {
+ inner[i] = LLVMGetUndef(ctx->i32);
+ outer[i] = LLVMGetUndef(ctx->i32);
+ }
+
+ // LINES reversal: the hw expects the isoline tess factors in reverse order
+ if (ctx->options->key.tcs.primitive_mode == GL_ISOLINES) {
+ outer[0] = out[1] = lds_load(ctx, lds_outer);
+ lds_outer = LLVMBuildAdd(ctx->builder, lds_outer,
+ LLVMConstInt(ctx->i32, 1, false), "");
+ outer[1] = out[0] = lds_load(ctx, lds_outer);
+ } else {
+ for (i = 0; i < outer_comps; i++) {
+ outer[i] = out[i] =
+ lds_load(ctx, lds_outer);
+ lds_outer = LLVMBuildAdd(ctx->builder, lds_outer,
+ LLVMConstInt(ctx->i32, 1, false), "");
+ }
+ for (i = 0; i < inner_comps; i++) {
+ inner[i] = out[outer_comps+i] =
+ lds_load(ctx, lds_inner);
+ lds_inner = LLVMBuildAdd(ctx->builder, lds_inner,
+ LLVMConstInt(ctx->i32, 1, false), "");
+ }
+ }
+
+ /* Convert the outputs to vectors for stores. */
+ vec0 = ac_build_gather_values(&ctx->ac, out, MIN2(stride, 4));
+ vec1 = NULL;
+
+ if (stride > 4)
+ vec1 = ac_build_gather_values(&ctx->ac, out + 4, stride - 4);
+
+
+ buffer = ctx->hs_ring_tess_factor;
+ tf_base = ctx->tess_factor_offset;
+ byteoffset = LLVMBuildMul(ctx->builder, rel_patch_id,
+ LLVMConstInt(ctx->i32, 4 * stride, false), "");
+
+ ac_nir_build_if(&inner_if_ctx, ctx,
+ LLVMBuildICmp(ctx->builder, LLVMIntEQ,
+ rel_patch_id, ctx->i32zero, ""));
+
+ /* Store the dynamic HS control word. */
+ ac_build_buffer_store_dword(&ctx->ac, buffer,
+ LLVMConstInt(ctx->i32, 0x80000000, false),
+ 1, ctx->i32zero, tf_base,
+ 0, 1, 0, true, false);
+ ac_nir_build_endif(&inner_if_ctx);
+
+ /* Store the tessellation factors. */
+ ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
+ MIN2(stride, 4), byteoffset, tf_base,
+ 4, 1, 0, true, false);
+ if (vec1)
+ ac_build_buffer_store_dword(&ctx->ac, buffer, vec1,
+ stride - 4, byteoffset, tf_base,
+ 20, 1, 0, true, false);
+
+ // TODO: store to offchip for TES to read - only if TES reads them
+ if (1) {
+ LLVMValueRef inner_vec, outer_vec, tf_outer_offset;
+ LLVMValueRef tf_inner_offset;
+ unsigned param_outer, param_inner;
+
+ param_outer = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_OUTER);
+ tf_outer_offset = get_tcs_tes_buffer_address(ctx, NULL,
+ LLVMConstInt(ctx->i32, param_outer, 0));
+
+ outer_vec = ac_build_gather_values(&ctx->ac, outer,
+ util_next_power_of_two(outer_comps));
+
+ ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, outer_vec,
+ outer_comps, tf_outer_offset,
+ ctx->oc_lds, 0, 1, 0, true, false);
+ if (inner_comps) {
+ param_inner = shader_io_get_unique_index(VARYING_SLOT_TESS_LEVEL_INNER);
+ tf_inner_offset = get_tcs_tes_buffer_address(ctx, NULL,
+ LLVMConstInt(ctx->i32, param_inner, 0));
+
+ inner_vec = inner_comps == 1 ? inner[0] :
+ ac_build_gather_values(&ctx->ac, inner, inner_comps);
+ ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, inner_vec,
+ inner_comps, tf_inner_offset,
+ ctx->oc_lds, 0, 1, 0, true, false);
+ }
+ }
+ ac_nir_build_endif(&if_ctx);
+}
+
static void
+handle_tcs_outputs_post(struct nir_to_llvm_context *ctx)
+{
+ write_tess_factors(ctx);
+}
+
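+/* Fill *args for one color export; returns false if the export can be
+ * dropped (a non-final export with no enabled channels). The caller is
+ * expected to emit the surviving exports with ac_build_export(). */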
+static bool
si_export_mrt_color(struct nir_to_llvm_context *ctx,
- LLVMValueRef *color, unsigned param, bool is_last)
+ LLVMValueRef *color, unsigned param, bool is_last,
+ struct ac_export_args *args)
{
- LLVMValueRef args[9];
/* Export */
si_llvm_init_export_args(ctx, color, param,
args);
if (is_last) {
- args[1] = ctx->i32one; /* whether the EXEC mask is valid */
- args[2] = ctx->i32one; /* DONE bit */
- } else if (args[0] == ctx->i32zero)
- return; /* unnecessary NULL export */
+ args->valid_mask = 1; /* whether the EXEC mask is valid */
+ args->done = 1; /* DONE bit */
+ } else if (!args->enabled_channels)
+ return false; /* unnecessary NULL export */
- ac_emit_llvm_intrinsic(&ctx->ac, "llvm.SI.export",
- ctx->voidt, args, 9, 0);
+ return true;
}
static void
LLVMValueRef depth, LLVMValueRef stencil,
LLVMValueRef samplemask)
{
- LLVMValueRef args[9];
- unsigned mask = 0;
- args[1] = ctx->i32one; /* whether the EXEC mask is valid */
- args[2] = ctx->i32one; /* DONE bit */
- /* Specify the target we are exporting */
- args[3] = LLVMConstInt(ctx->i32, V_008DFC_SQ_EXP_MRTZ, false);
+ struct ac_export_args args;
- args[4] = ctx->i32zero; /* COMP flag */
- args[5] = LLVMGetUndef(ctx->f32); /* R, depth */
- args[6] = LLVMGetUndef(ctx->f32); /* G, stencil test val[0:7], stencil op val[8:15] */
- args[7] = LLVMGetUndef(ctx->f32); /* B, sample mask */
- args[8] = LLVMGetUndef(ctx->f32); /* A, alpha to mask */
+ args.enabled_channels = 0;
+ args.valid_mask = 1;
+ args.done = 1;
+ args.target = V_008DFC_SQ_EXP_MRTZ;
+ args.compr = false;
+
+ args.out[0] = LLVMGetUndef(ctx->f32); /* R, depth */
+ args.out[1] = LLVMGetUndef(ctx->f32); /* G, stencil test val[0:7], stencil op val[8:15] */
+ args.out[2] = LLVMGetUndef(ctx->f32); /* B, sample mask */
+ args.out[3] = LLVMGetUndef(ctx->f32); /* A, alpha to mask */
if (depth) {
- args[5] = depth;
- mask |= 0x1;
+ args.out[0] = depth;
+ args.enabled_channels |= 0x1;
}
if (stencil) {
- args[6] = stencil;
- mask |= 0x2;
+ args.out[1] = stencil;
+ args.enabled_channels |= 0x2;
}
if (samplemask) {
- args[7] = samplemask;
- mask |= 0x04;
+ args.out[2] = samplemask;
+ args.enabled_channels |= 0x4;
}
/* SI (except OLAND) has a bug that it only looks
* at the X writemask component. */
if (ctx->options->chip_class == SI &&
ctx->options->family != CHIP_OLAND)
- mask |= 0x01;
+ args.enabled_channels |= 0x1;
- args[0] = LLVMConstInt(ctx->i32, mask, false);
- ac_emit_llvm_intrinsic(&ctx->ac, "llvm.SI.export",
- ctx->voidt, args, 9, 0);
+ ac_build_export(&ctx->ac, &args);
}
static void
{
unsigned index = 0;
LLVMValueRef depth = NULL, stencil = NULL, samplemask = NULL;
+ struct ac_export_args color_args[8];
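+ /* Collect the color exports in color_args[] first; they are emitted
+ * together below, with any depth/stencil/sample-mask export last. */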
for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
LLVMValueRef values[4];
ctx->shader_info->fs.writes_stencil = true;
stencil = to_float(ctx, LLVMBuildLoad(ctx->builder,
ctx->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
+ } else if (i == FRAG_RESULT_SAMPLE_MASK) {
+ ctx->shader_info->fs.writes_sample_mask = true;
+ samplemask = to_float(ctx, LLVMBuildLoad(ctx->builder,
+ ctx->outputs[radeon_llvm_reg_index_soa(i, 0)], ""));
} else {
bool last = false;
for (unsigned j = 0; j < 4; j++)
values[j] = to_float(ctx, LLVMBuildLoad(ctx->builder,
ctx->outputs[radeon_llvm_reg_index_soa(i, j)], ""));
- if (!ctx->shader_info->fs.writes_z && !ctx->shader_info->fs.writes_stencil)
+ if (!ctx->shader_info->fs.writes_z && !ctx->shader_info->fs.writes_stencil && !ctx->shader_info->fs.writes_sample_mask)
last = ctx->output_mask <= ((1ull << (i + 1)) - 1);
- si_export_mrt_color(ctx, values, V_008DFC_SQ_EXP_MRT + index, last);
- index++;
+ bool ret = si_export_mrt_color(ctx, values, V_008DFC_SQ_EXP_MRT + (i - FRAG_RESULT_DATA0), last, &color_args[index]);
+ if (ret)
+ index++;
}
}
- if (depth || stencil)
+ for (unsigned i = 0; i < index; i++)
+ ac_build_export(&ctx->ac, &color_args[i]);
+ if (depth || stencil || samplemask)
si_export_mrt_z(ctx, depth, stencil, samplemask);
- else if (!index)
- si_export_mrt_color(ctx, NULL, V_008DFC_SQ_EXP_NULL, true);
+ else if (!index) {
+ si_export_mrt_color(ctx, NULL, V_008DFC_SQ_EXP_NULL, true, &color_args[0]);
+ ac_build_export(&ctx->ac, &color_args[0]);
+ }
ctx->shader_info->fs.output_mask = index ? ((1ull << index) - 1) : 0;
}
static void
emit_gs_epilogue(struct nir_to_llvm_context *ctx)
{
- ac_emit_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
+ ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, ctx->gs_wave_id);
}
static void
{
switch (ctx->stage) {
case MESA_SHADER_VERTEX:
- if (ctx->options->key.vs.as_es)
- handle_es_outputs_post(ctx);
+ if (ctx->options->key.vs.as_ls)
+ handle_ls_outputs_post(ctx);
+ else if (ctx->options->key.vs.as_es)
+ handle_es_outputs_post(ctx, &ctx->shader_info->vs.es_info);
else
- handle_vs_outputs_post(ctx);
+ handle_vs_outputs_post(ctx, &ctx->shader_info->vs.outinfo);
break;
case MESA_SHADER_FRAGMENT:
handle_fs_outputs_post(ctx);
break;
case MESA_SHADER_GEOMETRY:
emit_gs_epilogue(ctx);
break;
+ case MESA_SHADER_TESS_CTRL:
+ handle_tcs_outputs_post(ctx);
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ if (ctx->options->key.tes.as_es)
+ handle_es_outputs_post(ctx, &ctx->shader_info->tes.es_info);
+ else
+ handle_vs_outputs_post(ctx, &ctx->shader_info->tes.outinfo);
+ break;
default:
break;
}
LLVMDisposePassManager(passmgr);
}
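+/* Let ac_optimize_vs_outputs drop PARAM exports whose values turn out to
+ * be constant. Only the stage that runs as the hardware VS (vertex or
+ * tess eval, when not compiled as ES/LS) has such exports. */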
+static void
+ac_nir_eliminate_const_vs_outputs(struct nir_to_llvm_context *ctx)
+{
+ struct ac_vs_output_info *outinfo;
+
+ if (ctx->stage == MESA_SHADER_FRAGMENT ||
+ ctx->stage == MESA_SHADER_COMPUTE ||
+ ctx->stage == MESA_SHADER_TESS_CTRL ||
+ ctx->stage == MESA_SHADER_GEOMETRY)
+ return;
+
+ if (ctx->stage == MESA_SHADER_VERTEX) {
+ if (ctx->options->key.vs.as_ls ||
+ ctx->options->key.vs.as_es)
+ return;
+ outinfo = &ctx->shader_info->vs.outinfo;
+ }
+
+ if (ctx->stage == MESA_SHADER_TESS_EVAL) {
+ if (ctx->options->key.tes.as_es)
+ return;
+ outinfo = &ctx->shader_info->tes.outinfo;
+ }
+
+ ac_optimize_vs_outputs(&ctx->ac,
+ ctx->main_function,
+ outinfo->vs_output_param_offset,
+ VARYING_SLOT_MAX,
+ &outinfo->param_exports);
+}
+
static void
ac_setup_rings(struct nir_to_llvm_context *ctx)
{
- if (ctx->stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_es) {
- ctx->esgs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, ctx->i32one);
+ if ((ctx->stage == MESA_SHADER_VERTEX && ctx->options->key.vs.as_es) ||
+ (ctx->stage == MESA_SHADER_TESS_EVAL && ctx->options->key.tes.as_es)) {
+ ctx->esgs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_ESGS_VS, false));
}
if (ctx->is_gs_copy_shader) {
- ctx->gsvs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, 3, false));
+ ctx->gsvs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_GSVS_VS, false));
}
if (ctx->stage == MESA_SHADER_GEOMETRY) {
LLVMValueRef tmp;
- ctx->esgs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, 2, false));
- ctx->gsvs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, 4, false));
+ ctx->esgs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_ESGS_GS, false));
+ ctx->gsvs_ring = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_GSVS_GS, false));
ctx->gsvs_ring = LLVMBuildBitCast(ctx->builder, ctx->gsvs_ring, ctx->v4i32, "");
ctx->gsvs_ring = LLVMBuildBitCast(ctx->builder, ctx->gsvs_ring, ctx->v16i8, "");
}
+
+ if (ctx->stage == MESA_SHADER_TESS_CTRL ||
+ ctx->stage == MESA_SHADER_TESS_EVAL) {
+ ctx->hs_ring_tess_offchip = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_HS_TESS_OFFCHIP, false));
+ ctx->hs_ring_tess_factor = ac_build_indexed_load_const(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->i32, RING_HS_TESS_FACTOR, false));
+ }
+}
+
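+/* Upper bound on threads per workgroup, used to annotate the generated
+ * function: a TCS threadgroup can hold several patches (up to 128 lanes
+ * on CIK+, a single 64-lane wave on SI), GS runs as one wave, and compute
+ * uses the declared local size; other stages return 0. */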
+static unsigned
+ac_nir_get_max_workgroup_size(enum chip_class chip_class,
+ struct nir_shader *nir)
+{
+ switch (nir->stage) {
+ case MESA_SHADER_TESS_CTRL:
+ return chip_class >= CIK ? 128 : 64;
+ case MESA_SHADER_GEOMETRY:
+ return 64;
+ case MESA_SHADER_COMPUTE:
+ break;
+ default:
+ return 0;
+ }
+
+ unsigned max_workgroup_size = nir->info.cs.local_size[0] *
+ nir->info.cs.local_size[1] *
+ nir->info.cs.local_size[2];
+ return max_workgroup_size;
}
static
memset(shader_info, 0, sizeof(*shader_info));
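+ /* Scan the NIR up front to record which system values and resources
+ * the shader uses. */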
+ ac_nir_shader_info_pass(nir, options, &shader_info->info);
+
LLVMSetTarget(ctx.module, options->supports_spill ? "amdgcn-mesa-mesa3d" : "amdgcn--");
+
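+ /* Give the module the target machine's data layout so LLVM uses the
+ * right type sizes and alignments. */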
+ LLVMTargetDataRef data_layout = LLVMCreateTargetDataLayout(tm);
+ char *data_layout_str = LLVMCopyStringRepOfTargetData(data_layout);
+ LLVMSetDataLayout(ctx.module, data_layout_str);
+ LLVMDisposeTargetData(data_layout);
+ LLVMDisposeMessage(data_layout_str);
+
setup_types(&ctx);
ctx.builder = LLVMCreateBuilderInContext(ctx.context);
ctx.ac.builder = ctx.builder;
ctx.stage = nir->stage;
+ ctx.max_workgroup_size = ac_nir_get_max_workgroup_size(ctx.options->chip_class, nir);
for (i = 0; i < AC_UD_MAX_SETS; i++)
shader_info->user_sgprs_locs.descriptor_sets[i].sgpr_idx = -1;
} else if (nir->stage == MESA_SHADER_GEOMETRY) {
ctx.gs_next_vertex = ac_build_alloca(&ctx, ctx.i32, "gs_next_vertex");
- ctx.gs_max_out_vertices = nir->info->gs.vertices_out;
+ ctx.gs_max_out_vertices = nir->info.gs.vertices_out;
+ } else if (nir->stage == MESA_SHADER_TESS_EVAL) {
+ ctx.tes_primitive_mode = nir->info.tess.primitive_mode;
}
ac_setup_rings(&ctx);
if (nir->stage == MESA_SHADER_FRAGMENT)
handle_fs_inputs_pre(&ctx, nir);
+ ctx.num_output_clips = nir->info.clip_distance_array_size;
+ ctx.num_output_culls = nir->info.cull_distance_array_size;
+
nir_foreach_variable(variable, &nir->outputs)
handle_shader_output_decl(&ctx, variable);
LLVMBuildRetVoid(ctx.builder);
ac_llvm_finalize_module(&ctx);
+
+ ac_nir_eliminate_const_vs_outputs(&ctx);
free(ctx.locals);
ralloc_free(ctx.defs);
ralloc_free(ctx.phis);
if (nir->stage == MESA_SHADER_GEOMETRY) {
- shader_info->gs.gsvs_vertex_size = util_bitcount64(ctx.output_mask) * 16;
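+ /* More than 4 clip+cull distances spill into a second 16-byte slot
+ * per vertex in the GSVS ring. */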
+ unsigned addclip = ctx.num_output_clips + ctx.num_output_culls > 4;
+ shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
- nir->info->gs.vertices_out;
+ nir->info.gs.vertices_out;
+ } else if (nir->stage == MESA_SHADER_TESS_CTRL) {
+ shader_info->tcs.outputs_written = ctx.tess_outputs_written;
+ shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
+ } else if (nir->stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
+ shader_info->vs.outputs_written = ctx.tess_outputs_written;
}
+
return ctx.module;
}
switch (nir->stage) {
case MESA_SHADER_COMPUTE:
for (int i = 0; i < 3; ++i)
- shader_info->cs.block_size[i] = nir->info->cs.local_size[i];
+ shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
break;
case MESA_SHADER_FRAGMENT:
- shader_info->fs.early_fragment_test = nir->info->fs.early_fragment_tests;
+ shader_info->fs.early_fragment_test = nir->info.fs.early_fragment_tests;
break;
case MESA_SHADER_GEOMETRY:
- shader_info->gs.vertices_in = nir->info->gs.vertices_in;
- shader_info->gs.vertices_out = nir->info->gs.vertices_out;
- shader_info->gs.output_prim = nir->info->gs.output_primitive;
- shader_info->gs.invocations = nir->info->gs.invocations;
+ shader_info->gs.vertices_in = nir->info.gs.vertices_in;
+ shader_info->gs.vertices_out = nir->info.gs.vertices_out;
+ shader_info->gs.output_prim = nir->info.gs.output_primitive;
+ shader_info->gs.invocations = nir->info.gs.invocations;
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ shader_info->tes.primitive_mode = nir->info.tess.primitive_mode;
+ shader_info->tes.spacing = nir->info.tess.spacing;
+ shader_info->tes.ccw = nir->info.tess.ccw;
+ shader_info->tes.point_mode = nir->info.tess.point_mode;
+ shader_info->tes.as_es = options->key.tes.as_es;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ shader_info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
break;
case MESA_SHADER_VERTEX:
shader_info->vs.as_es = options->key.vs.as_es;
+ shader_info->vs.as_ls = options->key.vs.as_ls;
+ /* In LS mode we need a vgpr_comp_cnt of at least 1; the invocation ID needs 3, which is handled elsewhere. */
+ if (options->key.vs.as_ls)
+ shader_info->vs.vgpr_comp_cnt = MAX2(1, shader_info->vs.vgpr_comp_cnt);
break;
default:
break;
args[8] = ctx->i32zero; /* TFE */
int idx = 0;
- int clip_cull_slot = -1;
+
for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
int length = 4;
- int start = 0;
int slot = idx;
int slot_inc = 1;
if (!(ctx->output_mask & (1ull << i)))
continue;
- if (i == VARYING_SLOT_CLIP_DIST1 ||
- i == VARYING_SLOT_CULL_DIST1)
- continue;
-
- if (i == VARYING_SLOT_CLIP_DIST0 ||
- i == VARYING_SLOT_CULL_DIST0) {
+ if (i == VARYING_SLOT_CLIP_DIST0) {
/* unpack clip and cull from a single set of slots */
- if (clip_cull_slot == -1) {
- clip_cull_slot = idx;
- if (ctx->num_output_clips + ctx->num_output_culls > 4)
- slot_inc = 2;
- } else {
- slot = clip_cull_slot;
- slot_inc = 0;
- }
- if (i == VARYING_SLOT_CLIP_DIST0)
- length = ctx->num_output_clips;
- if (i == VARYING_SLOT_CULL_DIST0) {
- start = ctx->num_output_clips;
- length = ctx->num_output_culls;
- }
+ length = ctx->num_output_clips + ctx->num_output_culls;
+ if (length > 4)
+ slot_inc = 2;
}
for (unsigned j = 0; j < length; j++) {
LLVMValueRef value;
args[2] = LLVMConstInt(ctx->i32,
- (slot * 4 + j + start) *
+ (slot * 4 + j) *
ctx->gs_max_out_vertices * 16 * 4, false);
- value = ac_emit_llvm_intrinsic(&ctx->ac,
- "llvm.SI.buffer.load.dword.i32.i32",
- ctx->i32, args, 9,
- AC_FUNC_ATTR_READONLY);
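+ /* The llvm.SI.* intrinsics are legacy: AC_FUNC_ATTR_LEGACY tells
+ * ac_build_intrinsic to apply attributes the old way (on the
+ * declaration, not the call site). */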
+ value = ac_build_intrinsic(&ctx->ac,
+ "llvm.SI.buffer.load.dword.i32.i32",
+ ctx->i32, args, 9,
+ AC_FUNC_ATTR_READONLY |
+ AC_FUNC_ATTR_LEGACY);
LLVMBuildStore(ctx->builder,
to_float(ctx, value), ctx->outputs[radeon_llvm_reg_index_soa(i, j)]);
}
idx += slot_inc;
}
- handle_vs_outputs_post(ctx);
+ handle_vs_outputs_post(ctx, &ctx->shader_info->vs.outinfo);
}
void ac_create_gs_copy_shader(LLVMTargetMachineRef tm,
create_function(&ctx);
- ctx.gs_max_out_vertices = geom_shader->info->gs.vertices_out;
+ ctx.gs_max_out_vertices = geom_shader->info.gs.vertices_out;
ac_setup_rings(&ctx);
+ ctx.num_output_clips = geom_shader->info.clip_distance_array_size;
+ ctx.num_output_culls = geom_shader->info.cull_distance_array_size;
+
nir_foreach_variable(variable, &geom_shader->outputs)
handle_shader_output_decl(&ctx, variable);