#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Transforms/Scalar.h>
-#if HAVE_LLVM >= 0x0700
#include <llvm-c/Transforms/Utils.h>
-#endif
#include "sid.h"
#include "gfx9d.h"
LLVMValueRef gs_vtx_offset[6];
LLVMValueRef esgs_ring;
- LLVMValueRef gsvs_ring;
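+ /* GSVS ring descriptors, one per GS vertex stream. */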
+ LLVMValueRef gsvs_ring[4];
LLVMValueRef hs_ring_tess_offchip;
LLVMValueRef hs_ring_tess_factor;
LLVMValueRef persp_sample, persp_center, persp_centroid;
LLVMValueRef linear_sample, linear_center, linear_centroid;
+ /* Streamout */
+ LLVMValueRef streamout_buffers;
+ LLVMValueRef streamout_write_idx;
+ LLVMValueRef streamout_config;
+ LLVMValueRef streamout_offset[4];
+
gl_shader_stage stage;
LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];
uint64_t output_mask;
bool is_gs_copy_shader;
- LLVMValueRef gs_next_vertex;
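+ /* Emitted-vertex counters, one per GS vertex stream. */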
+ LLVMValueRef gs_next_vertex[4];
unsigned gs_max_out_vertices;
unsigned tes_primitive_mode;
/* Make sure that the data fits in LDS. This assumes the shaders only
* use LDS for the inputs and outputs.
*/
- hardware_lds_size = ctx->options->chip_class >= CIK ? 65536 : 32768;
+ hardware_lds_size = 32768;
+
+ /* Looks like STONEY hangs if we use more than 32 KiB LDS in a single
+ * threadgroup, even though there is more than 32 KiB LDS.
+ *
+ * Test: dEQP-VK.tessellation.shader_input_output.barrier
+ */
+ if (ctx->options->chip_class >= CIK && ctx->options->family != CHIP_STONEY)
+ hardware_lds_size = 65536;
+
num_patches = MIN2(num_patches, hardware_lds_size / (input_patch_size + output_patch_size));
/* Make sure the output data fits in the offchip buffer */
num_patches = MIN2(num_patches, (ctx->options->tess_offchip_block_dw_size * 4) / output_patch_size);
patch0_patch_data_offset);
}
-#define MAX_ARGS 23
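+/* Raised to leave room for additional arguments such as the streamout SGPRs. */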
+#define MAX_ARGS 64
struct arg_info {
LLVMTypeRef types[MAX_ARGS];
LLVMValueRef *assign[MAX_ARGS];
static void
-set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx, uint8_t num_sgprs,
- uint32_t indirect_offset)
+set_loc(struct radv_userdata_info *ud_info, uint8_t *sgpr_idx,
+ uint8_t num_sgprs)
{
ud_info->sgpr_idx = *sgpr_idx;
ud_info->num_sgprs = num_sgprs;
- ud_info->indirect = indirect_offset > 0;
- ud_info->indirect_offset = indirect_offset;
*sgpr_idx += num_sgprs;
}
&ctx->shader_info->user_sgprs_locs.shader_data[idx];
assert(ud_info);
- set_loc(ud_info, sgpr_idx, num_sgprs, 0);
+ set_loc(ud_info, sgpr_idx, num_sgprs);
}
static void
set_loc_shader_ptr(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
- bool use_32bit_pointers = HAVE_32BIT_POINTERS &&
- idx != AC_UD_SCRATCH_RING_OFFSETS;
+ bool use_32bit_pointers = idx != AC_UD_SCRATCH_RING_OFFSETS;
set_loc_shader(ctx, idx, sgpr_idx, use_32bit_pointers ? 1 : 2);
}
static void
-set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx,
- uint32_t indirect_offset)
+set_loc_desc(struct radv_shader_context *ctx, int idx, uint8_t *sgpr_idx)
{
struct radv_userdata_locations *locs =
&ctx->shader_info->user_sgprs_locs;
struct radv_userdata_info *ud_info = &locs->descriptor_sets[idx];
assert(ud_info);
- set_loc(ud_info, sgpr_idx, HAVE_32BIT_POINTERS ? 1 : 2, indirect_offset);
- if (indirect_offset == 0)
- locs->descriptor_sets_enabled |= 1 << idx;
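+ /* Descriptor set pointers are always 32-bit now, so they take one SGPR. */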
+ set_loc(ud_info, sgpr_idx, 1);
+
+ locs->descriptor_sets_enabled |= 1 << idx;
}
struct user_sgpr_info {
uint8_t count = 0;
if (ctx->shader_info->info.vs.has_vertex_buffers)
- count += HAVE_32BIT_POINTERS ? 1 : 2;
+ count++;
count += ctx->shader_info->info.vs.needs_draw_id ? 3 : 2;
return count;
user_sgpr_count++;
if (ctx->shader_info->info.loads_push_constants)
- user_sgpr_count += HAVE_32BIT_POINTERS ? 1 : 2;
+ user_sgpr_count++;
+
+ if (ctx->streamout_buffers)
+ user_sgpr_count++;
- uint32_t available_sgprs = ctx->options->chip_class >= GFX9 ? 32 : 16;
+ uint32_t available_sgprs = ctx->options->chip_class >= GFX9 &&
+			    stage != MESA_SHADER_COMPUTE ? 32 : 16;
uint32_t remaining_sgprs = available_sgprs - user_sgpr_count;
uint32_t num_desc_set =
util_bitcount(ctx->shader_info->info.desc_set_used_mask);
- if (remaining_sgprs / (HAVE_32BIT_POINTERS ? 1 : 2) < num_desc_set) {
+ if (remaining_sgprs < num_desc_set) {
user_sgpr_info->indirect_all_descriptor_sets = true;
}
}
static void
declare_global_input_sgprs(struct radv_shader_context *ctx,
- gl_shader_stage stage,
- bool has_previous_stage,
- gl_shader_stage previous_stage,
const struct user_sgpr_info *user_sgpr_info,
struct arg_info *args,
LLVMValueRef *desc_sets)
{
LLVMTypeRef type = ac_array_in_const32_addr_space(ctx->ac.i8);
- unsigned num_sets = ctx->options->layout ?
- ctx->options->layout->num_sets : 0;
- unsigned stage_mask = 1 << stage;
-
- if (has_previous_stage)
- stage_mask |= 1 << previous_stage;
/* 1 for each descriptor set */
if (!user_sgpr_info->indirect_all_descriptor_sets) {
- for (unsigned i = 0; i < num_sets; ++i) {
- if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
- ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
- add_array_arg(args, type,
- &ctx->descriptor_sets[i]);
- }
+ uint32_t mask = ctx->shader_info->info.desc_set_used_mask;
+
+ while (mask) {
+ int i = u_bit_scan(&mask);
+
+ add_array_arg(args, type, &ctx->descriptor_sets[i]);
}
} else {
add_array_arg(args, ac_array_in_const32_addr_space(type), desc_sets);
/* 1 for push constants and dynamic descriptors */
add_array_arg(args, type, &ctx->abi.push_constants);
}
+
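+ /* Pointer to the streamout buffer descriptors, passed as an extra user SGPR. */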
+ if (ctx->shader_info->info.so.num_outputs) {
+ add_arg(args, ARG_SGPR,
+ ac_array_in_const32_addr_space(ctx->ac.v4i32),
+ &ctx->streamout_buffers);
+ }
}
static void
}
}
+static void
+declare_streamout_sgprs(struct radv_shader_context *ctx, gl_shader_stage stage,
+ struct arg_info *args)
+{
+ int i;
+
+ /* Streamout SGPRs. */
+ if (ctx->shader_info->info.so.num_outputs) {
+ assert(stage == MESA_SHADER_VERTEX ||
+ stage == MESA_SHADER_TESS_EVAL);
+
+ if (stage != MESA_SHADER_TESS_EVAL) {
+ add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_config);
+ } else {
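+ /* For TES the caller has already declared this SGPR (the NULL
+ * placeholder just before the call); repurpose that slot for the
+ * streamout config. */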
+ args->assign[args->count - 1] = &ctx->streamout_config;
+ args->types[args->count - 1] = ctx->ac.i32;
+ }
+
+ add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_write_idx);
+ }
+
+ /* A streamout buffer offset is loaded if the stride is non-zero. */
+ for (i = 0; i < 4; i++) {
+ if (!ctx->shader_info->info.so.strides[i])
+ continue;
+
+ add_arg(args, ARG_SGPR, ctx->ac.i32, &ctx->streamout_offset[i]);
+ }
+}
+
static void
declare_tes_input_vgprs(struct radv_shader_context *ctx, struct arg_info *args)
{
}
static void
-set_global_input_locs(struct radv_shader_context *ctx, gl_shader_stage stage,
- bool has_previous_stage, gl_shader_stage previous_stage,
+set_global_input_locs(struct radv_shader_context *ctx,
const struct user_sgpr_info *user_sgpr_info,
LLVMValueRef desc_sets, uint8_t *user_sgpr_idx)
{
- unsigned num_sets = ctx->options->layout ?
- ctx->options->layout->num_sets : 0;
- unsigned stage_mask = 1 << stage;
-
- if (has_previous_stage)
- stage_mask |= 1 << previous_stage;
+ uint32_t mask = ctx->shader_info->info.desc_set_used_mask;
if (!user_sgpr_info->indirect_all_descriptor_sets) {
- for (unsigned i = 0; i < num_sets; ++i) {
- if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
- ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
- set_loc_desc(ctx, i, user_sgpr_idx, 0);
- } else
- ctx->descriptor_sets[i] = NULL;
+ while (mask) {
+ int i = u_bit_scan(&mask);
+
+ set_loc_desc(ctx, i, user_sgpr_idx);
}
} else {
set_loc_shader_ptr(ctx, AC_UD_INDIRECT_DESCRIPTOR_SETS,
user_sgpr_idx);
- for (unsigned i = 0; i < num_sets; ++i) {
- if ((ctx->shader_info->info.desc_set_used_mask & (1 << i)) &&
- ctx->options->layout->set[i].layout->shader_stages & stage_mask) {
- set_loc_desc(ctx, i, user_sgpr_idx, i * 8);
- ctx->descriptor_sets[i] =
- ac_build_load_to_sgpr(&ctx->ac,
- desc_sets,
- LLVMConstInt(ctx->ac.i32, i, false));
-
- } else
- ctx->descriptor_sets[i] = NULL;
+ while (mask) {
+ int i = u_bit_scan(&mask);
+
+ ctx->descriptor_sets[i] =
+ ac_build_load_to_sgpr(&ctx->ac, desc_sets,
+ LLVMConstInt(ctx->ac.i32, i, false));
+
}
+
ctx->shader_info->need_indirect_descriptor_sets = true;
}
if (ctx->shader_info->info.loads_push_constants) {
set_loc_shader_ptr(ctx, AC_UD_PUSH_CONSTANTS, user_sgpr_idx);
}
+
+ if (ctx->streamout_buffers) {
+ set_loc_shader_ptr(ctx, AC_UD_STREAMOUT_BUFFERS,
+ user_sgpr_idx);
+ }
}
static void
switch (stage) {
case MESA_SHADER_COMPUTE:
- declare_global_input_sgprs(ctx, stage, has_previous_stage,
- previous_stage, &user_sgpr_info,
- &args, &desc_sets);
+ declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
+ &desc_sets);
if (ctx->shader_info->info.cs.uses_grid_size) {
add_arg(&args, ARG_SGPR, ctx->ac.v3i32,
&ctx->abi.local_invocation_ids);
break;
case MESA_SHADER_VERTEX:
- declare_global_input_sgprs(ctx, stage, has_previous_stage,
- previous_stage, &user_sgpr_info,
- &args, &desc_sets);
+ declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
+ &desc_sets);
+
declare_vs_specific_input_sgprs(ctx, stage, has_previous_stage,
previous_stage, &args);
if (needs_view_index)
add_arg(&args, ARG_SGPR, ctx->ac.i32,
&ctx->abi.view_index);
- if (ctx->options->key.vs.as_es)
+ if (ctx->options->key.vs.as_es) {
add_arg(&args, ARG_SGPR, ctx->ac.i32,
&ctx->es2gs_offset);
+ } else if (ctx->options->key.vs.as_ls) {
+ /* no extra parameters */
+ } else {
+ declare_streamout_sgprs(ctx, stage, &args);
+ }
declare_vs_input_vgprs(ctx, &args);
break;
add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
- declare_global_input_sgprs(ctx, stage,
- has_previous_stage,
- previous_stage,
- &user_sgpr_info, &args,
+ declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
&desc_sets);
+
declare_vs_specific_input_sgprs(ctx, stage,
has_previous_stage,
previous_stage, &args);
declare_vs_input_vgprs(ctx, &args);
} else {
- declare_global_input_sgprs(ctx, stage,
- has_previous_stage,
- previous_stage,
- &user_sgpr_info, &args,
+ declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
&desc_sets);
if (needs_view_index)
}
break;
case MESA_SHADER_TESS_EVAL:
- declare_global_input_sgprs(ctx, stage, has_previous_stage,
- previous_stage, &user_sgpr_info,
- &args, &desc_sets);
+ declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
+ &desc_sets);
if (needs_view_index)
add_arg(&args, ARG_SGPR, ctx->ac.i32,
&ctx->es2gs_offset);
} else {
add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL);
+ declare_streamout_sgprs(ctx, stage, &args);
add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->oc_lds);
}
declare_tes_input_vgprs(ctx, &args);
add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
add_arg(&args, ARG_SGPR, ctx->ac.i32, NULL); // unknown
- declare_global_input_sgprs(ctx, stage,
- has_previous_stage,
- previous_stage,
- &user_sgpr_info, &args,
+ declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
&desc_sets);
if (previous_stage != MESA_SHADER_TESS_EVAL) {
declare_tes_input_vgprs(ctx, &args);
}
} else {
- declare_global_input_sgprs(ctx, stage,
- has_previous_stage,
- previous_stage,
- &user_sgpr_info, &args,
+ declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
&desc_sets);
if (needs_view_index)
}
break;
case MESA_SHADER_FRAGMENT:
- declare_global_input_sgprs(ctx, stage, has_previous_stage,
- previous_stage, &user_sgpr_info,
- &args, &desc_sets);
+ declare_global_input_sgprs(ctx, &user_sgpr_info, &args,
+ &desc_sets);
add_arg(&args, ARG_SGPR, ctx->ac.i32, &ctx->abi.prim_mask);
add_arg(&args, ARG_VGPR, ctx->ac.v2i32, &ctx->persp_sample);
&user_sgpr_idx);
if (ctx->options->supports_spill) {
ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
- LLVMPointerType(ctx->ac.i8, AC_CONST_ADDR_SPACE),
+ LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
NULL, 0, AC_FUNC_ATTR_READNONE);
ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
ac_array_in_const_addr_space(ctx->ac.v4i32), "");
if (has_previous_stage)
user_sgpr_idx = 0;
- set_global_input_locs(ctx, stage, has_previous_stage, previous_stage,
- &user_sgpr_info, desc_sets, &user_sgpr_idx);
+ set_global_input_locs(ctx, &user_sgpr_info, desc_sets, &user_sgpr_idx);
switch (stage) {
case MESA_SHADER_COMPUTE:
case 8:
sample_pos_offset = 7;
break;
- case 16:
- sample_pos_offset = 15;
- break;
default:
break;
}
{
LLVMValueRef gs_next_vertex;
LLVMValueRef can_emit;
- int idx;
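+ /* Component slot index within this stream's portion of the GSVS ring. */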
+ unsigned offset = 0;
struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
- assert(stream == 0);
-
/* Write vertex attribute values to GSVS ring */
gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
- ctx->gs_next_vertex,
+ ctx->gs_next_vertex[stream],
"");
/* If this thread has already emitted the declared maximum number of
LLVMConstInt(ctx->ac.i32, ctx->gs_max_out_vertices, false), "");
ac_build_kill_if_false(&ctx->ac, can_emit);
- /* loop num outputs */
- idx = 0;
for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
unsigned output_usage_mask =
ctx->shader_info->info.gs.output_usage_mask[i];
+ uint8_t output_stream =
+ ctx->shader_info->info.gs.output_streams[i];
LLVMValueRef *out_ptr = &addrs[i * 4];
- int length = 4;
- int slot = idx;
- int slot_inc = 1;
+ int length = util_last_bit(output_usage_mask);
- if (!(ctx->output_mask & (1ull << i)))
+ if (!(ctx->output_mask & (1ull << i)) ||
+ output_stream != stream)
continue;
- if (i == VARYING_SLOT_CLIP_DIST0) {
- /* pack clip and cull into a single set of slots */
- length = util_last_bit(output_usage_mask);
- if (length > 4)
- slot_inc = 2;
- }
-
for (unsigned j = 0; j < length; j++) {
if (!(output_usage_mask & (1 << j)))
continue;
LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
out_ptr[j], "");
- LLVMValueRef voffset = LLVMConstInt(ctx->ac.i32, (slot * 4 + j) * ctx->gs_max_out_vertices, false);
+ LLVMValueRef voffset =
+ LLVMConstInt(ctx->ac.i32, offset *
+ ctx->gs_max_out_vertices, false);
+
+ offset++;
+
voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");
out_val = ac_to_integer(&ctx->ac, out_val);
out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");
- ac_build_buffer_store_dword(&ctx->ac, ctx->gsvs_ring,
+ ac_build_buffer_store_dword(&ctx->ac,
+ ctx->gsvs_ring[stream],
out_val, 1,
voffset, ctx->gs2vs_offset, 0,
1, 1, true, true);
}
- idx += slot_inc;
}
gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
ctx->ac.i32_1, "");
- LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex);
+ LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);
- ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (0 << 8), ctx->gs_wave_id);
+ ac_build_sendmsg(&ctx->ac,
+ AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
+ ctx->gs_wave_id);
}
static void
interp = lookup_interp_param(&ctx->abi, variable->data.interpolation, interp_type);
}
- bool is_16bit = glsl_type_is_16bit(variable->type);
+ bool is_16bit = glsl_type_is_16bit(glsl_without_array(variable->type));
LLVMTypeRef type = is_16bit ? ctx->ac.i16 : ctx->ac.i32;
if (interp == NULL)
interp = LLVMGetUndef(type);
if (LLVMIsUndef(interp_param))
ctx->shader_info->fs.flat_shaded_mask |= 1u << index;
+ if (i >= VARYING_SLOT_VAR0)
+ ctx->abi.fs_input_attr_indices[i - VARYING_SLOT_VAR0] = index;
++index;
} else if (i == VARYING_SLOT_CLIP_DIST0) {
int length = ctx->shader_info->info.ps.num_input_clips_culls;
return;
bool is_16bit = ac_get_type_size(LLVMTypeOf(values[0])) == 2;
- if (ctx->stage == MESA_SHADER_FRAGMENT && target >= V_008DFC_SQ_EXP_MRT) {
+ if (ctx->stage == MESA_SHADER_FRAGMENT) {
unsigned index = target - V_008DFC_SQ_EXP_MRT;
unsigned col_format = (ctx->options->key.fs.col_format >> (4 * index)) & 0xf;
bool is_int8 = (ctx->options->key.fs.is_int8 >> index) & 1;
return LLVMBuildLoad(ctx->ac.builder, output, "");
}
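+/* Write one streamout (transform feedback) output, up to 4 components,
+ * into its streamout buffer.
+ */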
+static void
+radv_emit_stream_output(struct radv_shader_context *ctx,
+ LLVMValueRef const *so_buffers,
+ LLVMValueRef const *so_write_offsets,
+ const struct radv_stream_output *output)
+{
+ unsigned num_comps = util_bitcount(output->component_mask);
+ unsigned loc = output->location;
+ unsigned buf = output->buffer;
+ unsigned offset = output->offset;
+ unsigned start;
+ LLVMValueRef out[4];
+
+ assert(num_comps && num_comps <= 4);
+ if (!num_comps || num_comps > 4)
+ return;
+
+ /* Get the first component. */
+ start = ffs(output->component_mask) - 1;
+
+ /* Load the output as int. */
+ for (int i = 0; i < num_comps; i++) {
+ out[i] = ac_to_integer(&ctx->ac,
+ radv_load_output(ctx, loc, start + i));
+ }
+
+ /* Pack the output. */
+ LLVMValueRef vdata = NULL;
+
+ switch (num_comps) {
+ case 1: /* as i32 */
+ vdata = out[0];
+ break;
+ case 2: /* as v2i32 */
+ case 3: /* as v4i32 (aligned to 4) */
+ out[3] = LLVMGetUndef(ctx->ac.i32);
+ /* fall through */
+ case 4: /* as v4i32 */
+ vdata = ac_build_gather_values(&ctx->ac, out,
+ util_next_power_of_two(num_comps));
+ break;
+ }
+
+ ac_build_buffer_store_dword(&ctx->ac, so_buffers[buf],
+ vdata, num_comps, so_write_offsets[buf],
+ ctx->ac.i32_0, offset,
+ 1, 1, true, false);
+}
+
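+/* Emit the streamout stores for all outputs that belong to the given vertex stream. */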
+static void
+radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
+{
+ struct ac_build_if_state if_ctx;
+ int i;
+
+ /* Get bits [22:16], i.e. (streamout_config >> 16) & 127; */
+ assert(ctx->streamout_config);
+ LLVMValueRef so_vtx_count =
+ ac_build_bfe(&ctx->ac, ctx->streamout_config,
+ LLVMConstInt(ctx->ac.i32, 16, false),
+ LLVMConstInt(ctx->ac.i32, 7, false), false);
+
+ LLVMValueRef tid = ac_get_thread_id(&ctx->ac);
+
+ /* can_emit = tid < so_vtx_count; */
+ LLVMValueRef can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
+ tid, so_vtx_count, "");
+
+ /* Emit the streamout code conditionally. This actually avoids
+ * out-of-bounds buffer access. The hw tells us via the SGPR
+ * (so_vtx_count) which threads are allowed to emit streamout data.
+ */
+ ac_nir_build_if(&if_ctx, ctx, can_emit);
+ {
+ /* The buffer offset is computed as follows:
+ * ByteOffset = streamout_offset[buffer_id]*4 +
+ * (streamout_write_index + thread_id)*stride[buffer_id] +
+ * attrib_offset
+ */
+ LLVMValueRef so_write_index = ctx->streamout_write_idx;
+
+ /* Compute (streamout_write_index + thread_id). */
+ so_write_index =
+ LLVMBuildAdd(ctx->ac.builder, so_write_index, tid, "");
+
+ /* Load the descriptor and compute the write offset for each
+ * enabled buffer.
+ */
+ LLVMValueRef so_write_offset[4] = {};
+ LLVMValueRef so_buffers[4] = {};
+ LLVMValueRef buf_ptr = ctx->streamout_buffers;
+
+ for (i = 0; i < 4; i++) {
+ uint16_t stride = ctx->shader_info->info.so.strides[i];
+
+ if (!stride)
+ continue;
+
+ LLVMValueRef offset =
+ LLVMConstInt(ctx->ac.i32, i, false);
+
+ so_buffers[i] = ac_build_load_to_sgpr(&ctx->ac,
+ buf_ptr, offset);
+
+ LLVMValueRef so_offset = ctx->streamout_offset[i];
+
+ so_offset = LLVMBuildMul(ctx->ac.builder, so_offset,
+ LLVMConstInt(ctx->ac.i32, 4, false), "");
+
+ so_write_offset[i] =
+ ac_build_imad(&ctx->ac, so_write_index,
+ LLVMConstInt(ctx->ac.i32,
+ stride * 4, false),
+ so_offset);
+ }
+
+ /* Write streamout data. */
+ for (i = 0; i < ctx->shader_info->info.so.num_outputs; i++) {
+ struct radv_stream_output *output =
+ &ctx->shader_info->info.so.outputs[i];
+
+ if (stream != output->stream)
+ continue;
+
+ radv_emit_stream_output(ctx, so_buffers,
+ so_write_offset, output);
+ }
+ }
+ ac_nir_build_endif(&if_ctx);
+}
+
static void
handle_vs_outputs_post(struct radv_shader_context *ctx,
bool export_prim_id, bool export_layer_id,
viewport_index_value = radv_load_output(ctx, VARYING_SLOT_VIEWPORT, 0);
}
+ if (ctx->shader_info->info.so.num_outputs &&
+ !ctx->is_gs_copy_shader) {
+ /* The GS copy shader emission already emits streamout. */
+ radv_emit_streamout(ctx, 0);
+ }
+
if (outinfo->writes_pointsize ||
outinfo->writes_layer ||
outinfo->writes_viewport_index) {
}
if (ctx->is_gs_copy_shader) {
- ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_VS, false));
+ ctx->gsvs_ring[0] =
+ ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
+ LLVMConstInt(ctx->ac.i32,
+ RING_GSVS_VS, false));
}
+
if (ctx->stage == MESA_SHADER_GEOMETRY) {
- LLVMValueRef tmp;
- uint32_t num_entries = 64;
- LLVMValueRef gsvs_ring_stride = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size, false);
- LLVMValueRef gsvs_ring_desc = LLVMConstInt(ctx->ac.i32, ctx->max_gsvs_emit_size << 16, false);
- ctx->gsvs_ring = ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets, LLVMConstInt(ctx->ac.i32, RING_GSVS_GS, false));
+ /* The conceptual layout of the GSVS ring is
+ * v0c0 .. vLc0 v0c1 .. vLc1 ..
+ * but the real memory layout is swizzled across
+ * threads:
+ * t0v0c0 .. t15v0c0 t0v1c0 .. t15v1c0 ... t15vLcL
+ * t16v0c0 ..
+ * Override the buffer descriptor accordingly.
+ */
+ LLVMTypeRef v2i64 = LLVMVectorType(ctx->ac.i64, 2);
+ uint64_t stream_offset = 0;
+ unsigned num_records = 64;
+ LLVMValueRef base_ring;
+
+ base_ring =
+ ac_build_load_to_sgpr(&ctx->ac, ctx->ring_offsets,
+ LLVMConstInt(ctx->ac.i32,
+ RING_GSVS_GS, false));
+
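+ /* Build one descriptor per stream, each pointing at that stream's slice of the GSVS ring. */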
+ for (unsigned stream = 0; stream < 4; stream++) {
+ unsigned num_components, stride;
+ LLVMValueRef ring, tmp;
+
+ num_components =
+ ctx->shader_info->info.gs.num_stream_output_components[stream];
+
+ if (!num_components)
+ continue;
+
+ stride = 4 * num_components * ctx->gs_max_out_vertices;
- ctx->gsvs_ring = LLVMBuildBitCast(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.v4i32, "");
+ /* Limit on the stride field for <= CIK. */
+ assert(stride < (1 << 14));
- tmp = LLVMConstInt(ctx->ac.i32, num_entries, false);
- if (ctx->options->chip_class >= VI)
- tmp = LLVMBuildMul(ctx->ac.builder, gsvs_ring_stride, tmp, "");
- ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, LLVMConstInt(ctx->ac.i32, 2, false), "");
- tmp = LLVMBuildExtractElement(ctx->ac.builder, ctx->gsvs_ring, ctx->ac.i32_1, "");
- tmp = LLVMBuildOr(ctx->ac.builder, tmp, gsvs_ring_desc, "");
- ctx->gsvs_ring = LLVMBuildInsertElement(ctx->ac.builder, ctx->gsvs_ring, tmp, ctx->ac.i32_1, "");
+ ring = LLVMBuildBitCast(ctx->ac.builder,
+ base_ring, v2i64, "");
+ tmp = LLVMBuildExtractElement(ctx->ac.builder,
+ ring, ctx->ac.i32_0, "");
+ tmp = LLVMBuildAdd(ctx->ac.builder, tmp,
+ LLVMConstInt(ctx->ac.i64,
+ stream_offset, 0), "");
+ ring = LLVMBuildInsertElement(ctx->ac.builder,
+ ring, tmp, ctx->ac.i32_0, "");
+
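+ /* Advance to the next stream's slice: stride bytes for each of the 64 ring entries. */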
+ stream_offset += stride * 64;
+
+ ring = LLVMBuildBitCast(ctx->ac.builder, ring,
+ ctx->ac.v4i32, "");
+
+ tmp = LLVMBuildExtractElement(ctx->ac.builder, ring,
+ ctx->ac.i32_1, "");
+ tmp = LLVMBuildOr(ctx->ac.builder, tmp,
+ LLVMConstInt(ctx->ac.i32,
+ S_008F04_STRIDE(stride), false), "");
+ ring = LLVMBuildInsertElement(ctx->ac.builder, ring, tmp,
+ ctx->ac.i32_1, "");
+
+ ring = LLVMBuildInsertElement(ctx->ac.builder, ring,
+ LLVMConstInt(ctx->ac.i32,
+ num_records, false),
+ LLVMConstInt(ctx->ac.i32, 2, false), "");
+
+ ctx->gsvs_ring[stream] = ring;
+ }
}
if (ctx->stage == MESA_SHADER_TESS_CTRL ||
ctx.abi.load_sampler_desc = radv_get_sampler_desc;
ctx.abi.load_resource = radv_load_resource;
ctx.abi.clamp_shadow_reference = false;
- ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9;
+ ctx.abi.gfx9_stride_size_workaround = ctx.ac.chip_class == GFX9 && HAVE_LLVM < 0x800;
if (shader_count >= 2)
ac_init_exec_full_mask(&ctx.ac);
ctx.output_mask = 0;
if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
- ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.ac.i32, "gs_next_vertex");
+ for (int i = 0; i < 4; i++) {
+ ctx.gs_next_vertex[i] =
+ ac_build_alloca(&ctx.ac, ctx.ac.i32, "");
+ }
ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
ctx.abi.load_inputs = load_gs_input;
ctx.abi.emit_primitive = visit_end_primitive;
LLVMValueRef vtx_offset =
LLVMBuildMul(ctx->ac.builder, ctx->abi.vertex_id,
LLVMConstInt(ctx->ac.i32, 4, false), "");
- int idx = 0;
+ LLVMValueRef stream_id;
- for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
- unsigned output_usage_mask =
- ctx->shader_info->info.gs.output_usage_mask[i];
- int length = 4;
- int slot = idx;
- int slot_inc = 1;
- if (!(ctx->output_mask & (1ull << i)))
+ /* Fetch the vertex stream ID. */
+ if (ctx->shader_info->info.so.num_outputs) {
+ stream_id =
+ ac_unpack_param(&ctx->ac, ctx->streamout_config, 24, 2);
+ } else {
+ stream_id = ctx->ac.i32_0;
+ }
+
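+ /* Generate a switch on the stream ID; each enabled stream copies its outputs from the GSVS ring in its own block. */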
+ LLVMBasicBlockRef end_bb;
+ LLVMValueRef switch_inst;
+
+ end_bb = LLVMAppendBasicBlockInContext(ctx->ac.context,
+ ctx->main_function, "end");
+ switch_inst = LLVMBuildSwitch(ctx->ac.builder, stream_id, end_bb, 4);
+
+ for (unsigned stream = 0; stream < 4; stream++) {
+ unsigned num_components =
+ ctx->shader_info->info.gs.num_stream_output_components[stream];
+ LLVMBasicBlockRef bb;
+ unsigned offset;
+
+ if (!num_components)
continue;
- if (i == VARYING_SLOT_CLIP_DIST0) {
- /* unpack clip and cull from a single set of slots */
- length = util_last_bit(output_usage_mask);
- if (length > 4)
- slot_inc = 2;
- }
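+ /* Non-zero streams are only needed for transform feedback. */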
+ if (stream > 0 && !ctx->shader_info->info.so.num_outputs)
+ continue;
- for (unsigned j = 0; j < length; j++) {
- LLVMValueRef value, soffset;
+ bb = LLVMInsertBasicBlockInContext(ctx->ac.context, end_bb, "out");
+ LLVMAddCase(switch_inst, LLVMConstInt(ctx->ac.i32, stream, 0), bb);
+ LLVMPositionBuilderAtEnd(ctx->ac.builder, bb);
+
+ offset = 0;
+ for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
+ unsigned output_usage_mask =
+ ctx->shader_info->info.gs.output_usage_mask[i];
+ unsigned output_stream =
+ ctx->shader_info->info.gs.output_streams[i];
+ int length = util_last_bit(output_usage_mask);
+
+ if (!(ctx->output_mask & (1ull << i)) ||
+ output_stream != stream)
+ continue;
+
+ for (unsigned j = 0; j < length; j++) {
+ LLVMValueRef value, soffset;
- soffset = LLVMConstInt(ctx->ac.i32,
- (slot * 4 + j) *
- ctx->gs_max_out_vertices * 16 * 4, false);
+ if (!(output_usage_mask & (1 << j)))
+ continue;
- value = ac_build_buffer_load(&ctx->ac, ctx->gsvs_ring,
- 1, ctx->ac.i32_0,
- vtx_offset, soffset,
- 0, 1, 1, true, false);
+ soffset = LLVMConstInt(ctx->ac.i32,
+ offset *
+ ctx->gs_max_out_vertices * 16 * 4, false);
- LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
- if (ac_get_type_size(type) == 2) {
- value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
- value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
+ offset++;
+
+ value = ac_build_buffer_load(&ctx->ac,
+ ctx->gsvs_ring[0],
+ 1, ctx->ac.i32_0,
+ vtx_offset, soffset,
+ 0, 1, 1, true, false);
+
+ LLVMTypeRef type = LLVMGetAllocatedType(ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
+ if (ac_get_type_size(type) == 2) {
+ value = LLVMBuildBitCast(ctx->ac.builder, value, ctx->ac.i32, "");
+ value = LLVMBuildTrunc(ctx->ac.builder, value, ctx->ac.i16, "");
+ }
+
+ LLVMBuildStore(ctx->ac.builder,
+ ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
}
+ }
- LLVMBuildStore(ctx->ac.builder,
- ac_to_float(&ctx->ac, value), ctx->abi.outputs[ac_llvm_reg_index_soa(i, j)]);
+ if (ctx->shader_info->info.so.num_outputs)
+ radv_emit_streamout(ctx, stream);
+
+ if (stream == 0) {
+ handle_vs_outputs_post(ctx, false, false,
+ &ctx->shader_info->vs.outinfo);
}
- idx += slot_inc;
+
+ LLVMBuildBr(ctx->ac.builder, end_bb);
}
- handle_vs_outputs_post(ctx, false, false, &ctx->shader_info->vs.outinfo);
+
+ LLVMPositionBuilderAtEnd(ctx->ac.builder, end_bb);
}
void