}
static void
-visit_emit_vertex(struct nir_to_llvm_context *ctx,
- const nir_intrinsic_instr *instr)
+visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
LLVMValueRef gs_next_vertex;
LLVMValueRef can_emit;
int idx;
+ struct nir_to_llvm_context *ctx = nir_to_llvm_context_from_abi(abi);
- assert(instr->const_index[0] == 0);
/* Write vertex attribute values to GSVS ring */
gs_next_vertex = LLVMBuildLoad(ctx->builder,
ctx->gs_next_vertex,
/* loop num outputs */
idx = 0;
for (unsigned i = 0; i < RADEON_LLVM_MAX_OUTPUTS; ++i) {
- LLVMValueRef *out_ptr = &ctx->nir->outputs[i * 4];
+ LLVMValueRef *out_ptr = &addrs[i * 4];
int length = 4;
int slot = idx;
int slot_inc = 1;
result = visit_interp(ctx->nctx, instr);
break;
case nir_intrinsic_emit_vertex:
- visit_emit_vertex(ctx->nctx, instr);
+ assert(instr->const_index[0] == 0);
+ ctx->abi->emit_vertex(ctx->abi, 0, ctx->outputs);
break;
case nir_intrinsic_end_primitive:
visit_end_primitive(ctx->nctx, instr);
ctx.abi.inputs = &ctx.inputs[0];
ctx.abi.emit_outputs = handle_shader_outputs_post;
+ ctx.abi.emit_vertex = visit_emit_vertex;
ctx.abi.load_ssbo = radv_load_ssbo;
ctx.abi.load_sampler_desc = radv_get_sampler_desc;
ctx.abi.clamp_shadow_reference = false;
}
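(Not shown in this excerpt: the declaration of the new hook in struct ac_shader_abi that the assignments above and the radeonsi code below rely on. A minimal sketch of the assumed member, matching the call site ctx->abi->emit_vertex(ctx->abi, 0, ctx->outputs), could look like this; names other than the ones visible in the hunks are assumptions.)

/* Sketch of the assumed ABI addition (ac_shader_abi.h), not part of the
 * hunks above: a per-driver hook that emits one GS vertex on the given
 * stream, reading output values from addrs (4 LLVMValueRef slots per
 * output), so the NIR and TGSI paths can share one emit-vertex entry point. */
struct ac_shader_abi {
	/* ... existing members (inputs, emit_outputs, load_ssbo,
	 *     load_sampler_desc, clamp_shadow_reference, ...) ... */

	void (*emit_vertex)(struct ac_shader_abi *abi,
			    unsigned stream,
			    LLVMValueRef *addrs);
};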
/* Emit one vertex from the geometry shader */
-static void si_llvm_emit_vertex(
- const struct lp_build_tgsi_action *action,
- struct lp_build_tgsi_context *bld_base,
- struct lp_build_emit_data *emit_data)
+static void si_llvm_emit_vertex(struct ac_shader_abi *abi,
+ unsigned stream,
+ LLVMValueRef *addrs)
{
- struct si_shader_context *ctx = si_shader_context(bld_base);
- struct lp_build_context *uint = &bld_base->uint_bld;
+ struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+ struct tgsi_shader_info *info = &ctx->shader->selector->info;
+ struct lp_build_context *uint = &ctx->bld_base.uint_bld;
struct si_shader *shader = ctx->shader;
- struct tgsi_shader_info *info = &shader->selector->info;
struct lp_build_if_state if_state;
LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
ctx->param_gs2vs_offset);
LLVMValueRef can_emit;
unsigned chan, offset;
int i;
- unsigned stream;
-
- stream = si_llvm_get_stream(bld_base, emit_data);
/* Write vertex attribute values to GSVS ring */
gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
offset = 0;
for (i = 0; i < info->num_outputs; i++) {
- LLVMValueRef *out_ptr = ctx->outputs[i];
-
for (chan = 0; chan < 4; chan++) {
if (!(info->output_usagemask[i] & (1 << chan)) ||
((info->output_streams[i] >> (2 * chan)) & 3) != stream)
continue;
- LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, out_ptr[chan], "");
+ LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder, addrs[4 * i + chan], "");
LLVMValueRef voffset =
LLVMConstInt(ctx->i32, offset *
shader->selector->gs_max_out_vertices, 0);
lp_build_endif(&if_state);
}
+/* Emit one vertex from the geometry shader */
+static void si_tgsi_emit_vertex(
+ const struct lp_build_tgsi_action *action,
+ struct lp_build_tgsi_context *bld_base,
+ struct lp_build_emit_data *emit_data)
+{
+ struct si_shader_context *ctx = si_shader_context(bld_base);
+ unsigned stream = si_llvm_get_stream(bld_base, emit_data);
+
+ si_llvm_emit_vertex(&ctx->abi, stream, ctx->outputs[0]);
+}
+
/* Cut one primitive from the geometry shader */
static void si_llvm_emit_primitive(
const struct lp_build_tgsi_action *action,
bld_base->op_actions[TGSI_OPCODE_READ_INVOC].fetch_args = read_invoc_fetch_args;
bld_base->op_actions[TGSI_OPCODE_READ_INVOC].emit = read_lane_emit;
- bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_llvm_emit_vertex;
+ bld_base->op_actions[TGSI_OPCODE_EMIT].emit = si_tgsi_emit_vertex;
bld_base->op_actions[TGSI_OPCODE_ENDPRIM].emit = si_llvm_emit_primitive;
bld_base->op_actions[TGSI_OPCODE_BARRIER].emit = si_llvm_emit_barrier;
}
break;
case PIPE_SHADER_GEOMETRY:
bld_base->emit_fetch_funcs[TGSI_FILE_INPUT] = fetch_input_gs;
+ ctx->abi.emit_vertex = si_llvm_emit_vertex;
bld_base->emit_epilogue = si_llvm_emit_gs_epilogue;
break;
case PIPE_SHADER_FRAGMENT: