enum si_arg_regfile regfile, LLVMTypeRef type,
LLVMValueRef *assign, unsigned idx)
{
- MAYBE_UNUSED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
+ ASSERTED unsigned actual = add_arg_assign(fninfo, regfile, type, assign);
assert(actual == idx);
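/* Note: ASSERTED (util/macros.h) is meant for values read only inside
 * assert(); unlike the blanket MAYBE_UNUSED it should expand to the unused
 * attribute only in NDEBUG builds, where the assert compiles away, so debug
 * builds can still warn about truly unused variables. */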
}
struct pipe_stream_output_info *so = &sel->so;
LLVMBuilderRef builder = ctx->ac.builder;
int i;
- struct lp_build_if_state if_ctx;
/* Get bits [22:16], i.e. (so_param >> 16) & 127; */
LLVMValueRef so_vtx_count =
/* Emit the streamout code conditionally. This actually avoids
* out-of-bounds buffer access. The hw tells us via the SGPR
* (so_vtx_count) which threads are allowed to emit streamout data. */
- lp_build_if(&if_ctx, &ctx->gallivm, can_emit);
+ ac_build_ifcc(&ctx->ac, can_emit, 6501);
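/* Note: the trailing integer (6501) is an arbitrary label; as far as I can
 * tell ac_build_ifcc()/ac_build_endif() use it only to name the generated
 * basic blocks so the LLVM IR is easier to read, while the if/endif nesting
 * itself is tracked on the ac_llvm_context flow stack. The matching endif
 * below passes the same number. */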
{
/* The buffer offset is computed as follows:
* ByteOffset = streamout_offset[buffer_id]*4 +
&so->output[i], &outputs[reg]);
}
}
- lp_build_endif(&if_ctx);
+ ac_build_endif(&ctx->ac, 6501);
}
static void si_export_param(struct si_shader_context *ctx, unsigned index,
LLVMValueRef cond = LLVMGetParam(ctx->main_fn, ctx->param_vs_state_bits);
cond = LLVMBuildTrunc(ctx->ac.builder, cond, ctx->i1, "");
- struct lp_build_if_state if_ctx;
- lp_build_if(&if_ctx, &ctx->gallivm, cond);
+ ac_build_ifcc(&ctx->ac, cond, 6502);
/* Store clamped colors to alloca variables within the conditional block. */
for (unsigned i = 0; i < noutput; i++) {
addr[i][j]);
}
}
- lp_build_endif(&if_ctx);
+ ac_build_endif(&ctx->ac, 6502);
/* Load clamped colors */
for (unsigned i = 0; i < noutput; i++) {
LLVMValueRef lds_base, lds_inner, lds_outer, byteoffset, buffer;
LLVMValueRef out[6], vec0, vec1, tf_base, inner[4], outer[4];
unsigned stride, outer_comps, inner_comps, i, offset;
- struct lp_build_if_state if_ctx, inner_if_ctx;
/* Add a barrier before loading tess factors from LDS. */
if (!shader->key.part.tcs.epilog.invoc0_tess_factors_are_def)
* This can't jump, because invocation 0 executes this. It should
* at least mask out the loads and stores for other invocations.
*/
- lp_build_if(&if_ctx, &ctx->gallivm,
- LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
- invocation_id, ctx->i32_0, ""));
+ ac_build_ifcc(&ctx->ac,
+ LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
+ invocation_id, ctx->i32_0, ""), 6503);
/* Determine the layout of one tess factor element in the buffer. */
switch (shader->key.part.tcs.epilog.prim_mode) {
byteoffset = LLVMBuildMul(ctx->ac.builder, rel_patch_id,
LLVMConstInt(ctx->i32, 4 * stride, 0), "");
- lp_build_if(&inner_if_ctx, &ctx->gallivm,
- LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
- rel_patch_id, ctx->i32_0, ""));
+ ac_build_ifcc(&ctx->ac,
+ LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ,
+ rel_patch_id, ctx->i32_0, ""), 6504);
/* Store the dynamic HS control word. */
offset = 0;
offset += 4;
}
- lp_build_endif(&inner_if_ctx);
+ ac_build_endif(&ctx->ac, 6504);
/* Store the tessellation factors. */
ac_build_buffer_store_dword(&ctx->ac, buffer, vec0,
}
}
- lp_build_endif(&if_ctx);
+ ac_build_endif(&ctx->ac, 6503);
}
static LLVMValueRef
if (ctx->screen->info.chip_class >= GFX9) {
LLVMBasicBlockRef blocks[2] = {
LLVMGetInsertBlock(builder),
- ctx->merged_wrap_if_state.entry_block
+ ctx->merged_wrap_if_entry_block
};
LLVMValueRef values[2];
- lp_build_endif(&ctx->merged_wrap_if_state);
+ ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
values[0] = rel_patch_id;
values[1] = LLVMGetUndef(ctx->i32);
si_get_gs_wave_id(ctx));
if (ctx->screen->info.chip_class >= GFX9)
- lp_build_endif(&ctx->merged_wrap_if_state);
+ ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
}
static void si_llvm_emit_gs_epilogue(struct ac_shader_abi *abi,
struct tgsi_shader_info *info = &ctx->shader->selector->info;
struct si_shader *shader = ctx->shader;
- struct lp_build_if_state if_state;
LLVMValueRef soffset = LLVMGetParam(ctx->main_fn,
ctx->param_gs2vs_offset);
LLVMValueRef gs_next_vertex;
if (use_kill) {
ac_build_kill_if_false(&ctx->ac, can_emit);
} else {
- lp_build_if(&if_state, &ctx->gallivm, can_emit);
+ ac_build_ifcc(&ctx->ac, can_emit, 6505);
}
offset = 0;
}
if (!use_kill)
- lp_build_endif(&if_state);
+ ac_build_endif(&ctx->ac, 6505);
}
/* Emit one vertex from the geometry shader */
shader->is_gs_copy_shader = true;
si_init_shader_ctx(&ctx, sscreen, compiler,
- si_get_wave_size(sscreen, PIPE_SHADER_VERTEX, false));
+ si_get_wave_size(sscreen, PIPE_SHADER_VERTEX, false, false));
ctx.shader = shader;
ctx.type = PIPE_SHADER_VERTEX;
ctx->abi.lookup_interp_param = si_nir_lookup_interp_param;
ctx->abi.load_sample_position = load_sample_position;
ctx->abi.load_sample_mask_in = load_sample_mask_in;
+ ctx->abi.emit_fbfetch = si_nir_emit_fbfetch;
ctx->abi.emit_kill = si_llvm_emit_kill;
break;
case PIPE_SHADER_COMPUTE:
if (shader->key.as_ngg) {
for (unsigned i = 0; i < 4; ++i) {
ctx->gs_curprim_verts[i] =
- lp_build_alloca(&ctx->gallivm, ctx->ac.i32, "");
+ ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
ctx->gs_generated_prims[i] =
- lp_build_alloca(&ctx->gallivm, ctx->ac.i32, "");
+ ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
}
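/* Note: like the lp_build helper it replaces, ac_build_alloca is expected to
 * place the alloca in the function entry block and zero-initialize it
 * (ac_build_alloca_undef is the variant without the initial store), so these
 * per-stream counters start out at 0. */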
unsigned scratch_size = 8;
}
}
- if (shader->key.as_ngg && ctx->type != PIPE_SHADER_GEOMETRY) {
+ if (ctx->type != PIPE_SHADER_GEOMETRY &&
+ (shader->key.as_ngg && !shader->key.as_es)) {
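/* Note: as_ngg together with as_es presumably means "VS/TES compiled as the
 * ES half of a merged NGG geometry shader"; in that case the NGG-specific
 * setup below belongs to the GS half, which is why this patch restricts it
 * to NGG shaders that are not themselves compiled as an ES. */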
/* Unconditionally declare scratch space base for streamout and
* vertex compaction. Whether space is actually allocated is
* determined during linking / PM4 creation.
ctx->param_merged_wave_info, 0);
} else if (ctx->type == PIPE_SHADER_TESS_CTRL ||
ctx->type == PIPE_SHADER_GEOMETRY ||
- shader->key.as_ngg) {
+ (shader->key.as_ngg && !shader->key.as_es)) {
LLVMValueRef num_threads;
bool nested_barrier;
if (!shader->is_monolithic ||
(ctx->type == PIPE_SHADER_TESS_EVAL &&
- shader->key.as_ngg))
+ (shader->key.as_ngg && !shader->key.as_es)))
ac_init_exec_full_mask(&ctx->ac);
if (ctx->type == PIPE_SHADER_TESS_CTRL ||
LLVMValueRef ena =
LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
ac_get_thread_id(&ctx->ac), num_threads, "");
- lp_build_if(&ctx->merged_wrap_if_state, &ctx->gallivm, ena);
+
+ ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
+ ctx->merged_wrap_if_label = 11500;
+ ac_build_ifcc(&ctx->ac, ena, ctx->merged_wrap_if_label);
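/* Note: unlike the other conditionals converted in this patch, this one is
 * closed in a different function (the TCS/GS epilogue), so the entry block
 * and the label have to live in si_shader_context: the epilogues pass
 * merged_wrap_if_label to ac_build_endif() and use
 * merged_wrap_if_entry_block for the phi over values that are only defined
 * inside the conditional (see the rel_patch_id phi earlier in this patch). */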
if (nested_barrier) {
/* Execute a barrier before the second shader in
LLVMTypeRef function_type;
unsigned num_first_params;
unsigned num_out, initial_num_out;
- MAYBE_UNUSED unsigned num_out_sgpr; /* used in debug checks */
- MAYBE_UNUSED unsigned initial_num_out_sgpr; /* used in debug checks */
+ ASSERTED unsigned num_out_sgpr; /* used in debug checks */
+ ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
unsigned num_sgprs, num_vgprs;
unsigned gprs;
- struct lp_build_if_state if_state;
si_init_function_info(&fninfo);
initial_num_out_sgpr = num_out_sgpr;
/* Now chain the parts. */
- LLVMValueRef ret;
+ LLVMValueRef ret = NULL;
for (unsigned part = 0; part < num_parts; ++part) {
LLVMValueRef in[48];
LLVMTypeRef ret_type;
LLVMConstInt(ctx->i32, 0x7f, 0), "");
ena = LLVMBuildICmp(builder, LLVMIntULT,
ac_get_thread_id(&ctx->ac), count, "");
- lp_build_if(&if_state, &ctx->gallivm, ena);
+ ac_build_ifcc(&ctx->ac, ena, 6506);
}
/* Derive arguments for the next part from outputs of the
if (is_multi_part_shader(ctx) &&
part + 1 == next_shader_first_part) {
- lp_build_endif(&if_state);
+ ac_build_endif(&ctx->ac, 6506);
/* The second half of the merged shader should use
* the inputs from the toplevel (wrapper) function,
struct si_shader shader_es = {};
shader_es.selector = es;
shader_es.key.as_es = 1;
+ shader_es.key.as_ngg = shader->key.as_ngg;
shader_es.key.mono = shader->key.mono;
shader_es.key.opt = shader->key.opt;
shader_es.is_monolithic = true;
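/* Note: carrying as_ngg over from the GS key (added above) should make the
 * monolithic ES main part get compiled for the NGG path instead of the
 * legacy ES-GS ring, matching the merged shader it is linked into. */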
struct si_shader_context ctx;
si_init_shader_ctx(&ctx, sscreen, compiler,
- si_get_wave_size(sscreen, type, shader.key.as_ngg));
+ si_get_wave_size(sscreen, type, shader.key.as_ngg,
+ shader.key.as_es));
ctx.shader = &shader;
ctx.type = type;
key->vs_prolog.num_input_sgprs + i, "");
}
- struct lp_build_if_state wrap_if_state;
LLVMValueRef original_ret = ret;
bool wrapped = false;
+ LLVMBasicBlockRef if_entry_block = NULL;
if (key->vs_prolog.is_monolithic && key->vs_prolog.as_ngg) {
LLVMValueRef num_threads;
num_threads = si_unpack_param(ctx, 3, 0, 8);
ena = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT,
ac_get_thread_id(&ctx->ac), num_threads, "");
- lp_build_if(&wrap_if_state, &ctx->gallivm, ena);
+ if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
+ ac_build_ifcc(&ctx->ac, ena, 11501);
wrapped = true;
}
}
if (wrapped) {
- lp_build_endif(&wrap_if_state);
+ LLVMBasicBlockRef bbs[2] = {
+ LLVMGetInsertBlock(ctx->ac.builder),
+ if_entry_block,
+ };
+ ac_build_endif(&ctx->ac, 11501);
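/* Note: the prolog outputs computed inside the conditional only update ret
 * on the if-true path, so it is merged with original_ret (its value at the
 * entry block) via the phi below; bbs[] has to be captured before
 * ac_build_endif(), presumably because the builder's insert block moves to
 * the new endif block once the flow is closed. */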
LLVMValueRef values[2] = {
ret,
original_ret
};
- LLVMBasicBlockRef bbs[2] = {
- wrap_if_state.true_block,
- wrap_if_state.entry_block
- };
ret = ac_build_phi(&ctx->ac, LLVMTypeOf(ret), 2, values, bbs);
}
struct pipe_debug_callback *debug)
{
if (sscreen->info.chip_class >= GFX9) {
- struct si_shader *es_main_part =
- shader->key.part.gs.es->main_shader_part_es;
+ struct si_shader *es_main_part;
+ enum pipe_shader_type es_type = shader->key.part.gs.es->type;
+
+ if (es_type == PIPE_SHADER_TESS_EVAL && shader->key.as_ngg)
+ es_main_part = shader->key.part.gs.es->main_shader_part_ngg_es;
+ else
+ es_main_part = shader->key.part.gs.es->main_shader_part_es;
- if (shader->key.part.gs.es->type == PIPE_SHADER_VERTEX &&
+ if (es_type == PIPE_SHADER_VERTEX &&
!si_get_vs_prolog(sscreen, compiler, shader, debug, es_main_part,
&shader->key.part.gs.vs_prolog))
return false;