static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
/** Whether the shader runs as a combination of multiple API shaders */
-bool si_is_multi_part_shader(struct si_shader_context *ctx)
+bool si_is_multi_part_shader(struct si_shader *shader)
{
-   if (ctx->screen->info.chip_class <= GFX8)
+   if (shader->selector->screen->info.chip_class <= GFX8)
      return false;

-   return ctx->shader->key.as_ls ||
-          ctx->shader->key.as_es ||
-          ctx->type == PIPE_SHADER_TESS_CTRL ||
-          ctx->type == PIPE_SHADER_GEOMETRY;
+   return shader->key.as_ls ||
+          shader->key.as_es ||
+          shader->selector->type == PIPE_SHADER_TESS_CTRL ||
+          shader->selector->type == PIPE_SHADER_GEOMETRY;
}

/** Whether the shader runs on a merged HW stage (LSHS or ESGS) */
-bool si_is_merged_shader(struct si_shader_context *ctx)
+bool si_is_merged_shader(struct si_shader *shader)
{
-   return ctx->shader->key.as_ngg || si_is_multi_part_shader(ctx);
+   return shader->key.as_ngg || si_is_multi_part_shader(shader);
}
/**
if (num_vbos_in_user_sgprs) {
   unsigned user_sgprs = ctx->args.num_sgprs_used;
-   if (si_is_merged_shader(ctx))
+   if (si_is_merged_shader(ctx->shader))
      user_sgprs -= 8;
   assert(user_sgprs <= SI_SGPR_VS_VB_DESCRIPTOR_FIRST);
}
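The 8 subtracted above matches the input-SGPR layout these merged-shader checks assume on GFX9+: a merged shader receives a fixed block of per-wave system SGPRs ahead of its user SGPRs, so only the remainder counts against the user-SGPR budget. A rough sketch of that layout (descriptive wording, not the driver's):

/* Assumed input-SGPR layout for a merged shader (illustrative):
 *
 *   SGPR 0..7  : fixed per-wave system values shared by the merged halves
 *   SGPR 8..   : user SGPRs, starting with SI_SGPR_RW_BUFFERS
 *
 * That is why num_sgprs_used is reduced by 8 before being compared with
 * SI_SGPR_VS_VB_DESCRIPTOR_FIRST, and why si_prolog_get_rw_buffers() below
 * adds 8 to the parameter index for merged shaders. */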
/* Add the scratch offset to input SGPRs. */
- if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(&ctx))
+ if (shader->config.scratch_bytes_per_wave && !si_is_merged_shader(shader))
   shader->info.num_input_sgprs += 1; /* scratch byte offset */
/* Calculate the number of fragment input VGPRs. */
return container_of(abi, ctx, abi);
}
-bool si_is_multi_part_shader(struct si_shader_context *ctx);
-bool si_is_merged_shader(struct si_shader_context *ctx);
+bool si_is_multi_part_shader(struct si_shader *shader);
+bool si_is_merged_shader(struct si_shader *shader);
void si_add_arg_checked(struct ac_shader_args *args,
enum ac_arg_regfile file,
unsigned registers, enum ac_arg_type type,
LLVMValueRef si_prolog_get_rw_buffers(struct si_shader_context *ctx)
{
   LLVMValueRef ptr[2], list;
-   bool merged_shader = si_is_merged_shader(ctx);
+   bool merged_shader = si_is_merged_shader(ctx->shader);
   ptr[0] = LLVMGetParam(ctx->main_fn, (merged_shader ? 8 : 0) + SI_SGPR_RW_BUFFERS);
   list = LLVMBuildIntToPtr(ctx->ac.builder, ptr[0],
si_llvm_create_func(ctx, "wrapper", returns, num_returns,
                    si_get_max_workgroup_size(ctx->shader));
-if (si_is_merged_shader(ctx))
+if (si_is_merged_shader(ctx->shader))
   ac_init_exec_full_mask(&ctx->ac);
/* Record the arguments of the function as if they were an output of
/* Merged shaders are executed conditionally depending
 * on the number of enabled threads passed in the input SGPRs. */
-if (si_is_multi_part_shader(ctx) && part == 0) {
+if (si_is_multi_part_shader(ctx->shader) && part == 0) {
   LLVMValueRef ena, count = initial[3];
   count = LLVMBuildAnd(builder, count,
ret = ac_build_call(&ctx->ac, parts[part], in, num_params);
-if (si_is_multi_part_shader(ctx) &&
+if (si_is_multi_part_shader(ctx->shader) &&
    part + 1 == next_shader_first_part) {
   ac_build_endif(&ctx->ac, 6506);
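Taken together with the comment above, these wrapper hunks gate the first merged half: EXEC is first forced to a full mask, execution is then restricted to the thread count read from initial[3] before part 0 is called, and the conditional is closed once the boundary to the next shader's parts is reached. A condensed sketch of the emitted control flow; the thread-id comparison is assumed here, since it is emitted just after the masking shown above and is not part of this excerpt:

/* Rough control flow emitted by the wrapper for a multi-part shader
 * (pseudocode, illustrative only):
 *
 *    exec = ~0;                                  // ac_init_exec_full_mask()
 *    if (thread_id < count)                      // count = initial[3], masked as above
 *       call parts[0 .. next_shader_first_part - 1];
 *    endif                                       // ac_build_endif(&ctx->ac, 6506)
 *    call parts[next_shader_first_part .. ];     // the second API shader's parts
 */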