/* Assemble the shader variant and print its disassembly to stdout,
 * prefixed with the shader stage name and the caller-supplied label.
 * Ownership: the assembled binary is freed here before returning. */
static void dump_info(struct ir3_shader_variant *so, const char *str)
{
	uint32_t *bin;
	/* Post-API-change forms: stage and assembly are derived from the
	 * variant alone (no ->shader / gpu_id indirection). */
	const char *type = ir3_shader_stage(so);
	bin = ir3_shader_assemble(so);
	debug_printf("; %s: %s\n", type, str);
	ir3_shader_disasm(so, bin, stdout);
	free(bin);
}
static void
insert_sorted(struct exec_list *var_list, nir_variable *new_var)
{
- nir_foreach_variable(var, var_list) {
+ nir_foreach_variable_in_list(var, var_list) {
if (var->data.location > new_var->data.location) {
exec_node_insert_node_before(&var->node, &new_var->node);
return;
}
static void
-sort_varyings(struct exec_list *var_list)
+sort_varyings(nir_shader *nir, nir_variable_mode mode)
{
struct exec_list new_list;
exec_list_make_empty(&new_list);
- nir_foreach_variable_safe(var, var_list) {
+ nir_foreach_variable_with_modes_safe(var, nir, mode) {
exec_node_remove(&var->node);
insert_sorted(&new_list, var);
}
- exec_list_move_nodes_to(&new_list, var_list);
+ exec_list_append(&nir->variables, &new_list);
}
static void
-fixup_varying_slots(struct exec_list *var_list)
+fixup_varying_slots(nir_shader *nir, nir_variable_mode mode)
{
- nir_foreach_variable(var, var_list) {
+ nir_foreach_variable_with_modes(var, nir, mode) {
if (var->data.location >= VARYING_SLOT_VAR0) {
var->data.location += 9;
} else if ((var->data.location >= VARYING_SLOT_TEX0) &&
NIR_PASS_V(nir, nir_lower_var_copies);
nir_print_shader(nir, stdout);
NIR_PASS_V(nir, gl_nir_lower_atomics, prog, true);
- NIR_PASS_V(nir, nir_lower_atomics_to_ssbo, 8);
+ NIR_PASS_V(nir, nir_lower_atomics_to_ssbo);
nir_print_shader(nir, stdout);
switch (stage) {
case MESA_SHADER_VERTEX:
- nir_assign_var_locations(&nir->inputs,
+ nir_assign_var_locations(nir, nir_var_shader_in,
&nir->num_inputs,
ir3_glsl_type_size);
/* Re-lower global vars, to deal with any dead VS inputs. */
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
- sort_varyings(&nir->outputs);
- nir_assign_var_locations(&nir->outputs,
+ sort_varyings(nir, nir_var_shader_out);
+ nir_assign_var_locations(nir, nir_var_shader_out,
&nir->num_outputs,
ir3_glsl_type_size);
- fixup_varying_slots(&nir->outputs);
+ fixup_varying_slots(nir, nir_var_shader_out);
break;
case MESA_SHADER_FRAGMENT:
- sort_varyings(&nir->inputs);
- nir_assign_var_locations(&nir->inputs,
+ sort_varyings(nir, nir_var_shader_in);
+ nir_assign_var_locations(nir, nir_var_shader_in,
&nir->num_inputs,
ir3_glsl_type_size);
- fixup_varying_slots(&nir->inputs);
- nir_assign_var_locations(&nir->outputs,
+ fixup_varying_slots(nir, nir_var_shader_in);
+ nir_assign_var_locations(nir, nir_var_shader_out,
&nir->num_outputs,
ir3_glsl_type_size);
break;
errx(1, "unhandled shader stage: %d", stage);
}
- nir_assign_var_locations(&nir->uniforms,
+ nir_assign_var_locations(nir, nir_var_uniform,
&nir->num_uniforms,
ir3_glsl_type_size);
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_frexp);
- NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size, 0);
+ NIR_PASS_V(nir, nir_lower_io,
+ nir_var_shader_in | nir_var_shader_out,
+ ir3_glsl_type_size, (nir_lower_io_options)0);
NIR_PASS_V(nir, gl_nir_lower_samplers, prog);
return nir;
.int64 = true,
.variable_pointers = true,
},
- .lower_workgroup_access_to_offsets = true,
.lower_ubo_ssbo_access_to_offsets = true,
.debug = {
.func = debug_func,
/* TODO cmdline option to target different gpus: */
unsigned gpu_id = 320;
const char *info;
- const char *entry;
+ const char *spirv_entry = NULL;
void *ptr;
- bool from_spirv = false;
+ bool from_tgsi = false;
size_t size;
memset(&s, 0, sizeof(s));
if (!strcmp(argv[n], "--half-precision")) {
debug_printf(" %s", argv[n]);
- key.half_precision = true;
n++;
continue;
}
if (strcmp(ext, ".tgsi") == 0) {
if (num_files != 0)
errx(1, "in TGSI mode, only a single file may be specified");
- s.from_tgsi = true;
+ from_tgsi = true;
} else if (strcmp(ext, ".spv") == 0) {
if (num_files != 0)
errx(1, "in SPIR-V mode, only a single file may be specified");
stage = MESA_SHADER_COMPUTE;
- from_spirv = true;
filenames[num_files++] = filename;
n++;
if (n == argc)
errx(1, "in SPIR-V mode, an entry point must be specified");
- entry = argv[n];
+ spirv_entry = argv[n];
n++;
} else if (strcmp(ext, ".comp") == 0) {
- if (s.from_tgsi || from_spirv)
+ if (from_tgsi || spirv_entry)
errx(1, "cannot mix GLSL/TGSI/SPIRV");
if (num_files >= ARRAY_SIZE(filenames))
errx(1, "too many GLSL files");
stage = MESA_SHADER_COMPUTE;
} else if (strcmp(ext, ".frag") == 0) {
- if (s.from_tgsi || from_spirv)
+ if (from_tgsi || spirv_entry)
errx(1, "cannot mix GLSL/TGSI/SPIRV");
if (num_files >= ARRAY_SIZE(filenames))
errx(1, "too many GLSL files");
stage = MESA_SHADER_FRAGMENT;
} else if (strcmp(ext, ".vert") == 0) {
- if (s.from_tgsi)
+ if (from_tgsi)
errx(1, "cannot mix GLSL and TGSI");
if (num_files >= ARRAY_SIZE(filenames))
errx(1, "too many GLSL files");
compiler = ir3_compiler_create(NULL, gpu_id);
- if (s.from_tgsi) {
+ if (from_tgsi) {
struct tgsi_token toks[65536];
const nir_shader_compiler_options *nir_options =
ir3_get_compiler_options(compiler);
nir = tgsi_to_nir_noscreen(toks, nir_options);
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
- } else if (from_spirv) {
- nir = load_spirv(filenames[0], entry, stage);
+ } else if (spirv_entry) {
+ nir = load_spirv(filenames[0], spirv_entry, stage);
- NIR_PASS_V(nir, nir_lower_io, nir_var_all, ir3_glsl_type_size,
- (nir_lower_io_options)0);
+ NIR_PASS_V(nir, nir_lower_io,
+ nir_var_shader_in | nir_var_shader_out,
+ ir3_glsl_type_size, (nir_lower_io_options)0);
/* TODO do this somewhere else */
nir_lower_int64(nir, ~0);
}
s.compiler = compiler;
- s.nir = ir3_optimize_nir(&s, nir, NULL);
+ s.nir = nir;
+
+ ir3_finalize_nir(compiler, nir);
v.key = key;
v.shader = &s;
s.type = v.type = nir->info.stage;
+ ir3_nir_lower_variant(&v, nir);
+
info = "NIR compiler";
ret = ir3_compile_shader_nir(s.compiler, &v);
if (ret) {