nir: Get rid of nir_shader::stage

It's redundant with nir_shader::info::stage.
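
The change is purely mechanical: every reader of the old field now goes
through the embedded shader_info instead. A minimal sketch of the pattern
applied tree-wide (the helper below is hypothetical, for illustration
only, and not part of this patch):

    #include "nir.h"

    /* Hypothetical helper: stage checks read nir_shader::info.stage. */
    static bool
    shader_is_fragment(const nir_shader *nir)
    {
       /* Before this patch: nir->stage == MESA_SHADER_FRAGMENT */
       return nir->info.stage == MESA_SHADER_FRAGMENT;
    }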
Acked-by: Timothy Arceri <tarceri@itsqueeze.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Reviewed-by: Jordan Justen <jordan.l.justen@intel.com>
ac_nir_get_max_workgroup_size(enum chip_class chip_class,
const struct nir_shader *nir)
{
- switch (nir->stage) {
+ switch (nir->info.stage) {
case MESA_SHADER_TESS_CTRL:
return chip_class >= CIK ? 128 : 64;
case MESA_SHADER_GEOMETRY:
if (nctx)
nctx->nir = &ctx;
- ctx.stage = nir->stage;
+ ctx.stage = nir->info.stage;
ctx.main_function = LLVMGetBasicBlockParent(LLVMGetInsertBlock(ctx.ac.builder));
setup_locals(&ctx, func);
- if (nir->stage == MESA_SHADER_COMPUTE)
+ if (nir->info.stage == MESA_SHADER_COMPUTE)
setup_shared(&ctx, nir);
visit_cf_list(&ctx, &func->impl->body);
ctx.max_workgroup_size = ac_nir_get_max_workgroup_size(ctx.options->chip_class, shaders[0]);
- create_function(&ctx, shaders[shader_count - 1]->stage, shader_count >= 2,
- shader_count >= 2 ? shaders[shader_count - 2]->stage : MESA_SHADER_VERTEX);
+ create_function(&ctx, shaders[shader_count - 1]->info.stage, shader_count >= 2,
+ shader_count >= 2 ? shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);
ctx.abi.inputs = &ctx.inputs[0];
ctx.abi.emit_outputs = handle_shader_outputs_post;
ac_init_exec_full_mask(&ctx.ac);
if (ctx.ac.chip_class == GFX9 &&
- shaders[shader_count - 1]->stage == MESA_SHADER_TESS_CTRL)
+ shaders[shader_count - 1]->info.stage == MESA_SHADER_TESS_CTRL)
ac_nir_fixup_ls_hs_input_vgprs(&ctx);
for(int i = 0; i < shader_count; ++i) {
- ctx.stage = shaders[i]->stage;
+ ctx.stage = shaders[i]->info.stage;
ctx.output_mask = 0;
ctx.tess_outputs_written = 0;
ctx.num_output_clips = shaders[i]->info.clip_distance_array_size;
ctx.num_output_culls = shaders[i]->info.cull_distance_array_size;
- if (shaders[i]->stage == MESA_SHADER_GEOMETRY) {
+ if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
ctx.gs_next_vertex = ac_build_alloca(&ctx.ac, ctx.i32, "gs_next_vertex");
ctx.gs_max_out_vertices = shaders[i]->info.gs.vertices_out;
- } else if (shaders[i]->stage == MESA_SHADER_TESS_EVAL) {
+ } else if (shaders[i]->info.stage == MESA_SHADER_TESS_EVAL) {
ctx.tes_primitive_mode = shaders[i]->info.tess.primitive_mode;
- } else if (shaders[i]->stage == MESA_SHADER_VERTEX) {
+ } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX) {
if (shader_info->info.vs.needs_instance_id) {
ctx.shader_info->vs.vgpr_comp_cnt =
MAX2(3, ctx.shader_info->vs.vgpr_comp_cnt);
}
- } else if (shaders[i]->stage == MESA_SHADER_FRAGMENT) {
+ } else if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT) {
shader_info->fs.can_discard = shaders[i]->info.fs.uses_discard;
}
LLVMPositionBuilderAtEnd(ctx.ac.builder, then_block);
}
- if (shaders[i]->stage == MESA_SHADER_FRAGMENT)
+ if (shaders[i]->info.stage == MESA_SHADER_FRAGMENT)
handle_fs_inputs(&ctx, shaders[i]);
- else if(shaders[i]->stage == MESA_SHADER_VERTEX)
+ else if(shaders[i]->info.stage == MESA_SHADER_VERTEX)
handle_vs_inputs(&ctx, shaders[i]);
- else if(shader_count >= 2 && shaders[i]->stage == MESA_SHADER_GEOMETRY)
+ else if(shader_count >= 2 && shaders[i]->info.stage == MESA_SHADER_GEOMETRY)
prepare_gs_input_vgprs(&ctx);
nir_foreach_variable(variable, &shaders[i]->outputs)
- scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->stage);
+ scan_shader_output_decl(&ctx, variable, shaders[i], shaders[i]->info.stage);
ac_nir_translate(&ctx.ac, &ctx.abi, shaders[i], &ctx);
LLVMPositionBuilderAtEnd(ctx.ac.builder, merge_block);
}
- if (shaders[i]->stage == MESA_SHADER_GEOMETRY) {
+ if (shaders[i]->info.stage == MESA_SHADER_GEOMETRY) {
unsigned addclip = shaders[i]->info.clip_distance_array_size +
shaders[i]->info.cull_distance_array_size > 4;
shader_info->gs.gsvs_vertex_size = (util_bitcount64(ctx.output_mask) + addclip) * 16;
shader_info->gs.max_gsvs_emit_size = shader_info->gs.gsvs_vertex_size *
shaders[i]->info.gs.vertices_out;
- } else if (shaders[i]->stage == MESA_SHADER_TESS_CTRL) {
+ } else if (shaders[i]->info.stage == MESA_SHADER_TESS_CTRL) {
shader_info->tcs.outputs_written = ctx.tess_outputs_written;
shader_info->tcs.patch_outputs_written = ctx.tess_patch_outputs_written;
- } else if (shaders[i]->stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
+ } else if (shaders[i]->info.stage == MESA_SHADER_VERTEX && ctx.options->key.vs.as_ls) {
shader_info->vs.outputs_written = ctx.tess_outputs_written;
}
}
static void
ac_fill_shader_info(struct ac_shader_variant_info *shader_info, struct nir_shader *nir, const struct ac_nir_compiler_options *options)
{
- switch (nir->stage) {
+ switch (nir->info.stage) {
case MESA_SHADER_COMPUTE:
for (int i = 0; i < 3; ++i)
shader_info->cs.block_size[i] = nir->info.cs.local_size[i];
LLVMModuleRef llvm_module = ac_translate_nir_to_llvm(tm, nir, nir_count, shader_info,
options);
- ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info, nir[0]->stage, dump_shader, options->supports_spill);
+ ac_compile_llvm_module(tm, llvm_module, binary, config, shader_info, nir[0]->info.stage, dump_shader, options->supports_spill);
for (int i = 0; i < nir_count; ++i)
ac_fill_shader_info(shader_info, nir[i], options);
}
nir_variable *var,
struct ac_shader_info *info)
{
- switch (nir->stage) {
+ switch (nir->info.stage) {
case MESA_SHADER_VERTEX:
info->vs.has_vertex_buffers = true;
break;
spec_entries, num_spec_entries,
stage, entrypoint_name, &supported_ext, &nir_options);
nir = entry_point->shader;
- assert(nir->stage == stage);
+ assert(nir->info.stage == stage);
nir_validate_shader(nir);
free(spec_entries);
* indirect indexing is trivial.
*/
nir_variable_mode indirect_mask = 0;
- if (nir->stage == MESA_SHADER_GEOMETRY ||
- (nir->stage != MESA_SHADER_TESS_CTRL &&
- nir->stage != MESA_SHADER_TESS_EVAL &&
+ if (nir->info.stage == MESA_SHADER_GEOMETRY ||
+ (nir->info.stage != MESA_SHADER_TESS_CTRL &&
+ nir->info.stage != MESA_SHADER_TESS_EVAL &&
!llvm_has_working_vgpr_indexing)) {
indirect_mask |= nir_var_shader_in;
}
options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
options.supports_spill = device->llvm_supports_spill;
- return shader_variant_create(device, module, shaders, shader_count, shaders[shader_count - 1]->stage,
+ return shader_variant_create(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage,
&options, false, code_out, code_size_out);
}
qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
uint32_t vpm_components_queued = 0;
- if (c->s->stage == MESA_SHADER_VERTEX) {
+ if (c->s->info.stage == MESA_SHADER_VERTEX) {
bool uses_iid = c->s->info.system_values_read &
(1ull << SYSTEM_VALUE_INSTANCE_ID);
bool uses_vid = c->s->info.system_values_read &
resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
(loc + 1) * 4);
- if (c->s->stage == MESA_SHADER_FRAGMENT) {
+ if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
if (var->data.location == VARYING_SLOT_POS) {
emit_fragcoord_input(c, loc);
} else if (var->data.location == VARYING_SLOT_PNTC ||
}
}
- if (c->s->stage == MESA_SHADER_VERTEX) {
+ if (c->s->info.stage == MESA_SHADER_VERTEX) {
assert(vpm_components_queued == 0);
assert(num_components == 0);
}
for (int i = 0; i < 4; i++)
add_output(c, loc + i, var->data.location, i);
- if (c->s->stage == MESA_SHADER_FRAGMENT) {
+ if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
switch (var->data.location) {
case FRAG_RESULT_COLOR:
c->output_color_var[0] = var;
static void
nir_to_vir(struct v3d_compile *c)
{
- if (c->s->stage == MESA_SHADER_FRAGMENT) {
+ if (c->s->info.stage == MESA_SHADER_FRAGMENT) {
c->payload_w = vir_MOV(c, vir_reg(QFILE_REG, 0));
c->payload_w_centroid = vir_MOV(c, vir_reg(QFILE_REG, 1));
c->payload_z = vir_MOV(c, vir_reg(QFILE_REG, 2));
v3d_nir_to_vir(struct v3d_compile *c)
{
if (V3D_DEBUG & (V3D_DEBUG_NIR |
- v3d_debug_flag_for_shader_stage(c->s->stage))) {
+ v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
fprintf(stderr, "%s prog %d/%d NIR:\n",
vir_get_stage_name(c),
c->program_id, c->variant_id);
nir_to_vir(c);
- switch (c->s->stage) {
+ switch (c->s->info.stage) {
case MESA_SHADER_FRAGMENT:
emit_frag_end(c);
break;
}
if (V3D_DEBUG & (V3D_DEBUG_VIR |
- v3d_debug_flag_for_shader_stage(c->s->stage))) {
+ v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
fprintf(stderr, "%s prog %d/%d pre-opt VIR:\n",
vir_get_stage_name(c),
c->program_id, c->variant_id);
/* XXX: vir_schedule_instructions(c); */
if (V3D_DEBUG & (V3D_DEBUG_VIR |
- v3d_debug_flag_for_shader_stage(c->s->stage))) {
+ v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
fprintf(stderr, "%s prog %d/%d VIR:\n",
vir_get_stage_name(c),
c->program_id, c->variant_id);
if (c->vs_key && c->vs_key->is_coord)
return "MESA_SHADER_COORD";
else
- return gl_shader_stage_name(c->s->stage);
+ return gl_shader_stage_name(c->s->info.stage);
}
}
if (V3D_DEBUG & (V3D_DEBUG_QPU |
- v3d_debug_flag_for_shader_stage(c->s->stage))) {
+ v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
v3d_dump_qpu(c);
}
* two locations. For instance, if we have in the IR code a dvec3 attr0 in
* location 0 and vec4 attr1 in location 1, in NIR attr0 will use
* locations/slots 0 and 1, and attr1 will use location/slot 2 */
- if (shader->stage == MESA_SHADER_VERTEX)
+ if (shader->info.stage == MESA_SHADER_VERTEX)
nir_remap_attributes(shader);
shader->info.name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
break;
case ir_var_shader_in:
- if (shader->stage == MESA_SHADER_FRAGMENT &&
+ if (shader->info.stage == MESA_SHADER_FRAGMENT &&
ir->data.location == VARYING_SLOT_FACE) {
/* For whatever reason, GLSL IR makes gl_FrontFacing an input */
var->data.location = SYSTEM_VALUE_FRONT_FACE;
var->data.mode = nir_var_system_value;
- } else if (shader->stage == MESA_SHADER_GEOMETRY &&
+ } else if (shader->info.stage == MESA_SHADER_GEOMETRY &&
ir->data.location == VARYING_SLOT_PRIMITIVE_ID) {
/* For whatever reason, GLSL IR makes gl_PrimitiveIDIn an input */
var->data.location = SYSTEM_VALUE_PRIMITIVE_ID;
} else {
var->data.mode = nir_var_shader_in;
- if (shader->stage == MESA_SHADER_TESS_EVAL &&
+ if (shader->info.stage == MESA_SHADER_TESS_EVAL &&
(ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
var->data.compact = ir->type->without_array()->is_scalar();
case ir_var_shader_out:
var->data.mode = nir_var_shader_out;
- if (shader->stage == MESA_SHADER_TESS_CTRL &&
+ if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
(ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
var->data.compact = ir->type->without_array()->is_scalar();
shader->options = options;
- if (si)
+ if (si) {
+ assert(si->stage == stage);
shader->info = *si;
+ } else {
+ shader->info.stage = stage;
+ }
exec_list_make_empty(&shader->functions);
exec_list_make_empty(&shader->registers);
shader->num_uniforms = 0;
shader->num_shared = 0;
- shader->stage = stage;
-
return shader;
}
break;
case nir_var_shared:
- assert(shader->stage == MESA_SHADER_COMPUTE);
+ assert(shader->info.stage == MESA_SHADER_COMPUTE);
exec_list_push_tail(&shader->shared, &var->node);
break;
var->type = type;
var->data.mode = mode;
- if ((mode == nir_var_shader_in && shader->stage != MESA_SHADER_VERTEX) ||
- (mode == nir_var_shader_out && shader->stage != MESA_SHADER_FRAGMENT))
+ if ((mode == nir_var_shader_in &&
+ shader->info.stage != MESA_SHADER_VERTEX) ||
+ (mode == nir_var_shader_out &&
+ shader->info.stage != MESA_SHADER_FRAGMENT))
var->data.interpolation = INTERP_MODE_SMOOTH;
if (mode == nir_var_shader_in || mode == nir_var_uniform)
* access plus one
*/
unsigned num_inputs, num_uniforms, num_outputs, num_shared;
-
- /** The shader stage, such as MESA_SHADER_VERTEX. */
- gl_shader_stage stage;
} nir_shader;
static inline nir_function_impl *
clone_state state;
init_clone_state(&state, NULL, true, false);
- nir_shader *ns = nir_shader_create(mem_ctx, s->stage, s->options, NULL);
+ nir_shader *ns = nir_shader_create(mem_ctx, s->info.stage, s->options, NULL);
state.ns = ns;
clone_var_list(&state, &ns->uniforms, &s->uniforms);
else
shader->info.inputs_read |= bitfield;
- if (shader->stage == MESA_SHADER_FRAGMENT) {
+ if (shader->info.stage == MESA_SHADER_FRAGMENT) {
shader->info.fs.uses_sample_qualifier |= var->data.sample;
}
} else {
{
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, shader->stage)) {
+ if (nir_is_per_vertex_io(var, shader->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
nir_variable *var = deref->var;
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, shader->stage)) {
+ if (nir_is_per_vertex_io(var, shader->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
switch (instr->intrinsic) {
case nir_intrinsic_discard:
case nir_intrinsic_discard_if:
- assert(shader->stage == MESA_SHADER_FRAGMENT);
+ assert(shader->info.stage == MESA_SHADER_FRAGMENT);
shader->info.fs.uses_discard = true;
break;
/* We need to track which input_reads bits correspond to a
* dvec3/dvec4 input attribute */
- if (shader->stage == MESA_SHADER_VERTEX &&
+ if (shader->info.stage == MESA_SHADER_VERTEX &&
var->data.mode == nir_var_shader_in &&
glsl_type_is_dual_slot(glsl_without_array(var->type))) {
for (uint i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
case nir_intrinsic_end_primitive:
case nir_intrinsic_end_primitive_with_counter:
- assert(shader->stage == MESA_SHADER_GEOMETRY);
+ assert(shader->info.stage == MESA_SHADER_GEOMETRY);
shader->info.gs.uses_end_primitive = 1;
break;
shader->info.patch_inputs_read = 0;
shader->info.patch_outputs_written = 0;
shader->info.system_values_read = 0;
- if (shader->stage == MESA_SHADER_FRAGMENT) {
+ if (shader->info.stage == MESA_SHADER_FRAGMENT) {
shader->info.fs.uses_sample_qualifier = false;
}
nir_foreach_block(block, entrypoint) {
nir_variable *var = intrin_instr->variables[0]->var;
read[var->data.location_frac] |=
get_variable_io_mask(intrin_instr->variables[0]->var,
- shader->stage);
+ shader->info.stage);
}
}
}
uint64_t other_stage = used_by_other_stage[var->data.location_frac];
- if (!(other_stage & get_variable_io_mask(var, shader->stage))) {
+ if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
/* This one is invalid, make it a global variable instead */
var->data.location = 0;
var->data.mode = nir_var_global;
bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
- assert(producer->stage != MESA_SHADER_FRAGMENT);
- assert(consumer->stage != MESA_SHADER_VERTEX);
+ assert(producer->info.stage != MESA_SHADER_FRAGMENT);
+ assert(consumer->info.stage != MESA_SHADER_VERTEX);
uint64_t read[4] = { 0 }, written[4] = { 0 };
nir_foreach_variable(var, &producer->outputs) {
written[var->data.location_frac] |=
- get_variable_io_mask(var, producer->stage);
+ get_variable_io_mask(var, producer->info.stage);
}
nir_foreach_variable(var, &consumer->inputs) {
read[var->data.location_frac] |=
- get_variable_io_mask(var, consumer->stage);
+ get_variable_io_mask(var, consumer->info.stage);
}
/* Each TCS invocation can read data written by other TCS invocations,
* so even if the outputs are not used by the TES we must also make
* sure they are not read by the TCS before demoting them to globals.
*/
- if (producer->stage == MESA_SHADER_TESS_CTRL)
+ if (producer->info.stage == MESA_SHADER_TESS_CTRL)
tcs_add_output_reads(producer, read);
bool progress = false;
nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
bool alpha_to_one)
{
- assert(shader->stage == MESA_SHADER_FRAGMENT);
+ assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_foreach_function(function, shader) {
nir_function_impl *impl = function->impl;
nir_intrinsic_instr *new_instr = nir_intrinsic_instr_create(mem_ctx, op);
nir_intrinsic_set_base(new_instr,
- shader_program->data->UniformStorage[uniform_loc].opaque[shader->stage].index);
+ shader_program->data->UniformStorage[uniform_loc].opaque[shader->info.stage].index);
nir_load_const_instr *offset_const =
nir_load_const_instr_create(mem_ctx, 1, 32);
nir_lower_bitmap(nir_shader *shader,
const nir_lower_bitmap_options *options)
{
- assert(shader->stage == MESA_SHADER_FRAGMENT);
+ assert(shader->info.stage == MESA_SHADER_FRAGMENT);
lower_bitmap_impl(nir_shader_get_entrypoint(shader), options);
}
static bool
is_color_output(lower_state *state, nir_variable *out)
{
- switch (state->shader->stage) {
+ switch (state->shader->info.stage) {
case MESA_SHADER_VERTEX:
case MESA_SHADER_GEOMETRY:
switch (out->data.location) {
* array length.
*/
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, nir->stage))
+ if (nir_is_per_vertex_io(var, nir->info.stage))
type = glsl_get_array_element(type);
assert(glsl_type_is_array(type));
cull->data.location = VARYING_SLOT_CLIP_DIST0;
} else {
/* Turn the ClipDistance array into a combined one */
- update_type(clip, nir->stage, clip_array_size + cull_array_size);
+ update_type(clip, nir->info.stage, clip_array_size + cull_array_size);
/* Rewrite CullDistance to reference the combined array */
nir_foreach_function(function, nir) {
{
bool progress = false;
- if (nir->stage <= MESA_SHADER_GEOMETRY)
+ if (nir->info.stage <= MESA_SHADER_GEOMETRY)
progress |= combine_clip_cull(nir, &nir->outputs, true);
- if (nir->stage > MESA_SHADER_VERTEX)
+ if (nir->info.stage > MESA_SHADER_VERTEX)
progress |= combine_clip_cull(nir, &nir->inputs, false);
return progress;
.shader = shader,
};
- assert(shader->stage == MESA_SHADER_FRAGMENT);
+ assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_foreach_function(function, shader) {
if (function->impl)
nir_intrinsic_op op;
switch (mode) {
case nir_var_shader_in:
- if (nir->stage == MESA_SHADER_FRAGMENT &&
+ if (nir->info.stage == MESA_SHADER_FRAGMENT &&
nir->options->use_interpolated_input_intrinsics &&
var->data.interpolation != INTERP_MODE_FLAT) {
assert(vertex_index == NULL);
b->cursor = nir_before_instr(instr);
- const bool per_vertex = nir_is_per_vertex_io(var, b->shader->stage);
+ const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
nir_ssa_def *offset;
nir_ssa_def *vertex_index = NULL;
static void
emit_output_copies_impl(struct lower_io_state *state, nir_function_impl *impl)
{
- if (state->shader->stage == MESA_SHADER_GEOMETRY) {
+ if (state->shader->info.stage == MESA_SHADER_GEOMETRY) {
/* For geometry shaders, we have to emit the output copies right
* before each EmitVertex call.
*/
{
struct lower_io_state state;
- if (shader->stage == MESA_SHADER_TESS_CTRL)
+ if (shader->info.stage == MESA_SHADER_TESS_CTRL)
return;
state.shader = shader;
(var->data.mode != nir_var_shader_out))
continue;
- bool vs_in = (state->shader->stage == MESA_SHADER_VERTEX) &&
+ bool vs_in = (state->shader->info.stage == MESA_SHADER_VERTEX) &&
(var->data.mode == nir_var_shader_in);
if (glsl_count_attribute_slots(var->type, vs_in) == 1)
continue;
nir_foreach_function(function, shader) {
if (function->impl)
- progress |= lower_impl(function->impl, shader_program, shader->stage);
+ progress |= lower_impl(function->impl, shader_program,
+ shader->info.stage);
}
return progress;
nir_builder *b)
{
nir_variable *var = deref->var;
- gl_shader_stage stage = state->shader->stage;
+ gl_shader_stage stage = state->shader->info.stage;
unsigned location = var->data.location;
unsigned binding;
const struct glsl_type *orig_type = deref->deref.type;
if ((nir_tex_instr_src_index(tex, nir_tex_src_lod) == -1) &&
(tex->op == nir_texop_txf || tex->op == nir_texop_txs ||
tex->op == nir_texop_txl || tex->op == nir_texop_query_levels ||
- (tex->op == nir_texop_tex && b->shader->stage != MESA_SHADER_FRAGMENT))) {
+ (tex->op == nir_texop_tex &&
+ b->shader->info.stage != MESA_SHADER_FRAGMENT))) {
b->cursor = nir_before_instr(&tex->instr);
nir_tex_instr_add_src(tex, nir_tex_src_lod, nir_src_for_ssa(nir_imm_int(b, 0)));
progress = true;
.shader = shader,
};
- if (shader->stage != MESA_SHADER_FRAGMENT)
+ if (shader->info.stage != MESA_SHADER_FRAGMENT)
return;
if (setup_inputs(&state) != 0)
bool progress = false;
nir_builder b;
- assert(shader->stage == MESA_SHADER_FRAGMENT);
+ assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_foreach_function(function, shader) {
if (function->impl) {
.shader = shader,
};
- assert(shader->stage == MESA_SHADER_FRAGMENT);
+ assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_foreach_function(function, shader) {
if (function->impl)
const char *loc = NULL;
char buf[4];
- switch (state->shader->stage) {
+ switch (state->shader->info.stage) {
case MESA_SHADER_VERTEX:
if (var->data.mode == nir_var_shader_in)
loc = gl_vert_attrib_name(var->data.location);
state.annotations = annotations;
- fprintf(fp, "shader: %s\n", gl_shader_stage_name(shader->stage));
+ fprintf(fp, "shader: %s\n", gl_shader_stage_name(shader->info.stage));
if (shader->info.name)
fprintf(fp, "name: %s\n", shader->info.name);
if (shader->info.label)
fprintf(fp, "label: %s\n", shader->info.label);
- switch (shader->stage) {
+ switch (shader->info.stage) {
case MESA_SHADER_COMPUTE:
fprintf(fp, "local-size: %u, %u, %u%s\n",
shader->info.cs.local_size[0],
assert(glsl_type_is_array(var->type));
const struct glsl_type *type = glsl_get_array_element(var->type);
- if (nir_is_per_vertex_io(var, state->shader->stage)) {
+ if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
assert(glsl_type_is_array(type));
assert(glsl_type_is_scalar(glsl_get_array_element(type)));
} else {
break;
case SpvExecutionModeEarlyFragmentTests:
- assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
b->shader->info.fs.early_fragment_tests = true;
break;
case SpvExecutionModeInvocations:
- assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
break;
case SpvExecutionModeDepthReplacing:
- assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
break;
case SpvExecutionModeDepthGreater:
- assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
break;
case SpvExecutionModeDepthLess:
- assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
break;
case SpvExecutionModeDepthUnchanged:
- assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
break;
case SpvExecutionModeLocalSize:
- assert(b->shader->stage == MESA_SHADER_COMPUTE);
+ assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
b->shader->info.cs.local_size[0] = mode->literals[0];
b->shader->info.cs.local_size[1] = mode->literals[1];
b->shader->info.cs.local_size[2] = mode->literals[2];
break; /* Nothing to do with this */
case SpvExecutionModeOutputVertices:
- if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
- b->shader->stage == MESA_SHADER_TESS_EVAL) {
+ if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
b->shader->info.tess.tcs_vertices_out = mode->literals[0];
} else {
- assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
b->shader->info.gs.vertices_out = mode->literals[0];
}
break;
case SpvExecutionModeInputTrianglesAdjacency:
case SpvExecutionModeQuads:
case SpvExecutionModeIsolines:
- if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
- b->shader->stage == MESA_SHADER_TESS_EVAL) {
+ if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
b->shader->info.tess.primitive_mode =
gl_primitive_from_spv_execution_mode(mode->exec_mode);
} else {
- assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
b->shader->info.gs.vertices_in =
vertices_in_from_spv_execution_mode(mode->exec_mode);
}
case SpvExecutionModeOutputPoints:
case SpvExecutionModeOutputLineStrip:
case SpvExecutionModeOutputTriangleStrip:
- assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
b->shader->info.gs.output_primitive =
gl_primitive_from_spv_execution_mode(mode->exec_mode);
break;
case SpvExecutionModeSpacingEqual:
- assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
- b->shader->stage == MESA_SHADER_TESS_EVAL);
+ assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL);
b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
break;
case SpvExecutionModeSpacingFractionalEven:
- assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
- b->shader->stage == MESA_SHADER_TESS_EVAL);
+ assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL);
b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
break;
case SpvExecutionModeSpacingFractionalOdd:
- assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
- b->shader->stage == MESA_SHADER_TESS_EVAL);
+ assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL);
b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
break;
case SpvExecutionModeVertexOrderCw:
- assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
- b->shader->stage == MESA_SHADER_TESS_EVAL);
+ assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL);
b->shader->info.tess.ccw = false;
break;
case SpvExecutionModeVertexOrderCcw:
- assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
- b->shader->stage == MESA_SHADER_TESS_EVAL);
+ assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL);
b->shader->info.tess.ccw = true;
break;
case SpvExecutionModePointMode:
- assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
- b->shader->stage == MESA_SHADER_TESS_EVAL);
+ assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL);
b->shader->info.tess.point_mode = true;
break;
set_mode_system_value(mode);
break;
case SpvBuiltInPrimitiveId:
- if (b->shader->stage == MESA_SHADER_FRAGMENT) {
+ if (b->shader->info.stage == MESA_SHADER_FRAGMENT) {
assert(*mode == nir_var_shader_in);
*location = VARYING_SLOT_PRIMITIVE_ID;
} else if (*mode == nir_var_shader_out) {
break;
case SpvBuiltInLayer:
*location = VARYING_SLOT_LAYER;
- if (b->shader->stage == MESA_SHADER_FRAGMENT)
+ if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
*mode = nir_var_shader_in;
- else if (b->shader->stage == MESA_SHADER_GEOMETRY)
+ else if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
*mode = nir_var_shader_out;
else
unreachable("invalid stage for SpvBuiltInLayer");
break;
case SpvBuiltInViewportIndex:
*location = VARYING_SLOT_VIEWPORT;
- if (b->shader->stage == MESA_SHADER_GEOMETRY)
+ if (b->shader->info.stage == MESA_SHADER_GEOMETRY)
*mode = nir_var_shader_out;
- else if (b->shader->stage == MESA_SHADER_FRAGMENT)
+ else if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
*mode = nir_var_shader_in;
else
unreachable("invalid stage for SpvBuiltInViewportIndex");
if (dec->decoration == SpvDecorationLocation) {
unsigned location = dec->literals[0];
bool is_vertex_input;
- if (b->shader->stage == MESA_SHADER_FRAGMENT &&
+ if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
vtn_var->mode == vtn_variable_mode_output) {
is_vertex_input = false;
location += FRAG_RESULT_DATA0;
- } else if (b->shader->stage == MESA_SHADER_VERTEX &&
+ } else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
vtn_var->mode == vtn_variable_mode_input) {
is_vertex_input = true;
location += VERT_ATTRIB_GENERIC0;
int array_length = -1;
struct vtn_type *interface_type = var->type;
- if (is_per_vertex_inout(var, b->shader->stage)) {
+ if (is_per_vertex_inout(var, b->shader->info.stage)) {
/* In Geometry shaders (and some tessellation), inputs come
* in per-vertex arrays. However, some builtins come in
* non-per-vertex, hence the need for the is_array check. In
nir_src src = nir_src_for_reg(c->output_regs[loc].reg);
src.reg.base_offset = c->output_regs[loc].offset;
- if (c->build.shader->stage == MESA_SHADER_FRAGMENT &&
+ if (c->build.shader->info.stage == MESA_SHADER_FRAGMENT &&
var->data.location == FRAG_RESULT_DEPTH) {
/* TGSI uses TGSI_SEMANTIC_POSITION.z for the depth output, while
* NIR uses a single float FRAG_RESULT_DEPTH.
v.key = key;
v.shader = &s;
- switch (nir->stage) {
+ switch (nir->info.stage) {
case MESA_SHADER_FRAGMENT:
s.type = v.type = SHADER_FRAGMENT;
break;
s.type = v.type = SHADER_COMPUTE;
break;
default:
- errx(1, "unhandled shader stage: %d", nir->stage);
+ errx(1, "unhandled shader stage: %d", nir->info.stage);
}
info = "NIR compiler";
OPT_V(s, nir_lower_regs_to_ssa);
if (key) {
- if (s->stage == MESA_SHADER_VERTEX) {
+ if (s->info.stage == MESA_SHADER_VERTEX) {
OPT_V(s, nir_lower_clip_vs, key->ucp_enables);
if (key->vclamp_color)
OPT_V(s, nir_lower_clamp_color_outputs);
- } else if (s->stage == MESA_SHADER_FRAGMENT) {
+ } else if (s->info.stage == MESA_SHADER_FRAGMENT) {
OPT_V(s, nir_lower_clip_fs, key->ucp_enables);
if (key->fclamp_color)
OPT_V(s, nir_lower_clamp_color_outputs);
nir_function *func;
unsigned i;
- assert(nir->stage == MESA_SHADER_VERTEX ||
- nir->stage == MESA_SHADER_FRAGMENT);
+ assert(nir->info.stage == MESA_SHADER_VERTEX ||
+ nir->info.stage == MESA_SHADER_FRAGMENT);
- info->processor = pipe_shader_type_from_mesa(nir->stage);
+ info->processor = pipe_shader_type_from_mesa(nir->info.stage);
info->num_tokens = 2; /* indicate that the shader is non-empty */
info->num_instructions = 2;
nir_foreach_variable(variable, &nir->inputs) {
unsigned semantic_name, semantic_index;
unsigned attrib_count = glsl_count_attribute_slots(variable->type,
- nir->stage == MESA_SHADER_VERTEX);
+ nir->info.stage == MESA_SHADER_VERTEX);
assert(attrib_count == 1 && "not implemented");
* tracker has already mapped them to attributes via
* variable->data.driver_location.
*/
- if (nir->stage == MESA_SHADER_VERTEX)
+ if (nir->info.stage == MESA_SHADER_VERTEX)
continue;
/* Fragment shader position is a system value. */
- if (nir->stage == MESA_SHADER_FRAGMENT &&
+ if (nir->info.stage == MESA_SHADER_FRAGMENT &&
variable->data.location == VARYING_SLOT_POS) {
if (variable->data.pixel_center_integer)
info->properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER] =
nir_foreach_variable(variable, &nir->outputs) {
unsigned semantic_name, semantic_index;
- if (nir->stage == MESA_SHADER_FRAGMENT) {
+ if (nir->info.stage == MESA_SHADER_FRAGMENT) {
tgsi_get_gl_frag_result_semantic(variable->data.location,
&semantic_name, &semantic_index);
} else {
nir_foreach_variable(variable, &sel->nir->outputs) {
variable->data.driver_location *= 4;
- if (sel->nir->stage == MESA_SHADER_FRAGMENT) {
+ if (sel->nir->info.stage == MESA_SHADER_FRAGMENT) {
if (variable->data.location == FRAG_RESULT_DEPTH)
variable->data.driver_location += 2;
else if (variable->data.location == FRAG_RESULT_STENCIL)
unsigned fs_attr_idx = 0;
nir_foreach_variable(variable, &nir->inputs) {
unsigned attrib_count = glsl_count_attribute_slots(variable->type,
- nir->stage == MESA_SHADER_VERTEX);
+ nir->info.stage == MESA_SHADER_VERTEX);
unsigned input_idx = variable->data.driver_location;
for (unsigned i = 0; i < attrib_count; ++i) {
LLVMValueRef data[4];
- if (nir->stage == MESA_SHADER_VERTEX)
+ if (nir->info.stage == MESA_SHADER_VERTEX)
declare_nir_input_vs(ctx, variable, i, data);
- else if (nir->stage == MESA_SHADER_FRAGMENT)
+ else if (nir->info.stage == MESA_SHADER_FRAGMENT)
declare_nir_input_fs(ctx, variable, i, &fs_attr_idx, data);
for (unsigned chan = 0; chan < 4; chan++) {
if (vc4_debug & VC4_DEBUG_NIR) {
fprintf(stderr, "%s prog %d NIR:\n",
- gl_shader_stage_name(s->stage),
+ gl_shader_stage_name(s->info.stage),
so->program_id);
nir_print_shader(s, stderr);
fprintf(stderr, "\n");
#define BLORP_CREATE_NIR_INPUT(shader, name, type) ({ \
nir_variable *input = nir_variable_create((shader), nir_var_shader_in, \
type, #name); \
- if ((shader)->stage == MESA_SHADER_FRAGMENT) \
+ if ((shader)->info.stage == MESA_SHADER_FRAGMENT) \
input->data.interpolation = INTERP_MODE_FLAT; \
input->data.location = VARYING_SLOT_VAR0 + \
offsetof(struct brw_blorp_wm_inputs, name) / (4 * sizeof(float)); \
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
- gl_shader_stage stage = b->shader->stage;
+ gl_shader_stage stage = b->shader->info.stage;
if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
(stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
bool is_scalar)
{
nir_variable_mode indirect_mask = 0;
- if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput)
+ if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectInput)
indirect_mask |= nir_var_shader_in;
- if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput)
+ if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectOutput)
indirect_mask |= nir_var_shader_out;
- if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp)
+ if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectTemp)
indirect_mask |= nir_var_local;
bool progress;
const struct gen_device_info *devinfo = compiler->devinfo;
UNUSED bool progress; /* Written by OPT */
- const bool is_scalar = compiler->scalar_stage[nir->stage];
+ const bool is_scalar = compiler->scalar_stage[nir->info.stage];
- if (nir->stage == MESA_SHADER_GEOMETRY)
+ if (nir->info.stage == MESA_SHADER_GEOMETRY)
OPT(nir_lower_gs_intrinsics);
/* See also brw_nir_trig_workarounds.py */
OPT(nir_lower_clip_cull_distance_arrays);
nir_variable_mode indirect_mask = 0;
- if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput)
+ if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectInput)
indirect_mask |= nir_var_shader_in;
- if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput)
+ if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectOutput)
indirect_mask |= nir_var_shader_out;
- if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp)
+ if (compiler->glsl_compiler_options[nir->info.stage].EmitNoIndirectTemp)
indirect_mask |= nir_var_local;
nir_lower_indirect_derefs(nir, indirect_mask);
{
const struct gen_device_info *devinfo = compiler->devinfo;
bool debug_enabled =
- (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));
+ (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->info.stage));
UNUSED bool progress; /* Written by OPT */
}
fprintf(stderr, "NIR (SSA form) for %s shader:\n",
- _mesa_shader_stage_to_string(nir->stage));
+ _mesa_shader_stage_to_string(nir->info.stage));
nir_print_shader(nir, stderr);
}
if (unlikely(debug_enabled)) {
fprintf(stderr, "NIR (final form) for %s shader:\n",
- _mesa_shader_stage_to_string(nir->stage));
+ _mesa_shader_stage_to_string(nir->info.stage));
nir_print_shader(nir, stderr);
}
const struct gen_device_info *devinfo = compiler->devinfo;
if ((devinfo->gen <= 7 && !devinfo->is_haswell) ||
- !compiler->scalar_stage[nir->stage]) {
+ !compiler->scalar_stage[nir->info.stage]) {
memset(out_ranges, 0, 4 * sizeof(struct brw_ubo_range));
return;
}
brw_nir_lower_cs_intrinsics(nir_shader *nir,
struct brw_cs_prog_data *prog_data)
{
- assert(nir->stage == MESA_SHADER_COMPUTE);
+ assert(nir->info.stage == MESA_SHADER_COMPUTE);
bool progress = false;
struct lower_intrinsics_state state;
void
brw_nir_apply_tcs_quads_workaround(nir_shader *nir)
{
- assert(nir->stage == MESA_SHADER_TESS_CTRL);
+ assert(nir->info.stage == MESA_SHADER_TESS_CTRL);
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
stage_prog_data(stage_prog_data),
mem_ctx(mem_ctx),
cfg(NULL),
- stage(shader->stage)
+ stage(shader->info.stage)
{
debug_enabled = INTEL_DEBUG & intel_debug_flag_for_shader_stage(stage);
stage_name = _mesa_shader_stage_to_string(stage);
const struct cfg_t *cfg)
{
const struct gen_device_info *devinfo = p->devinfo;
- const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->stage);
+ const char *stage_abbrev = _mesa_shader_stage_to_abbrev(nir->info.stage);
bool debug_flag = INTEL_DEBUG &
- intel_debug_flag_for_shader_stage(nir->stage);
+ intel_debug_flag_for_shader_stage(nir->info.stage);
struct annotation_info annotation;
memset(&annotation, 0, sizeof(annotation));
int spill_count = 0, fill_count = 0;
case SHADER_OPCODE_TG4:
case SHADER_OPCODE_TG4_OFFSET:
case SHADER_OPCODE_SAMPLEINFO:
- generate_tex(p, prog_data, nir->stage,
+ generate_tex(p, prog_data, nir->info.stage,
inst, dst, src[0], src[1], src[2]);
break;
case SHADER_OPCODE_FIND_LIVE_CHANNEL: {
const struct brw_reg mask =
- brw_stage_has_packed_dispatch(devinfo, nir->stage,
+ brw_stage_has_packed_dispatch(devinfo, nir->info.stage,
&prog_data->base) ? brw_imm_ud(~0u) :
brw_dmask_reg();
brw_find_live_channel(p, dst, mask);
if (unlikely(debug_flag)) {
fprintf(stderr, "Native code for %s %s shader %s:\n",
nir->info.label ? nir->info.label : "unnamed",
- _mesa_shader_stage_to_string(nir->stage), nir->info.name);
+ _mesa_shader_stage_to_string(nir->info.stage), nir->info.name);
fprintf(stderr, "%s vec4 shader: %d instructions. %d loops. %u cycles. %d:%d "
"spills:fills. Compacted %d to %d bytes (%.0f%%)\n",
struct anv_pipeline_bind_map *map)
{
struct anv_pipeline_layout *layout = pipeline->layout;
+ gl_shader_stage stage = shader->info.stage;
struct apply_pipeline_layout_state state = {
.shader = shader,
BITSET_WORD b, _tmp;
BITSET_FOREACH_SET(b, _tmp, state.set[set].used,
set_layout->binding_count) {
- if (set_layout->binding[b].stage[shader->stage].surface_index >= 0) {
+ if (set_layout->binding[b].stage[stage].surface_index >= 0) {
map->surface_count +=
anv_descriptor_set_binding_layout_get_hw_size(&set_layout->binding[b]);
}
- if (set_layout->binding[b].stage[shader->stage].sampler_index >= 0) {
+ if (set_layout->binding[b].stage[stage].sampler_index >= 0) {
map->sampler_count +=
anv_descriptor_set_binding_layout_get_hw_size(&set_layout->binding[b]);
}
- if (set_layout->binding[b].stage[shader->stage].image_index >= 0)
+ if (set_layout->binding[b].stage[stage].image_index >= 0)
map->image_count += set_layout->binding[b].array_size;
}
}
struct anv_descriptor_set_binding_layout *binding =
&set_layout->binding[b];
- if (binding->stage[shader->stage].surface_index >= 0) {
+ if (binding->stage[stage].surface_index >= 0) {
state.set[set].surface_offsets[b] = surface;
struct anv_sampler **samplers = binding->immutable_samplers;
for (unsigned i = 0; i < binding->array_size; i++) {
}
}
- if (binding->stage[shader->stage].sampler_index >= 0) {
+ if (binding->stage[stage].sampler_index >= 0) {
state.set[set].sampler_offsets[b] = sampler;
struct anv_sampler **samplers = binding->immutable_samplers;
for (unsigned i = 0; i < binding->array_size; i++) {
}
}
- if (binding->stage[shader->stage].image_index >= 0) {
+ if (binding->stage[stage].image_index >= 0) {
state.set[set].image_offsets[b] = image;
image += binding->array_size;
}
void
anv_nir_lower_input_attachments(nir_shader *shader)
{
- assert(shader->stage == MESA_SHADER_FRAGMENT);
+ assert(shader->info.stage == MESA_SHADER_FRAGMENT);
nir_foreach_function(function, shader) {
if (!function->impl)
static nir_ssa_def *
build_instance_id(struct lower_multiview_state *state)
{
- assert(state->builder.shader->stage == MESA_SHADER_VERTEX);
+ assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);
if (state->instance_id == NULL) {
nir_builder *b = &state->builder;
assert(state->view_mask != 0);
if (0 && _mesa_bitcount(state->view_mask) == 1) {
state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
- } else if (state->builder.shader->stage == MESA_SHADER_VERTEX) {
+ } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
/* We only support 16 viewports */
assert((state->view_mask & 0xffff0000) == 0);
}
} else {
const struct glsl_type *type = glsl_int_type();
- if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
- b->shader->stage == MESA_SHADER_GEOMETRY)
+ if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
+ b->shader->info.stage == MESA_SHADER_GEOMETRY)
type = glsl_array_type(type, 1);
nir_variable *idx_var =
nir_variable_create(b->shader, nir_var_shader_in,
type, "view index");
idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
- if (b->shader->stage == MESA_SHADER_FRAGMENT)
+ if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
idx_var->data.interpolation = INTERP_MODE_FLAT;
if (glsl_type_is_array(type)) {
bool
anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
{
- assert(shader->stage != MESA_SHADER_COMPUTE);
+ assert(shader->info.stage != MESA_SHADER_COMPUTE);
/* If multiview isn't enabled, we have nothing to do. */
if (view_mask == 0)
* available in the VS. If it's not a fragment shader, we need to pass
* the view index on to the next stage.
*/
- if (shader->stage != MESA_SHADER_FRAGMENT) {
+ if (shader->info.stage != MESA_SHADER_FRAGMENT) {
nir_ssa_def *view_index = build_view_index(&state);
nir_builder *b = &state.builder;
spec_entries, num_spec_entries,
stage, entrypoint_name, &supported_ext, nir_options);
nir_shader *nir = entry_point->shader;
- assert(nir->stage == stage);
+ assert(nir->info.stage == stage);
nir_validate_shader(nir);
ralloc_steal(mem_ctx, nir);
NIR_PASS_V(producer, nir_lower_indirect_derefs, indirect_mask);
NIR_PASS_V(consumer, nir_lower_indirect_derefs, indirect_mask);
- const bool p_is_scalar = compiler->scalar_stage[producer->stage];
+ const bool p_is_scalar =
+ compiler->scalar_stage[producer->info.stage];
producer = brw_nir_optimize(producer, compiler, p_is_scalar);
- const bool c_is_scalar = compiler->scalar_stage[producer->stage];
+ const bool c_is_scalar =
+ compiler->scalar_stage[producer->info.stage];
consumer = brw_nir_optimize(consumer, compiler, c_is_scalar);
}
brw_nir_setup_glsl_builtin_uniform(var, prog, stage_prog_data,
is_scalar);
} else {
- brw_nir_setup_glsl_uniform(shader->stage, var, prog, stage_prog_data,
- is_scalar);
+ brw_nir_setup_glsl_uniform(shader->info.stage, var, prog,
+ stage_prog_data, is_scalar);
}
}
}
NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_lower_io_types);
- if (nir->stage == MESA_SHADER_VERTEX) {
+ if (nir->info.stage == MESA_SHADER_VERTEX) {
/* Needs special handling so drvloc matches the vbo state: */
st_nir_assign_vs_in_locations(prog, nir);
/* Re-lower global vars, to deal with any dead VS inputs. */
&nir->num_outputs,
type_size);
st_nir_fixup_varying_slots(st, &nir->outputs);
- } else if (nir->stage == MESA_SHADER_FRAGMENT) {
+ } else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
sort_varyings(&nir->inputs);
nir_assign_var_locations(&nir->inputs,
&nir->num_inputs,
nir_assign_var_locations(&nir->outputs,
&nir->num_outputs,
type_size);
- } else if (nir->stage == MESA_SHADER_COMPUTE) {
+ } else if (nir->info.stage == MESA_SHADER_COMPUTE) {
/* TODO? */
} else {
unreachable("invalid shader type for tgsi bypass\n");
}
struct gl_shader_program *shader_program;
- switch (nir->stage) {
+ switch (nir->info.stage) {
case MESA_SHADER_VERTEX:
shader_program = ((struct st_vertex_program *)prog)->shader_program;
break;
}
NIR_PASS_V(nir, nir_lower_atomics_to_ssbo,
- st->ctx->Const.Program[nir->stage].MaxAtomicBuffers);
+ st->ctx->Const.Program[nir->info.stage].MaxAtomicBuffers);
st_nir_assign_uniform_locations(prog, shader_program,
&nir->uniforms, &nir->num_uniforms);