.lower_rotate = true,
.lower_to_scalar = true,
.has_imul24 = true,
+ .lower_wpos_pntc = true,
+
+ /* Only needed for the spirv_to_nir() pass done in ir3_cmdline.c,
+ * but it should be harmless for GL since 64b is not
+ * supported there.
+ */
+ .lower_int64_options = (nir_lower_int64_options)~0,
};
/* we don't want to lower vertex_id to _zero_based on newer gpus: */
.lower_to_scalar = true,
.has_imul24 = true,
.max_unroll_iterations = 32,
+ .lower_wpos_pntc = true,
+
+ /* Only needed for the spirv_to_nir() pass done in ir3_cmdline.c,
+ * but it should be harmless for GL since 64b is not
+ * supported there.
+ */
+ .lower_int64_options = (nir_lower_int64_options)~0,
};
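For reference, the cast (nir_lower_int64_options)~0 simply sets every bit of the
lowering mask, so every 64-bit integer lowering NIR supports is requested. As a
minimal sketch of the alternative (example_options is hypothetical and not part
of this change), a backend with partial native int64 support would name only the
lowerings it needs:

    /* illustrative subset only; ir3 uses ~0 above to lower everything */
    static const nir_shader_compiler_options example_options = {
       .lower_int64_options = nir_lower_imul64 | nir_lower_divmod64,
    };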
const nir_shader_compiler_options *
OPT(s, nir_opt_dce);
}
progress |= OPT(s, nir_opt_if, false);
+ progress |= OPT(s, nir_opt_loop_unroll, nir_var_all);
progress |= OPT(s, nir_opt_remove_phis);
progress |= OPT(s, nir_opt_undef);
} while (progress);
}
void
-ir3_optimize_nir(struct ir3_shader *shader, nir_shader *s)
+ir3_finalize_nir(struct ir3_compiler *compiler, nir_shader *s)
{
struct nir_lower_tex_options tex_options = {
.lower_rect = 0,
.lower_tg4_offsets = true,
};
- if (shader->compiler->gpu_id >= 400) {
+ if (compiler->gpu_id >= 400) {
/* a4xx seems to have *no* sam.p */
tex_options.lower_txp = ~0; /* lower all txp */
} else {
/* a3xx just needs to avoid sam.p for 3d tex */
tex_options.lower_txp = (1 << GLSL_SAMPLER_DIM_3D);
}
- OPT_V(s, nir_lower_regs_to_ssa);
- OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);
+ if (s->info.stage == MESA_SHADER_GEOMETRY)
+ NIR_PASS_V(s, ir3_nir_lower_gs);
- OPT_V(s, ir3_nir_apply_trig_workarounds);
+ NIR_PASS_V(s, nir_lower_io_arrays_to_elements_no_indirects, false);
+
+ NIR_PASS_V(s, nir_lower_amul, ir3_glsl_type_size);
- if (shader->type == MESA_SHADER_FRAGMENT)
- OPT_V(s, nir_lower_fb_read);
+ OPT_V(s, nir_lower_regs_to_ssa);
+ OPT_V(s, nir_lower_wrmasks, should_split_wrmask, s);
OPT_V(s, nir_lower_tex, &tex_options);
OPT_V(s, nir_lower_load_const_to_scalar);
- if (shader->compiler->gpu_id < 500)
+ if (compiler->gpu_id < 500)
OPT_V(s, ir3_nir_lower_tg4_to_tex);
ir3_optimize_loop(s);
nir_sweep(s);
}
+/**
+ * Late passes that need to be done after pscreen->finalize_nir()
+ */
+void
+ir3_nir_post_finalize(struct ir3_compiler *compiler, nir_shader *s)
+{
+ NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
+ ir3_glsl_type_size, (nir_lower_io_options)0);
+
+ if (s->info.stage == MESA_SHADER_FRAGMENT) {
+ /* NOTE: lower load_barycentric_at_sample first, since it
+ * produces load_barycentric_at_offset:
+ */
+ NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_sample);
+ NIR_PASS_V(s, ir3_nir_lower_load_barycentric_at_offset);
+ NIR_PASS_V(s, ir3_nir_move_varying_inputs);
+ NIR_PASS_V(s, nir_lower_fb_read);
+ }
+
+ if (compiler->gpu_id >= 600 &&
+ s->info.stage == MESA_SHADER_FRAGMENT &&
+ !(ir3_shader_debug & IR3_DBG_NOFP16)) {
+ NIR_PASS_V(s, nir_lower_mediump_outputs);
+ }
+
+ /* we cannot ensure that ir3_finalize_nir() is only called once, so
+ * we also need to do trig workarounds here:
+ */
+ OPT_V(s, ir3_nir_apply_trig_workarounds);
+
+ ir3_optimize_loop(s);
+}
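
A minimal sketch of how the two entry points are presumably sequenced (the
example_* wrappers are hypothetical, for illustration only; the real call sites
live in the gallium driver and are not part of this diff):

    static void
    example_screen_finalize_nir(struct ir3_compiler *compiler, nir_shader *nir)
    {
       ir3_finalize_nir(compiler, nir);      /* backs pscreen->finalize_nir() */
    }

    static void
    example_create_shader_state(struct ir3_compiler *compiler, nir_shader *nir)
    {
       /* late, driver-side lowering once the state tracker is done: */
       ir3_nir_post_finalize(compiler, nir);
    }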
+
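+/* Lower reads of the gl_Layer (VARYING_SLOT_LAYER) fragment input to a
+ * constant zero, for variants compiled with key.layer_zero set (see
+ * ir3_nir_lower_variant() below):
+ */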
+static bool
+ir3_nir_lower_layer_id(nir_shader *nir)
+{
+ unsigned layer_id_loc = ~0;
+ nir_foreach_shader_in_variable(var, nir) {
+ if (var->data.location == VARYING_SLOT_LAYER) {
+ layer_id_loc = var->data.driver_location;
+ break;
+ }
+ }
+
+ assert(layer_id_loc != ~0);
+
+ bool progress = false;
+ nir_builder b;
+
+ nir_foreach_function(func, nir) {
+ nir_builder_init(&b, func->impl);
+
+ nir_foreach_block(block, func->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin =
+ nir_instr_as_intrinsic(instr);
+
+ if (intrin->intrinsic != nir_intrinsic_load_input)
+ continue;
+
+ unsigned base = nir_intrinsic_base(intrin);
+ if (base != layer_id_loc)
+ continue;
+
+ b.cursor = nir_before_instr(&intrin->instr);
+ nir_ssa_def *zero = nir_imm_int(&b, 0);
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+ nir_src_for_ssa(zero));
+ nir_instr_remove(&intrin->instr);
+ progress = true;
+ }
+ }
+
+ if (progress) {
+ nir_metadata_preserve(func->impl,
+ nir_metadata_block_index |
+ nir_metadata_dominance);
+ } else {
+ nir_metadata_preserve(func->impl, nir_metadata_all);
+ }
+ }
+
+ return progress;
+}
+
void
ir3_nir_lower_variant(struct ir3_shader_variant *so, nir_shader *s)
{
break;
case MESA_SHADER_TESS_CTRL:
NIR_PASS_V(s, ir3_nir_lower_tess_ctrl, so, so->key.tessellation);
- NIR_PASS_V(s, ir3_nir_lower_to_explicit_input);
+ NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so->shader->compiler);
progress = true;
break;
case MESA_SHADER_TESS_EVAL:
progress = true;
break;
case MESA_SHADER_GEOMETRY:
- NIR_PASS_V(s, ir3_nir_lower_to_explicit_input);
+ NIR_PASS_V(s, ir3_nir_lower_to_explicit_input, so->shader->compiler);
progress = true;
break;
default:
progress |= OPT(s, nir_lower_clip_fs, so->key.ucp_enables, false);
if (so->key.fclamp_color)
progress |= OPT(s, nir_lower_clamp_color_outputs);
+ if (so->key.layer_zero && (s->info.inputs_read & VARYING_BIT_LAYER))
+ progress |= OPT(s, ir3_nir_lower_layer_id);
}
if (so->key.color_two_side) {
- OPT_V(s, nir_lower_two_sided_color);
+ OPT_V(s, nir_lower_two_sided_color, true);
progress = true;
}
progress |= OPT(s, nir_lower_tex, &tex_options);
}
- progress |= OPT(s, ir3_nir_analyze_ubo_ranges, so);
+ if (!so->binning_pass)
+ OPT_V(s, ir3_nir_analyze_ubo_ranges, so);
+
+ progress |= OPT(s, ir3_nir_lower_ubo_loads, so);
/* UBO offset lowering has to come after we've decided what will
* be left as load_ubo
*/
switch (intr->intrinsic) {
case nir_intrinsic_get_buffer_size:
+ if (ir3_bindless_resource(intr->src[0]))
+ break;
idx = nir_src_as_uint(intr->src[0]);
if (layout->ssbo_size.mask & (1 << idx))
break;
MAX2(layout->num_driver_params, IR3_DP_INSTID_BASE + 1);
break;
case nir_intrinsic_load_user_clip_plane:
+ idx = nir_intrinsic_ucp_id(intr);
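+ /* each enabled user clip plane consumes four driver params (x/y/z/w),
+ * so reserve params up to and including the plane being loaded:
+ */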
layout->num_driver_params =
- MAX2(layout->num_driver_params, IR3_DP_UCP7_W + 1);
+ MAX2(layout->num_driver_params, IR3_DP_UCP0_X + (idx + 1) * 4);
break;
case nir_intrinsic_load_num_work_groups:
layout->num_driver_params =
constoff += align(cnt, 4) / 4;
}
- if (const_state->num_driver_params > 0)
+ if (const_state->num_driver_params > 0) {
+ /* offset cannot be 0 for vs params loaded by CP_DRAW_INDIRECT_MULTI */
+ if (v->type == MESA_SHADER_VERTEX && compiler->gpu_id >= 600)
+ constoff = MAX2(constoff, 1);
const_state->offsets.driver_param = constoff;
+ }
constoff += const_state->num_driver_params / 4;
if ((v->type == MESA_SHADER_VERTEX) &&
const_state->offsets.immediate = constoff;
- assert(constoff <= compiler->max_const);
+ assert(constoff <= ir3_max_const(v));
}