bool allow_copies)
{
bool progress;
+ unsigned lower_flrp =
+ (shader->options->lower_flrp16 ? 16 : 0) |
+ (shader->options->lower_flrp32 ? 32 : 0) |
+ (shader->options->lower_flrp64 ? 64 : 0);
do {
progress = false;
NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
NIR_PASS(progress, shader, nir_opt_algebraic);
NIR_PASS(progress, shader, nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+ NIR_PASS(lower_flrp_progress,
+ shader,
+ nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ shader->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, shader,
+ nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, shader, nir_opt_undef);
NIR_PASS(progress, shader, nir_opt_conditional_discard);
if (shader->options->max_unroll_iterations) {
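The lower_flrp mask is built from the per-bit-size flags that already exist in each backend's nir_shader_compiler_options; they are the same flags the nir_opt_algebraic rules removed below keyed on. As a rough sketch (the field names are real, the values purely illustrative for a backend with no native flrp):

    static const nir_shader_compiler_options options = {
       .lower_flrp16 = true,
       .lower_flrp32 = true,
       .lower_flrp64 = true,
       .lower_ffma = true, /* also feeds nir_lower_flrp's final argument above */
       /* other fields elided */
    };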
v3d_optimize_nir(struct nir_shader *s)
{
bool progress;
+ unsigned lower_flrp =
+ (s->options->lower_flrp16 ? 16 : 0) |
+ (s->options->lower_flrp32 ? 32 : 0) |
+ (s->options->lower_flrp64 ? 64 : 0);
do {
progress = false;
NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
NIR_PASS(progress, s, nir_opt_algebraic);
NIR_PASS(progress, s, nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+
+ NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ s->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, s, nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, s, nir_opt_undef);
} while (progress);
(('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
(('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
- (('flrp@16', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp16'),
- (('flrp@32', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp32'),
- (('flrp@64', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp64'),
(('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
(('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
(('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
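The three removed rules were the old single-identity lowering,

    flrp(a, b, c) = a + c*(b - a)

applied per bit size (the @16/@32/@64 suffixes restrict a rule to 16-, 32-, or 64-bit flrp). nir_lower_flrp can instead pick, per instruction, between that fast form, the more numerically robust

    flrp(a, b, c) = a*(1 - c) + b*c

which yields exactly b at c = 1, and ffma-based variants when the caller reports that ffma is available. The two '~flrp' rules kept above are still safe: the '~' marks them inexact, so nir_opt_algebraic skips them for instructions tagged precise.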
ir3_optimize_loop(nir_shader *s)
{
bool progress;
+ unsigned lower_flrp =
+ (s->options->lower_flrp16 ? 16 : 0) |
+ (s->options->lower_flrp32 ? 32 : 0) |
+ (s->options->lower_flrp64 ? 64 : 0);
+
do {
progress = false;
progress |= OPT(s, nir_opt_intrinsics);
progress |= OPT(s, nir_opt_algebraic);
progress |= OPT(s, nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ if (OPT(s, nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ s->options->lower_ffma)) {
+ OPT(s, nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
progress |= OPT(s, nir_opt_dead_cf);
if (OPT(s, nir_opt_trivial_continues)) {
progress |= true;
optimise_nir(nir_shader *nir)
{
bool progress;
+ unsigned lower_flrp =
+ (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+ NIR_PASS(lower_flrp_progress,
+ nir,
+ nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ nir->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, nir,
+ nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_opt_loop_unroll,
nir_var_shader_in |
si_nir_opts(struct nir_shader *nir)
{
bool progress;
+ unsigned lower_flrp =
+ (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
+
do {
progress = false;
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+
+ NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ nir->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, nir,
+ nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_opt_conditional_discard);
if (nir->options->max_unroll_iterations) {
vc4_optimize_nir(struct nir_shader *s)
{
bool progress;
+ unsigned lower_flrp =
+ (s->options->lower_flrp16 ? 16 : 0) |
+ (s->options->lower_flrp32 ? 32 : 0) |
+ (s->options->lower_flrp64 ? 64 : 0);
do {
progress = false;
NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
NIR_PASS(progress, s, nir_opt_algebraic);
NIR_PASS(progress, s, nir_opt_constant_folding);
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+
+ NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ s->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, s, nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, s, nir_opt_undef);
NIR_PASS(progress, s, nir_opt_loop_unroll,
nir_var_shader_in |
brw_nir_no_indirect_mask(compiler, nir->info.stage);
bool progress;
+ unsigned lower_flrp =
+ (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
+
do {
progress = false;
OPT(nir_split_array_vars, nir_var_function_temp);
OPT(nir_opt_idiv_const, 32);
OPT(nir_opt_algebraic);
OPT(nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ /* To match the old behavior, set always_precise only for scalar
+ * shader stages.
+ */
+ if (OPT(nir_lower_flrp,
+ lower_flrp,
+ is_scalar /* always_precise */,
+ compiler->devinfo->gen >= 6)) {
+ OPT(nir_opt_constant_folding);
+ }
+
+ /* Nothing should rematerialize any flrps, so we only need to do this
+ * lowering once.
+ */
+ lower_flrp = 0;
+ }
+
OPT(nir_opt_dead_cf);
if (OPT(nir_opt_trivial_continues)) {
/* If nir_opt_trivial_continues makes progress, then we need to clean
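This call site differs from the others in both boolean arguments: always_precise is is_scalar, preserving the scalar backend's old always-precise behavior as the comment notes, and ffma availability is keyed off the hardware generation (three-source MAD exists from Gen6 onward) rather than a lower_ffma flag. When ffma is available, the pass is free to emit a fused sequence along these lines (a sketch of one possible choice, not necessarily the exact one):

    /* flrp(a, b, c) = ffma(b, c, ffma(-a, c, a))
     *               = b*c + a*(1 - c)
     */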
st_nir_opts(nir_shader *nir, bool scalar)
{
bool progress;
+ unsigned lower_flrp =
+ (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
+
do {
progress = false;
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+
+ NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ nir->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, nir,
+ nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only need to do this
+ * lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_opt_conditional_discard);
if (nir->options->max_unroll_iterations) {
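One detail shared by all the NIR_PASS-based call sites above: NIR_PASS only ever sets its progress variable when the pass reports progress; it never writes false. lower_flrp_progress therefore must be initialized before the call, roughly:

    bool lower_flrp_progress = false; /* NIR_PASS never clears it */
    NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
             lower_flrp, false /* always_precise */,
             nir->options->lower_ffma);
    if (lower_flrp_progress) {
       NIR_PASS(progress, nir, nir_opt_constant_folding);
       progress = true;
    }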