*/
#include <inttypes.h>
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/crc32.h"
#include "util/u_math.h"
#include "util/u_memory.h"
struct qreg result)
{
struct qinst *last_inst = NULL;
- if (!list_empty(&c->cur_block->instructions))
+ if (!list_is_empty(&c->cur_block->instructions))
last_inst = (struct qinst *)c->cur_block->instructions.prev;
assert(result.file == QFILE_UNIF ||
if (!src->is_ssa)
return false;
- if (!list_empty(&src->ssa->if_uses))
+ if (!list_is_empty(&src->ssa->if_uses))
return false;
return (src->ssa->uses.next == &src->use_link &&
struct qreg result;
switch (instr->op) {
- case nir_op_fmov:
- case nir_op_imov:
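+ /* A single type-agnostic, bit-exact mov covers both the integer and
+ * float copies that the separate fmov/imov cases handled.
+ */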
+ case nir_op_mov:
result = qir_MOV(c, src[0]);
break;
case nir_op_fmul:
else
point_size = qir_uniform_f(c, 1.0);
- /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
- * BCM21553).
- */
- point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
-
qir_VPM_WRITE(c, point_size);
}
progress = false;
NIR_PASS_V(s, nir_lower_vars_to_ssa);
- NIR_PASS(progress, s, nir_lower_alu_to_scalar);
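+ /* NULL filter callback and data: scalarize every ALU instruction. */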
+ NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_lower_phis_to_scalar);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_remove_phis);
.lower_fdiv = true,
.lower_ffma = true,
.lower_flrp32 = true,
+ .lower_fmod = true,
.lower_fpow = true,
.lower_fsat = true,
.lower_fsqrt = true,
.lower_ldexp = true,
.lower_negate = true,
+ .lower_rotate = true,
+ .lower_to_scalar = true,
.max_unroll_iterations = 32,
};
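+ /* The trailing NULL passes no alpha-ref state tokens, so the pass
+ * fetches the reference value through its load_alpha_ref_float
+ * intrinsic instead of a driver-state uniform.
+ */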
NIR_PASS_V(c->s, nir_lower_alpha_test,
c->fs_key->alpha_test_func,
c->fs_key->sample_alpha_to_one &&
- c->fs_key->msaa);
+ c->fs_key->msaa,
+ NULL);
}
NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
}
if (c->key->ucp_enables) {
if (stage == QSTAGE_FRAG) {
- NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
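+ /* false = use_clipdist_array: read the clip distances as vec4
+ * varying slots rather than a compact float array.
+ */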
+ NIR_PASS_V(c->s, nir_lower_clip_fs,
+ c->key->ucp_enables, false);
} else {
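+ /* use_vars = false, use_clipdist_array = false; the trailing NULL
+ * leaves the clip-plane uniform's driver state tokens unset.
+ */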
NIR_PASS_V(c->s, nir_lower_clip_vs,
- c->key->ucp_enables, false);
+ c->key->ucp_enables, false, false, NULL);
NIR_PASS_V(c->s, nir_lower_io_to_scalar,
nir_var_shader_out);
}
NIR_PASS_V(c->s, vc4_nir_lower_io, c);
NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
- NIR_PASS_V(c->s, nir_lower_idiv);
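+ /* nir_lower_idiv_fast emits a much shorter sequence than the
+ * precise path, at the cost of exactness on some inputs.
+ */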
+ NIR_PASS_V(c->s, nir_lower_idiv, nir_lower_idiv_fast);
vc4_optimize_nir(c->s);
+ /* Do late algebraic optimization to turn add(a, neg(b)) back into
+ * subs, then run the mandatory cleanup passes after algebraic. Note
+ * that it may produce fnegs, and if so then we need to keep running
+ * to squash fneg(fneg(a)).
+ */
+ bool more_late_algebraic = true;
+ while (more_late_algebraic) {
+ more_late_algebraic = false;
+ NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
+ NIR_PASS_V(c->s, nir_opt_constant_folding);
+ NIR_PASS_V(c->s, nir_copy_prop);
+ NIR_PASS_V(c->s, nir_opt_dce);
+ NIR_PASS_V(c->s, nir_opt_cse);
+ }
+
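+ /* Lower NIR's 1-bit booleans to the 32-bit 0 / ~0 convention before
+ * leaving SSA.
+ */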
NIR_PASS_V(c->s, nir_lower_bool_to_int32);
NIR_PASS_V(c->s, nir_convert_from_ssa, true);
s = tgsi_to_nir(cso->tokens, pctx->screen);
}
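+ /* Clamp gl_PointSize to a 1.0 minimum; a max of 0.0 means no upper
+ * clamp is applied.
+ */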
+ if (s->info.stage == MESA_SHADER_VERTEX)
+ NIR_PASS_V(s, nir_lower_point_size, 1.0f, 0.0f);
+
NIR_PASS_V(s, nir_lower_io, nir_var_all, type_size,
(nir_lower_io_options)0);