struct qreg result;
switch (instr->op) {
- case nir_op_fmov:
- case nir_op_imov:
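+ /* NIR folded nir_op_fmov and nir_op_imov into a single typeless
+  * nir_op_mov; a move only copies bits, so the float/int split was
+  * redundant.
+  */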
+ case nir_op_mov:
result = qir_MOV(c, src[0]);
break;
case nir_op_fmul:

else
point_size = qir_uniform_f(c, 1.0);
- /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
- * BCM21553).
- */
- point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
-
qir_VPM_WRITE(c, point_size);
}

static void
vc4_optimize_nir(struct nir_shader *s)
{
bool progress;
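+ /* Mask of flrp bit sizes (16/32/64) this backend wants lowered,
+  * taken straight from the compiler options.
+  */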
+ unsigned lower_flrp =
+ (s->options->lower_flrp16 ? 16 : 0) |
+ (s->options->lower_flrp32 ? 32 : 0) |
+ (s->options->lower_flrp64 ? 64 : 0);
do {
progress = false;
NIR_PASS_V(s, nir_lower_vars_to_ssa);
- NIR_PASS(progress, s, nir_lower_alu_to_scalar);
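+ /* nir_lower_alu_to_scalar grew a filter callback plus user data;
+  * NULL, NULL keeps the old behaviour of scalarizing every vector
+  * ALU op.
+  */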
+ NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_lower_phis_to_scalar);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_remove_phis);
NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
NIR_PASS(progress, s, nir_opt_algebraic);
NIR_PASS(progress, s, nir_opt_constant_folding);
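+ /* flrp(a, b, c) = a * (1 - c) + b * c; nir_lower_flrp expands the
+  * masked bit sizes into fadd/fmul (or ffma-based) sequences.
+  */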
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress = false;
+
+ NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ s->options->lower_ffma);
+ if (lower_flrp_progress) {
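+ /* The expansion can leave constant subexpressions behind
+  * (e.g. 1.0 - c with a constant c), so fold them right away.
+  */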
+ NIR_PASS(progress, s, nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, s, nir_opt_undef);
NIR_PASS(progress, s, nir_opt_loop_unroll,
nir_var_shader_in |

0));
break;
- case nir_intrinsic_load_alpha_ref_float:
- ntq_store_dest(c, &instr->dest, 0,
- qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
- break;
-
case nir_intrinsic_load_sample_mask_in:
ntq_store_dest(c, &instr->dest, 0,
qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));

.lower_fdiv = true,
.lower_ffma = true,
.lower_flrp32 = true,
+ .lower_fmod = true,
.lower_fpow = true,
.lower_fsat = true,
.lower_fsqrt = true,
.lower_ldexp = true,
.lower_negate = true,
- .native_integers = true,
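+ /* native_integers is gone: NIR now assumes integer-native hardware
+  * everywhere. lower_rotate asks nir_opt_algebraic to turn the new
+  * rotate opcodes into shift/or sequences.
+  */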
+ .lower_rotate = true,
+ .lower_to_scalar = true,
.max_unroll_iterations = 32,
};

c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
- if (stage == QSTAGE_FRAG) {
- if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) {
- NIR_PASS_V(c->s, nir_lower_alpha_test,
- c->fs_key->alpha_test_func,
- c->fs_key->sample_alpha_to_one &&
- c->fs_key->msaa);
- }
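+ /* Alpha-test lowering moved out of the driver (the state tracker
+  * runs it before the shader gets here), so only blend lowering
+  * remains.
+  */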
+ if (stage == QSTAGE_FRAG)
NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
- }
struct nir_lower_tex_options tex_options = {
/* We would need to implement txs, but we don't want the

if (c->key->ucp_enables) {
if (stage == QSTAGE_FRAG) {
- NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
+ NIR_PASS_V(c->s, nir_lower_clip_fs,
+ c->key->ucp_enables, false /* use_clipdist_array */);
} else {
NIR_PASS_V(c->s, nir_lower_clip_vs,
- c->key->ucp_enables, false);
+ c->key->ucp_enables, false /* use_vars */,
+ false /* use_clipdist_array */,
+ NULL /* clipplane_state_tokens */);
NIR_PASS_V(c->s, nir_lower_io_to_scalar,
nir_var_shader_out);
}
vc4_optimize_nir(c->s);
+ /* Do late algebraic optimization to turn add(a, neg(b)) back into
+ * subs, then the mandatory cleanup after algebraic. Note that it may
+ * produce fnegs, and if so then we need to keep running to squash
+ * fneg(fneg(a)).
+ */
+ bool more_late_algebraic = true;
+ while (more_late_algebraic) {
+ more_late_algebraic = false;
+ NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
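+ /* Only nir_opt_algebraic_late feeds the loop condition; the
+  * cleanup passes below cannot introduce new fnegs.
+  */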
+ NIR_PASS_V(c->s, nir_opt_constant_folding);
+ NIR_PASS_V(c->s, nir_copy_prop);
+ NIR_PASS_V(c->s, nir_opt_dce);
+ NIR_PASS_V(c->s, nir_opt_cse);
+ }
+
NIR_PASS_V(c->s, nir_lower_bool_to_int32);
NIR_PASS_V(c->s, nir_convert_from_ssa, true);

s = tgsi_to_nir(cso->tokens, pctx->screen);
}
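+ /* Clamp gl_PointSize in NIR when the shader is created.
+  * nir_lower_point_size clamps to [min, max] and treats 0.0f as "no
+  * bound", so this enforces just the 1.0f floor, which also covers
+  * the old 0.125 PTB workaround removed above.
+  */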
+ if (s->info.stage == MESA_SHADER_VERTEX)
+ NIR_PASS_V(s, nir_lower_point_size, 1.0f, 0.0f);
+
NIR_PASS_V(s, nir_lower_io, nir_var_all, type_size,
(nir_lower_io_options)0);