}
}
+static inline bool
+nir_is_float_control_signed_zero_inf_nan_preserve(unsigned execution_mode, unsigned bit_size)
+{
+   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16) ||
+          (32 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32) ||
+          (64 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64);
+}
+
+static inline bool
+nir_is_denorm_flush_to_zero(unsigned execution_mode, unsigned bit_size)
+{
+   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16) ||
+          (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32) ||
+          (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64);
+}
+
+static inline bool
+nir_is_denorm_preserve(unsigned execution_mode, unsigned bit_size)
+{
+   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP16) ||
+          (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP32) ||
+          (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP64);
+}
+
+static inline bool
+nir_is_rounding_mode_rtne(unsigned execution_mode, unsigned bit_size)
+{
+   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
+          (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
+          (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
+}
+
+static inline bool
+nir_is_rounding_mode_rtz(unsigned execution_mode, unsigned bit_size)
+{
+   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
+          (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
+          (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
+}
+
+static inline bool
+nir_has_any_rounding_mode_rtz(unsigned execution_mode)
+{
+   return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
+          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
+          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
+}
+
+static inline bool
+nir_has_any_rounding_mode_rtne(unsigned execution_mode)
+{
+   return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
+          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
+          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
+}
+
+static inline nir_rounding_mode
+nir_get_rounding_mode_from_float_controls(unsigned execution_mode,
+                                          nir_alu_type type)
+{
+   if (nir_alu_type_get_base_type(type) != nir_type_float)
+      return nir_rounding_mode_undef;
+
+   unsigned bit_size = nir_alu_type_get_type_size(type);
+
+   if (nir_is_rounding_mode_rtz(execution_mode, bit_size))
+      return nir_rounding_mode_rtz;
+   if (nir_is_rounding_mode_rtne(execution_mode, bit_size))
+      return nir_rounding_mode_rtne;
+   return nir_rounding_mode_undef;
+}
+
+static inline bool
+nir_has_any_rounding_mode_enabled(unsigned execution_mode)
+{
+   bool result =
+      nir_has_any_rounding_mode_rtne(execution_mode) ||
+      nir_has_any_rounding_mode_rtz(execution_mode);
+   return result;
+}
+
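+/* Illustrative use (not part of this patch): a pass can query the shader's
+ * float_controls execution mode with these helpers.  Here the mask is
+ * assumed to live in shader->info.float_controls_execution_mode:
+ *
+ *    unsigned execution_mode = shader->info.float_controls_execution_mode;
+ *    nir_rounding_mode rm =
+ *       nir_get_rounding_mode_from_float_controls(execution_mode,
+ *                                                 nir_type_float32);
+ *    bool ftz32 = nir_is_denorm_flush_to_zero(execution_mode, 32);
+ */
+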
typedef enum {
/**
* Operation where the first two sources are commutative.
return nir_dest_num_components(instr->dest.dest);
}
+static inline bool
+nir_alu_instr_is_comparison(const nir_alu_instr *instr)
+{
+   switch (instr->op) {
+   case nir_op_flt:
+   case nir_op_fge:
+   case nir_op_feq:
+   case nir_op_fne:
+   case nir_op_ilt:
+   case nir_op_ult:
+   case nir_op_ige:
+   case nir_op_uge:
+   case nir_op_ieq:
+   case nir_op_ine:
+   case nir_op_i2b1:
+   case nir_op_f2b1:
+   case nir_op_inot:
+      return true;
+   default:
+      return false;
+   }
+}
+
bool nir_const_value_negative_equal(nir_const_value c1, nir_const_value c2,
nir_alu_type full_type);
}
bool nir_deref_instr_has_indirect(nir_deref_instr *instr);
+bool nir_deref_instr_is_known_out_of_bounds(nir_deref_instr *instr);
bool nir_deref_instr_has_complex_use(nir_deref_instr *instr);
bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);
NIR_INTRINSIC_SWIZZLE_MASK,
/* Separate source/dest access flags for copies */
- NIR_INTRINSIC_SRC_ACCESS = 21,
- NIR_INTRINSIC_DST_ACCESS = 22,
+ NIR_INTRINSIC_SRC_ACCESS,
+ NIR_INTRINSIC_DST_ACCESS,
NIR_INTRINSIC_NUM_INDEX_FLAGS,
}
static inline bool
-nir_alu_instr_is_comparison(const nir_alu_instr *instr)
+nir_tex_instr_has_implicit_derivative(const nir_tex_instr *instr)
{
   switch (instr->op) {
-   case nir_op_flt:
-   case nir_op_fge:
-   case nir_op_feq:
-   case nir_op_fne:
-   case nir_op_ilt:
-   case nir_op_ult:
-   case nir_op_ige:
-   case nir_op_uge:
-   case nir_op_ieq:
-   case nir_op_ine:
-   case nir_op_i2b1:
-   case nir_op_f2b1:
-   case nir_op_inot:
+   case nir_texop_tex:
+   case nir_texop_txb:
+   case nir_texop_lod:
      return true;
   default:
      return false;
   case nir_tex_src_projector:
   case nir_tex_src_comparator:
   case nir_tex_src_bias:
+   case nir_tex_src_min_lod:
   case nir_tex_src_ddx:
   case nir_tex_src_ddy:
      return nir_type_float;
   case nir_tex_src_offset:
   case nir_tex_src_ms_index:
+   case nir_tex_src_plane:
+      return nir_type_int;
+
+   case nir_tex_src_ms_mcs:
+   case nir_tex_src_texture_deref:
+   case nir_tex_src_sampler_deref:
   case nir_tex_src_texture_offset:
   case nir_tex_src_sampler_offset:
-      return nir_type_int;
+   case nir_tex_src_texture_handle:
+   case nir_tex_src_sampler_handle:
+      return nir_type_uint;
-   default:
-      unreachable("Invalid texture source type");
+   case nir_num_tex_src_types:
+      unreachable("nir_num_tex_src_types is not a valid source type");
   }
+
+   unreachable("Invalid texture source type");
}
static inline unsigned
nir_const_value value[];
} nir_load_const_instr;
-#define nir_const_load_to_arr(arr, l, m) \
-{ \
- nir_const_value_to_array(arr, l->value, l->def.num_components, m); \
-} while (false);
-
typedef enum {
nir_jump_return,
nir_jump_break,
#undef NIR_DEFINE_SRC_AS_CONST
+typedef struct {
+   nir_ssa_def *def;
+   unsigned comp;
+} nir_ssa_scalar;
+
+static inline bool
+nir_ssa_scalar_is_const(nir_ssa_scalar s)
+{
+   return s.def->parent_instr->type == nir_instr_type_load_const;
+}
+
+static inline nir_const_value
+nir_ssa_scalar_as_const_value(nir_ssa_scalar s)
+{
+   assert(s.comp < s.def->num_components);
+   nir_load_const_instr *load = nir_instr_as_load_const(s.def->parent_instr);
+   return load->value[s.comp];
+}
+
+#define NIR_DEFINE_SCALAR_AS_CONST(type, suffix) \
+static inline type \
+nir_ssa_scalar_as_##suffix(nir_ssa_scalar s) \
+{ \
+   return nir_const_value_as_##suffix( \
+      nir_ssa_scalar_as_const_value(s), s.def->bit_size); \
+}
+
+NIR_DEFINE_SCALAR_AS_CONST(int64_t, int)
+NIR_DEFINE_SCALAR_AS_CONST(uint64_t, uint)
+NIR_DEFINE_SCALAR_AS_CONST(bool, bool)
+NIR_DEFINE_SCALAR_AS_CONST(double, float)
+
+#undef NIR_DEFINE_SCALAR_AS_CONST
+
+static inline bool
+nir_ssa_scalar_is_alu(nir_ssa_scalar s)
+{
+   return s.def->parent_instr->type == nir_instr_type_alu;
+}
+
+static inline nir_op
+nir_ssa_scalar_alu_op(nir_ssa_scalar s)
+{
+   return nir_instr_as_alu(s.def->parent_instr)->op;
+}
+
+static inline nir_ssa_scalar
+nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
+{
+   nir_ssa_scalar out = { NULL, 0 };
+
+   nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
+   assert(alu_src_idx < nir_op_infos[alu->op].num_inputs);
+
+   /* Our component must be written */
+   assert(s.comp < s.def->num_components);
+   assert(alu->dest.write_mask & (1u << s.comp));
+
+   assert(alu->src[alu_src_idx].src.is_ssa);
+   out.def = alu->src[alu_src_idx].src.ssa;
+
+   if (nir_op_infos[alu->op].input_sizes[alu_src_idx] == 0) {
+      /* The ALU src is unsized so the source component follows the
+       * destination component.
+       */
+      out.comp = alu->src[alu_src_idx].swizzle[s.comp];
+   } else {
+      /* This is a sized source so all source components work together to
+       * produce all the destination components. Since we need to return a
+       * scalar, this only works if the source is a scalar.
+       */
+      assert(nir_op_infos[alu->op].input_sizes[alu_src_idx] == 1);
+      out.comp = alu->src[alu_src_idx].swizzle[0];
+   }
+   assert(out.comp < out.def->num_components);
+
+   return out;
+}
+
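+/* Illustrative sketch (not part of this patch): walk a value back through
+ * single-source ALU instructions and inspect the constant it ends at.  The
+ * starting "alu" instruction and handle_constant() are hypothetical:
+ *
+ *    nir_ssa_scalar s = { &alu->dest.dest.ssa, 1 };
+ *    while (nir_ssa_scalar_is_alu(s) &&
+ *           nir_ssa_scalar_alu_op(s) == nir_op_fneg)
+ *       s = nir_ssa_scalar_chase_alu_src(s, 0);
+ *    if (nir_ssa_scalar_is_const(s))
+ *       handle_constant(nir_ssa_scalar_as_float(s));
+ */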
+
/*
* Control flow
*
nir_lower_minmax64 = (1 << 10),
nir_lower_shift64 = (1 << 11),
nir_lower_imul_2x32_64 = (1 << 12),
+ nir_lower_extract64 = (1 << 13),
} nir_lower_int64_options;
typedef enum {
nir_lower_dfract = (1 << 6),
nir_lower_dround_even = (1 << 7),
nir_lower_dmod = (1 << 8),
- nir_lower_fp64_full_software = (1 << 9),
+ nir_lower_dsub = (1 << 9),
+ nir_lower_ddiv = (1 << 10),
+ nir_lower_fp64_full_software = (1 << 11),
} nir_lower_doubles_options;
+typedef enum {
+ nir_divergence_single_prim_per_subgroup = (1 << 0),
+ nir_divergence_single_patch_per_tcs_subgroup = (1 << 1),
+ nir_divergence_single_patch_per_tes_subgroup = (1 << 2),
+ nir_divergence_view_index_uniform = (1 << 3),
+} nir_divergence_options;
+
typedef struct nir_shader_compiler_options {
bool lower_fdiv;
bool lower_ffma;
bool lower_fpow;
bool lower_fsat;
bool lower_fsqrt;
+ bool lower_sincos;
bool lower_fmod;
/** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
bool lower_bitfield_extract;
/* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
bool lower_scmp;
+ /* lower fall_equalN/fany_nequalN (e.g. fany_nequal4 to sne+fdot4+fsat) */
+ bool lower_vector_cmp;
+
/** enables rules to lower idiv by power-of-two: */
bool lower_idiv;
- /** enable rules to avoid bit shifts */
- bool lower_bitshift;
+ /** enable rules to avoid bit ops */
+ bool lower_bitops;
/** enables rules to lower isign to imin+imax */
bool lower_isign;
/** enables rules to lower fsign to fsub and flt */
bool lower_fsign;
+ /* lower fdph to fdot4 */
+ bool lower_fdph;
+
+ /** lower fdot to fmul and fsum/fadd. */
+ bool lower_fdot;
+
/* Does the native fdot instruction replicate its result for four
* components? If so, then opt_algebraic_late will turn all fdotN
* instructions into fdot_replicatedN instructions.
/* Lowers when rotate instruction is not supported */
bool lower_rotate;
+ /**
+ * Is this the Intel vec4 backend?
+ *
+ * Used to inhibit algebraic optimizations that are known to be harmful on
+ * the Intel vec4 backend. This is generally applicable to any
+ * optimization that might cause more immediate values to be used in
+ * 3-source (e.g., ffma and flrp) instructions.
+ */
+ bool intel_vec4;
+
unsigned max_unroll_iterations;
nir_lower_int64_options lower_int64_options;
/** @} */
+nir_ssa_def *nir_instr_ssa_def(nir_instr *instr);
+
typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
+bool nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
#define NIR_SKIP(name) should_skip_nir(#name)
+/** An instruction filtering callback
+ *
+ * Returns true if the instruction should be processed and false otherwise.
+ */
+typedef bool (*nir_instr_filter_cb)(const nir_instr *, const void *);
+
+/** A simple instruction lowering callback
+ *
+ * Many instruction lowering passes can be written as a simple function which
+ * takes an instruction as its input and returns a sequence of instructions
+ * that implement the consumed instruction. This function type represents
+ * such a lowering function. When called, a function with this prototype
+ * should either return NULL indicating that no lowering needs to be done or
+ * emit a sequence of instructions using the provided builder (whose cursor
+ * will already be placed after the instruction to be lowered) and return the
+ * resulting nir_ssa_def.
+ */
+typedef nir_ssa_def *(*nir_lower_instr_cb)(struct nir_builder *,
+                                           nir_instr *, void *);
+
+/**
+ * Special return value for nir_lower_instr_cb when some progress occurred
+ * (like changing an input to the instr) that didn't result in a replacement
+ * SSA def being generated.
+ */
+#define NIR_LOWER_INSTR_PROGRESS ((nir_ssa_def *)(uintptr_t)1)
+
+/** Iterate over all the instructions in a nir_function_impl and lower them
+ * using the provided callbacks
+ *
+ * This function implements the guts of a standard lowering pass for you. It
+ * iterates over all of the instructions in a nir_function_impl and calls the
+ * filter callback on each one. If the filter callback returns true, it then
+ * calls the lowering call back on the instruction. (Splitting it this way
+ * allows us to avoid some save/restore work for instructions we know won't be
+ * lowered.) If the instruction is dead after the lowering is complete, it
+ * will be removed. If new instructions are added, the lowering callback will
+ * also be called on them in case multiple lowerings are required.
+ *
+ * The metadata for the nir_function_impl will also be updated. If any blocks
+ * are added (they cannot be removed), dominance and block indices will be
+ * invalidated.
+ */
+bool nir_function_impl_lower_instructions(nir_function_impl *impl,
+                                          nir_instr_filter_cb filter,
+                                          nir_lower_instr_cb lower,
+                                          void *cb_data);
+bool nir_shader_lower_instructions(nir_shader *shader,
+                                   nir_instr_filter_cb filter,
+                                   nir_lower_instr_cb lower,
+                                   void *cb_data);
+
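+/* Illustrative sketch (not part of this patch) of a pass built on these
+ * hooks; lower_fsat_filter() and lower_fsat_instr() are hypothetical names
+ * and the body assumes the usual nir_builder helpers from nir_builder.h:
+ *
+ *    static bool
+ *    lower_fsat_filter(const nir_instr *instr, const void *data)
+ *    {
+ *       return instr->type == nir_instr_type_alu &&
+ *              nir_instr_as_alu(instr)->op == nir_op_fsat;
+ *    }
+ *
+ *    static nir_ssa_def *
+ *    lower_fsat_instr(nir_builder *b, nir_instr *instr, void *data)
+ *    {
+ *       nir_alu_instr *alu = nir_instr_as_alu(instr);
+ *       nir_ssa_def *x = nir_ssa_for_alu_src(b, alu, 0);
+ *       return nir_fmin(b, nir_fmax(b, x, nir_imm_float(b, 0.0)),
+ *                       nir_imm_float(b, 1.0));
+ *    }
+ *
+ *    nir_shader_lower_instructions(shader, lower_fsat_filter,
+ *                                  lower_fsat_instr, NULL);
+ *
+ * A callback that changes the instruction without producing a replacement
+ * SSA def would return NIR_LOWER_INSTR_PROGRESS instead.
+ */
+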
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);
nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);
+bool nir_block_is_unreachable(nir_block *block);
void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);
gl_shader_stage stage);
typedef enum {
+ /* If set, this causes all 64-bit IO operations to be lowered on-the-fly
+ * to 32-bit operations. This is only valid for nir_var_shader_in/out
+ * modes.
+ */
+ nir_lower_io_lower_64bit_to_32 = (1 << 0),
+
/* If set, this forces all non-flat fragment shader inputs to be
* interpolated as if with the "sample" qualifier. This requires
* nir_shader_compiler_options::use_interpolated_input_intrinsics.
bool nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode);
+bool nir_lower_vars_to_explicit_types(nir_shader *shader,
+                                      nir_variable_mode modes,
+                                      glsl_type_size_align_func type_info);
+
typedef enum {
/**
* An address format which is a simple 32-bit global GPU address.
bool nir_lower_constant_initializers(nir_shader *shader,
nir_variable_mode modes);
-bool nir_move_load_const(nir_shader *shader);
bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
bool always_precise, bool have_ffma);
-bool nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set);
+bool nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *data);
bool nir_lower_bool_to_float(nir_shader *shader);
bool nir_lower_bool_to_int32(nir_shader *shader);
bool nir_lower_int_to_float(nir_shader *shader);
bool nir_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
+bool nir_lower_clip_gs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
+bool nir_lower_point_size(nir_shader *shader, float min, float max);
+
typedef enum {
nir_lower_interpolation_at_sample = (1 << 1),
nir_lower_interpolation_at_offset = (1 << 2),
bool nir_repair_ssa(nir_shader *shader);
void nir_convert_loop_to_lcssa(nir_loop *loop);
+bool nir_convert_to_lcssa(nir_shader *shader, bool skip_invariants, bool skip_bool_invariants);
+bool *nir_divergence_analysis(nir_shader *shader, nir_divergence_options options);
/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
* registers. If false, convert all values (even those not involved in a phi
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
+bool nir_lower_samplers(nir_shader *shader);
+
/* This is here for unit tests. */
bool nir_opt_comparison_pre_impl(nir_function_impl *impl);
bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);
-bool nir_opt_move_comparisons(nir_shader *shader);
+typedef enum {
+ nir_move_const_undef = (1 << 0),
+ nir_move_load_ubo = (1 << 1),
+ nir_move_load_input = (1 << 2),
+ nir_move_comparisons = (1 << 3),
+} nir_move_options;
+
+bool nir_can_move_instr(nir_instr *instr, nir_move_options options);
-bool nir_opt_move_load_ubo(nir_shader *shader);
+bool nir_opt_sink(nir_shader *shader, nir_move_options options);
+
+bool nir_opt_move(nir_shader *shader, nir_move_options options);
bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
bool indirect_load_ok, bool expensive_alu_ok);
bool nir_opt_rematerialize_compares(nir_shader *shader);
bool nir_opt_remove_phis(nir_shader *shader);
+bool nir_opt_remove_phis_block(nir_block *block);
bool nir_opt_shrink_load(nir_shader *shader);
nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);
-bool nir_lower_sincos(nir_shader *shader);
-
static inline bool
nir_variable_is_in_ubo(const nir_variable *var)
{