arr[i] = c[i].m; \
} while (false)
+/* Build a nir_const_value from a raw bit pattern. The low bit_size bits
+ * of x are stored in the matching union member; all remaining bytes of
+ * the union are zeroed by the memset.
+ */
+static inline nir_const_value
+nir_const_value_for_raw_uint(uint64_t x, unsigned bit_size)
+{
+ nir_const_value v;
+ memset(&v, 0, sizeof(v));
+
+ switch (bit_size) {
+ case 1: v.b = x; break;
+ case 8: v.u8 = x; break;
+ case 16: v.u16 = x; break;
+ case 32: v.u32 = x; break;
+ case 64: v.u64 = x; break;
+ default:
+ unreachable("Invalid bit size");
+ }
+
+ return v;
+}
+
+/* Build a nir_const_value holding a signed integer, asserting that i is
+ * representable as a two's-complement integer of bit_size bits
+ * (bit_size <= 64). The actual packing is done by
+ * nir_const_value_for_raw_uint.
+ */
+static inline nir_const_value
+nir_const_value_for_int(int64_t i, unsigned bit_size)
+{
+ assert(bit_size <= 64);
+ if (bit_size < 64) {
+ assert(i >= (-(1ll << (bit_size - 1))));
+ assert(i < (1ll << (bit_size - 1)));
+ }
+
+ return nir_const_value_for_raw_uint(i, bit_size);
+}
+
+/* Build a nir_const_value holding an unsigned integer, asserting that u
+ * fits in bit_size bits (bit_size <= 64). The actual packing is done by
+ * nir_const_value_for_raw_uint.
+ */
+static inline nir_const_value
+nir_const_value_for_uint(uint64_t u, unsigned bit_size)
+{
+ assert(bit_size <= 64);
+ if (bit_size < 64)
+ assert(u < (1ull << bit_size));
+
+ return nir_const_value_for_raw_uint(u, bit_size);
+}
+
+/* Build a nir_const_value for a boolean. NIR booleans of any bit size
+ * use the 0/-1 (all ones) convention, so true is stored as -1
+ * sign-extended to bit_size bits.
+ */
+static inline nir_const_value
+nir_const_value_for_bool(bool b, unsigned bit_size)
+{
+ /* Booleans use a 0/-1 convention */
+ return nir_const_value_for_int(-(int)b, bit_size);
+}
+
+/* This one isn't inline because it requires half-float conversion */
+nir_const_value nir_const_value_for_float(double b, unsigned bit_size);
+
+/* Extract the stored value sign-extended to int64_t. 1-bit booleans map
+ * to 0/-1 per NIR's boolean convention.
+ */
+static inline int64_t
+nir_const_value_as_int(nir_const_value value, unsigned bit_size)
+{
+ switch (bit_size) {
+ /* int1_t uses 0/-1 convention */
+ case 1: return -(int)value.b;
+ case 8: return value.i8;
+ case 16: return value.i16;
+ case 32: return value.i32;
+ case 64: return value.i64;
+ default:
+ unreachable("Invalid bit size");
+ }
+}
+
+/* Extract the stored value zero-extended to uint64_t. 1-bit booleans
+ * yield 0 or 1 (not the 0/-1 convention of the signed variant).
+ *
+ * Returns uint64_t: the original int64_t return type was a mismatch with
+ * the unsigned union members read here and with the uint64_t consumers
+ * (nir_src_as_uint / nir_ssa_scalar_as_uint); a u64 value above
+ * INT64_MAX would have hit implementation-defined conversion.
+ */
+static inline uint64_t
+nir_const_value_as_uint(nir_const_value value, unsigned bit_size)
+{
+ switch (bit_size) {
+ case 1: return value.b;
+ case 8: return value.u8;
+ case 16: return value.u16;
+ case 32: return value.u32;
+ case 64: return value.u64;
+ default:
+ unreachable("Invalid bit size");
+ }
+}
+
+/* Interpret a constant as a boolean, asserting that it actually follows
+ * the 0/-1 convention at its bit size.
+ */
+static inline bool
+nir_const_value_as_bool(nir_const_value value, unsigned bit_size)
+{
+ int64_t i = nir_const_value_as_int(value, bit_size);
+
+ /* Booleans of any size use 0/-1 convention */
+ assert(i == 0 || i == -1);
+
+ return i;
+}
+
+/* This one isn't inline because it requires half-float conversion */
+double nir_const_value_as_float(nir_const_value value, unsigned bit_size);
+
typedef struct nir_constant {
/**
* Value of the constant.
unsigned patch:1;
unsigned invariant:1;
+ /**
+ * Can this variable be coalesced with another?
+ *
+ * This is set by nir_lower_io_to_temporaries to say that any
+ * copies involving this variable should stay put. Propagating it can
+ * duplicate the resulting load/store, which is not wanted, and may
+ * result in a load/store of the variable with an indirect offset which
+ * the backend may not be able to handle.
+ */
+ unsigned cannot_coalesce:1;
+
/**
* When separate shader programs are enabled, only input/outputs between
* the stages of a multi-stage separate program can be safely removed
src.ssa->parent_instr->type == nir_instr_type_load_const;
}
-int64_t nir_src_as_int(nir_src src);
-uint64_t nir_src_as_uint(nir_src src);
-bool nir_src_as_bool(nir_src src);
-double nir_src_as_float(nir_src src);
-int64_t nir_src_comp_as_int(nir_src src, unsigned component);
-uint64_t nir_src_comp_as_uint(nir_src src, unsigned component);
-bool nir_src_comp_as_bool(nir_src src, unsigned component);
-double nir_src_comp_as_float(nir_src src, unsigned component);
-
static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
* it must ensure that the resulting value is bit-for-bit identical to the
* original.
*/
- bool exact;
+ bool exact:1;
+
+ /**
+ * Indicates that this instruction does not cause wrapping to occur, in the
+ * form of overflow or underflow.
+ */
+ bool no_signed_wrap:1;
+ bool no_unsigned_wrap:1;
nir_alu_dest dest;
nir_alu_src src[];
return read_mask;
}
-/*
- * For instructions whose destinations are SSA, get the number of channels
- * used for a source
+/**
+ * Get the number of channels used for a source
*/
static inline unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
{
- assert(instr->dest.dest.is_ssa);
-
if (nir_op_infos[instr->op].input_sizes[src] > 0)
return nir_op_infos[instr->op].input_sizes[src];
- return instr->dest.dest.ssa.num_components;
+ return nir_dest_num_components(instr->dest.dest);
}
-bool nir_const_value_negative_equal(const nir_const_value *c1,
- const nir_const_value *c2,
- unsigned components,
- nir_alu_type base_type,
- unsigned bits);
+bool nir_const_value_negative_equal(nir_const_value c1, nir_const_value c2,
+ nir_alu_type full_type);
bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
unsigned src1, unsigned src2);
case nir_op_i2b1:
case nir_op_f2b1:
case nir_op_inot:
- case nir_op_fnot:
return true;
default:
return false;
nir_parallel_copy_instr, instr,
type, nir_instr_type_parallel_copy)
+
+/* Define nir_src_comp_as_<suffix> and nir_src_as_<suffix> accessors that
+ * read a component of a constant SSA source as the given C type. The
+ * source must satisfy nir_src_is_const(); the scalar (non-comp) variant
+ * additionally requires the source to have exactly one component.
+ */
+#define NIR_DEFINE_SRC_AS_CONST(type, suffix) \
+static inline type \
+nir_src_comp_as_##suffix(nir_src src, unsigned comp) \
+{ \
+ assert(nir_src_is_const(src)); \
+ nir_load_const_instr *load = \
+ nir_instr_as_load_const(src.ssa->parent_instr); \
+ assert(comp < load->def.num_components); \
+ return nir_const_value_as_##suffix(load->value[comp], \
+ load->def.bit_size); \
+} \
+ \
+static inline type \
+nir_src_as_##suffix(nir_src src) \
+{ \
+ assert(nir_src_num_components(src) == 1); \
+ return nir_src_comp_as_##suffix(src, 0); \
+}
+
+NIR_DEFINE_SRC_AS_CONST(int64_t, int)
+NIR_DEFINE_SRC_AS_CONST(uint64_t, uint)
+NIR_DEFINE_SRC_AS_CONST(bool, bool)
+NIR_DEFINE_SRC_AS_CONST(double, float)
+
+#undef NIR_DEFINE_SRC_AS_CONST
+
+
+/* A single component of an SSA def: the def plus a component index. */
+typedef struct {
+ nir_ssa_def *def;
+ unsigned comp;
+} nir_ssa_scalar;
+
+/* Is this scalar a component of a load_const instruction? */
+static inline bool
+nir_ssa_scalar_is_const(nir_ssa_scalar s)
+{
+ return s.def->parent_instr->type == nir_instr_type_load_const;
+}
+
+/* Fetch the constant value of this scalar's component. Only valid when
+ * nir_ssa_scalar_is_const(s) is true.
+ */
+static inline nir_const_value
+nir_ssa_scalar_as_const_value(nir_ssa_scalar s)
+{
+ assert(s.comp < s.def->num_components);
+ nir_load_const_instr *load = nir_instr_as_load_const(s.def->parent_instr);
+ return load->value[s.comp];
+}
+
+/* Define nir_ssa_scalar_as_<suffix> accessors that interpret a constant
+ * scalar component as the given C type (via nir_const_value_as_*).
+ */
+#define NIR_DEFINE_SCALAR_AS_CONST(type, suffix) \
+static inline type \
+nir_ssa_scalar_as_##suffix(nir_ssa_scalar s) \
+{ \
+ return nir_const_value_as_##suffix( \
+ nir_ssa_scalar_as_const_value(s), s.def->bit_size); \
+}
+
+NIR_DEFINE_SCALAR_AS_CONST(int64_t, int)
+NIR_DEFINE_SCALAR_AS_CONST(uint64_t, uint)
+NIR_DEFINE_SCALAR_AS_CONST(bool, bool)
+NIR_DEFINE_SCALAR_AS_CONST(double, float)
+
+#undef NIR_DEFINE_SCALAR_AS_CONST
+
+/* Is this scalar produced by an ALU instruction? */
+static inline bool
+nir_ssa_scalar_is_alu(nir_ssa_scalar s)
+{
+ return s.def->parent_instr->type == nir_instr_type_alu;
+}
+
+/* The opcode of the ALU instruction producing this scalar. Caller must
+ * ensure nir_ssa_scalar_is_alu(s) first.
+ */
+static inline nir_op
+nir_ssa_scalar_alu_op(nir_ssa_scalar s)
+{
+ return nir_instr_as_alu(s.def->parent_instr)->op;
+}
+
+/* Chase an ALU result component back to the source component feeding it,
+ * resolving the source swizzle. s.def must be produced by an ALU
+ * instruction and s.comp must be a written component. For sized sources
+ * this only works when the source size is 1 (a scalar must be returned).
+ */
+static inline nir_ssa_scalar
+nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
+{
+ nir_ssa_scalar out = { NULL, 0 };
+
+ nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
+ assert(alu_src_idx < nir_op_infos[alu->op].num_inputs);
+
+ /* Our component must be written */
+ assert(s.comp < s.def->num_components);
+ assert(alu->dest.write_mask & (1u << s.comp));
+
+ assert(alu->src[alu_src_idx].src.is_ssa);
+ out.def = alu->src[alu_src_idx].src.ssa;
+
+ if (nir_op_infos[alu->op].input_sizes[alu_src_idx] == 0) {
+ /* The ALU src is unsized so the source component follows the
+ * destination component.
+ */
+ out.comp = alu->src[alu_src_idx].swizzle[s.comp];
+ } else {
+ /* This is a sized source so all source components work together to
+ * produce all the destination components. Since we need to return a
+ * scalar, this only works if the source is a scalar.
+ */
+ assert(nir_op_infos[alu->op].input_sizes[alu_src_idx] == 1);
+ out.comp = alu->src[alu_src_idx].swizzle[0];
+ }
+ assert(out.comp < out.def->num_components);
+
+ return out;
+}
+
+
/*
* Control flow
*
nir_lower_minmax64 = (1 << 10),
nir_lower_shift64 = (1 << 11),
nir_lower_imul_2x32_64 = (1 << 12),
+ nir_lower_extract64 = (1 << 13),
} nir_lower_int64_options;
typedef enum {
/* Lowers when 32x32->64 bit multiplication is not supported */
bool lower_mul_2x32_64;
+ /* Lowers when rotate instruction is not supported */
+ bool lower_rotate;
+
+ /**
+ * Is this the Intel vec4 backend?
+ *
+ * Used to inhibit algebraic optimizations that are known to be harmful on
+ * the Intel vec4 backend. This is generally applicable to any
+ * optimization that might cause more immediate values to be used in
+ * 3-source (e.g., ffma and flrp) instructions.
+ */
+ bool intel_vec4;
+
unsigned max_unroll_iterations;
nir_lower_int64_options lower_int64_options;
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
+bool nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
+
+void nir_assign_io_var_locations(struct exec_list *var_list,
+ unsigned *size,
+ gl_shader_stage stage);
+
typedef enum {
/* If set, this forces all non-flat fragment shader inputs to be
* interpolated as if with the "sample" qualifier. This requires
int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options);
+bool nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode);
+
typedef enum {
/**
* An address format which is a simple 32-bit global GPU address.
bool nir_lower_idiv(nir_shader *shader);
+bool nir_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
+
bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
+typedef enum {
+ nir_lower_interpolation_at_sample = (1 << 1),
+ nir_lower_interpolation_at_offset = (1 << 2),
+ nir_lower_interpolation_centroid = (1 << 3),
+ nir_lower_interpolation_pixel = (1 << 4),
+ nir_lower_interpolation_sample = (1 << 5),
+} nir_lower_interpolation_options;
+
+bool nir_lower_interpolation(nir_shader *shader,
+ nir_lower_interpolation_options options);
+
bool nir_normalize_cubemap_coords(nir_shader *shader);
void nir_live_ssa_defs_impl(nir_function_impl *impl);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
+/* This is here for unit tests. */
+bool nir_opt_comparison_pre_impl(nir_function_impl *impl);
+
bool nir_opt_comparison_pre(nir_shader *shader);
bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_rematerialize_compares(nir_shader *shader);
bool nir_opt_remove_phis(nir_shader *shader);
+bool nir_opt_remove_phis_block(nir_block *block);
bool nir_opt_shrink_load(nir_shader *shader);
bool nir_lower_sincos(nir_shader *shader);
+/* True when the variable has UBO mode and was declared inside an
+ * interface block (has an interface_type).
+ */
+static inline bool
+nir_variable_is_in_ubo(const nir_variable *var)
+{
+ return (var->data.mode == nir_var_mem_ubo &&
+ var->interface_type != NULL);
+}
+
+/* True when the variable has SSBO mode and was declared inside an
+ * interface block (has an interface_type).
+ */
+static inline bool
+nir_variable_is_in_ssbo(const nir_variable *var)
+{
+ return (var->data.mode == nir_var_mem_ssbo &&
+ var->interface_type != NULL);
+}
+
+/* True when the variable lives in either a UBO or an SSBO block. */
+static inline bool
+nir_variable_is_in_block(const nir_variable *var)
+{
+ return nir_variable_is_in_ubo(var) || nir_variable_is_in_ssbo(var);
+}
+
#ifdef __cplusplus
} /* extern "C" */
#endif