#include "util/list.h"
#include "util/ralloc.h"
#include "util/set.h"
+#include "util/bitscan.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/nir_types.h"
return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
}
+static inline bool
+nir_src_is_const(nir_src src)
+{
+ return src.is_ssa &&
+ src.ssa->parent_instr->type == nir_instr_type_load_const;
+}
+
+int64_t nir_src_as_int(nir_src src);
+uint64_t nir_src_as_uint(nir_src src);
+bool nir_src_as_bool(nir_src src);
+double nir_src_as_float(nir_src src);
+int64_t nir_src_comp_as_int(nir_src src, unsigned component);
+uint64_t nir_src_comp_as_uint(nir_src src, unsigned component);
+bool nir_src_comp_as_bool(nir_src src, unsigned component);
+double nir_src_comp_as_float(nir_src src, unsigned component);
+
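+/* A typical caller checks nir_src_is_const() before using the accessors
+ * above.  Sketch (intrin and offset are hypothetical names):
+ *
+ *    if (nir_src_is_const(intrin->src[1]))
+ *       offset = nir_src_as_uint(intrin->src[1]);
+ */
+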
static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
return (instr->dest.write_mask >> channel) & 1;
}
+static inline nir_component_mask_t
+nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src)
+{
+ nir_component_mask_t read_mask = 0;
+ for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
+ if (!nir_alu_instr_channel_used(instr, src, c))
+ continue;
+
+ read_mask |= (1 << instr->src[src].swizzle[c]);
+ }
+ return read_mask;
+}
+
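+/* Worked example (illustrative): if only channels 0 and 1 of the
+ * destination are written and instr->src[src].swizzle = { 2, 0, ... },
+ * the returned mask is (1 << 2) | (1 << 0) = 0x5.
+ */
+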
/*
* For instructions whose destinations are SSA, get the number of channels
* used for a source
*/
NIR_INTRINSIC_ACCESS = 16,
+ /**
+ * Alignment for offsets and addresses
+ *
+ * These two parameters specify an alignment in terms of a multiplier and
+ * an offset. The offset or address parameter X of the intrinsic is
+ * guaranteed to satisfy the following:
+ *
+ * (X - align_offset) % align_mul == 0
+ */
+ NIR_INTRINSIC_ALIGN_MUL = 17,
+ NIR_INTRINSIC_ALIGN_OFFSET = 18,
+
NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
INTRINSIC_IDX_ACCESSORS(image_array, IMAGE_ARRAY, bool)
INTRINSIC_IDX_ACCESSORS(access, ACCESS, enum gl_access_qualifier)
INTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned)
+INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
+INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
+
+static inline void
+nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
+ unsigned align_mul, unsigned align_offset)
+{
+ assert(util_is_power_of_two_nonzero(align_mul));
+ assert(align_offset < align_mul);
+ nir_intrinsic_set_align_mul(intrin, align_mul);
+ nir_intrinsic_set_align_offset(intrin, align_offset);
+}
+
+/** Returns a simple alignment for a load/store intrinsic offset
+ *
+ * Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL
+ * and ALIGN_OFFSET parameters, this helper takes both into account and
+ * provides a single simple alignment parameter. The offset X is guaranteed
+ * to satisfy X % align == 0.
+ */
+static inline unsigned
+nir_intrinsic_align(const nir_intrinsic_instr *intrin)
+{
+ const unsigned align_mul = nir_intrinsic_align_mul(intrin);
+ const unsigned align_offset = nir_intrinsic_align_offset(intrin);
+ assert(align_offset < align_mul);
+ return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
+}
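+
+/* Worked example (illustrative): with align_mul = 16 and align_offset = 8
+ * the offset is of the form 16 * k + 8, so nir_intrinsic_align() returns
+ * 1 << (ffs(8) - 1) = 8; with align_offset = 0 it returns align_mul = 16.
+ */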
/**
* \group texture information
nir_tex_src_offset,
nir_tex_src_bias,
nir_tex_src_lod,
+ nir_tex_src_min_lod,
nir_tex_src_ms_index, /* MSAA sample index */
nir_tex_src_ms_mcs, /* MSAA compression value */
nir_tex_src_ddx,
case nir_op_uge:
case nir_op_ieq:
case nir_op_ine:
- case nir_op_i2b:
- case nir_op_f2b:
+ case nir_op_i2b32:
+ case nir_op_f2b32:
case nir_op_inot:
case nir_op_fnot:
return true;
/* Number of instructions in the loop */
unsigned num_instructions;
- /* How many times the loop is run (if known) */
- unsigned trip_count;
- bool is_trip_count_known;
+ /* Maximum number of times the loop is run (if known) */
+ unsigned max_trip_count;
+
+ /* Do we know the exact number of times the loop will be run? */
+ bool exact_trip_count_known;
/* Unroll the loop regardless of its size */
bool force_unroll;
*/
bool fdot_replicates;
+ /** lowers ffloor to fsub+ffract: */
+ bool lower_ffloor;
+
/** lowers ffract to fsub+ffloor: */
bool lower_ffract;
+ /** lowers fceil to fneg+ffloor+fneg: */
+ bool lower_fceil;
+
bool lower_ldexp;
bool lower_pack_half_2x16;
bool lower_helper_invocation;
bool lower_cs_local_index_from_id;
+ bool lower_cs_local_id_from_index;
bool lower_device_index_to_zero;
}
}
+static inline nir_cursor
+nir_before_src(nir_src *src, bool is_if_condition)
+{
+ if (is_if_condition) {
+ nir_block *prev_block =
+ nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
+ assert(!nir_block_ends_in_jump(prev_block));
+ return nir_after_block(prev_block);
+ } else if (src->parent_instr->type == nir_instr_type_phi) {
+#ifndef NDEBUG
+ nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
+ bool found = false;
+ nir_foreach_phi_src(phi_src, cond_phi) {
+ if (phi_src->src.ssa == src->ssa) {
+ found = true;
+ break;
+ }
+ }
+ assert(found);
+#endif
+ /* The LIST_ENTRY macro is a generic container-of macro; it just happens
+ * to have a more specific name.
+ */
+ nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
+ return nir_after_block_before_jump(phi_src->pred);
+ } else {
+ return nir_before_instr(src->parent_instr);
+ }
+}
+
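+/* Sketch of typical use with nir_builder (b, src, and is_if_condition are
+ * assumed from surrounding context):
+ *
+ *    b->cursor = nir_before_src(src, is_if_condition);
+ *    ...build replacement instructions at the cursor...
+ *
+ * For a phi source the cursor lands at the end of the corresponding
+ * predecessor block, since instructions cannot be inserted before a phi.
+ */
+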
static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
nir_const_value *nir_src_as_const_value(nir_src src);
+
+static inline struct nir_instr *
+nir_src_instr(const struct nir_src *src)
+{
+ return src->is_ssa ? src->ssa->parent_instr : NULL;
+}
+
+#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro) \
+static inline c_type * \
+nir_src_as_ ## name (struct nir_src *src) \
+{ \
+ return src->is_ssa && src->ssa->parent_instr->type == type_enum \
+ ? cast_macro(src->ssa->parent_instr) : NULL; \
+} \
+static inline const c_type * \
+nir_src_as_ ## name ## _const(const struct nir_src *src) \
+{ \
+ return src->is_ssa && src->ssa->parent_instr->type == type_enum \
+ ? cast_macro(src->ssa->parent_instr) : NULL; \
+}
+
+NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
+
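+/* Example (illustrative; "use" is a hypothetical nir_src pointer): detect
+ * a source produced by an fneg:
+ *
+ *    nir_alu_instr *alu = nir_src_as_alu_instr(use);
+ *    if (alu && alu->op == nir_op_fneg) {
+ *       ...
+ *    }
+ */
+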
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);
#ifndef NDEBUG
-void nir_validate_shader(nir_shader *shader);
+void nir_validate_shader(nir_shader *shader, const char *when);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);
return should_print;
}
#else
-static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
+static inline void nir_validate_shader(nir_shader *shader,
+                                       const char *when)
+{ (void) shader; (void) when; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
#endif /* NDEBUG */
-#define _PASS(nir, do_pass) do { \
+#define _PASS(pass, nir, do_pass) do { \
do_pass \
- nir_validate_shader(nir); \
+ nir_validate_shader(nir, "after " #pass); \
if (should_clone_nir()) { \
nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
ralloc_free(nir); \
} \
} while (0)
-#define NIR_PASS(progress, nir, pass, ...) _PASS(nir, \
+#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir, \
nir_metadata_set_validation_flag(nir); \
if (should_print_nir()) \
printf("%s\n", #pass); \
} \
)
-#define NIR_PASS_V(nir, pass, ...) _PASS(nir, \
+#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir, \
if (should_print_nir()) \
printf("%s\n", #pass); \
pass(nir, ##__VA_ARGS__); \
/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
+bool nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
+ uint64_t *used_by_other_stage,
+ uint64_t *used_by_other_stage_patches);
void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
bool default_to_smooth_interp);
+void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
+bool nir_link_constant_varyings(nir_shader *producer, nir_shader *consumer);
typedef enum {
/* If set, this forces all non-flat fragment shader inputs to be
unsigned lower_y_u_v_external;
unsigned lower_yx_xuxv_external;
unsigned lower_xy_uxvx_external;
+ unsigned lower_ayuv_external;
/**
* To emulate certain texture wrap modes, this can be used
* Implies lower_txd_cube_map and lower_txd_shadow.
*/
bool lower_txd;
+
+ /**
+ * If true, lower nir_texop_txb instructions that use both shadow compare
+ * and min_lod to a nir_texop_lod, some math, and nir_texop_tex.
+ */
+ bool lower_txb_shadow_clamp;
+
+ /**
+ * If true, lower nir_texop_txd on shadow samplers that use min_lod to
+ * nir_texop_txl. This includes cube maps.
+ */
+ bool lower_txd_shadow_clamp;
+
+ /**
+ * If true, lower nir_texop_txd to nir_texop_txl when it uses both offset
+ * and min_lod. This includes cube maps.
+ */
+ bool lower_txd_offset_clamp;
} nir_lower_tex_options;
bool nir_lower_tex(nir_shader *shader,
bool nir_lower_idiv(nir_shader *shader);
-bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
+bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
-bool nir_lower_to_source_mods(nir_shader *shader);
+
+typedef enum {
+ nir_lower_int_source_mods = 1 << 0,
+ nir_lower_float_source_mods = 1 << 1,
+ nir_lower_all_source_mods = (1 << 2) - 1
+} nir_lower_to_source_mods_flags;
+
+bool nir_lower_to_source_mods(nir_shader *shader,
+                              nir_lower_to_source_mods_flags options);
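+
+/* Sketch: a backend that supports only float source modifiers might call
+ *
+ *    nir_lower_to_source_mods(shader, nir_lower_float_source_mods);
+ */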
bool nir_lower_gs_intrinsics(nir_shader *shader);
nir_lower_isign64 = (1 << 1),
/** Lower all int64 modulus and division opcodes */
nir_lower_divmod64 = (1 << 2),
+ /** Lower all 64-bit umul_high and imul_high opcodes */
+ nir_lower_imul_high64 = (1 << 3),
} nir_lower_int64_options;
bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
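+/* Sketch: a driver without native 64-bit division or high multiplication
+ * might request
+ *
+ *    nir_lower_int64(shader, nir_lower_divmod64 | nir_lower_imul_high64);
+ */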
bool nir_lower_phis_to_regs_block(nir_block *block);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
+bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_before_ffma(nir_shader *shader);
bool nir_opt_dead_cf(nir_shader *shader);
+bool nir_opt_dead_write_vars(nir_shader *shader);
+
bool nir_opt_find_array_copies(nir_shader *shader);
bool nir_opt_gcm(nir_shader *shader, bool value_number);
+bool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size);
+
bool nir_opt_if(nir_shader *shader);
bool nir_opt_intrinsics(nir_shader *shader);