#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
#define NIR_MAX_VEC_COMPONENTS 4
+#define NIR_MAX_MATRIX_COLUMNS 4
typedef uint8_t nir_component_mask_t;
/** Defines a cast function
typedef enum {
nir_var_shader_in = (1 << 0),
nir_var_shader_out = (1 << 1),
- nir_var_global = (1 << 2),
- nir_var_local = (1 << 3),
+ nir_var_shader_temp = (1 << 2),
+ nir_var_function_temp = (1 << 3),
nir_var_uniform = (1 << 4),
- nir_var_ubo = (1 << 5),
+ nir_var_mem_ubo = (1 << 5),
nir_var_system_value = (1 << 6),
- nir_var_ssbo = (1 << 7),
- nir_var_shared = (1 << 8),
+ nir_var_mem_ssbo = (1 << 7),
+ nir_var_mem_shared = (1 << 8),
+ nir_var_mem_global = (1 << 9),
nir_var_all = ~0,
} nir_variable_mode;
* by the type associated with the \c nir_variable. Constants may be
* scalars, vectors, or matrices.
*/
- nir_const_value values[NIR_MAX_VEC_COMPONENTS];
+ nir_const_value values[NIR_MAX_MATRIX_COLUMNS];
/* we could get this from the var->type but makes clone *much* easier to
* not have to care about the type.
*/
unsigned interpolation:2;
- /**
- * \name ARB_fragment_coord_conventions
- * @{
- */
- unsigned origin_upper_left:1;
- unsigned pixel_center_integer:1;
- /*@}*/
-
/**
* If non-zero, then this variable may be packed along with other variables
* into a single varying slot, so this offset should be applied when
static inline bool
nir_variable_is_global(const nir_variable *var)
{
- return var->data.mode != nir_var_local;
+ return var->data.mode != nir_var_function_temp;
}
typedef struct nir_register {
nir_alu_type input_types[NIR_MAX_VEC_COMPONENTS];
nir_op_algebraic_property algebraic_properties;
+
+ /* Whether this represents a numeric conversion opcode */
+ bool is_conversion;
} nir_op_info;
extern const nir_op_info nir_op_infos[nir_num_opcodes];
return instr->dest.dest.ssa.num_components;
}
+bool nir_const_value_negative_equal(const nir_const_value *c1,
+ const nir_const_value *c2,
+ unsigned components,
+ nir_alu_type base_type,
+ unsigned bits);
+
bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
unsigned src1, unsigned src2);
+bool nir_alu_srcs_negative_equal(const nir_alu_instr *alu1,
+ const nir_alu_instr *alu2,
+ unsigned src1, unsigned src2);
+
typedef enum {
nir_deref_type_var,
nir_deref_type_array,
NIR_INTRINSIC_ALIGN_MUL = 17,
NIR_INTRINSIC_ALIGN_OFFSET = 18,
+ /**
+ * The Vulkan descriptor type for a vulkan_resource_[re]index intrinsic.
+ */
+ NIR_INTRINSIC_DESC_TYPE = 19,
+
NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
*/
unsigned dest_components;
+ /** bitfield of legal bit sizes */
+ unsigned dest_bit_sizes;
+
/** the number of constant indices used by the intrinsic */
unsigned num_indices;
INTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned)
INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
+INTRINSIC_IDX_ACCESSORS(desc_type, DESC_TYPE, unsigned)
static inline void
nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
nir_tex_src_sampler_deref, /* < deref pointing to the sampler */
nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
+ nir_tex_src_texture_handle, /* < bindless texture handle */
+ nir_tex_src_sampler_handle, /* < bindless sampler handle */
nir_tex_src_plane, /* < selects plane for planar textures */
nir_num_tex_src_types
} nir_tex_src_type;
/* gather component selector */
unsigned component : 2;
+ /* gather offsets */
+ int8_t tg4_offsets[4][2];
+
+ /* True if the texture index or handle is not dynamically uniform */
+ bool texture_non_uniform;
+
+ /* True if the sampler index or handle is not dynamically uniform */
+ bool sampler_non_uniform;
+
/** The texture index
*
* If this texture instruction has a nir_tex_src_texture_offset source,
void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
+bool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex);
+
typedef struct {
nir_instr instr;
#define nir_foreach_instr_reverse_safe(instr, block) \
foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
+typedef enum {
+ nir_selection_control_none = 0x0,
+ nir_selection_control_flatten = 0x1,
+ nir_selection_control_dont_flatten = 0x2,
+} nir_selection_control;
+
typedef struct nir_if {
nir_cf_node cf_node;
nir_src condition;
+ nir_selection_control control;
struct exec_list then_list; /** < list of nir_cf_node */
struct exec_list else_list; /** < list of nir_cf_node */
typedef struct {
nir_if *nif;
+ /** Instruction that generates nif::condition. */
nir_instr *conditional_instr;
+ /** Block within ::nif that has the break instruction. */
nir_block *break_block;
+
+ /** Last block for the then- or else-path that does not contain the break. */
nir_block *continue_from_block;
+ /** True when ::break_block is in the else-path of ::nif. */
bool continue_from_then;
+ bool induction_rhs;
+
+ /* This is true if the terminator's exact trip count is unknown. For
+ * example:
+ *
+ * for (int i = 0; i < imin(x, 4); i++)
+ * ...
+ *
+ * Here loop analysis would have set a max_trip_count of 4; however, we
+ * don't know for sure that this is the exact trip count.
+ */
+ bool exact_trip_count_unknown;
struct list_head loop_terminator_link;
} nir_loop_terminator;
typedef struct {
- /* Number of instructions in the loop */
- unsigned num_instructions;
+ /* Estimated cost (in number of instructions) of the loop */
+ unsigned instr_cost;
+
+ /* Guessed trip count based on array indexing */
+ unsigned guessed_trip_count;
/* Maximum number of times the loop is run (if known) */
unsigned max_trip_count;
struct list_head loop_terminator_list;
} nir_loop_info;
+typedef enum {
+ nir_loop_control_none = 0x0,
+ nir_loop_control_unroll = 0x1,
+ nir_loop_control_dont_unroll = 0x2,
+} nir_loop_control;
+
typedef struct {
nir_cf_node cf_node;
struct exec_list body; /** < list of nir_cf_node */
nir_loop_info *info;
+ nir_loop_control control;
+ bool partially_unrolled;
} nir_loop;
/**
* If the function is only declared and not implemented, this is NULL.
*/
nir_function_impl *impl;
+
+ bool is_entrypoint;
} nir_function;
+typedef enum {
+ nir_lower_imul64 = (1 << 0),
+ nir_lower_isign64 = (1 << 1),
+ /** Lower all int64 modulus and division opcodes */
+ nir_lower_divmod64 = (1 << 2),
+ /** Lower all 64-bit umul_high and imul_high opcodes */
+ nir_lower_imul_high64 = (1 << 3),
+ nir_lower_mov64 = (1 << 4),
+ nir_lower_icmp64 = (1 << 5),
+ nir_lower_iadd64 = (1 << 6),
+ nir_lower_iabs64 = (1 << 7),
+ nir_lower_ineg64 = (1 << 8),
+ nir_lower_logic64 = (1 << 9),
+ nir_lower_minmax64 = (1 << 10),
+ nir_lower_shift64 = (1 << 11),
+ nir_lower_imul_2x32_64 = (1 << 12),
+} nir_lower_int64_options;
+
+typedef enum {
+ nir_lower_drcp = (1 << 0),
+ nir_lower_dsqrt = (1 << 1),
+ nir_lower_drsq = (1 << 2),
+ nir_lower_dtrunc = (1 << 3),
+ nir_lower_dfloor = (1 << 4),
+ nir_lower_dceil = (1 << 5),
+ nir_lower_dfract = (1 << 6),
+ nir_lower_dround_even = (1 << 7),
+ nir_lower_dmod = (1 << 8),
+ nir_lower_fp64_full_software = (1 << 9),
+} nir_lower_doubles_options;
+
typedef struct nir_shader_compiler_options {
bool lower_fdiv;
bool lower_ffma;
bool fuse_ffma;
+ bool lower_flrp16;
bool lower_flrp32;
/** Lowers flrp when it does not support doubles */
bool lower_flrp64;
bool lower_fpow;
bool lower_fsat;
bool lower_fsqrt;
+ bool lower_fmod16;
bool lower_fmod32;
bool lower_fmod64;
/** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
/** enables rules to lower idiv by power-of-two: */
bool lower_idiv;
+ /** enables rules to lower isign to imin+imax */
+ bool lower_isign;
+
/* Does the native fdot instruction replicate its result for four
* components? If so, then opt_algebraic_late will turn all fdotN
* instructions into fdot_replicatedN instructions.
bool lower_extract_word;
bool lower_all_io_to_temps;
+ bool lower_all_io_to_elements;
/**
* Does the driver support real 32-bit integers? (Otherwise, integers
/* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */
bool lower_wpos_pntc;
+ bool lower_hadd;
+ bool lower_add_sat;
+
/**
* Should nir_lower_io() create load_interpolated_input intrinsics?
*
*/
bool use_interpolated_input_intrinsics;
+ /* Lowers when 32x32->64 bit multiplication is not supported */
+ bool lower_mul_2x32_64;
+
unsigned max_unroll_iterations;
+
+ nir_lower_int64_options lower_int64_options;
+ nir_lower_doubles_options lower_doubles_options;
} nir_shader_compiler_options;
typedef struct nir_shader {
unsigned constant_data_size;
} nir_shader;
+#define nir_foreach_function(func, shader) \
+ foreach_list_typed(nir_function, func, node, &(shader)->functions)
+
static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
- assert(exec_list_length(&shader->functions) == 1);
- struct exec_node *func_node = exec_list_get_head(&shader->functions);
- nir_function *func = exec_node_data(nir_function, func_node, node);
+ nir_function *func = NULL;
+
+ nir_foreach_function(function, shader) {
+ assert(func == NULL);
+ if (function->is_entrypoint) {
+ func = function;
+#ifndef NDEBUG
+ break;
+#endif
+ }
+ }
+
+ if (!func)
+ return NULL;
+
assert(func->num_params == 0);
assert(func->impl);
return func->impl;
}
-#define nir_foreach_function(func, shader) \
- foreach_list_typed(nir_function, func, node, &(shader)->functions)
-
nir_shader *nir_shader_create(void *mem_ctx,
gl_shader_stage stage,
const nir_shader_compiler_options *options,
static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
- assert(var->data.mode == nir_var_local);
+ assert(var->data.mode == nir_var_function_temp);
exec_list_push_tail(&impl->locals, &var->node);
}
void nir_print_deref(const nir_deref_instr *deref, FILE *fp);
nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
-nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
+nir_function_impl *nir_function_impl_clone(nir_shader *shader,
+ const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);
+static inline bool
+should_skip_nir(const char *name)
+{
+ static const char *list = NULL;
+ if (!list) {
+ /* Comma separated list of names to skip. */
+ list = getenv("NIR_SKIP");
+ if (!list)
+ list = "";
+ }
+
+ if (!list[0])
+ return false;
+
+ return comma_separated_list_contains(list, name);
+}
+
static inline bool
should_clone_nir(void)
{
static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
+static inline bool should_skip_nir(UNUSED const char *pass_name) { return false; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_serialize_deserialize_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
#endif /* NDEBUG */
#define _PASS(pass, nir, do_pass) do { \
+ if (should_skip_nir(#pass)) { \
+ printf("skipping %s\n", #pass); \
+ break; \
+ } \
do_pass \
nir_validate_shader(nir, "after " #pass); \
if (should_clone_nir()) { \
nir_print_shader(nir, stdout); \
)
+#define NIR_SKIP(name) should_skip_nir(#name)
+
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);
bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);
+void nir_inline_function_impl(struct nir_builder *b,
+ const nir_function_impl *impl,
+ nir_ssa_def **params);
bool nir_inline_functions(nir_shader *shader);
bool nir_propagate_invariant(nir_shader *shader);
bool nir_lower_global_vars_to_local(nir_shader *shader);
+typedef enum {
+ nir_lower_direct_array_deref_of_vec_load = (1 << 0),
+ nir_lower_indirect_array_deref_of_vec_load = (1 << 1),
+ nir_lower_direct_array_deref_of_vec_store = (1 << 2),
+ nir_lower_indirect_array_deref_of_vec_store = (1 << 3),
+} nir_lower_array_deref_of_vec_options;
+
+bool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
+ nir_lower_array_deref_of_vec_options options);
+
bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_locals_to_regs(nir_shader *shader);
nir_variable_mode modes,
int (*type_size)(const struct glsl_type *),
nir_lower_io_options);
+
+typedef enum {
+ /**
+ * An address format which is a simple 32-bit global GPU address.
+ */
+ nir_address_format_32bit_global,
+
+ /**
+ * An address format which is a simple 64-bit global GPU address.
+ */
+ nir_address_format_64bit_global,
+
+ /**
+ * An address format which is a bounds-checked 64-bit global GPU address.
+ *
+ * The address is composed of a 32-bit vec4 where .xy are a uint64_t base
+ * address stored with the low bits in .x and high bits in .y, .z is a
+ * size, and .w is an offset. When the final I/O operation is lowered, .w
+ * is checked against .z and the operation is predicated on the result.
+ */
+ nir_address_format_64bit_bounded_global,
+
+ /**
+ * An address format which is composed of a vec2 where the first
+ * component is a buffer index and the second is an offset.
+ */
+ nir_address_format_32bit_index_offset,
+} nir_address_format;
+bool nir_lower_explicit_io(nir_shader *shader,
+ nir_variable_mode modes,
+ nir_address_format);
+
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
bool alpha_to_one);
bool nir_lower_alu(nir_shader *shader);
bool nir_lower_alu_to_scalar(nir_shader *shader);
+bool nir_lower_bool_to_float(nir_shader *shader);
bool nir_lower_bool_to_int32(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool outputs_only);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
+bool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask);
+
+bool nir_lower_uniforms_to_ubo(nir_shader *shader, int multiplier);
typedef struct nir_lower_subgroups_options {
uint8_t subgroup_size;
unsigned lower_yx_xuxv_external;
unsigned lower_xy_uxvx_external;
unsigned lower_ayuv_external;
+ unsigned lower_xyuv_external;
/**
* To emulate certain texture wrap modes, this can be used
*/
uint8_t swizzles[32][4];
+ /* Can be used to scale sampled values into the range required by the format. */
+ float scale_factors[32];
+
/**
* Bitmap of textures that need srgb to linear conversion. If
* (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
*/
bool lower_txd_offset_clamp;
+ /**
+ * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
+ * sampler index is not statically determinable to be less than 16.
+ */
+ bool lower_txd_clamp_if_sampler_index_not_lt_16;
+
+ /**
+ * If true, apply a .bagr swizzle on tg4 results to handle Broadcom's
+ * mixed-up tg4 locations.
+ */
+ bool lower_tg4_broadcom_swizzle;
+
+ /**
+ * If true, lowers tg4 with 4 constant offsets to 4 tg4 calls
+ */
+ bool lower_tg4_offsets;
+
enum nir_lower_tex_packing lower_tex_packing[32];
} nir_lower_tex_options;
bool nir_lower_tex(nir_shader *shader,
const nir_lower_tex_options *options);
+enum nir_lower_non_uniform_access_type {
+ nir_lower_non_uniform_ubo_access = (1 << 0),
+ nir_lower_non_uniform_ssbo_access = (1 << 1),
+ nir_lower_non_uniform_texture_access = (1 << 2),
+ nir_lower_non_uniform_image_access = (1 << 3),
+};
+
+bool nir_lower_non_uniform_access(nir_shader *shader,
+ enum nir_lower_non_uniform_access_type);
+
bool nir_lower_idiv(nir_shader *shader);
bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
+bool nir_lower_frexp(nir_shader *nir);
+
void nir_lower_two_sided_color(nir_shader *shader);
bool nir_lower_clamp_color_outputs(nir_shader *shader);
typedef enum {
nir_lower_int_source_mods = 1 << 0,
nir_lower_float_source_mods = 1 << 1,
- nir_lower_all_source_mods = (1 << 2) - 1
+ nir_lower_triop_abs = 1 << 2,
+ nir_lower_all_source_mods = (1 << 3) - 1
} nir_lower_to_source_mods_flags;
nir_lower_bit_size_callback callback,
void *callback_data);
-typedef enum {
- nir_lower_imul64 = (1 << 0),
- nir_lower_isign64 = (1 << 1),
- /** Lower all int64 modulus and division opcodes */
- nir_lower_divmod64 = (1 << 2),
- /** Lower all 64-bit umul_high and imul_high opcodes */
- nir_lower_imul_high64 = (1 << 3),
-} nir_lower_int64_options;
-
+nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
-typedef enum {
- nir_lower_drcp = (1 << 0),
- nir_lower_dsqrt = (1 << 1),
- nir_lower_drsq = (1 << 2),
- nir_lower_dtrunc = (1 << 3),
- nir_lower_dfloor = (1 << 4),
- nir_lower_dceil = (1 << 5),
- nir_lower_dfract = (1 << 6),
- nir_lower_dround_even = (1 << 7),
- nir_lower_dmod = (1 << 8)
-} nir_lower_doubles_options;
-
-bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
+nir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode);
+bool nir_lower_doubles(nir_shader *shader, const nir_shader *softfp64,
+ nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
bool nir_normalize_cubemap_coords(nir_shader *shader);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
+bool nir_opt_comparison_pre(nir_shader *shader);
+
bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_before_ffma(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);
+bool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes);
+
bool nir_opt_global_to_local(nir_shader *shader);
bool nir_copy_prop(nir_shader *shader);
bool nir_opt_dead_write_vars(nir_shader *shader);
+bool nir_opt_deref_impl(nir_function_impl *impl);
bool nir_opt_deref(nir_shader *shader);
bool nir_opt_find_array_copies(nir_shader *shader);
bool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size);
-bool nir_opt_if(nir_shader *shader);
+bool nir_opt_if(nir_shader *shader, bool aggressive_last_continue);
bool nir_opt_intrinsics(nir_shader *shader);
bool nir_opt_conditional_discard(nir_shader *shader);
+void nir_strip(nir_shader *shader);
+
void nir_sweep(nir_shader *shader);
void nir_remap_dual_slot_attributes(nir_shader *shader,