} nir_rounding_mode;
typedef union {
- bool b[NIR_MAX_VEC_COMPONENTS];
- float f32[NIR_MAX_VEC_COMPONENTS];
- double f64[NIR_MAX_VEC_COMPONENTS];
- int8_t i8[NIR_MAX_VEC_COMPONENTS];
- uint8_t u8[NIR_MAX_VEC_COMPONENTS];
- int16_t i16[NIR_MAX_VEC_COMPONENTS];
- uint16_t u16[NIR_MAX_VEC_COMPONENTS];
- int32_t i32[NIR_MAX_VEC_COMPONENTS];
- uint32_t u32[NIR_MAX_VEC_COMPONENTS];
- int64_t i64[NIR_MAX_VEC_COMPONENTS];
- uint64_t u64[NIR_MAX_VEC_COMPONENTS];
+ bool b;
+ float f32;
+ double f64;
+ int8_t i8;
+ uint8_t u8;
+ int16_t i16;
+ uint16_t u16;
+ int32_t i32;
+ uint32_t u32;
+ int64_t i64;
+ uint64_t u64;
} nir_const_value;
+#define nir_const_value_to_array(arr, c, components, m) \
+do { \
+   for (unsigned i = 0; i < components; ++i) \
+      arr[i] = c[i].m; \
+} while (false)
+
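A minimal usage sketch, assuming `val` points at a per-component constant
array such as a load_const's value[] (the local names here are illustrative,
not part of the patch):

    float f[NIR_MAX_VEC_COMPONENTS];
    nir_const_value_to_array(f, val, 4, f32);   /* copies val[0..3].f32 */
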
typedef struct nir_constant {
/**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable. Constants may be
* scalars, vectors, or matrices.
*/
- nir_const_value values[NIR_MAX_MATRIX_COLUMNS];
+ nir_const_value values[NIR_MAX_MATRIX_COLUMNS][NIR_MAX_VEC_COMPONENTS];
/* we could get this from the var->type but makes clone *much* easier to
* not have to care about the type.
/** only for debug purposes, can be NULL */
const char *name;
- /** whether this register is local (per-function) or global (per-shader) */
- bool is_global;
-
/** set of nir_srcs where this register is used (read from) */
struct list_head uses;
nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
nir_rounding_mode rnd);
+static inline nir_op
+nir_op_vec(unsigned components)
+{
+ switch (components) {
+ case 1: return nir_op_imov;
+ case 2: return nir_op_vec2;
+ case 3: return nir_op_vec3;
+ case 4: return nir_op_vec4;
+ default: unreachable("bad component count");
+ }
+}
+
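A usage sketch: nir_op_vec() lets a pass build an N-wide vector ALU
instruction without switching on the component count itself; `shader` and
`num_components` are assumed from the surrounding code:

    nir_alu_instr *vec =
       nir_alu_instr_create(shader, nir_op_vec(num_components));
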
typedef enum {
- NIR_OP_IS_COMMUTATIVE = (1 << 0),
+ /**
+ * Operation where the first two sources are commutative.
+ *
+ * For 2-source operations, this is just mathematical commutativity. Some
+ * 3-source operations, like ffma, are only commutative in the first two
+ * sources.
+ */
+ NIR_OP_IS_2SRC_COMMUTATIVE = (1 << 0),
NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;
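A hedged sketch of how a pass might consult the flag; `src_order_matters` is
a hypothetical helper, not part of this patch:

    static inline bool
    src_order_matters(const nir_alu_instr *alu)
    {
       return !(nir_op_infos[alu->op].algebraic_properties &
                NIR_OP_IS_2SRC_COMMUTATIVE);
    }
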
nir_dest dest;
} nir_deref_instr;
-NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,
- type, nir_instr_type_deref)
-
-static inline nir_deref_instr *
-nir_src_as_deref(nir_src src)
-{
- if (!src.is_ssa)
- return NULL;
-
- if (src.ssa->parent_instr->type != nir_instr_type_deref)
- return NULL;
-
- return nir_instr_as_deref(src.ssa->parent_instr);
-}
+static inline nir_deref_instr *nir_src_as_deref(nir_src src);
static inline nir_deref_instr *
nir_deref_instr_parent(const nir_deref_instr *instr)
return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
}
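Worked example: with align_mul = 16 and align_offset = 4 the address is
congruent to 4 mod 16, so the strongest guarantee is the lowest set bit of
the offset: ffs(4) - 1 = 2, giving 1 << 2 = 4-byte alignment. With
align_offset = 0 the address is an exact multiple of align_mul, which is
therefore the alignment itself.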
+/* Converts an image_deref_* intrinsic into an image_* one */
+void nir_rewrite_image_intrinsic(nir_intrinsic_instr *instr,
+ nir_ssa_def *handle, bool bindless);
+
/**
* \group texture information
*
nir_texop_txl, /**< Texture look-up with explicit LOD */
nir_texop_txd, /**< Texture look-up with partial derivatives */
nir_texop_txf, /**< Texel fetch with explicit LOD */
- nir_texop_txf_ms, /**< Multisample texture fetch */
+ nir_texop_txf_ms, /**< Multisample texture fetch */
+ nir_texop_txf_ms_fb, /**< Multisample texture fetch from framebuffer */
nir_texop_txf_ms_mcs, /**< Multisample compression value fetch */
nir_texop_txs, /**< Texture size */
nir_texop_lod, /**< Texture lod query */
case nir_texop_txd:
case nir_texop_txf:
case nir_texop_txf_ms:
+ case nir_texop_txf_ms_fb:
case nir_texop_tg4:
return false;
default:
switch (instr->op) {
case nir_texop_txf:
case nir_texop_txf_ms:
+ case nir_texop_txf_ms_fb:
case nir_texop_txf_ms_mcs:
case nir_texop_samples_identical:
return nir_type_int;
typedef struct {
nir_instr instr;
- nir_const_value value;
-
nir_ssa_def def;
+
+ nir_const_value value[];
} nir_load_const_instr;
+#define nir_const_load_to_arr(arr, l, m) \
+do { \
+   nir_const_value_to_array(arr, l->value, l->def.num_components, m); \
+} while (false)
+
typedef enum {
nir_jump_return,
nir_jump_break,
NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
type, nir_instr_type_alu)
+NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,
+ type, nir_instr_type_deref)
NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
type, nir_instr_type_call)
NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
+/**
+ * Return true if this list of cf_nodes contains a single empty block.
+ */
+static inline bool
+nir_cf_list_is_empty_block(struct exec_list *cf_list)
+{
+ if (exec_list_is_singular(cf_list)) {
+ struct exec_node *head = exec_list_get_head(cf_list);
+ nir_block *block =
+ nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
+ return exec_list_is_empty(&block->instr_list);
+ }
+ return false;
+}
+
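A usage sketch, assuming the caller holds a nir_if; `if_has_else` is a
hypothetical helper, not part of this patch:

    static inline bool
    if_has_else(nir_if *nif)
    {
       return !nir_cf_list_is_empty_block(&nif->else_list);
    }
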
typedef struct {
uint8_t num_components;
uint8_t bit_size;
/** enables rules to lower isign to imin+imax */
bool lower_isign;
+ /** enables rules to lower fsign to fsub and flt */
+ bool lower_fsign;
+
/* Does the native fdot instruction replicate its result for four
* components? If so, then opt_algebraic_late will turn all fdotN
* instructions into fdot_replicatedN instructions.
/** lowers fceil to fneg+ffloor+fneg: */
bool lower_fceil;
+ bool lower_ftrunc;
+
bool lower_ldexp;
bool lower_pack_half_2x16;
bool lower_all_io_to_temps;
bool lower_all_io_to_elements;
- /**
- * Does the driver support real 32-bit integers? (Otherwise, integers
- * are simulated by floats.)
- */
- bool native_integers;
-
/* Indicates that the driver only has zero-based vertex id */
bool vertex_id_zero_based;
*/
bool lower_helper_invocation;
+ /**
+ * Convert gl_SampleMaskIn to gl_HelperInvocation as follows:
+ *
+ * gl_SampleMaskIn == 0 ---> gl_HelperInvocation
+ * gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
+ */
+ bool optimize_sample_mask_in;
+
bool lower_cs_local_index_from_id;
bool lower_cs_local_id_from_index;
struct exec_list functions; /** < list of nir_function */
- /** list of global register in the shader */
- struct exec_list registers;
-
- /** next available global register index */
- unsigned reg_alloc;
-
/**
* the highest index a load_input_*, load_uniform_*, etc. intrinsic can
* access plus one
*/
unsigned num_inputs, num_uniforms, num_outputs, num_shared;
+ /** Size in bytes of required scratch space */
+ unsigned scratch_size;
+
/** Constant data associated with this shader.
*
* Constant data is loaded through load_constant intrinsics. See also
const nir_shader_compiler_options *options,
shader_info *si);
-/** creates a register, including assigning it an index and adding it to the list */
-nir_register *nir_global_reg_create(nir_shader *shader);
-
nir_register *nir_local_reg_create(nir_function_impl *impl);
void nir_reg_remove(nir_register *reg);
nir_const_value *nir_src_as_const_value(nir_src src);
-static inline struct nir_instr *
-nir_src_instr(const struct nir_src *src)
-{
- return src->is_ssa ? src->ssa->parent_instr : NULL;
-}
-
#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro) \
static inline c_type * \
-nir_src_as_ ## name (struct nir_src *src) \
-{ \
- return src->is_ssa && src->ssa->parent_instr->type == type_enum \
- ? cast_macro(src->ssa->parent_instr) : NULL; \
-} \
-static inline const c_type * \
-nir_src_as_ ## name ## _const(const struct nir_src *src) \
+nir_src_as_ ## name (nir_src src) \
{ \
- return src->is_ssa && src->ssa->parent_instr->type == type_enum \
- ? cast_macro(src->ssa->parent_instr) : NULL; \
+ return src.is_ssa && src.ssa->parent_instr->type == type_enum \
+ ? cast_macro(src.ssa->parent_instr) : NULL; \
}
NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
+NIR_SRC_AS_(intrinsic, nir_intrinsic_instr,
+ nir_instr_type_intrinsic, nir_instr_as_intrinsic)
+NIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref)
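Because these helpers now take nir_src by value, a source operand can be
passed directly. A sketch, assuming `intrin` is a deref-based intrinsic such
as store_deref (whose src[0] is the destination deref):

    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
    if (deref == NULL)
       return false;   /* src[0] was not an SSA deref */
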
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
nir_loop *nir_block_get_following_loop(nir_block *block);
void nir_index_local_regs(nir_function_impl *impl);
-void nir_index_global_regs(nir_shader *shader);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);
nir_function_impl *entrypoint,
bool outputs, bool inputs);
+bool nir_lower_vars_to_scratch(nir_shader *shader,
+ nir_variable_mode modes,
+ int size_threshold,
+ glsl_type_size_align_func size_align);
+
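A call sketch: spill every function-temporary variable of at least 16 bytes
to scratch, assuming glsl_get_natural_size_align_bytes is available as the
size/align callback:

    nir_lower_vars_to_scratch(shader, nir_var_function_temp, 16,
                              glsl_get_natural_size_align_bytes);
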
void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
+void nir_gather_ssa_types(nir_function_impl *impl,
+ BITSET_WORD *float_types,
+ BITSET_WORD *int_types);
+
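A usage sketch, assuming the bitsets are indexed by SSA def index (so
nir_index_ssa_defs() has been run) and that util/bitset.h is included:

    BITSET_WORD *float_types =
       calloc(BITSET_WORDS(impl->ssa_alloc), sizeof(BITSET_WORD));
    BITSET_WORD *int_types =
       calloc(BITSET_WORDS(impl->ssa_alloc), sizeof(BITSET_WORD));
    nir_gather_ssa_types(impl, float_types, int_types);
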
void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
- int (*type_size)(const struct glsl_type *));
+ int (*type_size)(const struct glsl_type *, bool));
/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *),
+ int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options);
typedef enum {
* component is a buffer index and the second is an offset.
*/
nir_address_format_32bit_index_offset,
+
+ /**
+ * An address format which is a simple 32-bit offset.
+ */
+ nir_address_format_32bit_offset,
+
+ /**
+ * An address format representing a purely logical addressing model. In
+ * this model, all deref chains must be complete from the dereference
+ * operation to the variable. Cast derefs are not allowed. These
+ * addresses will be 32-bit scalars but the format is immaterial because
+ * you can always chase the chain.
+ */
+ nir_address_format_logical,
} nir_address_format;
+
+static inline unsigned
+nir_address_format_bit_size(nir_address_format addr_format)
+{
+ switch (addr_format) {
+ case nir_address_format_32bit_global: return 32;
+ case nir_address_format_64bit_global: return 64;
+ case nir_address_format_64bit_bounded_global: return 32;
+ case nir_address_format_32bit_index_offset: return 32;
+ case nir_address_format_32bit_offset: return 32;
+ case nir_address_format_logical: return 32;
+ }
+ unreachable("Invalid address format");
+}
+
+static inline unsigned
+nir_address_format_num_components(nir_address_format addr_format)
+{
+ switch (addr_format) {
+ case nir_address_format_32bit_global: return 1;
+ case nir_address_format_64bit_global: return 1;
+ case nir_address_format_64bit_bounded_global: return 4;
+ case nir_address_format_32bit_index_offset: return 2;
+ case nir_address_format_32bit_offset: return 1;
+ case nir_address_format_logical: return 1;
+ }
+ unreachable("Invalid address format");
+}
+
+static inline const struct glsl_type *
+nir_address_format_to_glsl_type(nir_address_format addr_format)
+{
+ unsigned bit_size = nir_address_format_bit_size(addr_format);
+ assert(bit_size == 32 || bit_size == 64);
+ return glsl_vector_type(bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64,
+ nir_address_format_num_components(addr_format));
+}
+
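For example, nir_address_format_64bit_bounded_global addresses are four
32-bit components, so nir_address_format_to_glsl_type() returns a uvec4,
while nir_address_format_64bit_global yields a single 64-bit unsigned
scalar.
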
+const nir_const_value *nir_address_format_null_value(nir_address_format addr_format);
+
+nir_ssa_def * nir_explicit_io_address_from_deref(struct nir_builder *b,
+ nir_deref_instr *deref,
+ nir_ssa_def *base_addr,
+ nir_address_format addr_format);
+void nir_lower_explicit_io_instr(struct nir_builder *b,
+ nir_intrinsic_instr *io_instr,
+ nir_ssa_def *addr,
+ nir_address_format addr_format);
+
bool nir_lower_explicit_io(nir_shader *shader,
nir_variable_mode modes,
nir_address_format);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
bool alpha_to_one);
bool nir_lower_alu(nir_shader *shader);
-bool nir_lower_alu_to_scalar(nir_shader *shader);
+
+bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
+ bool always_precise, bool have_ffma);
+
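A call sketch: the lowering_mask is the set of bit sizes to lower, OR'd
together as literal values, assuming the usual lower_flrp32/lower_flrp64
option flags:

    unsigned lower_flrp =
       (shader->options->lower_flrp32 ? 32 : 0) |
       (shader->options->lower_flrp64 ? 64 : 0);
    if (lower_flrp)
       nir_lower_flrp(shader, lower_flrp, false /* always_precise */,
                      have_ffma);
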
+bool nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set);
bool nir_lower_bool_to_float(nir_shader *shader);
bool nir_lower_bool_to_int32(nir_shader *shader);
+bool nir_lower_int_to_float(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
bool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask);
+void nir_lower_fragcoord_wtrans(nir_shader *shader);
+void nir_lower_viewport_transform(nir_shader *shader);
bool nir_lower_uniforms_to_ubo(nir_shader *shader, int multiplier);
typedef struct nir_lower_subgroups_options {
*/
unsigned lower_srgb;
+ /**
+ * If true, lower nir_texop_tex on shaders that don't support implicit
+ * LODs to nir_texop_txl.
+ */
+ bool lower_tex_without_implicit_lod;
+
/**
* If true, lower nir_texop_txd on cube maps with nir_texop_txl.
*/
*/
bool lower_txd_offset_clamp;
+ /**
+ * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
+ * sampler is bindless.
+ */
+ bool lower_txd_clamp_bindless_sampler;
+
/**
* If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
* sampler index is not statically determinable to be less than 16.
const nir_lower_wpos_ytransform_options *options);
bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
+bool nir_lower_fb_read(nir_shader *shader);
+
typedef struct nir_lower_drawpixels_options {
gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
gl_state_index16 scale_state_tokens[STATE_LENGTH];
bool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes);
-bool nir_opt_global_to_local(nir_shader *shader);
-
bool nir_copy_prop(nir_shader *shader);
bool nir_opt_copy_prop_vars(nir_shader *shader);
nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);
+bool nir_lower_sincos(nir_shader *shader);
+
#ifdef __cplusplus
} /* extern "C" */
#endif