extern "C" {
#endif
-struct gl_program;
-struct gl_shader_program;
-
#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
+#define NIR_MAX_VEC_COMPONENTS 4
+typedef uint8_t nir_component_mask_t;
/** Defines a cast function
*
struct nir_function;
struct nir_shader;
struct nir_instr;
+struct nir_builder;
/**
* \sa nir_variable::state_slots
*/
typedef struct {
- int tokens[5];
+ gl_state_index16 tokens[STATE_LENGTH];
int swizzle;
} nir_state_slot;
nir_var_uniform = (1 << 4),
nir_var_shader_storage = (1 << 5),
nir_var_system_value = (1 << 6),
- nir_var_param = (1 << 7),
nir_var_shared = (1 << 8),
nir_var_all = ~0,
} nir_variable_mode;
} nir_rounding_mode;
typedef union {
- float f32[4];
- double f64[4];
- int8_t i8[4];
- uint8_t u8[4];
- int16_t i16[4];
- uint16_t u16[4];
- int32_t i32[4];
- uint32_t u32[4];
- int64_t i64[4];
- uint64_t u64[4];
+ float f32[NIR_MAX_VEC_COMPONENTS];
+ double f64[NIR_MAX_VEC_COMPONENTS];
+ int8_t i8[NIR_MAX_VEC_COMPONENTS];
+ uint8_t u8[NIR_MAX_VEC_COMPONENTS];
+ int16_t i16[NIR_MAX_VEC_COMPONENTS];
+ uint16_t u16[NIR_MAX_VEC_COMPONENTS];
+ int32_t i32[NIR_MAX_VEC_COMPONENTS];
+ uint32_t u32[NIR_MAX_VEC_COMPONENTS];
+ int64_t i64[NIR_MAX_VEC_COMPONENTS];
+ uint64_t u64[NIR_MAX_VEC_COMPONENTS];
} nir_const_value;
typedef struct nir_constant {
* by the type associated with the \c nir_variable. Constants may be
* scalars, vectors, or matrices.
*/
- nir_const_value values[4];
+ nir_const_value values[NIR_MAX_VEC_COMPONENTS];
/* we could get this from the var->type but makes clone *much* easier to
* not have to care about the type.
nir_depth_layout_unchanged
} nir_depth_layout;
+/**
+ * Enum keeping track of how a variable was declared.
+ */
+typedef enum {
+ /**
+ * Normal declaration.
+ */
+ nir_var_declared_normally = 0,
+
+ /**
+ * Variable is implicitly generated by the compiler and should not be
+ * visible via the API.
+ */
+ nir_var_hidden,
+} nir_var_declaration_type;
+
/**
* Either a uniform, global variable, shader input, or shader output. Based on
* ir_variable - it should be easy to translate between the two.
*/
unsigned fb_fetch_output:1;
+ /**
+ * Non-zero if this variable is considered bindless as defined by
+ * ARB_bindless_texture.
+ */
+ unsigned bindless:1;
+
+ /**
+ * Was an explicit binding set in the shader?
+ */
+ unsigned explicit_binding:1;
+
+ /**
+    * Was a transform feedback buffer set in the shader?
+ */
+ unsigned explicit_xfb_buffer:1;
+
+ /**
+    * Was a transform feedback stride set in the shader?
+ */
+ unsigned explicit_xfb_stride:1;
+
+ /**
+ * Was an explicit offset set in the shader?
+ */
+ unsigned explicit_offset:1;
+
/**
* \brief Layout qualifier for gl_FragDepth.
*
int binding;
/**
- * Location an atomic counter is stored at.
+ * Location an atomic counter or transform feedback is stored at.
*/
unsigned offset;
+ /**
+ * Transform feedback buffer.
+ */
+ unsigned xfb_buffer;
+
+ /**
+ * Transform feedback stride.
+ */
+ unsigned xfb_stride;
+
+ /**
+ * How the variable was declared. See nir_var_declaration_type.
+ *
+ * This is used to detect variables generated by the compiler, so should
+ * not be visible via the API.
+ */
+ unsigned how_declared:2;
+
/**
* ARB_shader_image_load_store qualifiers.
*/
struct {
- bool read_only; /**< "readonly" qualifier. */
- bool write_only; /**< "writeonly" qualifier. */
- bool coherent;
- bool _volatile;
- bool restrict_flag;
+ enum gl_access_qualifier access;
/** Image internal format if specified explicitly, otherwise GL_NONE. */
GLenum format;
* \sa ir_variable::location
*/
const struct glsl_type *interface_type;
+
+ /**
+ * Description of per-member data for per-member struct variables
+ *
+ * This is used for variables which are actually an amalgamation of
+ * multiple entities such as a struct of built-in values or a struct of
+ * inputs each with their own layout specifier. This is only allowed on
+ * variables with a struct or array of array of struct type.
+ */
+ unsigned num_members;
+ struct nir_variable_data *members;
} nir_variable;
#define nir_foreach_variable(var, var_list) \
static inline bool
nir_variable_is_global(const nir_variable *var)
{
- return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
+ return var->data.mode != nir_var_local;
}
typedef struct nir_register {
typedef enum {
nir_instr_type_alu,
+ nir_instr_type_deref,
nir_instr_type_call,
nir_instr_type_tex,
nir_instr_type_intrinsic,
/** Index into the live_in and live_out bitfields */
unsigned live_index;
+ /** Instruction which produces this SSA value. */
nir_instr *parent_instr;
/** set of nir_instrs where this register is used (read from) */
typedef struct nir_src {
union {
+ /** Instruction that consumes this value as a source. */
nir_instr *parent_instr;
struct nir_if *parent_if;
};
return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}
+/** Returns the number of vector components of a nir_src, whether it is
+ * backed by an SSA def or by a register.
+ */
+static inline unsigned
+nir_src_num_components(nir_src src)
+{
+   return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
+}
+
static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}
+/** Returns the number of vector components of a nir_dest, whether it is
+ * backed by an SSA def or by a register.
+ */
+static inline unsigned
+nir_dest_num_components(nir_dest dest)
+{
+   return dest.is_ssa ? dest.ssa.num_components : dest.reg.reg->num_components;
+}
+
void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
* a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
* a swizzle of {2, x, 1, 0} where x means "don't care."
*/
- uint8_t swizzle[4];
+ uint8_t swizzle[NIR_MAX_VEC_COMPONENTS];
} nir_alu_src;
typedef struct {
bool saturate;
- unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
+ unsigned write_mask : NIR_MAX_VEC_COMPONENTS; /* ignored if dest.is_ssa is true */
} nir_alu_dest;
typedef enum {
case GLSL_TYPE_INT16:
return nir_type_int16;
break;
+ case GLSL_TYPE_UINT8:
+ return nir_type_uint8;
+ case GLSL_TYPE_INT8:
+ return nir_type_int8;
case GLSL_TYPE_UINT64:
return nir_type_uint64;
break;
/**
* The number of components in each input
*/
- unsigned input_sizes[4];
+ unsigned input_sizes[NIR_MAX_VEC_COMPONENTS];
/**
* The type of vector that each input takes. Note that negate and
* absolute value are only allowed on inputs with int or float type and
* behave differently on the two.
*/
- nir_alu_type input_types[4];
+ nir_alu_type input_types[NIR_MAX_VEC_COMPONENTS];
nir_op_algebraic_property algebraic_properties;
} nir_op_info;
typedef enum {
nir_deref_type_var,
nir_deref_type_array,
- nir_deref_type_struct
+ nir_deref_type_array_wildcard,
+ nir_deref_type_struct,
+ nir_deref_type_cast,
} nir_deref_type;
-typedef struct nir_deref {
+typedef struct {
+ nir_instr instr;
+
+ /** The type of this deref instruction */
nir_deref_type deref_type;
- struct nir_deref *child;
+
+ /** The mode of the underlying variable */
+ nir_variable_mode mode;
+
+ /** The dereferenced type of the resulting pointer value */
const struct glsl_type *type;
-} nir_deref;
-typedef struct {
- nir_deref deref;
-
- nir_variable *var;
-} nir_deref_var;
-
-/* This enum describes how the array is referenced. If the deref is
- * direct then the base_offset is used. If the deref is indirect then
- * offset is given by base_offset + indirect. If the deref is a wildcard
- * then the deref refers to all of the elements of the array at the same
- * time. Wildcard dereferences are only ever allowed in copy_var
- * intrinsics and the source and destination derefs must have matching
- * wildcards.
- */
-typedef enum {
- nir_deref_array_type_direct,
- nir_deref_array_type_indirect,
- nir_deref_array_type_wildcard,
-} nir_deref_array_type;
+ union {
+ /** Variable being dereferenced if deref_type is a deref_var */
+ nir_variable *var;
-typedef struct {
- nir_deref deref;
+ /** Parent deref if deref_type is not deref_var */
+ nir_src parent;
+ };
- nir_deref_array_type deref_array_type;
- unsigned base_offset;
- nir_src indirect;
-} nir_deref_array;
+ /** Additional deref parameters */
+ union {
+ struct {
+ nir_src index;
+ } arr;
-typedef struct {
- nir_deref deref;
+ struct {
+ unsigned index;
+ } strct;
+ };
- unsigned index;
-} nir_deref_struct;
+ /** Destination to store the resulting "pointer" */
+ nir_dest dest;
+} nir_deref_instr;
-NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref,
- deref_type, nir_deref_type_var)
-NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref,
- deref_type, nir_deref_type_array)
-NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref,
- deref_type, nir_deref_type_struct)
+NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,
+ type, nir_instr_type_deref)
-/* Returns the last deref in the chain. */
-static inline nir_deref *
-nir_deref_tail(nir_deref *deref)
+/** Returns the deref instruction that produces the given source, or NULL.
+ *
+ * NULL is returned when the source is not SSA or when its producing
+ * instruction is not a deref instruction.
+ */
+static inline nir_deref_instr *
+nir_src_as_deref(nir_src src)
 {
-   while (deref->child)
-      deref = deref->child;
-   return deref;
+   if (!src.is_ssa)
+      return NULL;
+
+   if (src.ssa->parent_instr->type != nir_instr_type_deref)
+      return NULL;
+
+   return nir_instr_as_deref(src.ssa->parent_instr);
 }
+/** Returns the parent deref in the chain, or NULL for a deref_var.
+ *
+ * deref_var is the head of a deref chain, so it has no parent; every other
+ * deref type stores its parent in the instr->parent source.
+ */
+static inline nir_deref_instr *
+nir_deref_instr_parent(const nir_deref_instr *instr)
+{
+   if (instr->deref_type == nir_deref_type_var)
+      return NULL;
+   else
+      return nir_src_as_deref(instr->parent);
+}
+
+/** Walks up the deref chain and returns the underlying nir_variable.
+ *
+ * Returns NULL if a cast deref is encountered along the way, since a cast
+ * breaks the chain back to a variable.
+ */
+static inline nir_variable *
+nir_deref_instr_get_variable(const nir_deref_instr *instr)
+{
+   while (instr->deref_type != nir_deref_type_var) {
+      if (instr->deref_type == nir_deref_type_cast)
+         return NULL;
+
+      instr = nir_deref_instr_parent(instr);
+   }
+
+   return instr->var;
+}
+
+bool nir_deref_instr_has_indirect(nir_deref_instr *instr);
+
+bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);
+
typedef struct {
nir_instr instr;
- unsigned num_params;
- nir_deref_var **params;
- nir_deref_var *return_deref;
-
struct nir_function *callee;
-} nir_call_instr;
-
-#define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
- num_variables, num_indices, idx0, idx1, idx2, flags) \
- nir_intrinsic_##name,
-#define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
+ unsigned num_params;
+ nir_src params[];
+} nir_call_instr;
-typedef enum {
#include "nir_intrinsics.h"
- nir_num_intrinsics = nir_last_intrinsic + 1
-} nir_intrinsic_op;
-#define NIR_INTRINSIC_MAX_CONST_INDEX 3
+#define NIR_INTRINSIC_MAX_CONST_INDEX 4
/** Represents an intrinsic
*
int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];
- nir_deref_var *variables[2];
-
nir_src src[];
} nir_intrinsic_instr;
+/** Returns the variable referenced by the i-th (deref) source of the
+ * intrinsic, by chasing the deref chain back to its nir_variable.
+ *
+ * NOTE(review): assumes src[i] is an SSA deref source; otherwise
+ * nir_src_as_deref() returns NULL and this would dereference NULL.
+ */
+static inline nir_variable *
+nir_intrinsic_get_var(nir_intrinsic_instr *intrin, unsigned i)
+{
+   return nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[i]));
+}
+
/**
* \name NIR intrinsics semantic flags
*
*/
NIR_INTRINSIC_INTERP_MODE = 9,
+ /**
+ * A binary nir_op to use when performing a reduction or scan operation
+ */
+ NIR_INTRINSIC_REDUCTION_OP = 10,
+
+ /**
+ * Cluster size for reduction operations
+ */
+ NIR_INTRINSIC_CLUSTER_SIZE = 11,
+
+ /**
+ * Parameter index for a load_param intrinsic
+ */
+ NIR_INTRINSIC_PARAM_IDX = 12,
+
+ /**
+ * Image dimensionality for image intrinsics
+ *
+ * One of GLSL_SAMPLER_DIM_*
+ */
+ NIR_INTRINSIC_IMAGE_DIM = 13,
+
+ /**
+ * Non-zero if we are accessing an array image
+ */
+ NIR_INTRINSIC_IMAGE_ARRAY = 14,
+
+ /**
+ * Image format for image intrinsics
+ */
+ NIR_INTRINSIC_FORMAT = 15,
+
+ /**
+ * Access qualifiers for image intrinsics
+ */
+ NIR_INTRINSIC_ACCESS = 16,
+
NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
-#define NIR_INTRINSIC_MAX_INPUTS 4
+#define NIR_INTRINSIC_MAX_INPUTS 5
typedef struct {
const char *name;
*/
unsigned dest_components;
- /** the number of inputs/outputs that are variables */
- unsigned num_variables;
-
/** the number of constant indices used by the intrinsic */
unsigned num_indices;
extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
+/** Returns the number of components read by source srcn of the intrinsic.
+ *
+ * A zero entry in the info table means the source is variable-sized, in
+ * which case the instruction's own num_components applies.
+ */
+static inline unsigned
+nir_intrinsic_src_components(nir_intrinsic_instr *intr, unsigned srcn)
+{
+   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
+   assert(srcn < info->num_srcs);
+   if (info->src_components[srcn])
+      return info->src_components[srcn];
+   else
+      return intr->num_components;
+}
+
+/** Returns the number of components written by the intrinsic's destination.
+ *
+ * Returns 0 when the intrinsic has no destination; a zero dest_components
+ * entry in the info table means the destination is variable-sized, in which
+ * case the instruction's own num_components applies.
+ */
+static inline unsigned
+nir_intrinsic_dest_components(nir_intrinsic_instr *intr)
+{
+   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
+   if (!info->has_dest)
+      return 0;
+   else if (info->dest_components)
+      return info->dest_components;
+   else
+      return intr->num_components;
+}
#define INTRINSIC_IDX_ACCESSORS(name, flag, type) \
static inline type \
{ \
const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
- return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
+ return (type)instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
} \
static inline void \
nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val) \
INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
+INTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned)
+INTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned)
+INTRINSIC_IDX_ACCESSORS(param_idx, PARAM_IDX, unsigned)
+INTRINSIC_IDX_ACCESSORS(image_dim, IMAGE_DIM, enum glsl_sampler_dim)
+INTRINSIC_IDX_ACCESSORS(image_array, IMAGE_ARRAY, bool)
+INTRINSIC_IDX_ACCESSORS(access, ACCESS, enum gl_access_qualifier)
+INTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned)
/**
* \group texture information
nir_tex_src_ms_mcs, /* MSAA compression value */
nir_tex_src_ddx,
nir_tex_src_ddy,
+ nir_tex_src_texture_deref, /* < deref pointing to the texture */
+ nir_tex_src_sampler_deref, /* < deref pointing to the sampler */
nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
nir_tex_src_plane, /* < selects plane for planar textures */
/** The size of the texture array or 0 if it's not an array */
unsigned texture_array_size;
- /** The texture deref
- *
- * If this is null, use texture_index instead.
- */
- nir_deref_var *texture;
-
/** The sampler index
*
* The following operations do not require a sampler and, as such, this
* then the sampler index is given by sampler_index + sampler_offset.
*/
unsigned sampler_index;
-
- /** The sampler deref
- *
- * If this is null, use sampler_index instead.
- */
- nir_deref_var *sampler;
} nir_tex_instr;
static inline unsigned
}
}
+/** Returns true if the ALU instruction is a comparison.
+ *
+ * Note that, in addition to the float/int ordering and equality opcodes,
+ * this also returns true for the boolean-ish ops i2b, f2b, inot and fnot.
+ */
+static inline bool
+nir_alu_instr_is_comparison(const nir_alu_instr *instr)
+{
+   switch (instr->op) {
+   case nir_op_flt:
+   case nir_op_fge:
+   case nir_op_feq:
+   case nir_op_fne:
+   case nir_op_ilt:
+   case nir_op_ult:
+   case nir_op_ige:
+   case nir_op_uge:
+   case nir_op_ieq:
+   case nir_op_ine:
+   case nir_op_i2b:
+   case nir_op_f2b:
+   case nir_op_inot:
+   case nir_op_fnot:
+      return true;
+   default:
+      return false;
+   }
+}
+
static inline nir_alu_type
nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src)
{
return exec_node_data(nir_instr, tail, node);
}
+/** Returns true if the block is non-empty and its last instruction is a
+ * jump (break/continue/return).
+ */
+static inline bool
+nir_block_ends_in_jump(nir_block *block)
+{
+   return !exec_list_is_empty(&block->instr_list) &&
+          nir_block_last_instr(block)->type == nir_instr_type_jump;
+}
+
#define nir_foreach_instr(instr, block) \
foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse(instr, block) \
/* Unroll the loop regardless of its size */
bool force_unroll;
+ /* Does the loop contain complex loop terminators, continues or other
+ * complex behaviours? If this is true we can't rely on
+ * loop_terminator_list to be complete or accurate.
+ */
+ bool complex_loop;
+
nir_loop_terminator *limiting_terminator;
/* A list of loop_terminators terminating this loop. */
/** list for all local variables in the function */
struct exec_list locals;
- /** array of variables used as parameters */
- unsigned num_params;
- nir_variable **params;
-
- /** variable used to hold the result of the function */
- nir_variable *return_var;
-
/** list of local registers in the function */
struct exec_list registers;
return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
-typedef enum {
- nir_parameter_in,
- nir_parameter_out,
- nir_parameter_inout,
-} nir_parameter_type;
-
typedef struct {
- nir_parameter_type param_type;
- const struct glsl_type *type;
+ uint8_t num_components;
+ uint8_t bit_size;
} nir_parameter;
typedef struct nir_function {
unsigned num_params;
nir_parameter *params;
- const struct glsl_type *return_type;
/** The implementation of this function.
*
bool lower_fsqrt;
bool lower_fmod32;
bool lower_fmod64;
+ /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
bool lower_bitfield_extract;
+ /** Lowers ibitfield_extract/ubitfield_extract to bfm, compares, shifts. */
+ bool lower_bitfield_extract_to_shifts;
+ /** Lowers bitfield_insert to bfi/bfm */
bool lower_bitfield_insert;
+ /** Lowers bitfield_insert to bfm, compares, and shifts. */
+ bool lower_bitfield_insert_to_shifts;
+ /** Lowers bitfield_reverse to shifts. */
+ bool lower_bitfield_reverse;
+ /** Lowers bit_count to shifts. */
+ bool lower_bit_count;
+ /** Lowers bfm to shifts and subtracts. */
+ bool lower_bfm;
+ /** Lowers ifind_msb to compare and ufind_msb */
+ bool lower_ifind_msb;
+ /** Lowers find_lsb to ufind_msb and logic ops */
+ bool lower_find_lsb;
bool lower_uadd_carry;
bool lower_usub_borrow;
+ /** Lowers imul_high/umul_high to 16-bit multiplies and carry operations. */
+ bool lower_mul_high;
/** lowers fneg and ineg to fsub and isub. */
bool lower_negate;
/** lowers fsub and isub to fadd+fneg and iadd+ineg. */
/** enables rules to lower idiv by power-of-two: */
bool lower_idiv;
+ /* lower b2f to iand */
+ bool lower_b2f;
+
/* Does the native fdot instruction replicate its result for four
* components? If so, then opt_algebraic_late will turn all fdotN
* instructions into fdot_replicatedN instructions.
/** lowers ffract to fsub+ffloor: */
bool lower_ffract;
+ bool lower_ldexp;
+
bool lower_pack_half_2x16;
bool lower_pack_unorm_2x16;
bool lower_pack_snorm_2x16;
bool lower_extract_byte;
bool lower_extract_word;
+ bool lower_all_io_to_temps;
+
/**
* Does the driver support real 32-bit integers? (Otherwise, integers
* are simulated by floats.)
/* Indicates that the driver only has zero-based vertex id */
bool vertex_id_zero_based;
+ /**
+ * If enabled, gl_BaseVertex will be lowered as:
+ * is_indexed_draw (~0/0) & firstvertex
+ */
+ bool lower_base_vertex;
+
+ /**
+ * If enabled, gl_HelperInvocation will be lowered as:
+ *
+ * !((1 << sample_id) & sample_mask_in))
+ *
+ * This depends on some possibly hw implementation details, which may
+ * not be true for all hw. In particular that the FS is only executed
+ * for covered samples or for helper invocations. So, do not blindly
+ * enable this option.
+ *
+ * Note: See also issue #22 in ARB_shader_image_load_store
+ */
+ bool lower_helper_invocation;
+
bool lower_cs_local_index_from_id;
+ bool lower_device_index_to_zero;
+
+ /* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */
+ bool lower_wpos_pntc;
+
/**
* Should nir_lower_io() create load_interpolated_input intrinsics?
*
*/
bool use_interpolated_input_intrinsics;
- /**
- * Do vertex shader double inputs use two locations? The Vulkan spec
- * requires two locations to be used, OpenGL allows a single location.
- */
- bool vs_inputs_dual_locations;
-
unsigned max_unroll_iterations;
} nir_shader_compiler_options;
* access plus one
*/
unsigned num_inputs, num_uniforms, num_outputs, num_shared;
+
+ /** Constant data associated with this shader.
+ *
+ * Constant data is loaded through load_constant intrinsics. See also
+ * nir_opt_large_constants.
+ */
+ void *constant_data;
+ unsigned constant_data_size;
} nir_shader;
static inline nir_function_impl *
assert(exec_list_length(&shader->functions) == 1);
struct exec_node *func_node = exec_list_get_head(&shader->functions);
nir_function *func = exec_node_data(nir_function, func_node, node);
- assert(func->return_type == glsl_void_type());
assert(func->num_params == 0);
assert(func->impl);
return func->impl;
/** creates an instruction with default swizzle/writemask/etc. with NULL registers */
nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);
+nir_deref_instr *nir_deref_instr_create(nir_shader *shader,
+ nir_deref_type deref_type);
+
nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);
nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
unsigned num_components,
unsigned bit_size);
-nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
-nir_deref_array *nir_deref_array_create(void *mem_ctx);
-nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);
-
-typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
-bool nir_deref_foreach_leaf(nir_deref_var *deref,
- nir_deref_foreach_leaf_cb cb, void *state);
-
-nir_load_const_instr *
-nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);
+nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
/**
* NIR Cursors and Instruction Insertion API
}
}
+/** Returns a cursor positioned immediately before the given use of a value.
+ *
+ * For an if-condition use, the cursor is at the end of the block preceding
+ * the if (which must not end in a jump).  For a phi source, it is placed in
+ * the corresponding predecessor block, before its final jump.  For any other
+ * instruction it is simply before the consuming instruction.
+ */
+static inline nir_cursor
+nir_before_src(nir_src *src, bool is_if_condition)
+{
+   if (is_if_condition) {
+      nir_block *prev_block =
+         nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
+      assert(!nir_block_ends_in_jump(prev_block));
+      return nir_after_block(prev_block);
+   } else if (src->parent_instr->type == nir_instr_type_phi) {
+#ifndef NDEBUG
+      /* Debug-only sanity check: the src must really belong to this phi. */
+      nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
+      bool found = false;
+      nir_foreach_phi_src(phi_src, cond_phi) {
+         if (phi_src->src.ssa == src->ssa) {
+            found = true;
+            break;
+         }
+      }
+      assert(found);
+#endif
+      /* The LIST_ENTRY macro is a generic container-of macro, it just happens
+       * to have a more specific name.
+       */
+      nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
+      return nir_after_block_before_jump(phi_src->pred);
+   } else {
+      return nir_before_instr(src->parent_instr);
+   }
+}
+
static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
nir_instr_insert(nir_after_cf_list(list), after);
}
-void nir_instr_remove(nir_instr *instr);
+void nir_instr_remove_v(nir_instr *instr);
+
+/** Removes an instruction and returns a cursor at its former position.
+ *
+ * The cursor points after the previous instruction, or at the start of the
+ * block when the removed instruction was the block's first, so callers can
+ * keep inserting where the instruction used to be.
+ */
+static inline nir_cursor
+nir_instr_remove(nir_instr *instr)
+{
+   nir_cursor cursor;
+   nir_instr *prev = nir_instr_prev(instr);
+   if (prev) {
+      cursor = nir_after_instr(prev);
+   } else {
+      cursor = nir_before_block(instr->block);
+   }
+   nir_instr_remove_v(instr);
+   return cursor;
+}
/** @} */
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
nir_dest new_dest);
-void nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
- nir_deref_var *new_deref);
void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
unsigned num_components, unsigned bit_size,
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
nir_instr *after_me);
-uint8_t nir_ssa_def_components_read(const nir_ssa_def *def);
+nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
/*
* finds the next basic block in source-code order, returns NULL if there is
nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
-nir_deref *nir_deref_clone(const nir_deref *deref, void *mem_ctx);
-nir_deref_var *nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx);
nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);
int nir_gs_count_vertices(const nir_shader *shader);
+bool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes);
+bool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_var_copies(nir_shader *shader);
+bool nir_split_per_member_structs(nir_shader *shader);
+bool nir_split_struct_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);
bool nir_propagate_invariant(nir_shader *shader);
void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
+void nir_lower_deref_copy_instr(struct nir_builder *b,
+ nir_intrinsic_instr *copy);
bool nir_lower_var_copies(nir_shader *shader);
+void nir_fixup_deref_modes(nir_shader *shader);
+
bool nir_lower_global_vars_to_local(nir_shader *shader);
bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);
bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
-void nir_lower_io_types(nir_shader *shader);
bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
bool nir_lower_vars_to_ssa(nir_shader *shader);
+bool nir_remove_dead_derefs(nir_shader *shader);
+bool nir_remove_dead_derefs_impl(nir_function_impl *impl);
bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_constant_initializers(nir_shader *shader,
nir_variable_mode modes);
+bool nir_move_load_const(nir_shader *shader);
bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
bool alpha_to_one);
+bool nir_lower_alu(nir_shader *shader);
bool nir_lower_alu_to_scalar(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
-bool nir_lower_samplers(nir_shader *shader,
- const struct gl_shader_program *shader_program);
-bool nir_lower_samplers_as_deref(nir_shader *shader,
- const struct gl_shader_program *shader_program);
-
typedef struct nir_lower_subgroups_options {
uint8_t subgroup_size;
uint8_t ballot_bit_size;
bool lower_to_scalar:1;
bool lower_vote_trivial:1;
+ bool lower_vote_eq_to_ballot:1;
bool lower_subgroup_masks:1;
+ bool lower_shuffle:1;
+ bool lower_shuffle_to_32bit:1;
+ bool lower_quad:1;
} nir_lower_subgroups_options;
bool nir_lower_subgroups(nir_shader *shader,
bool nir_lower_clamp_color_outputs(nir_shader *shader);
void nir_lower_passthrough_edgeflags(nir_shader *shader);
-void nir_lower_tes_patch_vertices(nir_shader *tes, unsigned patch_vertices);
+bool nir_lower_patch_vertices(nir_shader *nir, unsigned static_count,
+ const gl_state_index16 *uniform_state_tokens);
typedef struct nir_lower_wpos_ytransform_options {
- int state_tokens[5];
+ gl_state_index16 state_tokens[STATE_LENGTH];
bool fs_coord_origin_upper_left :1;
bool fs_coord_origin_lower_left :1;
bool fs_coord_pixel_center_integer :1;
bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
typedef struct nir_lower_drawpixels_options {
- int texcoord_state_tokens[5];
- int scale_state_tokens[5];
- int bias_state_tokens[5];
+ gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
+ gl_state_index16 scale_state_tokens[STATE_LENGTH];
+ gl_state_index16 bias_state_tokens[STATE_LENGTH];
unsigned drawpix_sampler;
unsigned pixelmap_sampler;
bool pixel_maps :1;
void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
-bool nir_lower_atomics(nir_shader *shader,
- const struct gl_shader_program *shader_program);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
-bool nir_lower_uniforms_to_ubo(nir_shader *shader);
bool nir_lower_to_source_mods(nir_shader *shader);
bool nir_lower_gs_intrinsics(nir_shader *shader);
+typedef unsigned (*nir_lower_bit_size_callback)(const nir_alu_instr *, void *);
+
+bool nir_lower_bit_size(nir_shader *shader,
+ nir_lower_bit_size_callback callback,
+ void *callback_data);
+
typedef enum {
nir_lower_imul64 = (1 << 0),
nir_lower_isign64 = (1 << 1),
} nir_lower_doubles_options;
bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
-bool nir_lower_64bit_pack(nir_shader *shader);
+bool nir_lower_pack(nir_shader *shader);
bool nir_normalize_cubemap_coords(nir_shader *shader);
bool nir_opt_dead_cf(nir_shader *shader);
+bool nir_opt_find_array_copies(nir_shader *shader);
+
bool nir_opt_gcm(nir_shader *shader, bool value_number);
bool nir_opt_if(nir_shader *shader);
bool nir_opt_intrinsics(nir_shader *shader);
+bool nir_opt_large_constants(nir_shader *shader,
+ glsl_type_size_align_func size_align,
+ unsigned threshold);
+
bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);
bool nir_opt_move_comparisons(nir_shader *shader);
+bool nir_opt_move_load_ubo(nir_shader *shader);
+
bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);
+bool nir_opt_remove_phis_impl(nir_function_impl *impl);
bool nir_opt_remove_phis(nir_shader *shader);
+bool nir_opt_shrink_load(nir_shader *shader);
+
bool nir_opt_trivial_continues(nir_shader *shader);
bool nir_opt_undef(nir_shader *shader);
void nir_sweep(nir_shader *shader);
+void nir_remap_dual_slot_attributes(nir_shader *shader,
+ uint64_t *dual_slot_inputs);
+uint64_t nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot);
+
nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);