#include "util/list.h"
#include "util/ralloc.h"
#include "util/set.h"
+#include "util/bitscan.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/nir_types.h"
#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
+#define NIR_MAX_VEC_COMPONENTS 4
+#define NIR_MAX_MATRIX_COLUMNS 4
+typedef uint8_t nir_component_mask_t;
/** Defines a cast function
*
struct nir_function;
struct nir_shader;
struct nir_instr;
+struct nir_builder;
/**
typedef enum {
nir_var_shader_in = (1 << 0),
nir_var_shader_out = (1 << 1),
- nir_var_global = (1 << 2),
- nir_var_local = (1 << 3),
+ nir_var_shader_temp = (1 << 2),
+ nir_var_function_temp = (1 << 3),
nir_var_uniform = (1 << 4),
- nir_var_shader_storage = (1 << 5),
+ nir_var_mem_ubo = (1 << 5),
nir_var_system_value = (1 << 6),
- nir_var_param = (1 << 7),
- nir_var_shared = (1 << 8),
+ nir_var_mem_ssbo = (1 << 7),
+ nir_var_mem_shared = (1 << 8),
+ nir_var_mem_global = (1 << 9),
nir_var_all = ~0,
} nir_variable_mode;
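+/* The modes are single bits so that passes can operate on a bitmask.
+ * Illustrative use, combining two modes for a pass that takes a mask:
+ *
+ *    nir_remove_dead_variables(shader,
+ *                              nir_var_function_temp | nir_var_shader_temp);
+ */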
} nir_rounding_mode;
typedef union {
- float f32[4];
- double f64[4];
- int8_t i8[4];
- uint8_t u8[4];
- int16_t i16[4];
- uint16_t u16[4];
- int32_t i32[4];
- uint32_t u32[4];
- int64_t i64[4];
- uint64_t u64[4];
+ bool b;
+ float f32;
+ double f64;
+ int8_t i8;
+ uint8_t u8;
+ int16_t i16;
+ uint16_t u16;
+ int32_t i32;
+ uint32_t u32;
+ int64_t i64;
+ uint64_t u64;
} nir_const_value;
+#define nir_const_value_to_array(arr, c, components, m) \
+do { \
+   for (unsigned i = 0; i < components; ++i) \
+      arr[i] = c[i].m; \
+} while (false)
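+/* Illustrative use: extract the first `num` u32 components of a constant
+ * into a plain array, assuming `c` is a nir_const_value array and `num` is
+ * at most NIR_MAX_VEC_COMPONENTS:
+ *
+ *    uint32_t vals[NIR_MAX_VEC_COMPONENTS];
+ *    nir_const_value_to_array(vals, c, num, u32);
+ */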
+
typedef struct nir_constant {
/**
* Value of the constant.
* by the type associated with the \c nir_variable. Constants may be
* scalars, vectors, or matrices.
*/
- nir_const_value values[4];
+ nir_const_value values[NIR_MAX_MATRIX_COLUMNS][NIR_MAX_VEC_COMPONENTS];
   /* We could get this from var->type, but clone is *much* easier when it
    * doesn't have to care about the type.
nir_depth_layout_unchanged
} nir_depth_layout;
+/**
+ * Enum keeping track of how a variable was declared.
+ */
+typedef enum {
+ /**
+ * Normal declaration.
+ */
+ nir_var_declared_normally = 0,
+
+ /**
+ * Variable is implicitly generated by the compiler and should not be
+ * visible via the API.
+ */
+ nir_var_hidden,
+} nir_var_declaration_type;
+
/**
* Either a uniform, global variable, shader input, or shader output. Based on
* ir_variable - it should be easy to translate between the two.
*/
unsigned interpolation:2;
- /**
- * \name ARB_fragment_coord_conventions
- * @{
- */
- unsigned origin_upper_left:1;
- unsigned pixel_center_integer:1;
- /*@}*/
-
/**
* If non-zero, then this variable may be packed along with other variables
* into a single varying slot, so this offset should be applied when
*/
unsigned explicit_binding:1;
+   /**
+    * Was an explicit transform feedback buffer set in the shader?
+    */
+   unsigned explicit_xfb_buffer:1;
+
+   /**
+    * Was an explicit transform feedback stride set in the shader?
+    */
+   unsigned explicit_xfb_stride:1;
+
+ /**
+ * Was an explicit offset set in the shader?
+ */
+ unsigned explicit_offset:1;
+
/**
* \brief Layout qualifier for gl_FragDepth.
*
int binding;
/**
- * Location an atomic counter is stored at.
+    * Location an atomic counter or transform feedback output is stored at.
*/
unsigned offset;
+ /**
+ * Transform feedback buffer.
+ */
+ unsigned xfb_buffer;
+
+ /**
+ * Transform feedback stride.
+ */
+ unsigned xfb_stride;
+
+ /**
+ * How the variable was declared. See nir_var_declaration_type.
+ *
+    * This is used to detect variables generated by the compiler, which
+    * should not be visible via the API.
+ */
+ unsigned how_declared:2;
+
/**
* ARB_shader_image_load_store qualifiers.
*/
struct {
- bool read_only; /**< "readonly" qualifier. */
- bool write_only; /**< "writeonly" qualifier. */
- bool coherent;
- bool _volatile;
- bool restrict_flag;
+ enum gl_access_qualifier access;
/** Image internal format if specified explicitly, otherwise GL_NONE. */
GLenum format;
* \sa ir_variable::location
*/
const struct glsl_type *interface_type;
+
+ /**
+ * Description of per-member data for per-member struct variables
+ *
+ * This is used for variables which are actually an amalgamation of
+ * multiple entities such as a struct of built-in values or a struct of
+ * inputs each with their own layout specifier. This is only allowed on
+ * variables with a struct or array of array of struct type.
+ */
+ unsigned num_members;
+ struct nir_variable_data *members;
} nir_variable;
#define nir_foreach_variable(var, var_list) \
static inline bool
nir_variable_is_global(const nir_variable *var)
{
- return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
+ return var->data.mode != nir_var_function_temp;
}
typedef struct nir_register {
/** only for debug purposes, can be NULL */
const char *name;
- /** whether this register is local (per-function) or global (per-shader) */
- bool is_global;
-
- /**
- * If this flag is set to true, then accessing channels >= num_components
- * is well-defined, and simply spills over to the next array element. This
- * is useful for backends that can do per-component accessing, in
- * particular scalar backends. By setting this flag and making
- * num_components equal to 1, structures can be packed tightly into
- * registers and then registers can be accessed per-component to get to
- * each structure member, even if it crosses vec4 boundaries.
- */
- bool is_packed;
-
/** set of nir_srcs where this register is used (read from) */
struct list_head uses;
#define nir_foreach_register_safe(reg, reg_list) \
foreach_list_typed_safe(nir_register, reg, node, reg_list)
-typedef enum {
+typedef enum PACKED {
nir_instr_type_alu,
nir_instr_type_deref,
nir_instr_type_call,
typedef struct nir_instr {
struct exec_node node;
- nir_instr_type type;
struct nir_block *block;
-
- /** generic instruction index. */
- unsigned index;
+ nir_instr_type type;
/* A temporary for optimization and analysis passes to use for storing
* flags. For instance, DCE uses this to store the "dead/live" info.
*/
uint8_t pass_flags;
+
+ /** generic instruction index. */
+ unsigned index;
} nir_instr;
static inline nir_instr *
return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
}
+static inline bool
+nir_src_is_const(nir_src src)
+{
+ return src.is_ssa &&
+ src.ssa->parent_instr->type == nir_instr_type_load_const;
+}
+
+int64_t nir_src_as_int(nir_src src);
+uint64_t nir_src_as_uint(nir_src src);
+bool nir_src_as_bool(nir_src src);
+double nir_src_as_float(nir_src src);
+int64_t nir_src_comp_as_int(nir_src src, unsigned component);
+uint64_t nir_src_comp_as_uint(nir_src src, unsigned component);
+bool nir_src_comp_as_bool(nir_src src, unsigned component);
+double nir_src_comp_as_float(nir_src src, unsigned component);
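+/* Illustrative use, assuming `alu` is a nir_alu_instr * whose second source
+ * may be a constant shift amount:
+ *
+ *    if (nir_src_is_const(alu->src[1].src)) {
+ *       uint64_t shift = nir_src_as_uint(alu->src[1].src);
+ *       ...
+ *    }
+ */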
+
static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
* a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
* a swizzle of {2, x, 1, 0} where x means "don't care."
*/
- uint8_t swizzle[4];
+ uint8_t swizzle[NIR_MAX_VEC_COMPONENTS];
} nir_alu_src;
typedef struct {
bool saturate;
- unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
+ unsigned write_mask : NIR_MAX_VEC_COMPONENTS; /* ignored if dest.is_ssa is true */
} nir_alu_dest;
+/** NIR sized and unsized types
+ *
+ * The values in this enum are carefully chosen so that the sized type is
+ * just the unsized type OR the number of bits.
+ */
typedef enum {
nir_type_invalid = 0, /* Not a valid type */
- nir_type_float,
- nir_type_int,
- nir_type_uint,
- nir_type_bool,
+ nir_type_int = 2,
+ nir_type_uint = 4,
+ nir_type_bool = 6,
+ nir_type_float = 128,
+ nir_type_bool1 = 1 | nir_type_bool,
nir_type_bool32 = 32 | nir_type_bool,
+ nir_type_int1 = 1 | nir_type_int,
nir_type_int8 = 8 | nir_type_int,
nir_type_int16 = 16 | nir_type_int,
nir_type_int32 = 32 | nir_type_int,
nir_type_int64 = 64 | nir_type_int,
+ nir_type_uint1 = 1 | nir_type_uint,
nir_type_uint8 = 8 | nir_type_uint,
nir_type_uint16 = 16 | nir_type_uint,
nir_type_uint32 = 32 | nir_type_uint,
nir_type_float64 = 64 | nir_type_float,
} nir_alu_type;
-#define NIR_ALU_TYPE_SIZE_MASK 0xfffffff8
-#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007
+#define NIR_ALU_TYPE_SIZE_MASK 0x79
+#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x86
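+/* For example, nir_type_uint32 == (nir_type_uint | 32) == 0x24, so
+ * (nir_type_uint32 & NIR_ALU_TYPE_SIZE_MASK) == 32 and
+ * (nir_type_uint32 & NIR_ALU_TYPE_BASE_TYPE_MASK) == nir_type_uint.
+ */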
static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
switch (base_type) {
case GLSL_TYPE_BOOL:
- return nir_type_bool32;
+ return nir_type_bool1;
break;
case GLSL_TYPE_UINT:
return nir_type_uint32;
case GLSL_TYPE_DOUBLE:
return nir_type_float64;
break;
- default:
- unreachable("unknown type");
+
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_ATOMIC_UINT:
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_ARRAY:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_SUBROUTINE:
+ case GLSL_TYPE_FUNCTION:
+ case GLSL_TYPE_ERROR:
+ return nir_type_invalid;
}
+
+ unreachable("unknown type");
}
static inline nir_alu_type
nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
nir_rounding_mode rnd);
+static inline nir_op
+nir_op_vec(unsigned components)
+{
+ switch (components) {
+ case 1: return nir_op_mov;
+ case 2: return nir_op_vec2;
+ case 3: return nir_op_vec3;
+ case 4: return nir_op_vec4;
+ default: unreachable("bad component count");
+ }
+}
+
typedef enum {
- NIR_OP_IS_COMMUTATIVE = (1 << 0),
+   /**
+    * Operation where the first two sources are commutative.
+    *
+    * For 2-source operations, this is just mathematical commutativity.
+    * Some 3-source operations, like ffma, are only commutative in the first
+    * two sources: ffma(a, b, c) == ffma(b, a, c), but the third source
+    * cannot be swapped with either of the others.
+    */
+   NIR_OP_IS_2SRC_COMMUTATIVE = (1 << 0),
NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;
/**
* The number of components in each input
*/
- unsigned input_sizes[4];
+ unsigned input_sizes[NIR_MAX_VEC_COMPONENTS];
/**
* The type of vector that each input takes. Note that negate and
* absolute value are only allowed on inputs with int or float type and
* behave differently on the two.
*/
- nir_alu_type input_types[4];
+ nir_alu_type input_types[NIR_MAX_VEC_COMPONENTS];
nir_op_algebraic_property algebraic_properties;
+
+ /* Whether this represents a numeric conversion opcode */
+ bool is_conversion;
} nir_op_info;
extern const nir_op_info nir_op_infos[nir_num_opcodes];
return (instr->dest.write_mask >> channel) & 1;
}
+static inline nir_component_mask_t
+nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src)
+{
+ nir_component_mask_t read_mask = 0;
+ for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
+ if (!nir_alu_instr_channel_used(instr, src, c))
+ continue;
+
+ read_mask |= (1 << instr->src[src].swizzle[c]);
+ }
+ return read_mask;
+}
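+/* For example, a two-channel ALU op whose source is swizzled .yy reads only
+ * component y (mask 0x2); swizzled .yx it reads x and y (mask 0x3).
+ */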
+
/*
* For instructions whose destinations are SSA, get the number of channels
* used for a source
return instr->dest.dest.ssa.num_components;
}
+bool nir_const_value_negative_equal(const nir_const_value *c1,
+ const nir_const_value *c2,
+ unsigned components,
+ nir_alu_type base_type,
+ unsigned bits);
+
bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
unsigned src1, unsigned src2);
+bool nir_alu_srcs_negative_equal(const nir_alu_instr *alu1,
+ const nir_alu_instr *alu2,
+ unsigned src1, unsigned src2);
+
typedef enum {
nir_deref_type_var,
nir_deref_type_array,
nir_deref_type_array_wildcard,
+ nir_deref_type_ptr_as_array,
nir_deref_type_struct,
nir_deref_type_cast,
} nir_deref_type;
-typedef struct nir_deref {
- nir_deref_type deref_type;
- struct nir_deref *child;
- const struct glsl_type *type;
-} nir_deref;
-
-typedef struct {
- nir_deref deref;
-
- nir_variable *var;
-} nir_deref_var;
-
-/* This enum describes how the array is referenced. If the deref is
- * direct then the base_offset is used. If the deref is indirect then
- * offset is given by base_offset + indirect. If the deref is a wildcard
- * then the deref refers to all of the elements of the array at the same
- * time. Wildcard dereferences are only ever allowed in copy_var
- * intrinsics and the source and destination derefs must have matching
- * wildcards.
- */
-typedef enum {
- nir_deref_array_type_direct,
- nir_deref_array_type_indirect,
- nir_deref_array_type_wildcard,
-} nir_deref_array_type;
-
-typedef struct {
- nir_deref deref;
-
- nir_deref_array_type deref_array_type;
- unsigned base_offset;
- nir_src indirect;
-} nir_deref_array;
-
-typedef struct {
- nir_deref deref;
-
- unsigned index;
-} nir_deref_struct;
-
-NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref,
- deref_type, nir_deref_type_var)
-NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref,
- deref_type, nir_deref_type_array)
-NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref,
- deref_type, nir_deref_type_struct)
-
-/* Returns the last deref in the chain. */
-static inline nir_deref *
-nir_deref_tail(nir_deref *deref)
-{
- while (deref->child)
- deref = deref->child;
- return deref;
-}
-
typedef struct {
nir_instr instr;
struct {
unsigned index;
} strct;
+
+ struct {
+ unsigned ptr_stride;
+ } cast;
};
/** Destination to store the resulting "pointer" */
nir_dest dest;
} nir_deref_instr;
-NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,
- type, nir_instr_type_deref)
+static inline nir_deref_instr *nir_src_as_deref(nir_src src);
static inline nir_deref_instr *
-nir_src_as_deref(nir_src src)
+nir_deref_instr_parent(const nir_deref_instr *instr)
{
- if (!src.is_ssa)
+ if (instr->deref_type == nir_deref_type_var)
return NULL;
+ else
+ return nir_src_as_deref(instr->parent);
+}
- if (src.ssa->parent_instr->type != nir_instr_type_deref)
- return NULL;
+static inline nir_variable *
+nir_deref_instr_get_variable(const nir_deref_instr *instr)
+{
+ while (instr->deref_type != nir_deref_type_var) {
+ if (instr->deref_type == nir_deref_type_cast)
+ return NULL;
+
+ instr = nir_deref_instr_parent(instr);
+ }
- return nir_instr_as_deref(src.ssa->parent_instr);
+ return instr->var;
}
+bool nir_deref_instr_has_indirect(nir_deref_instr *instr);
+bool nir_deref_instr_has_complex_use(nir_deref_instr *instr);
+
+bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);
+
+unsigned nir_deref_instr_ptr_as_array_stride(nir_deref_instr *instr);
+
typedef struct {
nir_instr instr;
- unsigned num_params;
- nir_deref_var **params;
- nir_deref_var *return_deref;
-
struct nir_function *callee;
+
+ unsigned num_params;
+ nir_src params[];
} nir_call_instr;
#include "nir_intrinsics.h"
-#define NIR_INTRINSIC_MAX_CONST_INDEX 3
+#define NIR_INTRINSIC_MAX_CONST_INDEX 4
/** Represents an intrinsic
*
int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];
- nir_deref_var *variables[2];
-
nir_src src[];
} nir_intrinsic_instr;
+static inline nir_variable *
+nir_intrinsic_get_var(nir_intrinsic_instr *intrin, unsigned i)
+{
+ return nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[i]));
+}
+
/**
* \name NIR intrinsics semantic flags
*
*/
NIR_INTRINSIC_CLUSTER_SIZE = 11,
+ /**
+ * Parameter index for a load_param intrinsic
+ */
+ NIR_INTRINSIC_PARAM_IDX = 12,
+
+ /**
+ * Image dimensionality for image intrinsics
+ *
+ * One of GLSL_SAMPLER_DIM_*
+ */
+ NIR_INTRINSIC_IMAGE_DIM = 13,
+
+ /**
+ * Non-zero if we are accessing an array image
+ */
+ NIR_INTRINSIC_IMAGE_ARRAY = 14,
+
+ /**
+ * Image format for image intrinsics
+ */
+ NIR_INTRINSIC_FORMAT = 15,
+
+ /**
+ * Access qualifiers for image and memory access intrinsics
+ */
+ NIR_INTRINSIC_ACCESS = 16,
+
+ /**
+ * Alignment for offsets and addresses
+ *
+    * These two parameters specify an alignment in terms of a multiplier and
+ * an offset. The offset or address parameter X of the intrinsic is
+ * guaranteed to satisfy the following:
+ *
+ * (X - align_offset) % align_mul == 0
+ */
+ NIR_INTRINSIC_ALIGN_MUL = 17,
+ NIR_INTRINSIC_ALIGN_OFFSET = 18,
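+   /* For example, align_mul == 16 with align_offset == 4 guarantees offsets
+    * of the form 16*k + 4 (4, 20, 36, ...); a fully aligned access would
+    * have align_offset == 0.
+    */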
+
+ /**
+ * The Vulkan descriptor type for a vulkan_resource_[re]index intrinsic.
+ */
+ NIR_INTRINSIC_DESC_TYPE = 19,
+
+ /**
+ * The nir_alu_type of a uniform/input/output
+ */
+ NIR_INTRINSIC_TYPE = 20,
+
NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
-#define NIR_INTRINSIC_MAX_INPUTS 4
+#define NIR_INTRINSIC_MAX_INPUTS 5
typedef struct {
const char *name;
/** number of components of each input register
*
* If this value is 0, the number of components is given by the
- * num_components field of nir_intrinsic_instr.
+ * num_components field of nir_intrinsic_instr. If this value is -1, the
+ * intrinsic consumes however many components are provided and it is not
+ * validated at all.
*/
- unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];
+ int src_components[NIR_INTRINSIC_MAX_INPUTS];
bool has_dest;
*/
unsigned dest_components;
- /** the number of inputs/outputs that are variables */
- unsigned num_variables;
+ /** bitfield of legal bit sizes */
+ unsigned dest_bit_sizes;
/** the number of constant indices used by the intrinsic */
unsigned num_indices;
{
const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
assert(srcn < info->num_srcs);
- if (info->src_components[srcn])
+ if (info->src_components[srcn] > 0)
return info->src_components[srcn];
- else
+ else if (info->src_components[srcn] == 0)
return intr->num_components;
+ else
+ return nir_src_num_components(intr->src[srcn]);
}
static inline unsigned
{ \
const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
- return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
+ return (type)instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
} \
static inline void \
nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val) \
INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
INTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned)
INTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned)
+INTRINSIC_IDX_ACCESSORS(param_idx, PARAM_IDX, unsigned)
+INTRINSIC_IDX_ACCESSORS(image_dim, IMAGE_DIM, enum glsl_sampler_dim)
+INTRINSIC_IDX_ACCESSORS(image_array, IMAGE_ARRAY, bool)
+INTRINSIC_IDX_ACCESSORS(access, ACCESS, enum gl_access_qualifier)
+INTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned)
+INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
+INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
+INTRINSIC_IDX_ACCESSORS(desc_type, DESC_TYPE, unsigned)
+INTRINSIC_IDX_ACCESSORS(type, TYPE, nir_alu_type)
+
+static inline void
+nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
+ unsigned align_mul, unsigned align_offset)
+{
+ assert(util_is_power_of_two_nonzero(align_mul));
+ assert(align_offset < align_mul);
+ nir_intrinsic_set_align_mul(intrin, align_mul);
+ nir_intrinsic_set_align_offset(intrin, align_offset);
+}
+
+/** Returns a simple alignment for a load/store intrinsic offset
+ *
+ * Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL
+ * and ALIGN_OFFSET parameters, this helper takes both into account and
+ * provides a single simple alignment parameter. The offset X is guaranteed
+ * to satisfy X % align == 0.
+ */
+static inline unsigned
+nir_intrinsic_align(const nir_intrinsic_instr *intrin)
+{
+ const unsigned align_mul = nir_intrinsic_align_mul(intrin);
+ const unsigned align_offset = nir_intrinsic_align_offset(intrin);
+ assert(align_offset < align_mul);
+ return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
+}
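+/* For example, align_mul == 8 with align_offset == 2 yields
+ * nir_intrinsic_align() == 2: every such offset is a multiple of 2 but not
+ * necessarily of 4 or 8.
+ */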
+
+/* Converts a image_deref_* intrinsic into a image_* one */
+void nir_rewrite_image_intrinsic(nir_intrinsic_instr *instr,
+ nir_ssa_def *handle, bool bindless);
/**
* \group texture information
nir_tex_src_offset,
nir_tex_src_bias,
nir_tex_src_lod,
+ nir_tex_src_min_lod,
nir_tex_src_ms_index, /* MSAA sample index */
nir_tex_src_ms_mcs, /* MSAA compression value */
nir_tex_src_ddx,
nir_tex_src_ddy,
+ nir_tex_src_texture_deref, /* < deref pointing to the texture */
+ nir_tex_src_sampler_deref, /* < deref pointing to the sampler */
nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
+ nir_tex_src_texture_handle, /* < bindless texture handle */
+ nir_tex_src_sampler_handle, /* < bindless sampler handle */
nir_tex_src_plane, /* < selects plane for planar textures */
nir_num_tex_src_types
} nir_tex_src_type;
nir_texop_txl, /**< Texture look-up with explicit LOD */
nir_texop_txd, /**< Texture look-up with partial derivatives */
nir_texop_txf, /**< Texel fetch with explicit LOD */
- nir_texop_txf_ms, /**< Multisample texture fetch */
+ nir_texop_txf_ms, /**< Multisample texture fetch */
+ nir_texop_txf_ms_fb, /**< Multisample texture fetch from framebuffer */
nir_texop_txf_ms_mcs, /**< Multisample compression value fetch */
nir_texop_txs, /**< Texture size */
nir_texop_lod, /**< Texture lod query */
/* gather component selector */
unsigned component : 2;
+ /* gather offsets */
+ int8_t tg4_offsets[4][2];
+
+ /* True if the texture index or handle is not dynamically uniform */
+ bool texture_non_uniform;
+
+ /* True if the sampler index or handle is not dynamically uniform */
+ bool sampler_non_uniform;
+
/** The texture index
*
* If this texture instruction has a nir_tex_src_texture_offset source,
/** The size of the texture array or 0 if it's not an array */
unsigned texture_array_size;
- /** The texture deref
- *
- * If this is null, use texture_index instead.
- */
- nir_deref_var *texture;
-
/** The sampler index
*
* The following operations do not require a sampler and, as such, this
* then the sampler index is given by sampler_index + sampler_offset.
*/
unsigned sampler_index;
-
- /** The sampler deref
- *
- * If this is null, use sampler_index instead.
- */
- nir_deref_var *sampler;
} nir_tex_instr;
static inline unsigned
case nir_texop_txd:
case nir_texop_txf:
case nir_texop_txf_ms:
+ case nir_texop_txf_ms_fb:
case nir_texop_tg4:
return false;
default:
case nir_op_uge:
case nir_op_ieq:
case nir_op_ine:
- case nir_op_i2b:
- case nir_op_f2b:
+ case nir_op_i2b1:
+ case nir_op_f2b1:
case nir_op_inot:
case nir_op_fnot:
return true;
switch (instr->op) {
case nir_texop_txf:
case nir_texop_txf_ms:
+ case nir_texop_txf_ms_fb:
case nir_texop_txf_ms_mcs:
case nir_texop_samples_identical:
return nir_type_int;
void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
+bool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex);
+
typedef struct {
nir_instr instr;
- nir_const_value value;
-
nir_ssa_def def;
+
+ nir_const_value value[];
} nir_load_const_instr;
+#define nir_const_load_to_arr(arr, l, m) \
+do { \
+   nir_const_value_to_array(arr, l->value, l->def.num_components, m); \
+} while (false)
+
typedef enum {
nir_jump_return,
nir_jump_break,
NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
type, nir_instr_type_alu)
+NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,
+ type, nir_instr_type_deref)
NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
type, nir_instr_type_call)
NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
return exec_node_data(nir_instr, tail, node);
}
+static inline bool
+nir_block_ends_in_jump(nir_block *block)
+{
+ return !exec_list_is_empty(&block->instr_list) &&
+ nir_block_last_instr(block)->type == nir_instr_type_jump;
+}
+
#define nir_foreach_instr(instr, block) \
foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse(instr, block) \
#define nir_foreach_instr_reverse_safe(instr, block) \
foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
+typedef enum {
+ nir_selection_control_none = 0x0,
+ nir_selection_control_flatten = 0x1,
+ nir_selection_control_dont_flatten = 0x2,
+} nir_selection_control;
+
typedef struct nir_if {
nir_cf_node cf_node;
nir_src condition;
+ nir_selection_control control;
struct exec_list then_list; /** < list of nir_cf_node */
struct exec_list else_list; /** < list of nir_cf_node */
typedef struct {
nir_if *nif;
+ /** Instruction that generates nif::condition. */
nir_instr *conditional_instr;
+ /** Block within ::nif that has the break instruction. */
nir_block *break_block;
+
+ /** Last block for the then- or else-path that does not contain the break. */
nir_block *continue_from_block;
+ /** True when ::break_block is in the else-path of ::nif. */
bool continue_from_then;
+ bool induction_rhs;
+
+   /* This is true if the terminator's exact trip count is unknown.  For
+    * example:
+    *
+    *    for (int i = 0; i < imin(x, 4); i++)
+    *       ...
+    *
+    * Here loop analysis would have set a max_trip_count of 4; however, we
+    * don't know for sure that this is the exact trip count.
+    */
+   bool exact_trip_count_unknown;
struct list_head loop_terminator_link;
} nir_loop_terminator;
typedef struct {
- /* Number of instructions in the loop */
- unsigned num_instructions;
+ /* Estimated cost (in number of instructions) of the loop */
+ unsigned instr_cost;
+
+ /* Guessed trip count based on array indexing */
+ unsigned guessed_trip_count;
+
+ /* Maximum number of times the loop is run (if known) */
+ unsigned max_trip_count;
- /* How many times the loop is run (if known) */
- unsigned trip_count;
- bool is_trip_count_known;
+   /* Do we know the exact number of times the loop will be run? */
+   bool exact_trip_count_known;
/* Unroll the loop regardless of its size */
bool force_unroll;
+   /* Does the loop contain complex loop terminators, continues, or other
+    * complex behaviours?  If this is true, we can't rely on
+    * loop_terminator_list to be complete or accurate.
+    */
+ bool complex_loop;
+
nir_loop_terminator *limiting_terminator;
/* A list of loop_terminators terminating this loop. */
struct list_head loop_terminator_list;
} nir_loop_info;
+typedef enum {
+ nir_loop_control_none = 0x0,
+ nir_loop_control_unroll = 0x1,
+ nir_loop_control_dont_unroll = 0x2,
+} nir_loop_control;
+
typedef struct {
nir_cf_node cf_node;
struct exec_list body; /** < list of nir_cf_node */
nir_loop_info *info;
+ nir_loop_control control;
+ bool partially_unrolled;
} nir_loop;
/**
/** list for all local variables in the function */
struct exec_list locals;
- /** array of variables used as parameters */
- unsigned num_params;
- nir_variable **params;
-
- /** variable used to hold the result of the function */
- nir_variable *return_var;
-
/** list of local registers in the function */
struct exec_list registers;
return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
-typedef enum {
- nir_parameter_in,
- nir_parameter_out,
- nir_parameter_inout,
-} nir_parameter_type;
+/**
+ * Return true if this list of cf_nodes contains a single empty block.
+ */
+static inline bool
+nir_cf_list_is_empty_block(struct exec_list *cf_list)
+{
+ if (exec_list_is_singular(cf_list)) {
+ struct exec_node *head = exec_list_get_head(cf_list);
+ nir_block *block =
+ nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
+ return exec_list_is_empty(&block->instr_list);
+ }
+ return false;
+}
typedef struct {
- nir_parameter_type param_type;
- const struct glsl_type *type;
+ uint8_t num_components;
+ uint8_t bit_size;
} nir_parameter;
typedef struct nir_function {
unsigned num_params;
nir_parameter *params;
- const struct glsl_type *return_type;
/** The implementation of this function.
*
* If the function is only declared and not implemented, this is NULL.
*/
nir_function_impl *impl;
+
+ bool is_entrypoint;
} nir_function;
+typedef enum {
+ nir_lower_imul64 = (1 << 0),
+ nir_lower_isign64 = (1 << 1),
+ /** Lower all int64 modulus and division opcodes */
+ nir_lower_divmod64 = (1 << 2),
+ /** Lower all 64-bit umul_high and imul_high opcodes */
+ nir_lower_imul_high64 = (1 << 3),
+ nir_lower_mov64 = (1 << 4),
+ nir_lower_icmp64 = (1 << 5),
+ nir_lower_iadd64 = (1 << 6),
+ nir_lower_iabs64 = (1 << 7),
+ nir_lower_ineg64 = (1 << 8),
+ nir_lower_logic64 = (1 << 9),
+ nir_lower_minmax64 = (1 << 10),
+ nir_lower_shift64 = (1 << 11),
+ nir_lower_imul_2x32_64 = (1 << 12),
+} nir_lower_int64_options;
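+/* Drivers combine these flags into a mask in nir_shader_compiler_options.
+ * Illustrative use:
+ *
+ *    options->lower_int64_options = nir_lower_imul64 | nir_lower_divmod64;
+ */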
+
+typedef enum {
+ nir_lower_drcp = (1 << 0),
+ nir_lower_dsqrt = (1 << 1),
+ nir_lower_drsq = (1 << 2),
+ nir_lower_dtrunc = (1 << 3),
+ nir_lower_dfloor = (1 << 4),
+ nir_lower_dceil = (1 << 5),
+ nir_lower_dfract = (1 << 6),
+ nir_lower_dround_even = (1 << 7),
+ nir_lower_dmod = (1 << 8),
+ nir_lower_fp64_full_software = (1 << 9),
+} nir_lower_doubles_options;
+
typedef struct nir_shader_compiler_options {
bool lower_fdiv;
bool lower_ffma;
bool fuse_ffma;
+ bool lower_flrp16;
bool lower_flrp32;
/** Lowers flrp when it does not support doubles */
bool lower_flrp64;
bool lower_fpow;
bool lower_fsat;
bool lower_fsqrt;
- bool lower_fmod32;
- bool lower_fmod64;
+ bool lower_fmod;
/** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
bool lower_bitfield_extract;
/** Lowers ibitfield_extract/ubitfield_extract to bfm, compares, shifts. */
/** enables rules to lower idiv by power-of-two: */
bool lower_idiv;
- /* lower b2f to iand */
- bool lower_b2f;
+ /** enable rules to avoid bit shifts */
+ bool lower_bitshift;
+
+ /** enables rules to lower isign to imin+imax */
+ bool lower_isign;
+
+ /** enables rules to lower fsign to fsub and flt */
+ bool lower_fsign;
/* Does the native fdot instruction replicate its result for four
* components? If so, then opt_algebraic_late will turn all fdotN
*/
bool fdot_replicates;
+ /** lowers ffloor to fsub+ffract: */
+ bool lower_ffloor;
+
/** lowers ffract to fsub+ffloor: */
bool lower_ffract;
+ /** lowers fceil to fneg+ffloor+fneg: */
+ bool lower_fceil;
+
+ bool lower_ftrunc;
+
bool lower_ldexp;
bool lower_pack_half_2x16;
bool lower_extract_word;
bool lower_all_io_to_temps;
-
- /**
- * Does the driver support real 32-bit integers? (Otherwise, integers
- * are simulated by floats.)
- */
- bool native_integers;
+ bool lower_all_io_to_elements;
/* Indicates that the driver only has zero-based vertex id */
bool vertex_id_zero_based;
*/
bool lower_base_vertex;
+   /**
+    * If enabled, gl_HelperInvocation will be lowered as:
+    *
+    *    !((1 << sample_id) & sample_mask_in)
+    *
+    * This depends on hw implementation details that may not hold for all
+    * hw: in particular, that the FS is only executed for covered samples
+    * or for helper invocations.  So, do not blindly enable this option.
+    *
+    * Note: See also issue #22 in ARB_shader_image_load_store
+    */
+   bool lower_helper_invocation;
+
+ /**
+ * Convert gl_SampleMaskIn to gl_HelperInvocation as follows:
+ *
+ * gl_SampleMaskIn == 0 ---> gl_HelperInvocation
+ * gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
+ */
+ bool optimize_sample_mask_in;
+
bool lower_cs_local_index_from_id;
+ bool lower_cs_local_id_from_index;
bool lower_device_index_to_zero;
+ /* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */
+ bool lower_wpos_pntc;
+
+ bool lower_hadd;
+ bool lower_add_sat;
+
+ /**
+ * Should IO be re-vectorized? Some scalar ISAs still operate on vec4's
+ * for IO purposes and would prefer loads/stores be vectorized.
+ */
+ bool vectorize_io;
+
/**
* Should nir_lower_io() create load_interpolated_input intrinsics?
*
*/
bool use_interpolated_input_intrinsics;
- /**
- * Do vertex shader double inputs use two locations? The Vulkan spec
- * requires two locations to be used, OpenGL allows a single location.
- */
- bool vs_inputs_dual_locations;
+   /* Lower 32x32 -> 64-bit multiplication when it is not supported natively. */
+ bool lower_mul_2x32_64;
unsigned max_unroll_iterations;
+
+ nir_lower_int64_options lower_int64_options;
+ nir_lower_doubles_options lower_doubles_options;
} nir_shader_compiler_options;
typedef struct nir_shader {
struct exec_list functions; /** < list of nir_function */
- /** list of global register in the shader */
- struct exec_list registers;
-
- /** next available global register index */
- unsigned reg_alloc;
-
/**
* the highest index a load_input_*, load_uniform_*, etc. intrinsic can
* access plus one
*/
unsigned num_inputs, num_uniforms, num_outputs, num_shared;
+
+ /** Size in bytes of required scratch space */
+ unsigned scratch_size;
+
+ /** Constant data associated with this shader.
+ *
+ * Constant data is loaded through load_constant intrinsics. See also
+ * nir_opt_large_constants.
+ */
+ void *constant_data;
+ unsigned constant_data_size;
} nir_shader;
+#define nir_foreach_function(func, shader) \
+ foreach_list_typed(nir_function, func, node, &(shader)->functions)
+
static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
- assert(exec_list_length(&shader->functions) == 1);
- struct exec_node *func_node = exec_list_get_head(&shader->functions);
- nir_function *func = exec_node_data(nir_function, func_node, node);
- assert(func->return_type == glsl_void_type());
+   nir_function *func = NULL;
+
+   nir_foreach_function(function, shader) {
+      if (function->is_entrypoint) {
+         /* In debug builds, keep scanning so the assert can catch a
+          * duplicate entrypoint; in release builds, stop at the first one.
+          */
+         assert(func == NULL);
+         func = function;
+#ifdef NDEBUG
+         break;
+#endif
+      }
+   }
+
+ if (!func)
+ return NULL;
+
assert(func->num_params == 0);
assert(func->impl);
return func->impl;
}
-#define nir_foreach_function(func, shader) \
- foreach_list_typed(nir_function, func, node, &(shader)->functions)
-
nir_shader *nir_shader_create(void *mem_ctx,
gl_shader_stage stage,
const nir_shader_compiler_options *options,
shader_info *si);
-/** creates a register, including assigning it an index and adding it to the list */
-nir_register *nir_global_reg_create(nir_shader *shader);
-
nir_register *nir_local_reg_create(nir_function_impl *impl);
void nir_reg_remove(nir_register *reg);
static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
- assert(var->data.mode == nir_var_local);
+ assert(var->data.mode == nir_var_function_temp);
exec_list_push_tail(&impl->locals, &var->node);
}
unsigned num_components,
unsigned bit_size);
-nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
-nir_deref_array *nir_deref_array_create(void *mem_ctx);
-nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);
-
-typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
-bool nir_deref_foreach_leaf(nir_deref_var *deref,
- nir_deref_foreach_leaf_cb cb, void *state);
-
-nir_load_const_instr *
-nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);
-
nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
/**
}
}
+static inline nir_cursor
+nir_before_src(nir_src *src, bool is_if_condition)
+{
+ if (is_if_condition) {
+ nir_block *prev_block =
+ nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
+ assert(!nir_block_ends_in_jump(prev_block));
+ return nir_after_block(prev_block);
+ } else if (src->parent_instr->type == nir_instr_type_phi) {
+#ifndef NDEBUG
+ nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
+ bool found = false;
+ nir_foreach_phi_src(phi_src, cond_phi) {
+ if (phi_src->src.ssa == src->ssa) {
+ found = true;
+ break;
+ }
+ }
+ assert(found);
+#endif
+      /* The LIST_ENTRY macro is a generic container-of macro; it just
+       * happens to have a more specific name.
+       */
+ nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
+ return nir_after_block_before_jump(phi_src->pred);
+ } else {
+ return nir_before_instr(src->parent_instr);
+ }
+}
+
static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
nir_const_value *nir_src_as_const_value(nir_src src);
+
+#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro) \
+static inline c_type * \
+nir_src_as_ ## name (nir_src src) \
+{ \
+ return src.is_ssa && src.ssa->parent_instr->type == type_enum \
+ ? cast_macro(src.ssa->parent_instr) : NULL; \
+}
+
+NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
+NIR_SRC_AS_(intrinsic, nir_intrinsic_instr,
+ nir_instr_type_intrinsic, nir_instr_as_intrinsic)
+NIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref)
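+/* Illustrative use: match a source produced by an fneg, assuming `src` is
+ * a nir_src:
+ *
+ *    nir_alu_instr *alu = nir_src_as_alu_instr(src);
+ *    if (alu && alu->op == nir_op_fneg)
+ *       ...
+ *
+ * Each helper returns NULL when src is not SSA or is defined by another
+ * instruction type.
+ */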
+
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
nir_dest new_dest);
-void nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
- nir_deref_var *new_deref);
void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
unsigned num_components, unsigned bit_size,
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
nir_instr *after_me);
-uint8_t nir_ssa_def_components_read(const nir_ssa_def *def);
+nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
/*
* finds the next basic block in source-code order, returns NULL if there is
nir_loop *nir_block_get_following_loop(nir_block *block);
void nir_index_local_regs(nir_function_impl *impl);
-void nir_index_global_regs(nir_shader *shader);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);
void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);
+void nir_print_deref(const nir_deref_instr *deref, FILE *fp);
+
+/** Shallow clone of a single ALU instruction. */
+nir_alu_instr *nir_alu_instr_clone(nir_shader *s, const nir_alu_instr *orig);
nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
-nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
+nir_function_impl *nir_function_impl_clone(nir_shader *shader,
+ const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
-nir_deref *nir_deref_clone(const nir_deref *deref, void *mem_ctx);
-nir_deref_var *nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx);
-nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);
+void nir_shader_replace(nir_shader *dest, nir_shader *src);
+
+void nir_shader_serialize_deserialize(nir_shader *s);
#ifndef NDEBUG
-void nir_validate_shader(nir_shader *shader);
+void nir_validate_shader(nir_shader *shader, const char *when);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);
+static inline bool
+should_skip_nir(const char *name)
+{
+ static const char *list = NULL;
+ if (!list) {
+ /* Comma separated list of names to skip. */
+ list = getenv("NIR_SKIP");
+ if (!list)
+ list = "";
+ }
+
+ if (!list[0])
+ return false;
+
+ return comma_separated_list_contains(list, name);
+}
+
static inline bool
should_clone_nir(void)
{
return should_print;
}
#else
-static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
+static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
+static inline bool should_skip_nir(UNUSED const char *pass_name) { return false; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_serialize_deserialize_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
#endif /* NDEBUG */
-#define _PASS(nir, do_pass) do { \
+#define _PASS(pass, nir, do_pass) do { \
+ if (should_skip_nir(#pass)) { \
+ printf("skipping %s\n", #pass); \
+ break; \
+ } \
do_pass \
- nir_validate_shader(nir); \
+ nir_validate_shader(nir, "after " #pass); \
if (should_clone_nir()) { \
nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
- ralloc_free(nir); \
- nir = clone; \
+ nir_shader_replace(nir, clone); \
} \
if (should_serialize_deserialize_nir()) { \
- void *mem_ctx = ralloc_parent(nir); \
- nir = nir_shader_serialize_deserialize(mem_ctx, nir); \
+ nir_shader_serialize_deserialize(nir); \
} \
} while (0)
-#define NIR_PASS(progress, nir, pass, ...) _PASS(nir, \
+#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir, \
nir_metadata_set_validation_flag(nir); \
if (should_print_nir()) \
printf("%s\n", #pass); \
} \
)
-#define NIR_PASS_V(nir, pass, ...) _PASS(nir, \
+#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir, \
if (should_print_nir()) \
printf("%s\n", #pass); \
pass(nir, ##__VA_ARGS__); \
nir_print_shader(nir, stdout); \
)
+#define NIR_SKIP(name) should_skip_nir(#name)
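+/* Typical use of the pass macros (illustrative):
+ *
+ *    bool progress = false;
+ *    NIR_PASS(progress, nir, nir_opt_algebraic);
+ *    NIR_PASS_V(nir, nir_lower_system_values);
+ *
+ * Setting NIR_SKIP=nir_opt_algebraic,nir_opt_gcm in the environment makes
+ * these macros skip the named passes (debug builds only).
+ */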
+
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);
int nir_gs_count_vertices(const nir_shader *shader);
+bool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes);
+bool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_var_copies(nir_shader *shader);
+bool nir_split_per_member_structs(nir_shader *shader);
+bool nir_split_struct_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);
+void nir_inline_function_impl(struct nir_builder *b,
+ const nir_function_impl *impl,
+ nir_ssa_def **params);
bool nir_inline_functions(nir_shader *shader);
bool nir_propagate_invariant(nir_shader *shader);
void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
+void nir_lower_deref_copy_instr(struct nir_builder *b,
+ nir_intrinsic_instr *copy);
bool nir_lower_var_copies(nir_shader *shader);
+void nir_fixup_deref_modes(nir_shader *shader);
+
bool nir_lower_global_vars_to_local(nir_shader *shader);
+typedef enum {
+ nir_lower_direct_array_deref_of_vec_load = (1 << 0),
+ nir_lower_indirect_array_deref_of_vec_load = (1 << 1),
+ nir_lower_direct_array_deref_of_vec_store = (1 << 2),
+ nir_lower_indirect_array_deref_of_vec_store = (1 << 3),
+} nir_lower_array_deref_of_vec_options;
+
+bool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
+ nir_lower_array_deref_of_vec_options options);
+
bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_locals_to_regs(nir_shader *shader);
nir_function_impl *entrypoint,
bool outputs, bool inputs);
+bool nir_lower_vars_to_scratch(nir_shader *shader,
+ nir_variable_mode modes,
+ int size_threshold,
+ glsl_type_size_align_func size_align);
+
void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
+void nir_gather_ssa_types(nir_function_impl *impl,
+ BITSET_WORD *float_types,
+ BITSET_WORD *int_types);
+
void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
- int (*type_size)(const struct glsl_type *));
+ int (*type_size)(const struct glsl_type *, bool));
/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
+bool nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
+ uint64_t *used_by_other_stage,
+ uint64_t *used_by_other_stage_patches);
void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
bool default_to_smooth_interp);
+void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
+bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
typedef enum {
/* If set, this forces all non-flat fragment shader inputs to be
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *),
+ int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options);
+
+typedef enum {
+ /**
+ * An address format which is a simple 32-bit global GPU address.
+ */
+ nir_address_format_32bit_global,
+
+ /**
+ * An address format which is a simple 64-bit global GPU address.
+ */
+ nir_address_format_64bit_global,
+
+ /**
+ * An address format which is a bounds-checked 64-bit global GPU address.
+ *
+    * The address is a vec4 of 32-bit components where .xy hold a uint64_t
+    * base address (low bits in .x, high bits in .y), .z is a size, and .w
+    * is an offset.  When the final I/O operation is lowered, .w is checked
+    * against .z and the operation is predicated on the result.
+ */
+ nir_address_format_64bit_bounded_global,
+
+ /**
+    * An address format consisting of a vec2 where the first component is
+    * a buffer index and the second is an offset.
+ */
+ nir_address_format_32bit_index_offset,
+
+ /**
+ * An address format which is a simple 32-bit offset.
+ */
+ nir_address_format_32bit_offset,
+
+ /**
+ * An address format representing a purely logical addressing model. In
+ * this model, all deref chains must be complete from the dereference
+ * operation to the variable. Cast derefs are not allowed. These
+ * addresses will be 32-bit scalars but the format is immaterial because
+ * you can always chase the chain.
+ */
+ nir_address_format_logical,
+} nir_address_format;
+
+static inline unsigned
+nir_address_format_bit_size(nir_address_format addr_format)
+{
+ switch (addr_format) {
+ case nir_address_format_32bit_global: return 32;
+ case nir_address_format_64bit_global: return 64;
+ case nir_address_format_64bit_bounded_global: return 32;
+ case nir_address_format_32bit_index_offset: return 32;
+ case nir_address_format_32bit_offset: return 32;
+ case nir_address_format_logical: return 32;
+ }
+ unreachable("Invalid address format");
+}
+
+static inline unsigned
+nir_address_format_num_components(nir_address_format addr_format)
+{
+ switch (addr_format) {
+ case nir_address_format_32bit_global: return 1;
+ case nir_address_format_64bit_global: return 1;
+ case nir_address_format_64bit_bounded_global: return 4;
+ case nir_address_format_32bit_index_offset: return 2;
+ case nir_address_format_32bit_offset: return 1;
+ case nir_address_format_logical: return 1;
+ }
+ unreachable("Invalid address format");
+}
+
+static inline const struct glsl_type *
+nir_address_format_to_glsl_type(nir_address_format addr_format)
+{
+ unsigned bit_size = nir_address_format_bit_size(addr_format);
+ assert(bit_size == 32 || bit_size == 64);
+ return glsl_vector_type(bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64,
+ nir_address_format_num_components(addr_format));
+}
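+/* For example, nir_address_format_64bit_bounded_global maps to a uvec4
+ * (four 32-bit components), while nir_address_format_64bit_global maps to
+ * a single-component 64-bit uint vector.
+ */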
+
+const nir_const_value *nir_address_format_null_value(nir_address_format addr_format);
+
+nir_ssa_def *nir_build_addr_ieq(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
+ nir_address_format addr_format);
+
+nir_ssa_def *nir_build_addr_isub(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
+ nir_address_format addr_format);
+
+nir_ssa_def * nir_explicit_io_address_from_deref(struct nir_builder *b,
+ nir_deref_instr *deref,
+ nir_ssa_def *base_addr,
+ nir_address_format addr_format);
+void nir_lower_explicit_io_instr(struct nir_builder *b,
+ nir_intrinsic_instr *io_instr,
+ nir_ssa_def *addr,
+ nir_address_format addr_format);
+
+bool nir_lower_explicit_io(nir_shader *shader,
+ nir_variable_mode modes,
+ nir_address_format);
+
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
-void nir_lower_io_types(nir_shader *shader);
bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
bool nir_lower_vars_to_ssa(nir_shader *shader);
+bool nir_remove_dead_derefs(nir_shader *shader);
+bool nir_remove_dead_derefs_impl(nir_function_impl *impl);
bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_constant_initializers(nir_shader *shader,
nir_variable_mode modes);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
bool alpha_to_one);
bool nir_lower_alu(nir_shader *shader);
-bool nir_lower_alu_to_scalar(nir_shader *shader);
+
+bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
+ bool always_precise, bool have_ffma);
+
+bool nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set);
+bool nir_lower_bool_to_float(nir_shader *shader);
+bool nir_lower_bool_to_int32(nir_shader *shader);
+bool nir_lower_int_to_float(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader);
bool outputs_only);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
+bool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask);
+
+void nir_lower_fragcoord_wtrans(nir_shader *shader);
+void nir_lower_viewport_transform(nir_shader *shader);
+bool nir_lower_uniforms_to_ubo(nir_shader *shader, int multiplier);
typedef struct nir_lower_subgroups_options {
uint8_t subgroup_size;
bool nir_lower_system_values(nir_shader *shader);
+enum PACKED nir_lower_tex_packing {
+ nir_lower_tex_packing_none = 0,
+ /* The sampler returns up to 2 32-bit words of half floats or 16-bit signed
+ * or unsigned ints based on the sampler type
+ */
+ nir_lower_tex_packing_16,
+ /* The sampler returns 1 32-bit word of 4x8 unorm */
+ nir_lower_tex_packing_8,
+};
+
typedef struct nir_lower_tex_options {
/**
* bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
unsigned lower_y_u_v_external;
unsigned lower_yx_xuxv_external;
unsigned lower_xy_uxvx_external;
+ unsigned lower_ayuv_external;
+ unsigned lower_xyuv_external;
/**
* To emulate certain texture wrap modes, this can be used
*/
uint8_t swizzles[32][4];
+   /* Can be used to scale sampled values into the range required by the format. */
+ float scale_factors[32];
+
/**
* Bitmap of textures that need srgb to linear conversion. If
* (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
*/
unsigned lower_srgb;
+ /**
+    * If true, lower nir_texop_tex to nir_texop_txl on shaders that don't
+    * support implicit LODs.
+ */
+ bool lower_tex_without_implicit_lod;
+
/**
* If true, lower nir_texop_txd on cube maps with nir_texop_txl.
*/
bool lower_txd_cube_map;
+ /**
+ * If true, lower nir_texop_txd on 3D surfaces with nir_texop_txl.
+ */
+ bool lower_txd_3d;
+
/**
* If true, lower nir_texop_txd on shadow samplers (except cube maps)
* with nir_texop_txl. Notice that cube map shadow samplers are lowered
* Implies lower_txd_cube_map and lower_txd_shadow.
*/
bool lower_txd;
+
+ /**
+    * If true, lower nir_texop_txb instructions that use both shadow compare
+    * and min_lod to a nir_texop_lod, some math, and a nir_texop_tex.
+ */
+ bool lower_txb_shadow_clamp;
+
+ /**
+    * If true, lower nir_texop_txd to nir_texop_txl on shadow samplers when
+    * it uses min_lod.  This includes cube maps.
+ */
+ bool lower_txd_shadow_clamp;
+
+ /**
+    * If true, lower nir_texop_txd to nir_texop_txl when it uses both offset
+    * and min_lod.  This includes cube maps.
+ */
+ bool lower_txd_offset_clamp;
+
+ /**
+ * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
+ * sampler is bindless.
+ */
+ bool lower_txd_clamp_bindless_sampler;
+
+ /**
+ * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
+ * sampler index is not statically determinable to be less than 16.
+ */
+ bool lower_txd_clamp_if_sampler_index_not_lt_16;
+
+ /**
+ * If true, apply a .bagr swizzle on tg4 results to handle Broadcom's
+ * mixed-up tg4 locations.
+ */
+ bool lower_tg4_broadcom_swizzle;
+
+ /**
+ * If true, lowers tg4 with 4 constant offsets to 4 tg4 calls
+ */
+ bool lower_tg4_offsets;
+
+ enum nir_lower_tex_packing lower_tex_packing[32];
} nir_lower_tex_options;
bool nir_lower_tex(nir_shader *shader,
const nir_lower_tex_options *options);
+enum nir_lower_non_uniform_access_type {
+ nir_lower_non_uniform_ubo_access = (1 << 0),
+ nir_lower_non_uniform_ssbo_access = (1 << 1),
+ nir_lower_non_uniform_texture_access = (1 << 2),
+ nir_lower_non_uniform_image_access = (1 << 3),
+};
+
+bool nir_lower_non_uniform_access(nir_shader *shader,
+ enum nir_lower_non_uniform_access_type);
+
bool nir_lower_idiv(nir_shader *shader);
-bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
+bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
+bool nir_lower_frexp(nir_shader *nir);
+
void nir_lower_two_sided_color(nir_shader *shader);
bool nir_lower_clamp_color_outputs(nir_shader *shader);
void nir_lower_passthrough_edgeflags(nir_shader *shader);
-void nir_lower_tes_patch_vertices(nir_shader *tes, unsigned patch_vertices);
+bool nir_lower_patch_vertices(nir_shader *nir, unsigned static_count,
+ const gl_state_index16 *uniform_state_tokens);
typedef struct nir_lower_wpos_ytransform_options {
gl_state_index16 state_tokens[STATE_LENGTH];
const nir_lower_wpos_ytransform_options *options);
bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
+bool nir_lower_fb_read(nir_shader *shader);
+
typedef struct nir_lower_drawpixels_options {
gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
gl_state_index16 scale_state_tokens[STATE_LENGTH];
void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
-bool nir_lower_to_source_mods(nir_shader *shader);
+
+typedef enum {
+ nir_lower_int_source_mods = 1 << 0,
+ nir_lower_float_source_mods = 1 << 1,
+ nir_lower_triop_abs = 1 << 2,
+ nir_lower_all_source_mods = (1 << 3) - 1
+} nir_lower_to_source_mods_flags;
+
+bool nir_lower_to_source_mods(nir_shader *shader,
+                              nir_lower_to_source_mods_flags options);
bool nir_lower_gs_intrinsics(nir_shader *shader);
nir_lower_bit_size_callback callback,
void *callback_data);
-typedef enum {
- nir_lower_imul64 = (1 << 0),
- nir_lower_isign64 = (1 << 1),
- /** Lower all int64 modulus and division opcodes */
- nir_lower_divmod64 = (1 << 2),
-} nir_lower_int64_options;
-
+nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
-typedef enum {
- nir_lower_drcp = (1 << 0),
- nir_lower_dsqrt = (1 << 1),
- nir_lower_drsq = (1 << 2),
- nir_lower_dtrunc = (1 << 3),
- nir_lower_dfloor = (1 << 4),
- nir_lower_dceil = (1 << 5),
- nir_lower_dfract = (1 << 6),
- nir_lower_dround_even = (1 << 7),
- nir_lower_dmod = (1 << 8)
-} nir_lower_doubles_options;
-
-bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
+nir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode);
+bool nir_lower_doubles(nir_shader *shader, const nir_shader *softfp64,
+ nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
bool nir_normalize_cubemap_coords(nir_shader *shader);
bool nir_lower_phis_to_regs_block(nir_block *block);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
+bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
+
+bool nir_opt_comparison_pre(nir_shader *shader);
bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_before_ffma(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);
-bool nir_opt_global_to_local(nir_shader *shader);
+bool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes);
bool nir_copy_prop(nir_shader *shader);
bool nir_opt_dead_cf(nir_shader *shader);
+bool nir_opt_dead_write_vars(nir_shader *shader);
+
+bool nir_opt_deref_impl(nir_function_impl *impl);
+bool nir_opt_deref(nir_shader *shader);
+
+bool nir_opt_find_array_copies(nir_shader *shader);
+
bool nir_opt_gcm(nir_shader *shader, bool value_number);
-bool nir_opt_if(nir_shader *shader);
+bool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size);
+
+bool nir_opt_if(nir_shader *shader, bool aggressive_last_continue);
bool nir_opt_intrinsics(nir_shader *shader);
+bool nir_opt_large_constants(nir_shader *shader,
+ glsl_type_size_align_func size_align,
+ unsigned threshold);
+
bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);
bool nir_opt_move_comparisons(nir_shader *shader);
bool nir_opt_move_load_ubo(nir_shader *shader);
-bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);
+bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
+ bool indirect_load_ok, bool expensive_alu_ok);
+
+bool nir_opt_rematerialize_compares(nir_shader *shader);
bool nir_opt_remove_phis(nir_shader *shader);
bool nir_opt_conditional_discard(nir_shader *shader);
+void nir_strip(nir_shader *shader);
+
void nir_sweep(nir_shader *shader);
+void nir_remap_dual_slot_attributes(nir_shader *shader,
+ uint64_t *dual_slot_inputs);
+uint64_t nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot);
+
nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);
+bool nir_lower_sincos(nir_shader *shader);
+
#ifdef __cplusplus
} /* extern "C" */
#endif