typedef enum {
nir_var_shader_in = (1 << 0),
nir_var_shader_out = (1 << 1),
- nir_var_global = (1 << 2),
- nir_var_local = (1 << 3),
+ nir_var_shader_temp = (1 << 2),
+ nir_var_function = (1 << 3),
nir_var_uniform = (1 << 4),
- nir_var_shader_storage = (1 << 5),
+ nir_var_ubo = (1 << 5),
nir_var_system_value = (1 << 6),
+ nir_var_ssbo = (1 << 7),
nir_var_shared = (1 << 8),
nir_var_all = ~0,
} nir_variable_mode;
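/* A minimal sketch (not part of the patch): with every mode now a single
 * bit, mode sets combine with bitwise OR, e.g. a hypothetical helper that
 * matches both kinds of temporaries:
 */
static inline bool
mode_is_temp(nir_variable_mode mode)
{
   return (mode & (nir_var_shader_temp | nir_var_function)) != 0;
}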
} nir_rounding_mode;
typedef union {
+ bool b[NIR_MAX_VEC_COMPONENTS];
float f32[NIR_MAX_VEC_COMPONENTS];
double f64[NIR_MAX_VEC_COMPONENTS];
int8_t i8[NIR_MAX_VEC_COMPONENTS];
static inline bool
nir_variable_is_global(const nir_variable *var)
{
- return var->data.mode != nir_var_local;
+ return var->data.mode != nir_var_function;
}
typedef struct nir_register {
#define nir_foreach_register_safe(reg, reg_list) \
foreach_list_typed_safe(nir_register, reg, node, reg_list)
-typedef enum {
+typedef enum PACKED {
nir_instr_type_alu,
nir_instr_type_deref,
nir_instr_type_call,
typedef struct nir_instr {
struct exec_node node;
- nir_instr_type type;
struct nir_block *block;
-
- /** generic instruction index. */
- unsigned index;
+ nir_instr_type type;
/* A temporary for optimization and analysis passes to use for storing
* flags. For instance, DCE uses this to store the "dead/live" info.
*/
uint8_t pass_flags;
+
+ /** generic instruction index. */
+ unsigned index;
} nir_instr;
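/* With nir_instr_type now PACKED (one byte), moving it next to the one-byte
 * pass_flags presumably lets the fields pack tightly.  A sketch (hypothetical
 * helper, mirroring the DCE pattern mentioned above) of how a pass uses the
 * scratch byte:
 */
static void
clear_pass_flags(nir_block *block)
{
   nir_foreach_instr(instr, block)
      instr->pass_flags = 0; /* reset before the analysis walk */
}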
static inline nir_instr *
unsigned write_mask : NIR_MAX_VEC_COMPONENTS; /* ignored if dest.is_ssa is true */
} nir_alu_dest;
+/** NIR sized and unsized types
+ *
+ * The values in this enum are carefully chosen so that the sized type is
+ * just the unsized type OR the number of bits.
+ */
typedef enum {
nir_type_invalid = 0, /* Not a valid type */
- nir_type_float,
- nir_type_int,
- nir_type_uint,
- nir_type_bool,
+ nir_type_int = 2,
+ nir_type_uint = 4,
+ nir_type_bool = 6,
+ nir_type_float = 128,
+ nir_type_bool1 = 1 | nir_type_bool,
nir_type_bool32 = 32 | nir_type_bool,
+ nir_type_int1 = 1 | nir_type_int,
nir_type_int8 = 8 | nir_type_int,
nir_type_int16 = 16 | nir_type_int,
nir_type_int32 = 32 | nir_type_int,
nir_type_int64 = 64 | nir_type_int,
+ nir_type_uint1 = 1 | nir_type_uint,
nir_type_uint8 = 8 | nir_type_uint,
nir_type_uint16 = 16 | nir_type_uint,
nir_type_uint32 = 32 | nir_type_uint,
nir_type_float64 = 64 | nir_type_float,
} nir_alu_type;
-#define NIR_ALU_TYPE_SIZE_MASK 0xfffffff8
-#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007
+#define NIR_ALU_TYPE_SIZE_MASK 0x79
+#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x86
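/* Worked example of the encoding (for illustration): a sized type is the
 * unsized base type OR'd with its bit size, and the two masks recover the
 * halves:
 *
 *    nir_type_bool1   == 6 | 1    == 7;    7 & 0x79 == 1,    7 & 0x86 == 6
 *    nir_type_float64 == 128 | 64 == 192;  192 & 0x79 == 64, 192 & 0x86 == 128
 */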
static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
switch (base_type) {
case GLSL_TYPE_BOOL:
- return nir_type_bool32;
+ return nir_type_bool1;
break;
case GLSL_TYPE_UINT:
return nir_type_uint32;
nir_deref_type_var,
nir_deref_type_array,
nir_deref_type_array_wildcard,
+ nir_deref_type_ptr_as_array,
nir_deref_type_struct,
nir_deref_type_cast,
} nir_deref_type;
struct {
unsigned index;
} strct;
+
+ struct {
+ unsigned ptr_stride;
+ } cast;
};
/** Destination to store the resulting "pointer" */
bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);
+unsigned nir_deref_instr_ptr_as_array_stride(nir_deref_instr *instr);
+
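/* A sketch (not part of the patch) of building the new deref type with
 * nir_builder: a cast deref supplies the type and ptr_stride, and
 * ptr_as_array then steps the pointer like &ptr[i].  The helper name is
 * hypothetical; the stride of 4 assumes a tightly packed uint array.
 */
static nir_deref_instr *
index_raw_ssbo_pointer(nir_builder *b, nir_ssa_def *ptr, nir_ssa_def *index)
{
   nir_deref_instr *cast =
      nir_build_deref_cast(b, ptr, nir_var_ssbo, glsl_uint_type(), 4);
   return nir_build_deref_ptr_as_array(b, cast, index);
}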
typedef struct {
nir_instr instr;
NIR_INTRINSIC_FORMAT = 15,
/**
- * Access qualifiers for image intrinsics
+ * Access qualifiers for image and memory access intrinsics
*/
NIR_INTRINSIC_ACCESS = 16,
NIR_INTRINSIC_ALIGN_MUL = 17,
NIR_INTRINSIC_ALIGN_OFFSET = 18,
+ /**
+ * The Vulkan descriptor type for a vulkan_resource_[re]index intrinsic.
+ */
+ NIR_INTRINSIC_DESC_TYPE = 19,
+
NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
/** number of components of each input register
*
* If this value is 0, the number of components is given by the
- * num_components field of nir_intrinsic_instr.
+ * num_components field of nir_intrinsic_instr. If this value is -1, the
+ * intrinsic consumes however many components are provided and it is not
+ * validated at all.
*/
- unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];
+ int src_components[NIR_INTRINSIC_MAX_INPUTS];
bool has_dest;
{
const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
assert(srcn < info->num_srcs);
- if (info->src_components[srcn])
+ if (info->src_components[srcn] > 0)
return info->src_components[srcn];
- else
+ else if (info->src_components[srcn] == 0)
return intr->num_components;
+ else
+ return nir_src_num_components(intr->src[srcn]);
}
static inline unsigned
INTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned)
INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
+INTRINSIC_IDX_ACCESSORS(desc_type, DESC_TYPE, unsigned)
static inline void
nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
case nir_op_uge:
case nir_op_ieq:
case nir_op_ine:
- case nir_op_i2b32:
- case nir_op_f2b32:
+ case nir_op_i2b1:
+ case nir_op_f2b1:
case nir_op_inot:
case nir_op_fnot:
return true;
* If the function is only declared and not implemented, this is NULL.
*/
nir_function_impl *impl;
+
+ bool is_entrypoint;
} nir_function;
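/* Sketch: a shader built by hand flags exactly one function as the
 * entrypoint (illustrative usage; nir_function_create is existing API).
 */
static void
add_main_function(nir_shader *shader)
{
   nir_function *func = nir_function_create(shader, "main");
   func->is_entrypoint = true;
}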
typedef struct nir_shader_compiler_options {
/** enables rules to lower idiv by power-of-two: */
bool lower_idiv;
- /* lower b2f to iand */
- bool lower_b2f;
-
/* Does the native fdot instruction replicate its result for four
* components? If so, then opt_algebraic_late will turn all fdotN
* instructions into fdot_replicatedN instructions.
unsigned constant_data_size;
} nir_shader;
+#define nir_foreach_function(func, shader) \
+ foreach_list_typed(nir_function, func, node, &(shader)->functions)
+
static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
- assert(exec_list_length(&shader->functions) == 1);
- struct exec_node *func_node = exec_list_get_head(&shader->functions);
- nir_function *func = exec_node_data(nir_function, func_node, node);
+ nir_function *func = NULL;
+
+ nir_foreach_function(function, shader) {
+ if (function->is_entrypoint) {
+ /* Debug builds keep scanning so the assert can catch a second
+ * entrypoint; release builds stop at the first one. */
+ assert(func == NULL);
+ func = function;
+#ifdef NDEBUG
+ break;
+#endif
+ }
+ }
+
+ if (!func)
+ return NULL;
+
assert(func->num_params == 0);
assert(func->impl);
return func->impl;
}
-#define nir_foreach_function(func, shader) \
- foreach_list_typed(nir_function, func, node, &(shader)->functions)
-
nir_shader *nir_shader_create(void *mem_ctx,
gl_shader_stage stage,
const nir_shader_compiler_options *options,
static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
- assert(var->data.mode == nir_var_local);
+ assert(var->data.mode == nir_var_function);
exec_list_push_tail(&impl->locals, &var->node);
}
void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);
+void nir_print_deref(const nir_deref_instr *deref, FILE *fp);
nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);
+static inline bool
+should_skip_nir(const char *name)
+{
+ static const char *list = NULL;
+ if (!list) {
+ /* Comma-separated list of pass names to skip. */
+ list = getenv("NIR_SKIP");
+ if (!list)
+ list = "";
+ }
+
+ if (!list[0])
+ return false;
+
+ return comma_separated_list_contains(list, name);
+}
+
static inline bool
should_clone_nir(void)
{
static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
+static inline bool should_skip_nir(const char *pass_name) { return false; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_serialize_deserialize_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
#endif /* NDEBUG */
#define _PASS(pass, nir, do_pass) do { \
+ if (should_skip_nir(#pass)) { \
+ printf("skipping %s\n", #pass); \
+ break; \
+ } \
do_pass \
nir_validate_shader(nir, "after " #pass); \
if (should_clone_nir()) { \
nir_print_shader(nir, stdout); \
)
+#define NIR_SKIP(name) should_skip_nir(#name)
+
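/* Sketch: a driver can guard its own passes with the macro; here
 * my_backend_lowering is a hypothetical pass name, skippable by running
 * with NIR_SKIP=my_backend_lowering.
 */
static void
run_backend_lowering(nir_shader *nir)
{
   if (!NIR_SKIP(my_backend_lowering))
      my_backend_lowering(nir);
}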
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);
void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
bool default_to_smooth_interp);
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
-bool nir_link_constant_varyings(nir_shader *producer, nir_shader *consumer);
+bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
typedef enum {
/* If set, this forces all non-flat fragment shader inputs to be
nir_variable_mode modes,
int (*type_size)(const struct glsl_type *),
nir_lower_io_options);
+
+typedef enum {
+ /**
+ * An address format which is a vec2 where the first component is a
+ * Vulkan descriptor index and the second is an offset.
+ */
+ nir_address_format_vk_index_offset,
+} nir_address_format;
+bool nir_lower_explicit_io(nir_shader *shader,
+ nir_variable_mode modes,
+ nir_address_format);
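/* Sketch of a Vulkan-style caller: rewrite UBO/SSBO access into the
 * (descriptor index, offset) form described above.
 */
static void
lower_buffer_io(nir_shader *shader)
{
   nir_lower_explicit_io(shader, nir_var_ubo | nir_var_ssbo,
                         nir_address_format_vk_index_offset);
}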
+
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
bool alpha_to_one);
bool nir_lower_alu(nir_shader *shader);
bool nir_lower_alu_to_scalar(nir_shader *shader);
+bool nir_lower_bool_to_float(nir_shader *shader);
+bool nir_lower_bool_to_int32(nir_shader *shader);
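/* Sketch: with 1-bit booleans in the IR, a backend picks its native
 * representation late; the flag here is a hypothetical backend property.
 */
static void
lower_bools_for_backend(nir_shader *shader, bool wants_float_bools)
{
   if (wants_float_bools)
      nir_lower_bool_to_float(shader);
   else
      nir_lower_bool_to_int32(shader);
}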
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader);
bool nir_lower_system_values(nir_shader *shader);
+enum PACKED nir_lower_tex_packing {
+ nir_lower_tex_packing_none = 0,
+ /* The sampler returns up to 2 32-bit words of half floats or 16-bit signed
+ * or unsigned ints based on the sampler type
+ */
+ nir_lower_tex_packing_16,
+ /* The sampler returns 1 32-bit word of 4x8 unorm */
+ nir_lower_tex_packing_8,
+};
+
typedef struct nir_lower_tex_options {
/**
* bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
*/
bool lower_txd_cube_map;
+ /**
+ * If true, lower nir_texop_txd on 3D surfaces with nir_texop_txl.
+ */
+ bool lower_txd_3d;
+
/**
* If true, lower nir_texop_txd when it takes both an offset and an LOD
* clamp (min_lod) with nir_texop_txl.
*/
bool lower_txd_offset_clamp;
+
+ /**
+ * If true, apply a .bagr swizzle on tg4 results to handle Broadcom's
+ * mixed-up tg4 locations.
+ */
+ bool lower_tg4_broadcom_swizzle;
+
+ enum nir_lower_tex_packing lower_tex_packing[32];
} nir_lower_tex_options;
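/* Sketch of configuring the new options for nir_lower_tex (declared just
 * below); the particular values are illustrative only.
 */
static void
lower_tex_for_backend(nir_shader *shader)
{
   nir_lower_tex_options opts = {
      .lower_txd_3d = true,
      .lower_tg4_broadcom_swizzle = true,
   };
   opts.lower_tex_packing[0] = nir_lower_tex_packing_16;
   nir_lower_tex(shader, &opts);
}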
bool nir_lower_tex(nir_shader *shader,
nir_lower_divmod64 = (1 << 2),
/** Lower all 64-bit umul_high and imul_high opcodes */
nir_lower_imul_high64 = (1 << 3),
+ nir_lower_mov64 = (1 << 4),
+ nir_lower_icmp64 = (1 << 5),
+ nir_lower_iadd64 = (1 << 6),
+ nir_lower_iabs64 = (1 << 7),
+ nir_lower_ineg64 = (1 << 8),
+ nir_lower_logic64 = (1 << 9),
+ nir_lower_minmax64 = (1 << 10),
+ nir_lower_shift64 = (1 << 11),
} nir_lower_int64_options;
bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
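/* Sketch: a backend requests only the 64-bit lowerings it lacks, e.g.
 * shifts and min/max (the option set is illustrative).
 */
static bool
lower_int64_for_backend(nir_shader *shader)
{
   return nir_lower_int64(shader, nir_lower_shift64 | nir_lower_minmax64);
}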
nir_lower_dceil = (1 << 5),
nir_lower_dfract = (1 << 6),
nir_lower_dround_even = (1 << 7),
- nir_lower_dmod = (1 << 8)
+ nir_lower_dmod = (1 << 8),
+ nir_lower_fp64_full_software = (1 << 9),
} nir_lower_doubles_options;
bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
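/* Sketch: hardware with no fp64 support at all can now request the full
 * software implementation in one option.
 */
static bool
lower_fp64_in_software(nir_shader *shader)
{
   return nir_lower_doubles(shader, nir_lower_fp64_full_software);
}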
bool nir_opt_dead_write_vars(nir_shader *shader);
+bool nir_opt_deref(nir_shader *shader);
+
bool nir_opt_find_array_copies(nir_shader *shader);
bool nir_opt_gcm(nir_shader *shader, bool value_number);
bool nir_opt_move_load_ubo(nir_shader *shader);
-bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);
+bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
+ bool indirect_load_ok, bool expensive_alu_ok);
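/* Sketch of the extended signature: flatten if/else blocks of up to 8
 * instructions, allowing indirect loads but not expensive ALU ops in the
 * flattened arms (threshold and flags are illustrative).
 */
static bool
flatten_small_ifs(nir_shader *shader)
{
   return nir_opt_peephole_select(shader, 8, true, false);
}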
-bool nir_opt_remove_phis_impl(nir_function_impl *impl);
bool nir_opt_remove_phis(nir_shader *shader);
bool nir_opt_shrink_load(nir_shader *shader);