extern "C" {
#endif
-struct gl_program;
-struct gl_shader_program;
-
#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
* \sa nir_variable::state_slots
*/
typedef struct {
- int tokens[5];
+ gl_state_index16 tokens[STATE_LENGTH]; /* state-variable token array, sized by STATE_LENGTH -- TODO confirm against prog_statevars.h */
int swizzle; /* NOTE(review): presumably a SWIZZLE_XYZW-style component selector for the state value -- verify */
} nir_state_slot;
*/
unsigned fb_fetch_output:1;
+ /**
+ * Non-zero if this variable is considered bindless as defined by
+ * ARB_bindless_texture.
+ */
+ unsigned bindless:1;
+
/**
* \brief Layout qualifier for gl_FragDepth.
*
return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}
+/** Returns the number of vector components read through the given source. */
+static inline unsigned
+nir_src_num_components(nir_src src)
+{
+   if (src.is_ssa)
+      return src.ssa->num_components;
+
+   return src.reg.reg->num_components;
+}
+
/** Bit width of the value written to this destination (SSA def or register). */
static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   if (dest.is_ssa)
      return dest.ssa.bit_size;

   return dest.reg.reg->bit_size;
}
+/** Returns the number of vector components written to this destination. */
+static inline unsigned
+nir_dest_num_components(nir_dest dest)
+{
+   if (dest.is_ssa)
+      return dest.ssa.num_components;
+
+   return dest.reg.reg->num_components;
+}
+
void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
case GLSL_TYPE_INT16:
return nir_type_int16;
break;
+ case GLSL_TYPE_UINT8:
+ return nir_type_uint8;
+ case GLSL_TYPE_INT8:
+ return nir_type_int8;
case GLSL_TYPE_UINT64:
return nir_type_uint64;
break;
struct nir_function *callee;
} nir_call_instr;
-#define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
- num_variables, num_indices, idx0, idx1, idx2, flags) \
- nir_intrinsic_##name,
-
-#define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
-
-typedef enum {
#include "nir_intrinsics.h"
- nir_num_intrinsics = nir_last_intrinsic + 1
-} nir_intrinsic_op;
#define NIR_INTRINSIC_MAX_CONST_INDEX 3
*/
NIR_INTRINSIC_INTERP_MODE = 9,
+ /**
+ * A binary nir_op to use when performing a reduction or scan operation
+ */
+ NIR_INTRINSIC_REDUCTION_OP = 10,
+
+ /**
+ * Cluster size for reduction operations
+ */
+ NIR_INTRINSIC_CLUSTER_SIZE = 11,
+
NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
+/**
+ * Number of components expected in source \p srcn of \p intr.
+ *
+ * A zero entry in the info table means the source width follows the
+ * instruction's own num_components.
+ */
+static inline unsigned
+nir_intrinsic_src_components(nir_intrinsic_instr *intr, unsigned srcn)
+{
+   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
+   assert(srcn < info->num_srcs);
+   const unsigned fixed = info->src_components[srcn];
+   return fixed ? fixed : intr->num_components;
+}
+
+/**
+ * Number of components written by \p intr's destination, or 0 when the
+ * intrinsic has no destination.  A zero entry in the info table means the
+ * destination width follows the instruction's own num_components.
+ */
+static inline unsigned
+nir_intrinsic_dest_components(nir_intrinsic_instr *intr)
+{
+   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
+   if (!info->has_dest)
+      return 0;
+
+   return info->dest_components ? info->dest_components
+                                : intr->num_components;
+}
#define INTRINSIC_IDX_ACCESSORS(name, flag, type) \
static inline type \
INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
+INTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned)
+INTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned)
/**
* \group texture information
/** lowers ffract to fsub+ffloor: */
bool lower_ffract;
+ bool lower_ldexp;
+
bool lower_pack_half_2x16;
bool lower_pack_unorm_2x16;
bool lower_pack_snorm_2x16;
bool lower_cs_local_index_from_id;
+ bool lower_device_index_to_zero;
+
/**
* Should nir_lower_io() create load_interpolated_input intrinsics?
*
nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);
+nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
+
/**
* NIR Cursors and Instruction Insertion API
* @{
nir_instr_insert(nir_after_cf_list(list), after);
}
-void nir_instr_remove(nir_instr *instr);
+void nir_instr_remove_v(nir_instr *instr);
+
+/**
+ * Removes \p instr from its block and returns a cursor at the position it
+ * used to occupy, so replacement instructions can be inserted there.
+ */
+static inline nir_cursor
+nir_instr_remove(nir_instr *instr)
+{
+   nir_instr *before = nir_instr_prev(instr);
+   nir_cursor cursor = before ? nir_after_instr(before)
+                              : nir_before_block(instr->block);
+   nir_instr_remove_v(instr);
+   return cursor;
+}
/** @} */
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
-bool nir_lower_samplers(nir_shader *shader,
- const struct gl_shader_program *shader_program);
-bool nir_lower_samplers_as_deref(nir_shader *shader,
- const struct gl_shader_program *shader_program);
-
typedef struct nir_lower_subgroups_options {
uint8_t subgroup_size; /* subgroup (wave) size the lowering assumes -- TODO confirm */
uint8_t ballot_bit_size; /* bit width of a ballot mask value -- TODO confirm */
bool lower_to_scalar:1;
bool lower_vote_trivial:1;
+ bool lower_vote_eq_to_ballot:1; /* NOTE(review): presumably lowers vote_*eq via a ballot -- verify */
bool lower_subgroup_masks:1;
+ bool lower_shuffle:1;
+ bool lower_shuffle_to_32bit:1; /* NOTE(review): presumably splits wide shuffles into 32-bit pieces -- verify */
+ bool lower_quad:1;
} nir_lower_subgroups_options;
bool nir_lower_subgroups(nir_shader *shader,
void nir_lower_tes_patch_vertices(nir_shader *tes, unsigned patch_vertices);
typedef struct nir_lower_wpos_ytransform_options {
- int state_tokens[5];
+ gl_state_index16 state_tokens[STATE_LENGTH];
bool fs_coord_origin_upper_left :1;
bool fs_coord_origin_lower_left :1;
bool fs_coord_pixel_center_integer :1;
bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
typedef struct nir_lower_drawpixels_options {
- int texcoord_state_tokens[5];
- int scale_state_tokens[5];
- int bias_state_tokens[5];
+ gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
+ gl_state_index16 scale_state_tokens[STATE_LENGTH];
+ gl_state_index16 bias_state_tokens[STATE_LENGTH];
unsigned drawpix_sampler;
unsigned pixelmap_sampler;
bool pixel_maps :1;
void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
-bool nir_lower_atomics(nir_shader *shader,
- const struct gl_shader_program *shader_program);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
-bool nir_lower_uniforms_to_ubo(nir_shader *shader);
bool nir_lower_to_source_mods(nir_shader *shader);
bool nir_lower_gs_intrinsics(nir_shader *shader);
bool nir_opt_move_comparisons(nir_shader *shader);
+bool nir_opt_move_load_ubo(nir_shader *shader);
+
bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);
bool nir_opt_remove_phis(nir_shader *shader);
+bool nir_opt_shrink_load(nir_shader *shader);
+
bool nir_opt_trivial_continues(nir_shader *shader);
bool nir_opt_undef(nir_shader *shader);