extern "C" {
#endif
-struct gl_program;
-struct gl_shader_program;
-
#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
*/
unsigned fb_fetch_output:1;
+ /**
+ * Non-zero if this variable is considered bindless as defined by
+ * ARB_bindless_texture.
+ */
+ unsigned bindless:1;
+
+ /**
+ * Was an explicit binding set in the shader?
+ */
+ unsigned explicit_binding:1;
+
/**
* \brief Layout qualifier for gl_FragDepth.
*
/** Index into the live_in and live_out bitfields */
unsigned live_index;
+ /** Instruction which produces this SSA value. */
nir_instr *parent_instr;
/** set of nir_instrs where this register is used (read from) */
typedef struct nir_src {
union {
+ /** Instruction that consumes this value as a source. */
nir_instr *parent_instr;
struct nir_if *parent_if;
};
return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}
+/** Returns the number of vector components carried by this source,
+ * taken from the SSA def or the referenced register as appropriate.
+ */
+static inline unsigned
+nir_src_num_components(nir_src src)
+{
+   return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
+}
+
+/** Returns the bit size of the value written to this destination. */
static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}
+/** Returns the number of vector components written to this destination,
+ * taken from the SSA def or the referenced register as appropriate.
+ */
+static inline unsigned
+nir_dest_num_components(nir_dest dest)
+{
+   return dest.is_ssa ? dest.ssa.num_components : dest.reg.reg->num_components;
+}
+
void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
struct nir_function *callee;
} nir_call_instr;
-#define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
- num_variables, num_indices, idx0, idx1, idx2, flags) \
- nir_intrinsic_##name,
-
-#define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
-
-typedef enum {
#include "nir_intrinsics.h"
- nir_num_intrinsics = nir_last_intrinsic + 1
-} nir_intrinsic_op;
#define NIR_INTRINSIC_MAX_CONST_INDEX 3
extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
+/** Returns the number of components read by source \p srcn of \p intr.
+ *
+ * A zero entry in the intrinsic-info table means "variable width": the
+ * source inherits the instruction's own num_components instead.
+ */
+static inline unsigned
+nir_intrinsic_src_components(nir_intrinsic_instr *intr, unsigned srcn)
+{
+   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
+   assert(srcn < info->num_srcs);
+   if (info->src_components[srcn])
+      return info->src_components[srcn];
+   else
+      return intr->num_components;
+}
+
+
+/** Returns the number of components written by \p intr's destination.
+ *
+ * Returns 0 for intrinsics with no destination.  A zero dest_components
+ * entry in the info table means the destination width is variable and
+ * follows the instruction's num_components.
+ */
+static inline unsigned
+nir_intrinsic_dest_components(nir_intrinsic_instr *intr)
+{
+   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
+   if (!info->has_dest)
+      return 0;
+   else if (info->dest_components)
+      return info->dest_components;
+   else
+      return intr->num_components;
+}
#define INTRINSIC_IDX_ACCESSORS(name, flag, type) \
static inline type \
}
}
+/** Returns true if \p instr produces a boolean result.
+ *
+ * Note this is broader than the name suggests: besides the ordered
+ * float/int comparisons it also accepts the boolean-producing
+ * conversions and negations (i2b, f2b, inot, fnot).
+ */
+static inline bool
+nir_alu_instr_is_comparison(const nir_alu_instr *instr)
+{
+   switch (instr->op) {
+   case nir_op_flt:
+   case nir_op_fge:
+   case nir_op_feq:
+   case nir_op_fne:
+   case nir_op_ilt:
+   case nir_op_ult:
+   case nir_op_ige:
+   case nir_op_uge:
+   case nir_op_ieq:
+   case nir_op_ine:
+   case nir_op_i2b:
+   case nir_op_f2b:
+   case nir_op_inot:
+   case nir_op_fnot:
+      return true;
+   default:
+      return false;
+   }
+}
+
+
static inline nir_alu_type
nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src)
{
bool lower_fsqrt;
bool lower_fmod32;
bool lower_fmod64;
+ /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
bool lower_bitfield_extract;
+ /** Lowers ibitfield_extract/ubitfield_extract to bfm, compares, shifts. */
+ bool lower_bitfield_extract_to_shifts;
+ /** Lowers bitfield_insert to bfi/bfm */
bool lower_bitfield_insert;
+ /** Lowers bitfield_insert to bfm, compares, and shifts. */
+ bool lower_bitfield_insert_to_shifts;
+ /** Lowers bitfield_reverse to shifts. */
+ bool lower_bitfield_reverse;
+ /** Lowers bit_count to shifts. */
+ bool lower_bit_count;
+ /** Lowers bfm to shifts and subtracts. */
+ bool lower_bfm;
+ /** Lowers ifind_msb to compare and ufind_msb */
+ bool lower_ifind_msb;
+ /** Lowers find_lsb to ufind_msb and logic ops */
+ bool lower_find_lsb;
bool lower_uadd_carry;
bool lower_usub_borrow;
+ /** Lowers imul_high/umul_high to 16-bit multiplies and carry operations. */
+ bool lower_mul_high;
/** lowers fneg and ineg to fsub and isub. */
bool lower_negate;
/** lowers fsub and isub to fadd+fneg and iadd+ineg. */
/** enables rules to lower idiv by power-of-two: */
bool lower_idiv;
+ /* lower b2f to iand */
+ bool lower_b2f;
+
/* Does the native fdot instruction replicate its result for four
* components? If so, then opt_algebraic_late will turn all fdotN
* instructions into fdot_replicatedN instructions.
/* Indicates that the driver only has zero-based vertex id */
bool vertex_id_zero_based;
+ /**
+ * If enabled, gl_BaseVertex will be lowered as:
+ * is_indexed_draw (~0/0) & firstvertex
+ */
+ bool lower_base_vertex;
+
bool lower_cs_local_index_from_id;
bool lower_device_index_to_zero;
nir_instr_insert(nir_after_cf_list(list), after);
}
-void nir_instr_remove(nir_instr *instr);
+void nir_instr_remove_v(nir_instr *instr);
+
+/** Removes \p instr from its block and returns a cursor at the removal
+ * point: after the previous instruction if one exists, otherwise at the
+ * start of the (now possibly empty) block.  The cursor lets callers
+ * insert replacement instructions exactly where the old one sat.
+ */
+static inline nir_cursor
+nir_instr_remove(nir_instr *instr)
+{
+   nir_cursor cursor;
+   nir_instr *prev = nir_instr_prev(instr);
+   if (prev) {
+      cursor = nir_after_instr(prev);
+   } else {
+      cursor = nir_before_block(instr->block);
+   }
+   nir_instr_remove_v(instr);
+   return cursor;
+}
/** @} */
bool nir_lower_constant_initializers(nir_shader *shader,
nir_variable_mode modes);
+bool nir_move_load_const(nir_shader *shader);
bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
bool alpha_to_one);
+bool nir_lower_alu(nir_shader *shader);
bool nir_lower_alu_to_scalar(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
-bool nir_lower_samplers(nir_shader *shader,
- const struct gl_shader_program *shader_program);
-bool nir_lower_samplers_as_deref(nir_shader *shader,
- const struct gl_shader_program *shader_program);
-
typedef struct nir_lower_subgroups_options {
uint8_t subgroup_size;
uint8_t ballot_bit_size;
bool lower_vote_eq_to_ballot:1;
bool lower_subgroup_masks:1;
bool lower_shuffle:1;
+ bool lower_shuffle_to_32bit:1;
bool lower_quad:1;
} nir_lower_subgroups_options;
void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
-bool nir_lower_atomics(nir_shader *shader,
- const struct gl_shader_program *shader_program);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
bool nir_lower_to_source_mods(nir_shader *shader);
bool nir_lower_gs_intrinsics(nir_shader *shader);
+typedef unsigned (*nir_lower_bit_size_callback)(const nir_alu_instr *, void *);
+
+bool nir_lower_bit_size(nir_shader *shader,
+ nir_lower_bit_size_callback callback,
+ void *callback_data);
+
typedef enum {
nir_lower_imul64 = (1 << 0),
nir_lower_isign64 = (1 << 1),
} nir_lower_doubles_options;
bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
-bool nir_lower_64bit_pack(nir_shader *shader);
+bool nir_lower_pack(nir_shader *shader);
bool nir_normalize_cubemap_coords(nir_shader *shader);