* by the type associated with the \c nir_variable. Constants may be
* scalars, vectors, or matrices.
*/
- nir_const_value values[NIR_MAX_MATRIX_COLUMNS][NIR_MAX_VEC_COMPONENTS];
+ nir_const_value values[NIR_MAX_VEC_COMPONENTS];
/* we could get this from the var->type but makes clone *much* easier to
* not have to care about the type.
case GLSL_TYPE_DOUBLE:
return nir_type_float64;
break;
- default:
- unreachable("unknown type");
+
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_ATOMIC_UINT:
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_ARRAY:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_SUBROUTINE:
+ case GLSL_TYPE_FUNCTION:
+ case GLSL_TYPE_ERROR:
+ return nir_type_invalid;
}
+
+ unreachable("unknown type");
}
static inline nir_alu_type
nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
nir_rounding_mode rnd);
+/** Return the nir_op that builds a vector with the given component count:
+ * nir_op_mov for a single component, nir_op_vec2..vec4 for 2-4 components.
+ * Any other count hits unreachable().
+ */
+static inline nir_op
+nir_op_vec(unsigned components)
+{
+   switch (components) {
+   case 1: return nir_op_mov;
+   case 2: return nir_op_vec2;
+   case 3: return nir_op_vec3;
+   case 4: return nir_op_vec4;
+   default: unreachable("bad component count");
+   }
+}
+
typedef enum {
- NIR_OP_IS_COMMUTATIVE = (1 << 0),
+ /**
+ * Operation where the first two sources are commutative.
+ *
+ * For 2-source operations, this is just mathematical commutativity. Some
+ * 3-source operations, like ffma, are only commutative in the first two
+ * sources.
+ */
+ NIR_OP_IS_2SRC_COMMUTATIVE = (1 << 0),
NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;
* it must ensure that the resulting value is bit-for-bit identical to the
* original.
*/
- bool exact;
+ bool exact:1;
+
+ /**
+ * Indicates that this instruction does not cause wrapping to occur, in the
+ * form of overflow or underflow.
+ */
+ bool no_signed_wrap:1;
+ bool no_unsigned_wrap:1;
nir_alu_dest dest;
nir_alu_src src[];
}
bool nir_deref_instr_has_indirect(nir_deref_instr *instr);
+bool nir_deref_instr_has_complex_use(nir_deref_instr *instr);
bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);
/**
* For store instructions, a writemask for the store.
*/
- NIR_INTRINSIC_WRMASK = 2,
+ NIR_INTRINSIC_WRMASK,
/**
* The stream-id for GS emit_vertex/end_primitive intrinsics.
*/
- NIR_INTRINSIC_STREAM_ID = 3,
+ NIR_INTRINSIC_STREAM_ID,
/**
* The clip-plane id for load_user_clip_plane intrinsic.
*/
- NIR_INTRINSIC_UCP_ID = 4,
+ NIR_INTRINSIC_UCP_ID,
/**
* The amount of data, starting from BASE, that this instruction may
* access. This is used to provide bounds if the offset is not constant.
*/
- NIR_INTRINSIC_RANGE = 5,
+ NIR_INTRINSIC_RANGE,
/**
* The Vulkan descriptor set for vulkan_resource_index intrinsic.
*/
- NIR_INTRINSIC_DESC_SET = 6,
+ NIR_INTRINSIC_DESC_SET,
/**
* The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
*/
- NIR_INTRINSIC_BINDING = 7,
+ NIR_INTRINSIC_BINDING,
/**
* Component offset.
*/
- NIR_INTRINSIC_COMPONENT = 8,
+ NIR_INTRINSIC_COMPONENT,
/**
* Interpolation mode (only meaningful for FS inputs).
*/
- NIR_INTRINSIC_INTERP_MODE = 9,
+ NIR_INTRINSIC_INTERP_MODE,
/**
* A binary nir_op to use when performing a reduction or scan operation
*/
- NIR_INTRINSIC_REDUCTION_OP = 10,
+ NIR_INTRINSIC_REDUCTION_OP,
/**
* Cluster size for reduction operations
*/
- NIR_INTRINSIC_CLUSTER_SIZE = 11,
+ NIR_INTRINSIC_CLUSTER_SIZE,
/**
* Parameter index for a load_param intrinsic
*/
- NIR_INTRINSIC_PARAM_IDX = 12,
+ NIR_INTRINSIC_PARAM_IDX,
/**
* Image dimensionality for image intrinsics
*
* One of GLSL_SAMPLER_DIM_*
*/
- NIR_INTRINSIC_IMAGE_DIM = 13,
+ NIR_INTRINSIC_IMAGE_DIM,
/**
* Non-zero if we are accessing an array image
*/
- NIR_INTRINSIC_IMAGE_ARRAY = 14,
+ NIR_INTRINSIC_IMAGE_ARRAY,
/**
* Image format for image intrinsics
*/
- NIR_INTRINSIC_FORMAT = 15,
+ NIR_INTRINSIC_FORMAT,
/**
* Access qualifiers for image and memory access intrinsics
*/
- NIR_INTRINSIC_ACCESS = 16,
+ NIR_INTRINSIC_ACCESS,
/**
* Alignment for offsets and addresses
*
* (X - align_offset) % align_mul == 0
*/
- NIR_INTRINSIC_ALIGN_MUL = 17,
- NIR_INTRINSIC_ALIGN_OFFSET = 18,
+ NIR_INTRINSIC_ALIGN_MUL,
+ NIR_INTRINSIC_ALIGN_OFFSET,
/**
* The Vulkan descriptor type for a vulkan_resource_[re]index intrinsic.
*/
- NIR_INTRINSIC_DESC_TYPE = 19,
+ NIR_INTRINSIC_DESC_TYPE,
+
+ /**
+ * The nir_alu_type of a uniform/input/output
+ */
+ NIR_INTRINSIC_TYPE,
+
+ /**
+ * The swizzle mask for the instructions
+ * SwizzleInvocationsAMD and SwizzleInvocationsMaskedAMD
+ */
+   NIR_INTRINSIC_SWIZZLE_MASK,
+
+   /* Separate source/dest access flags for copies.
+    *
+    * No explicit values here: the rest of this enum was just converted to
+    * implicit numbering, and (assuming the original numbering, WRMASK == 2,
+    * still holds) NIR_INTRINSIC_SWIZZLE_MASK already lands on 21, so a
+    * hard-coded "= 21" would make SRC_ACCESS alias SWIZZLE_MASK.  Let both
+    * flags take the next implicit values instead.
+    */
+   NIR_INTRINSIC_SRC_ACCESS,
+   NIR_INTRINSIC_DST_ACCESS,
+
   NIR_INTRINSIC_NUM_INDEX_FLAGS,
INTRINSIC_IDX_ACCESSORS(image_dim, IMAGE_DIM, enum glsl_sampler_dim)
INTRINSIC_IDX_ACCESSORS(image_array, IMAGE_ARRAY, bool)
INTRINSIC_IDX_ACCESSORS(access, ACCESS, enum gl_access_qualifier)
+INTRINSIC_IDX_ACCESSORS(src_access, SRC_ACCESS, enum gl_access_qualifier)
+INTRINSIC_IDX_ACCESSORS(dst_access, DST_ACCESS, enum gl_access_qualifier)
INTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned)
INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
INTRINSIC_IDX_ACCESSORS(desc_type, DESC_TYPE, unsigned)
+INTRINSIC_IDX_ACCESSORS(type, TYPE, nir_alu_type)
+INTRINSIC_IDX_ACCESSORS(swizzle_mask, SWIZZLE_MASK, unsigned)
static inline void
nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
void nir_rewrite_image_intrinsic(nir_intrinsic_instr *instr,
nir_ssa_def *handle, bool bindless);
+/* Determine if an intrinsic can be arbitrarily reordered and eliminated. */
+static inline bool
+nir_intrinsic_can_reorder(nir_intrinsic_instr *instr)
+{
+   if (instr->intrinsic == nir_intrinsic_load_deref ||
+       instr->intrinsic == nir_intrinsic_load_ssbo ||
+       instr->intrinsic == nir_intrinsic_bindless_image_load ||
+       instr->intrinsic == nir_intrinsic_image_deref_load ||
+       instr->intrinsic == nir_intrinsic_image_load) {
+      /* For these memory loads, reorderability is a per-instruction
+       * property carried in the ACCESS index, not a static property of
+       * the opcode.
+       */
+      return nir_intrinsic_access(instr) & ACCESS_CAN_REORDER;
+   } else {
+      /* Everything else: consult the static per-opcode info table; the
+       * intrinsic must be both eliminable and reorderable.
+       */
+      const nir_intrinsic_info *info =
+         &nir_intrinsic_infos[instr->intrinsic];
+      return (info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
+             (info->flags & NIR_INTRINSIC_CAN_REORDER);
+   }
+}
+
/**
* \group texture information
*
case nir_op_i2b1:
case nir_op_f2b1:
case nir_op_inot:
- case nir_op_fnot:
return true;
default:
return false;
bool lower_fpow;
bool lower_fsat;
bool lower_fsqrt;
- bool lower_fmod16;
- bool lower_fmod32;
- bool lower_fmod64;
+ bool lower_fmod;
/** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
bool lower_bitfield_extract;
- /** Lowers ibitfield_extract/ubitfield_extract to bfm, compares, shifts. */
+ /** Lowers ibitfield_extract/ubitfield_extract to compares, shifts. */
bool lower_bitfield_extract_to_shifts;
/** Lowers bitfield_insert to bfi/bfm */
bool lower_bitfield_insert;
- /** Lowers bitfield_insert to bfm, compares, and shifts. */
+ /** Lowers bitfield_insert to compares, and shifts. */
bool lower_bitfield_insert_to_shifts;
+ /** Lowers bitfield_insert to bfm/bitfield_select. */
+ bool lower_bitfield_insert_to_bitfield_select;
/** Lowers bitfield_reverse to shifts. */
bool lower_bitfield_reverse;
/** Lowers bit_count to shifts. */
bool lower_bit_count;
- /** Lowers bfm to shifts and subtracts. */
- bool lower_bfm;
/** Lowers ifind_msb to compare and ufind_msb */
bool lower_ifind_msb;
/** Lowers find_lsb to ufind_msb and logic ops */
/** enables rules to lower idiv by power-of-two: */
bool lower_idiv;
+ /** enable rules to avoid bit shifts */
+ bool lower_bitshift;
+
/** enables rules to lower isign to imin+imax */
bool lower_isign;
bool lower_all_io_to_temps;
bool lower_all_io_to_elements;
- /**
- * Does the driver support real 32-bit integers? (Otherwise, integers
- * are simulated by floats.)
- */
- bool native_integers;
-
/* Indicates that the driver only has zero-based vertex id */
bool vertex_id_zero_based;
bool lower_hadd;
bool lower_add_sat;
+ /**
+ * Should IO be re-vectorized? Some scalar ISAs still operate on vec4's
+ * for IO purposes and would prefer loads/stores be vectorized.
+ */
+ bool vectorize_io;
+
/**
* Should nir_lower_io() create load_interpolated_input intrinsics?
*
/* Lowers when 32x32->64 bit multiplication is not supported */
bool lower_mul_2x32_64;
+ /* Lowers when the rotate instruction is not supported */
+ bool lower_rotate;
+
unsigned max_unroll_iterations;
nir_lower_int64_options lower_int64_options;
void nir_print_instr(const nir_instr *instr, FILE *fp);
void nir_print_deref(const nir_deref_instr *deref, FILE *fp);
+/** Shallow clone of a single ALU instruction. */
+nir_alu_instr *nir_alu_instr_clone(nir_shader *s, const nir_alu_instr *orig);
+
nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(nir_shader *shader,
const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
-nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);
+void nir_shader_replace(nir_shader *dest, nir_shader *src);
+
+void nir_shader_serialize_deserialize(nir_shader *s);
#ifndef NDEBUG
void nir_validate_shader(nir_shader *shader, const char *when);
nir_validate_shader(nir, "after " #pass); \
if (should_clone_nir()) { \
nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
- ralloc_free(nir); \
- nir = clone; \
+ nir_shader_replace(nir, clone); \
} \
if (should_serialize_deserialize_nir()) { \
- void *mem_ctx = ralloc_parent(nir); \
- nir = nir_shader_serialize_deserialize(mem_ctx, nir); \
+ nir_shader_serialize_deserialize(nir); \
} \
} while (0)
* component is a buffer index and the second is an offset.
*/
nir_address_format_32bit_index_offset,
+
+ /**
+ * An address format which is a simple 32-bit offset.
+ */
+ nir_address_format_32bit_offset,
+
+ /**
+ * An address format representing a purely logical addressing model. In
+ * this model, all deref chains must be complete from the dereference
+ * operation to the variable. Cast derefs are not allowed. These
+ * addresses will be 32-bit scalars but the format is immaterial because
+ * you can always chase the chain.
+ */
+ nir_address_format_logical,
} nir_address_format;
static inline unsigned
case nir_address_format_64bit_global: return 64;
case nir_address_format_64bit_bounded_global: return 32;
case nir_address_format_32bit_index_offset: return 32;
+ case nir_address_format_32bit_offset: return 32;
+ case nir_address_format_logical: return 32;
}
unreachable("Invalid address format");
}
case nir_address_format_64bit_global: return 1;
case nir_address_format_64bit_bounded_global: return 4;
case nir_address_format_32bit_index_offset: return 2;
+ case nir_address_format_32bit_offset: return 1;
+ case nir_address_format_logical: return 1;
}
unreachable("Invalid address format");
}
nir_address_format_num_components(addr_format));
}
+const nir_const_value *nir_address_format_null_value(nir_address_format addr_format);
+
+nir_ssa_def *nir_build_addr_ieq(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
+ nir_address_format addr_format);
+
+nir_ssa_def *nir_build_addr_isub(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
+ nir_address_format addr_format);
+
nir_ssa_def * nir_explicit_io_address_from_deref(struct nir_builder *b,
nir_deref_instr *deref,
nir_ssa_def *base_addr,
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
bool alpha_to_one);
bool nir_lower_alu(nir_shader *shader);
-bool nir_lower_alu_to_scalar(nir_shader *shader);
+
+bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
+ bool always_precise, bool have_ffma);
+
+bool nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set);
bool nir_lower_bool_to_float(nir_shader *shader);
bool nir_lower_bool_to_int32(nir_shader *shader);
+bool nir_lower_int_to_float(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader);
*/
bool lower_txd_clamp_if_sampler_index_not_lt_16;
+ /**
+ * If true, lower nir_texop_txs with a non-0-lod into nir_texop_txs with
+ * 0-lod followed by a nir_ishr.
+ */
+ bool lower_txs_lod;
+
/**
* If true, apply a .bagr swizzle on tg4 results to handle Broadcom's
* mixed-up tg4 locations.
bool nir_lower_idiv(nir_shader *shader);
+bool nir_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
+
bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
+/** Bitmask selecting which interpolation intrinsics nir_lower_interpolation
+ * should lower.
+ */
+typedef enum {
+   /* NOTE(review): bit 0 is unused here — confirm whether it is reserved
+    * for a future flag or was removed, before anyone relies on (1 << 0).
+    */
+   nir_lower_interpolation_at_sample = (1 << 1),
+   nir_lower_interpolation_at_offset = (1 << 2),
+   nir_lower_interpolation_centroid = (1 << 3),
+   nir_lower_interpolation_pixel = (1 << 4),
+   nir_lower_interpolation_sample = (1 << 5),
+} nir_lower_interpolation_options;
+
+bool nir_lower_interpolation(nir_shader *shader,
+ nir_lower_interpolation_options options);
+
bool nir_normalize_cubemap_coords(nir_shader *shader);
void nir_live_ssa_defs_impl(nir_function_impl *impl);
bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
bool indirect_load_ok, bool expensive_alu_ok);
+bool nir_opt_rematerialize_compares(nir_shader *shader);
+
bool nir_opt_remove_phis(nir_shader *shader);
bool nir_opt_shrink_load(nir_shader *shader);
bool nir_opt_undef(nir_shader *shader);
+bool nir_opt_vectorize(nir_shader *shader);
+
bool nir_opt_conditional_discard(nir_shader *shader);
void nir_strip(nir_shader *shader);
nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);
+bool nir_lower_sincos(nir_shader *shader);
+
+/** True if the variable is a uniform-block (UBO) member: UBO mode plus a
+ * non-NULL interface (block) type.
+ */
+static inline bool
+nir_variable_is_in_ubo(const nir_variable *var)
+{
+   return (var->data.mode == nir_var_mem_ubo &&
+           var->interface_type != NULL);
+}
+
+/** True if the variable is a shader-storage-block (SSBO) member: SSBO mode
+ * plus a non-NULL interface (block) type.
+ */
+static inline bool
+nir_variable_is_in_ssbo(const nir_variable *var)
+{
+   return (var->data.mode == nir_var_mem_ssbo &&
+           var->interface_type != NULL);
+}
+
+/** True if the variable is a member of any interface block (UBO or SSBO). */
+static inline bool
+nir_variable_is_in_block(const nir_variable *var)
+{
+   return nir_variable_is_in_ubo(var) || nir_variable_is_in_ssbo(var);
+}
+
#ifdef __cplusplus
} /* extern "C" */
#endif