*
* \sa nir_variable_mode
*/
- nir_variable_mode mode:11;
+ unsigned mode:11;
/**
* Is the variable read-only?
unsigned per_view:1;
/**
- * \brief Layout qualifier for gl_FragDepth.
+ * \brief Layout qualifier for gl_FragDepth. See nir_depth_layout.
*
* This is not equal to \c nir_depth_layout_none if and only if this
* variable is \c gl_FragDepth and a layout qualifier is specified.
*/
- nir_depth_layout depth_layout:3;
+ unsigned depth_layout:3;
/**
* Vertex stream output identifier.
unsigned stream:9;
/**
+ * See gl_access_qualifier.
+ *
* Access flags for memory variables (SSBO/global), image uniforms, and
* bindless images in uniforms/inputs/outputs.
*/
- enum gl_access_qualifier access:8;
+ unsigned access:8;
/**
* Descriptor set binding for sampler or UBO.
case nir_op_flt:
case nir_op_fge:
case nir_op_feq:
- case nir_op_fne:
+ case nir_op_fneu:
case nir_op_ilt:
case nir_op_ult:
case nir_op_ige:
const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val; \
+} \
+static inline bool \
+nir_intrinsic_has_##name(const nir_intrinsic_instr *instr) \
+{ \
+ const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
+ return info->index_map[NIR_INTRINSIC_##flag] > 0; \
}
INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
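/* A minimal usage sketch: with the accessors generated above for write_mask,
 * a pass can test whether the index is present before reading it instead of
 * hard-coding which intrinsics carry a write mask (write_mask_or_full() is a
 * hypothetical helper, not part of NIR):
 *
 *    static unsigned
 *    write_mask_or_full(nir_intrinsic_instr *intrin, unsigned num_components)
 *    {
 *       if (nir_intrinsic_has_write_mask(intrin))
 *          return nir_intrinsic_write_mask(intrin);
 *       return (1u << num_components) - 1;
 *    }
 */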
* NIR loop is implemented as "while (1) { body }".
*/
nir_jump_continue,
+
+ /** Jumps for an unstructured CFG.
+ *
+ * Since we can't rely on block ordering within an unstructured CFG, every
+ * block has to end in an explicit jump.
+ */
+ nir_jump_goto,
+ nir_jump_goto_if,
} nir_jump_type;
typedef struct {
nir_instr instr;
nir_jump_type type;
+ /** Branch condition; only valid for nir_jump_goto_if. */
+ nir_src condition;
+ /** Jump target, or the block taken when a goto_if condition is true. */
+ struct nir_block *target;
+ /** Block taken when a goto_if condition is false. */
+ struct nir_block *else_target;
} nir_jump_instr;
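/* A sketch of how a block in an unstructured impl could be terminated with a
 * conditional jump; cond, then_block and else_block are assumed to exist,
 * while nir_jump_instr_create(), nir_src_for_ssa() and
 * nir_instr_insert_after_block() are existing NIR helpers:
 *
 *    nir_jump_instr *jump = nir_jump_instr_create(shader, nir_jump_goto_if);
 *    jump->condition = nir_src_for_ssa(cond);
 *    jump->target = then_block;
 *    jump->else_target = else_block;
 *    nir_instr_insert_after_block(block, &jump->instr);
 */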
/* creates a new SSA variable in an undefined state */
/* total number of basic blocks, only valid when block_index_dirty = false */
unsigned num_blocks;
+ /** True if this nir_function_impl uses structured control flow
+ *
+ * Structured nir_function_impls have different validation rules.
+ */
+ bool structured;
+
nir_metadata valid_metadata;
} nir_function_impl;
/** lowers fsub and isub to fadd+fneg and iadd+ineg. */
bool lower_sub;
- /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
+ /* lower {slt,sge,seq,sne} to {flt,fge,feq,fneu} + b2f: */
bool lower_scmp;
/* lower fall_equalN/fany_nequalN (ex:fany_nequal4 to sne+fdot4+fsat) */
bool lower_pack_snorm_2x16;
bool lower_pack_unorm_4x8;
bool lower_pack_snorm_4x8;
+ bool lower_pack_64_2x32_split;
+ bool lower_pack_32_2x16_split;
bool lower_unpack_half_2x16;
bool lower_unpack_unorm_2x16;
bool lower_unpack_snorm_2x16;
bool lower_unpack_unorm_4x8;
bool lower_unpack_snorm_4x8;
+ bool lower_unpack_64_2x32_split;
+ bool lower_unpack_32_2x16_split;
bool lower_pack_split;
bool lower_cs_local_index_from_id;
bool lower_cs_local_id_from_index;
+ /* Prevents lowering global_invocation_id to an expression in terms of
+ * work_group_id, the work-group size, and local_invocation_id. */
+ bool has_cs_global_id;
+
bool lower_device_index_to_zero;
/* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */
/** Constant data associated with this shader.
*
- * Constant data is loaded through load_constant intrinsics. See also
- * nir_opt_large_constants.
+ * Constant data is loaded through load_constant intrinsics (as opposed to
+ * NIR load_const instructions, which have the constant value inlined into
+ * them). This is usually generated by nir_opt_large_constants, so that
+ * shaders don't have to copy a large constant array into a temporary with
+ * load_const instructions just to index it indirectly.
*/
void *constant_data;
+ /** Size of the constant data associated with the shader, in bytes */
unsigned constant_data_size;
} nir_shader;
nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
+
+/** Returns the next block, disregarding structure
+ *
+ * The ordering is deterministic but has no guarantees beyond that. In
+ * particular, it is not guaranteed to be dominance-preserving.
+ */
+nir_block *nir_block_unstructured_next(nir_block *block);
+nir_block *nir_unstructured_start_block(nir_function_impl *impl);
+
+#define nir_foreach_block_unstructured(block, impl) \
+ for (nir_block *block = nir_unstructured_start_block(impl); block != NULL; \
+ block = nir_block_unstructured_next(block))
+
+#define nir_foreach_block_unstructured_safe(block, impl) \
+ for (nir_block *block = nir_unstructured_start_block(impl), \
+ *next = nir_block_unstructured_next(block); \
+ block != NULL; \
+ block = next, next = nir_block_unstructured_next(block))
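/* A minimal sketch of walking an impl that still uses goto/goto_if, e.g. in a
 * pass that runs before nir_lower_goto_ifs():
 *
 *    assert(!impl->structured);
 *    nir_foreach_block_unstructured(block, impl) {
 *       nir_foreach_instr(instr, block) {
 *          ...
 *       }
 *    }
 */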
+
/*
* finds the next basic block in source-code order, returns NULL if there is
* none
*/
nir_address_format_32bit_index_offset,
+ /**
+ * An address format which is a 64-bit value, where the high 32 bits
+ * are a buffer index, and the low 32 bits are an offset.
+ */
+ nir_address_format_32bit_index_offset_pack64,
+
/**
* An address format which is comprised of a vec3 where the first two
* components specify the buffer and the third is an offset.
*/
nir_address_format_32bit_offset,
+ /**
+ * An address format which is a simple 32-bit offset cast to 64-bit.
+ */
+ nir_address_format_32bit_offset_as_64bit,
+
/**
* An address format representing a purely logical addressing model. In
* this model, all deref chains must be complete from the dereference
case nir_address_format_64bit_global: return 64;
case nir_address_format_64bit_bounded_global: return 32;
case nir_address_format_32bit_index_offset: return 32;
+ case nir_address_format_32bit_index_offset_pack64: return 64;
case nir_address_format_vec2_index_32bit_offset: return 32;
case nir_address_format_32bit_offset: return 32;
+ case nir_address_format_32bit_offset_as_64bit: return 64;
case nir_address_format_logical: return 32;
}
unreachable("Invalid address format");
case nir_address_format_64bit_global: return 1;
case nir_address_format_64bit_bounded_global: return 4;
case nir_address_format_32bit_index_offset: return 2;
+ case nir_address_format_32bit_index_offset_pack64: return 1;
case nir_address_format_vec2_index_32bit_offset: return 3;
case nir_address_format_32bit_offset: return 1;
+ case nir_address_format_32bit_offset_as_64bit: return 1;
case nir_address_format_logical: return 1;
}
unreachable("Invalid address format");
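/* For illustration: with nir_address_format_32bit_index_offset_pack64 the
 * whole address is a single 64-bit scalar (1 component of 64 bits, as
 * reported above), so a backend would recover the two halves roughly as:
 *
 *    uint32_t index  = addr >> 32;
 *    uint32_t offset = addr & 0xffffffffu;
 */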
bool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask);
bool nir_lower_fragcolor(nir_shader *shader);
-void nir_lower_fragcoord_wtrans(nir_shader *shader);
+bool nir_lower_fragcoord_wtrans(nir_shader *shader);
void nir_lower_viewport_transform(nir_shader *shader);
bool nir_lower_uniforms_to_ubo(nir_shader *shader, int multiplier);
unsigned lower_xy_uxvx_external;
unsigned lower_ayuv_external;
unsigned lower_xyuv_external;
+ /* Bitmasks of samplers whose YUV lowering should use the BT.709 or
+ * BT.2020 colorspace conversion. */
+ unsigned bt709_external;
+ unsigned bt2020_external;
/**
* To emulate certain texture wrap modes, this can be used
bool nir_lower_idiv(nir_shader *shader, enum nir_lower_idiv_path path);
-bool nir_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);
+typedef struct nir_input_attachment_options {
+ bool use_fragcoord_sysval;
+ bool use_layer_id_sysval;
+ bool use_view_id_for_layer;
+} nir_input_attachment_options;
+
+bool nir_lower_input_attachments(nir_shader *shader,
+ const nir_input_attachment_options *options);
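/* Example call, a sketch with driver-dependent values:
 *
 *    const nir_input_attachment_options opts = {
 *       .use_fragcoord_sysval = true,
 *       .use_layer_id_sysval = true,
 *    };
 *    nir_lower_input_attachments(shader, &opts);
 */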
bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables,
bool use_vars,
void *callback_data);
nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
-bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
+bool nir_lower_int64(nir_shader *shader);
nir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode);
bool nir_lower_doubles(nir_shader *shader, const nir_shader *softfp64,
bool nir_lower_memory_model(nir_shader *shader);
+bool nir_lower_goto_ifs(nir_shader *shader);
+
bool nir_normalize_cubemap_coords(nir_shader *shader);
void nir_live_ssa_defs_impl(nir_function_impl *impl);
bool nir_opt_remove_phis(nir_shader *shader);
bool nir_opt_remove_phis_block(nir_block *block);
-bool nir_opt_shrink_load(nir_shader *shader);
+bool nir_opt_shrink_vectors(nir_shader *shader);
bool nir_opt_trivial_continues(nir_shader *shader);
nir_should_vectorize_mem_func callback,
nir_variable_mode robust_modes);
-void nir_strip(nir_shader *shader);
-
void nir_sweep(nir_shader *shader);
void nir_remap_dual_slot_attributes(nir_shader *shader,