/**
 * Storage mode of a nir_variable.
 *
 * Each mode is a distinct bit so that modes can be combined into a bitmask
 * (nir_var_all covers them all) when passes filter which variables to touch.
 */
typedef enum {
   nir_var_shader_in       = (1 << 0),
   nir_var_shader_out      = (1 << 1),
   nir_var_shader_temp     = (1 << 2),
   nir_var_function_temp   = (1 << 3),
   nir_var_uniform         = (1 << 4),
   nir_var_mem_ubo         = (1 << 5),
   nir_var_system_value    = (1 << 6),
   nir_var_mem_ssbo        = (1 << 7),
   nir_var_mem_shared      = (1 << 8),
   nir_var_mem_global      = (1 << 9),
   nir_var_all             = ~0,
} nir_variable_mode;
*/
unsigned interpolation:2;
- /**
- * \name ARB_fragment_coord_conventions
- * @{
- */
- unsigned origin_upper_left:1;
- unsigned pixel_center_integer:1;
- /*@}*/
-
/**
* If non-zero, then this variable may be packed along with other variables
* into a single varying slot, so this offset should be applied when
static inline bool
nir_variable_is_global(const nir_variable *var)
{
- return var->data.mode != nir_var_function;
+ return var->data.mode != nir_var_function_temp;
}
typedef struct nir_register {
nir_alu_type input_types[NIR_MAX_VEC_COMPONENTS];
nir_op_algebraic_property algebraic_properties;
+
+ /* Whether this represents a numeric conversion opcode */
+ bool is_conversion;
} nir_op_info;
extern const nir_op_info nir_op_infos[nir_num_opcodes];
*/
unsigned dest_components;
+ /** bitfield of legal bit sizes */
+ unsigned dest_bit_sizes;
+
/** the number of constant indices used by the intrinsic */
unsigned num_indices;
typedef struct {
nir_if *nif;
+ /** Instruction that generates nif::condition. */
nir_instr *conditional_instr;
+ /** Block within ::nif that has the break instruction. */
nir_block *break_block;
+
+ /** Last block for the then- or else-path that does not contain the break. */
nir_block *continue_from_block;
+ /** True when ::break_block is in the else-path of ::nif. */
bool continue_from_then;
struct list_head loop_terminator_link;
bool is_entrypoint;
} nir_function;
/**
 * Bitmask selecting which 64-bit integer operations nir_lower_int64()
 * should lower to 32-bit sequences.  One bit per opcode family.
 */
typedef enum {
   nir_lower_imul64 = (1 << 0),
   nir_lower_isign64 = (1 << 1),
   /** Lower all int64 modulus and division opcodes */
   nir_lower_divmod64 = (1 << 2),
   /** Lower all 64-bit umul_high and imul_high opcodes */
   nir_lower_imul_high64 = (1 << 3),
   nir_lower_mov64 = (1 << 4),
   nir_lower_icmp64 = (1 << 5),
   nir_lower_iadd64 = (1 << 6),
   nir_lower_iabs64 = (1 << 7),
   nir_lower_ineg64 = (1 << 8),
   nir_lower_logic64 = (1 << 9),
   nir_lower_minmax64 = (1 << 10),
   nir_lower_shift64 = (1 << 11),
   nir_lower_imul_2x32_64 = (1 << 12),
} nir_lower_int64_options;
+
/**
 * Bitmask selecting which double-precision operations nir_lower_doubles()
 * should lower.  nir_lower_fp64_full_software requests a full soft-float
 * implementation rather than per-op lowering.
 */
typedef enum {
   nir_lower_drcp = (1 << 0),
   nir_lower_dsqrt = (1 << 1),
   nir_lower_drsq = (1 << 2),
   nir_lower_dtrunc = (1 << 3),
   nir_lower_dfloor = (1 << 4),
   nir_lower_dceil = (1 << 5),
   nir_lower_dfract = (1 << 6),
   nir_lower_dround_even = (1 << 7),
   nir_lower_dmod = (1 << 8),
   nir_lower_fp64_full_software = (1 << 9),
} nir_lower_doubles_options;
+
typedef struct nir_shader_compiler_options {
bool lower_fdiv;
bool lower_ffma;
/** enables rules to lower idiv by power-of-two: */
bool lower_idiv;
+ /** enables rules to lower isign to imin+imax */
+ bool lower_isign;
+
/* Does the native fdot instruction replicate its result for four
* components? If so, then opt_algebraic_late will turn all fdotN
* instructions into fdot_replicatedN instructions.
/* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */
bool lower_wpos_pntc;
+ bool lower_hadd;
+ bool lower_add_sat;
+
/**
* Should nir_lower_io() create load_interpolated_input intrinsics?
*
*/
bool use_interpolated_input_intrinsics;
+ /* Lowers when 32x32->64 bit multiplication is not supported */
+ bool lower_mul_2x32_64;
+
unsigned max_unroll_iterations;
+
+ nir_lower_int64_options lower_int64_options;
+ nir_lower_doubles_options lower_doubles_options;
} nir_shader_compiler_options;
typedef struct nir_shader {
/** Appends a function_temp (stack-local) variable to the impl's local list.
 * Asserting the mode here catches variables that belong in the shader-level
 * lists instead.
 */
static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
   assert(var->data.mode == nir_var_function_temp);
   exec_list_push_tail(&impl->locals, &var->node);
}
static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
-static inline bool should_skip_nir(const char *pass_name) { return false; }
+static inline bool should_skip_nir(UNUSED const char *pass_name) { return false; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_serialize_deserialize_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
nir_lower_io_options);
typedef enum {
+ /**
+ * An address format which is a simple 32-bit global GPU address.
+ */
+ nir_address_format_32bit_global,
+
+ /**
+ * An address format which is a simple 64-bit global GPU address.
+ */
+ nir_address_format_64bit_global,
+
/**
* An address format which is comprised of a vec2 where the first
* component is a vulkan descriptor index and the second is an offset.
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);

/* NOTE(review): by its name this rewrites uniform access as UBO access with
 * `multiplier` scaling the offsets — confirm against the pass implementation. */
bool nir_lower_uniforms_to_ubo(nir_shader *shader, int multiplier);
typedef struct nir_lower_subgroups_options {
uint8_t subgroup_size;
uint8_t ballot_bit_size;
unsigned lower_yx_xuxv_external;
unsigned lower_xy_uxvx_external;
unsigned lower_ayuv_external;
+ unsigned lower_xyuv_external;
/**
* To emulate certain texture wrap modes, this can be used
*/
uint8_t swizzles[32][4];
+ /* Can be used to scale sampled values in range required by the format. */
+ float scale_factors[32];
+
/**
* Bitmap of textures that need srgb to linear conversion. If
* (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
*/
bool lower_txd_offset_clamp;
+ /**
+ * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
+ * sampler index is not statically determinable to be less than 16.
+ */
+ bool lower_txd_clamp_if_sampler_index_not_lt_16;
+
/**
* If true, apply a .bagr swizzle on tg4 results to handle Broadcom's
* mixed-up tg4 locations.
/** Selects which source-modifier lowerings nir_lower_to_source_mods applies. */
typedef enum {
   nir_lower_int_source_mods = 1 << 0,
   nir_lower_float_source_mods = 1 << 1,
   nir_lower_triop_abs = 1 << 2,
   /* Mask covering every flag above; keep in sync when adding flags. */
   nir_lower_all_source_mods = (1 << 3) - 1
} nir_lower_to_source_mods_flags;
nir_lower_bit_size_callback callback,
void *callback_data);
-typedef enum {
- nir_lower_imul64 = (1 << 0),
- nir_lower_isign64 = (1 << 1),
- /** Lower all int64 modulus and division opcodes */
- nir_lower_divmod64 = (1 << 2),
- /** Lower all 64-bit umul_high and imul_high opcodes */
- nir_lower_imul_high64 = (1 << 3),
- nir_lower_mov64 = (1 << 4),
- nir_lower_icmp64 = (1 << 5),
- nir_lower_iadd64 = (1 << 6),
- nir_lower_iabs64 = (1 << 7),
- nir_lower_ineg64 = (1 << 8),
- nir_lower_logic64 = (1 << 9),
- nir_lower_minmax64 = (1 << 10),
- nir_lower_shift64 = (1 << 11),
-} nir_lower_int64_options;
-
+nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
-typedef enum {
- nir_lower_drcp = (1 << 0),
- nir_lower_dsqrt = (1 << 1),
- nir_lower_drsq = (1 << 2),
- nir_lower_dtrunc = (1 << 3),
- nir_lower_dfloor = (1 << 4),
- nir_lower_dceil = (1 << 5),
- nir_lower_dfract = (1 << 6),
- nir_lower_dround_even = (1 << 7),
- nir_lower_dmod = (1 << 8),
- nir_lower_fp64_full_software = (1 << 9),
-} nir_lower_doubles_options;
-
+nir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode);
bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);