X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;ds=sidebyside;f=src%2Fcompiler%2Fnir%2Fnir.h;h=497327eaca864c5274bcc449eb787d434a34f6ba;hb=f217a94542154cff8254e0ca5e5ca986c2394be3;hp=84d197028e52520e8471069dc671dc16b91a53fa;hpb=fa6417495c7096d337965345a0f2ad8a18115b19;p=mesa.git

diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 84d197028e5..497327eaca8 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -34,6 +34,7 @@
 #include "util/list.h"
 #include "util/ralloc.h"
 #include "util/set.h"
+#include "util/bitscan.h"
 #include "util/bitset.h"
 #include "util/macros.h"
 #include "compiler/nir_types.h"
@@ -117,6 +118,7 @@ typedef enum {
 } nir_rounding_mode;
 
 typedef union {
+   bool b[NIR_MAX_VEC_COMPONENTS];
    float f32[NIR_MAX_VEC_COMPONENTS];
    double f64[NIR_MAX_VEC_COMPONENTS];
    int8_t i8[NIR_MAX_VEC_COMPONENTS];
@@ -377,11 +379,7 @@ typedef struct nir_variable {
        * ARB_shader_image_load_store qualifiers.
        */
       struct {
-         bool read_only; /**< "readonly" qualifier. */
-         bool write_only; /**< "writeonly" qualifier. */
-         bool coherent;
-         bool _volatile;
-         bool restrict_flag;
+         enum gl_access_qualifier access;
 
          /** Image internal format if specified explicitly, otherwise GL_NONE. */
         GLenum format;
@@ -489,7 +487,7 @@ typedef struct nir_register {
 #define nir_foreach_register_safe(reg, reg_list) \
    foreach_list_typed_safe(nir_register, reg, node, reg_list)
 
-typedef enum {
+typedef enum PACKED {
    nir_instr_type_alu,
    nir_instr_type_deref,
    nir_instr_type_call,
@@ -504,16 +502,16 @@ typedef enum {
 
 typedef struct nir_instr {
    struct exec_node node;
-   nir_instr_type type;
    struct nir_block *block;
-
-   /** generic instruction index. */
-   unsigned index;
+   nir_instr_type type;
 
    /* A temporary for optimization and analysis passes to use for storing
    * flags. For instance, DCE uses this to store the "dead/live" info.
    */
    uint8_t pass_flags;
+
+   /** generic instruction index. */
+   unsigned index;
 } nir_instr;
 
 static inline nir_instr *
@@ -704,6 +702,22 @@ nir_src_num_components(nir_src src)
    return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
 }
 
+static inline bool
+nir_src_is_const(nir_src src)
+{
+   return src.is_ssa &&
+          src.ssa->parent_instr->type == nir_instr_type_load_const;
+}
+
+int64_t nir_src_as_int(nir_src src);
+uint64_t nir_src_as_uint(nir_src src);
+bool nir_src_as_bool(nir_src src);
+double nir_src_as_float(nir_src src);
+int64_t nir_src_comp_as_int(nir_src src, unsigned component);
+uint64_t nir_src_comp_as_uint(nir_src src, unsigned component);
+bool nir_src_comp_as_bool(nir_src src, unsigned component);
+double nir_src_comp_as_float(nir_src src, unsigned component);
+
 static inline unsigned
 nir_dest_bit_size(nir_dest dest)
 {
@@ -766,17 +780,25 @@ typedef struct {
    unsigned write_mask : NIR_MAX_VEC_COMPONENTS; /* ignored if dest.is_ssa is true */
 } nir_alu_dest;
 
+/** NIR sized and unsized types
+ *
+ * The values in this enum are carefully chosen so that the sized type is
+ * just the unsized type OR'd with the number of bits.
+ */
 typedef enum {
    nir_type_invalid = 0, /* Not a valid type */
-   nir_type_float,
-   nir_type_int,
-   nir_type_uint,
-   nir_type_bool,
+   nir_type_int = 2,
+   nir_type_uint = 4,
+   nir_type_bool = 6,
+   nir_type_float = 128,
+   nir_type_bool1 = 1 | nir_type_bool,
    nir_type_bool32 = 32 | nir_type_bool,
+   nir_type_int1 = 1 | nir_type_int,
    nir_type_int8 = 8 | nir_type_int,
    nir_type_int16 = 16 | nir_type_int,
    nir_type_int32 = 32 | nir_type_int,
    nir_type_int64 = 64 | nir_type_int,
+   nir_type_uint1 = 1 | nir_type_uint,
    nir_type_uint8 = 8 | nir_type_uint,
    nir_type_uint16 = 16 | nir_type_uint,
    nir_type_uint32 = 32 | nir_type_uint,
@@ -786,8 +808,8 @@ typedef enum {
    nir_type_float64 = 64 | nir_type_float,
 } nir_alu_type;
 
-#define NIR_ALU_TYPE_SIZE_MASK 0xfffffff8
-#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007
+#define NIR_ALU_TYPE_SIZE_MASK 0x79
+#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x86
 
 static inline unsigned
 nir_alu_type_get_type_size(nir_alu_type type)
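For illustration, the new encoding turns type queries into bit tests: the size
mask 0x79 selects the legal bit-size values (1 | 8 | 16 | 32 | 64) and the
base-type mask 0x86 selects the base-type values (2 | 4 | 128). A minimal
sketch, assuming only the definitions above (the check function itself is
hypothetical):

   #include <assert.h>

   static void
   check_alu_type_encoding(void)
   {
      /* nir_type_float32 == 128 | 32, so each mask recovers one half. */
      assert((nir_type_float32 & NIR_ALU_TYPE_SIZE_MASK) == 32);
      assert((nir_type_float32 & NIR_ALU_TYPE_BASE_TYPE_MASK) == nir_type_float);
   }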
@@ -806,7 +828,7 @@ nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type)
 {
    switch (base_type) {
    case GLSL_TYPE_BOOL:
-      return nir_type_bool32;
+      return nir_type_bool1;
       break;
    case GLSL_TYPE_UINT:
       return nir_type_uint32;
@@ -940,6 +962,19 @@ nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
    return (instr->dest.write_mask >> channel) & 1;
 }
 
+static inline nir_component_mask_t
+nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src)
+{
+   nir_component_mask_t read_mask = 0;
+   for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
+      if (!nir_alu_instr_channel_used(instr, src, c))
+         continue;
+
+      read_mask |= (1 << instr->src[src].swizzle[c]);
+   }
+   return read_mask;
+}
+
 /*
  * For instructions whose destinations are SSA, get the number of channels
  * used for a source
  */
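A worked example of nir_alu_instr_src_read_mask() (the instruction is
hypothetical, not from this patch): if an ALU instruction writes components x
and y and its source 0 carries the swizzle zwxx, channels 0 and 1 are used and
they read components 2 and 3, so the helper returns (1 << 2) | (1 << 3) == 0xc:

   /* instr->dest.write_mask == 0x3, instr->src[0].swizzle == { 2, 3, 0, 0 } */
   nir_component_mask_t mask = nir_alu_instr_src_read_mask(instr, 0);
   assert(mask == 0xc);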
@@ -1053,7 +1088,7 @@ typedef struct {
 
 #include "nir_intrinsics.h"
 
-#define NIR_INTRINSIC_MAX_CONST_INDEX 3
+#define NIR_INTRINSIC_MAX_CONST_INDEX 4
 
 /** Represents an intrinsic
  *
@@ -1201,6 +1236,40 @@ typedef enum {
    */
    NIR_INTRINSIC_PARAM_IDX = 12,
 
+   /**
+    * Image dimensionality for image intrinsics
+    *
+    * One of GLSL_SAMPLER_DIM_*
+    */
+   NIR_INTRINSIC_IMAGE_DIM = 13,
+
+   /**
+    * Non-zero if we are accessing an array image
+    */
+   NIR_INTRINSIC_IMAGE_ARRAY = 14,
+
+   /**
+    * Image format for image intrinsics
+    */
+   NIR_INTRINSIC_FORMAT = 15,
+
+   /**
+    * Access qualifiers for image intrinsics
+    */
+   NIR_INTRINSIC_ACCESS = 16,
+
+   /**
+    * Alignment for offsets and addresses
+    *
+    * These two parameters specify an alignment in terms of a multiplier and
+    * an offset. The offset or address parameter X of the intrinsic is
+    * guaranteed to satisfy the following:
+    *
+    *                (X - align_offset) % align_mul == 0
+    */
+   NIR_INTRINSIC_ALIGN_MUL = 17,
+   NIR_INTRINSIC_ALIGN_OFFSET = 18,
+
    NIR_INTRINSIC_NUM_INDEX_FLAGS,
 } nir_intrinsic_index_flag;
@@ -1269,7 +1338,7 @@ nir_intrinsic_##name(const nir_intrinsic_instr *instr) \
 { \
    const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
    assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
-   return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
+   return (type)instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
 } \
 static inline void \
 nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val) \
@@ -1291,6 +1360,38 @@ INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
 INTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned)
 INTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned)
 INTRINSIC_IDX_ACCESSORS(param_idx, PARAM_IDX, unsigned)
+INTRINSIC_IDX_ACCESSORS(image_dim, IMAGE_DIM, enum glsl_sampler_dim)
+INTRINSIC_IDX_ACCESSORS(image_array, IMAGE_ARRAY, bool)
+INTRINSIC_IDX_ACCESSORS(access, ACCESS, enum gl_access_qualifier)
+INTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned)
+INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
+INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
+
+static inline void
+nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
+                        unsigned align_mul, unsigned align_offset)
+{
+   assert(util_is_power_of_two_nonzero(align_mul));
+   assert(align_offset < align_mul);
+   nir_intrinsic_set_align_mul(intrin, align_mul);
+   nir_intrinsic_set_align_offset(intrin, align_offset);
+}
+
+/** Returns a simple alignment for a load/store intrinsic offset
+ *
+ * Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL
+ * and ALIGN_OFFSET parameters, this helper takes both into account and
+ * provides a single simple alignment parameter. The offset X is guaranteed
+ * to satisfy X % align == 0.
+ */
+static inline unsigned
+nir_intrinsic_align(const nir_intrinsic_instr *intrin)
+{
+   const unsigned align_mul = nir_intrinsic_align_mul(intrin);
+   const unsigned align_offset = nir_intrinsic_align_offset(intrin);
+   assert(align_offset < align_mul);
+   return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
+}
 
 /**
  * \group texture information
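A worked example of the two-parameter alignment encoding: a load known to land
4 bytes into a 16-byte-aligned block carries align_mul == 16 and
align_offset == 4, so every offset X satisfies (X - 4) % 16 == 0
(X = 4, 20, 36, ...). The largest power of two dividing all such X is 4, which
is exactly what nir_intrinsic_align() computes from the lowest set bit of
align_offset. A sketch, assuming `load` is some load/store intrinsic:

   nir_intrinsic_set_align(load, 16, 4);
   assert(nir_intrinsic_align(load) == 4);

   /* With align_offset == 0, the full align_mul is guaranteed. */
   nir_intrinsic_set_align(load, 16, 0);
   assert(nir_intrinsic_align(load) == 16);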
@@ -1306,6 +1407,7 @@ typedef enum {
    nir_tex_src_coord,
    nir_tex_src_projector,
    nir_tex_src_comparator, /* shadow comparator */
    nir_tex_src_offset,
    nir_tex_src_bias,
    nir_tex_src_lod,
+   nir_tex_src_min_lod,
    nir_tex_src_ms_index, /* MSAA sample index */
    nir_tex_src_ms_mcs, /* MSAA compression value */
    nir_tex_src_ddx,
@@ -1476,8 +1578,8 @@ nir_alu_instr_is_comparison(const nir_alu_instr *instr)
    case nir_op_uge:
    case nir_op_ieq:
    case nir_op_ine:
-   case nir_op_i2b:
-   case nir_op_f2b:
+   case nir_op_i2b1:
+   case nir_op_f2b1:
    case nir_op_inot:
    case nir_op_fnot:
       return true;
@@ -1753,6 +1855,13 @@ nir_block_last_instr(nir_block *block)
    return exec_node_data(nir_instr, tail, node);
 }
 
+static inline bool
+nir_block_ends_in_jump(nir_block *block)
+{
+   return !exec_list_is_empty(&block->instr_list) &&
+          nir_block_last_instr(block)->type == nir_instr_type_jump;
+}
+
 #define nir_foreach_instr(instr, block) \
    foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
 #define nir_foreach_instr_reverse(instr, block) \
@@ -1787,13 +1896,21 @@ typedef struct {
    /* Number of instructions in the loop */
    unsigned num_instructions;
 
-   /* How many times the loop is run (if known) */
-   unsigned trip_count;
-   bool is_trip_count_known;
+   /* Maximum number of times the loop is run (if known) */
+   unsigned max_trip_count;
+
+   /* Do we know the exact number of times the loop will be run? */
+   bool exact_trip_count_known;
 
    /* Unroll the loop regardless of its size */
    bool force_unroll;
 
+   /* Does the loop contain complex loop terminators, continues or other
+    * complex behaviours? If this is true, we can't rely on
+    * loop_terminator_list to be complete or accurate.
+    */
+   bool complex_loop;
+
    nir_loop_terminator *limiting_terminator;
 
    /* A list of loop_terminators terminating this loop. */
@@ -2010,18 +2127,21 @@ typedef struct nir_shader_compiler_options {
    /** enables rules to lower idiv by power-of-two: */
    bool lower_idiv;
 
-   /* lower b2f to iand */
-   bool lower_b2f;
-
    /* Does the native fdot instruction replicate its result for four
    * components? If so, then opt_algebraic_late will turn all fdotN
    * instructions into fdot_replicatedN instructions.
    */
    bool fdot_replicates;
 
+   /** lowers ffloor to fsub+ffract: */
+   bool lower_ffloor;
+
    /** lowers ffract to fsub+ffloor: */
    bool lower_ffract;
 
+   /** lowers fceil to fneg+ffloor+fneg: */
+   bool lower_fceil;
+
    bool lower_ldexp;
 
    bool lower_pack_half_2x16;
@@ -2070,6 +2190,7 @@ typedef struct nir_shader_compiler_options {
    bool lower_helper_invocation;
 
    bool lower_cs_local_index_from_id;
+   bool lower_cs_local_id_from_index;
 
    bool lower_device_index_to_zero;
 
@@ -2084,12 +2205,6 @@ typedef struct nir_shader_compiler_options {
    */
    bool use_interpolated_input_intrinsics;
 
-   /**
-    * Do vertex shader double inputs use two locations? The Vulkan spec
-    * requires two locations to be used, OpenGL allows a single location.
-    */
-   bool vs_inputs_dual_locations;
-
    unsigned max_unroll_iterations;
 } nir_shader_compiler_options;
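Note that the three f* lowering flags rely on one another's identities:
ffract(x) = x - ffloor(x), ffloor(x) = x - ffract(x), and
fceil(x) = -ffloor(-x), so a backend should not set lower_ffloor and
lower_ffract at the same time. A sketch of the fceil identity in nir_builder
terms (illustrative, not the actual lowering pass):

   static nir_ssa_def *
   build_fceil(nir_builder *b, nir_ssa_def *x)
   {
      /* fceil(x) = -ffloor(-x) */
      return nir_fneg(b, nir_ffloor(b, nir_fneg(b, x)));
   }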
@@ -2324,6 +2439,36 @@ nir_after_block_before_jump(nir_block *block)
    }
 }
 
+static inline nir_cursor
+nir_before_src(nir_src *src, bool is_if_condition)
+{
+   if (is_if_condition) {
+      nir_block *prev_block =
+         nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
+      assert(!nir_block_ends_in_jump(prev_block));
+      return nir_after_block(prev_block);
+   } else if (src->parent_instr->type == nir_instr_type_phi) {
+#ifndef NDEBUG
+      nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
+      bool found = false;
+      nir_foreach_phi_src(phi_src, cond_phi) {
+         if (phi_src->src.ssa == src->ssa) {
+            found = true;
+            break;
+         }
+      }
+      assert(found);
+#endif
+      /* The LIST_ENTRY macro is a generic container-of macro; it just happens
+       * to have a more specific name.
+       */
+      nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
+      return nir_after_block_before_jump(phi_src->pred);
+   } else {
+      return nir_before_instr(src->parent_instr);
+   }
+}
+
 static inline nir_cursor
 nir_before_cf_node(nir_cf_node *node)
 {
@@ -2461,6 +2606,29 @@ bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
 bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
 
 nir_const_value *nir_src_as_const_value(nir_src src);
+
+static inline struct nir_instr *
+nir_src_instr(const struct nir_src *src)
+{
+   return src->is_ssa ? src->ssa->parent_instr : NULL;
+}
+
+#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro) \
+static inline c_type * \
+nir_src_as_ ## name (struct nir_src *src) \
+{ \
+   return src->is_ssa && src->ssa->parent_instr->type == type_enum \
+          ? cast_macro(src->ssa->parent_instr) : NULL; \
+} \
+static inline const c_type * \
+nir_src_as_ ## name ## _const(const struct nir_src *src) \
+{ \
+   return src->is_ssa && src->ssa->parent_instr->type == type_enum \
+          ? cast_macro(src->ssa->parent_instr) : NULL; \
+}
+
+NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
+
 bool nir_src_is_dynamically_uniform(nir_src src);
 bool nir_srcs_equal(nir_src src1, nir_src src2);
 void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
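NIR_SRC_AS_ stamps out a mutable/const pair of typed downcast helpers per
instruction class, and nir_src_as_alu_instr() above is its first
instantiation. Typical use, with a hypothetical src pointer that may or may
not come from an ALU instruction:

   nir_alu_instr *alu = nir_src_as_alu_instr(src);
   if (alu != NULL && alu->op == nir_op_fneg) {
      /* The value is produced by an fneg; a pass could fold it here. */
   }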
@@ -2557,6 +2725,7 @@ void nir_index_blocks(nir_function_impl *impl);
 void nir_print_shader(nir_shader *shader, FILE *fp);
 void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
 void nir_print_instr(const nir_instr *instr, FILE *fp);
+void nir_print_deref(const nir_deref_instr *deref, FILE *fp);
 
 nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
 nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
@@ -2566,7 +2735,7 @@ nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
 nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);
 
 #ifndef NDEBUG
-void nir_validate_shader(nir_shader *shader);
+void nir_validate_shader(nir_shader *shader, const char *when);
 void nir_metadata_set_validation_flag(nir_shader *shader);
 void nir_metadata_check_validation_flag(nir_shader *shader);
 
@@ -2600,7 +2769,7 @@ should_print_nir(void)
    return should_print;
 }
 #else
-static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
+static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
 static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
 static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
 static inline bool should_clone_nir(void) { return false; }
@@ -2608,9 +2777,9 @@ static inline bool should_serialize_deserialize_nir(void) { return false; }
 static inline bool should_print_nir(void) { return false; }
 #endif /* NDEBUG */
 
-#define _PASS(nir, do_pass) do { \
+#define _PASS(pass, nir, do_pass) do { \
    do_pass \
-   nir_validate_shader(nir); \
+   nir_validate_shader(nir, "after " #pass); \
    if (should_clone_nir()) { \
       nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
       ralloc_free(nir); \
@@ -2622,7 +2791,7 @@ static inline bool should_print_nir(void) { return false; }
    } \
 } while (0)
 
-#define NIR_PASS(progress, nir, pass, ...) _PASS(nir, \
+#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir, \
    nir_metadata_set_validation_flag(nir); \
    if (should_print_nir()) \
       printf("%s\n", #pass); \
@@ -2634,7 +2803,7 @@ static inline bool should_print_nir(void) { return false; }
    } \
 )
 
-#define NIR_PASS_V(nir, pass, ...) _PASS(nir, \
+#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir, \
    if (should_print_nir()) \
       printf("%s\n", #pass); \
    pass(nir, ##__VA_ARGS__); \
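Threading the pass name through _PASS means nir_validate_shader() failures now
report which pass produced the broken shader. Call sites are unchanged; the
usual optimization loop looks like this (the pass selection is illustrative):

   bool progress = true;
   while (progress) {
      progress = false;
      NIR_PASS(progress, shader, nir_copy_prop);
      NIR_PASS(progress, shader, nir_opt_dce);
      NIR_PASS(progress, shader, nir_opt_algebraic);
   }
   NIR_PASS_V(shader, nir_lower_system_values);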
@@ -2659,6 +2828,7 @@ void nir_dump_cfg(nir_shader *shader, FILE *fp);
 
 int nir_gs_count_vertices(const nir_shader *shader);
 
+bool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes);
 bool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes);
 bool nir_split_var_copies(nir_shader *shader);
 bool nir_split_per_member_structs(nir_shader *shader);
@@ -2695,8 +2865,13 @@ void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
 
 /* Some helpers to do very simple linking */
 bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
+bool nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
+                               uint64_t *used_by_other_stage,
+                               uint64_t *used_by_other_stage_patches);
 void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                           bool default_to_smooth_interp);
+void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
+bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
 
 typedef enum {
    /* If set, this forces all non-flat fragment shader inputs to be
@@ -2731,6 +2906,7 @@ void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
                           bool alpha_to_one);
 bool nir_lower_alu(nir_shader *shader);
 bool nir_lower_alu_to_scalar(nir_shader *shader);
+bool nir_lower_bool_to_int32(nir_shader *shader);
 bool nir_lower_load_const_to_scalar(nir_shader *shader);
 bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
 bool nir_lower_phis_to_scalar(nir_shader *shader);
@@ -2757,6 +2933,16 @@ bool nir_lower_subgroups(nir_shader *shader,
 
 bool nir_lower_system_values(nir_shader *shader);
 
+enum PACKED nir_lower_tex_packing {
+   nir_lower_tex_packing_none = 0,
+   /* The sampler returns up to 2 32-bit words of half floats or 16-bit signed
+    * or unsigned ints based on the sampler type
+    */
+   nir_lower_tex_packing_16,
+   /* The sampler returns 1 32-bit word of 4x8 unorm */
+   nir_lower_tex_packing_8,
+};
+
 typedef struct nir_lower_tex_options {
    /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
@@ -2788,6 +2974,7 @@ typedef struct nir_lower_tex_options {
    unsigned lower_y_u_v_external;
    unsigned lower_yx_xuxv_external;
    unsigned lower_xy_uxvx_external;
+   unsigned lower_ayuv_external;
 
    /**
    * To emulate certain texture wrap modes, this can be used
@@ -2832,6 +3019,11 @@ typedef struct nir_lower_tex_options {
    */
    bool lower_txd_cube_map;
 
+   /**
+    * If true, lower nir_texop_txd on 3D surfaces with nir_texop_txl.
+    */
+   bool lower_txd_3d;
+
    /**
    * If true, lower nir_texop_txd on shadow samplers (except cube maps)
    * with nir_texop_txl. Notice that cube map shadow samplers are lowered
@@ -2844,6 +3036,26 @@ typedef struct nir_lower_tex_options {
    * Implies lower_txd_cube_map and lower_txd_shadow.
    */
    bool lower_txd;
+
+   /**
+    * If true, lower nir_texop_txb instructions that try to use shadow compare
+    * and min_lod at the same time to a nir_texop_lod, some math, and
+    * nir_texop_tex.
+    */
+   bool lower_txb_shadow_clamp;
+
+   /**
+    * If true, lower nir_texop_txd on shadow samplers when it uses min_lod
+    * with nir_texop_txl. This includes cube maps.
+    */
+   bool lower_txd_shadow_clamp;
+
+   /**
+    * If true, lower nir_texop_txd when it uses both offset and min_lod
+    * with nir_texop_txl. This includes cube maps.
+    */
+   bool lower_txd_offset_clamp;
+
+   enum nir_lower_tex_packing lower_tex_packing[32];
 } nir_lower_tex_options;
 
 bool nir_lower_tex(nir_shader *shader,
@@ -2851,7 +3063,7 @@ bool nir_lower_tex(nir_shader *shader,
 
 bool nir_lower_idiv(nir_shader *shader);
 
-bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
+bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
 bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
 bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
 
@@ -2896,7 +3108,15 @@ typedef struct nir_lower_bitmap_options {
 void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
 
 bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
-bool nir_lower_to_source_mods(nir_shader *shader);
+
+typedef enum {
+   nir_lower_int_source_mods = 1 << 0,
+   nir_lower_float_source_mods = 1 << 1,
+   nir_lower_all_source_mods = (1 << 2) - 1
+} nir_lower_to_source_mods_flags;
+
+
+bool nir_lower_to_source_mods(nir_shader *shader, nir_lower_to_source_mods_flags options);
 
 bool nir_lower_gs_intrinsics(nir_shader *shader);
 
@@ -2911,6 +3131,8 @@ typedef enum {
    nir_lower_isign64 = (1 << 1),
    /** Lower all int64 modulus and division opcodes */
    nir_lower_divmod64 = (1 << 2),
+   /** Lower all 64-bit umul_high and imul_high opcodes */
+   nir_lower_imul_high64 = (1 << 3),
 } nir_lower_int64_options;
 
 bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
@@ -2952,6 +3174,7 @@ bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);
 
 bool nir_lower_phis_to_regs_block(nir_block *block);
 bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
+bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
 
 bool nir_opt_algebraic(nir_shader *shader);
 bool nir_opt_algebraic_before_ffma(nir_shader *shader);
@@ -2970,8 +3193,14 @@ bool nir_opt_dce(nir_shader *shader);
 
 bool nir_opt_dead_cf(nir_shader *shader);
 
+bool nir_opt_dead_write_vars(nir_shader *shader);
+
+bool nir_opt_find_array_copies(nir_shader *shader);
+
 bool nir_opt_gcm(nir_shader *shader, bool value_number);
 
+bool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size);
+
 bool nir_opt_if(nir_shader *shader);
 
 bool nir_opt_intrinsics(nir_shader *shader);
@@ -2986,9 +3215,9 @@ bool nir_opt_move_comparisons(nir_shader *shader);
 
 bool nir_opt_move_load_ubo(nir_shader *shader);
 
-bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);
+bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
+                             bool indirect_load_ok, bool expensive_alu_ok);
 
-bool nir_opt_remove_phis_impl(nir_function_impl *impl);
 bool nir_opt_remove_phis(nir_shader *shader);
 
 bool nir_opt_shrink_load(nir_shader *shader);
@@ -3001,8 +3230,9 @@ bool nir_opt_conditional_discard(nir_shader *shader);
 
 void nir_sweep(nir_shader *shader);
 
-void nir_remap_attributes(nir_shader *shader,
-                          const nir_shader_compiler_options *options);
+void nir_remap_dual_slot_attributes(nir_shader *shader,
+                                    uint64_t *dual_slot_inputs);
+uint64_t nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot);
 
 nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
 gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);
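The new texture-lowering controls are consumed like the existing ones: fill in
a nir_lower_tex_options on the stack and run the pass. A sketch using only
fields introduced by this patch (the flag values are illustrative):

   nir_lower_tex_options tex_options = {
      .lower_txd_3d = true,
      .lower_txd_shadow_clamp = true,
      .lower_txd_offset_clamp = true,
   };
   NIR_PASS_V(shader, nir_lower_tex, &tex_options);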