glsl: Add a lowering pass for 64-bit integer division
[mesa.git] / src / compiler / glsl / ir_optimization.h
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24
25 /**
26 * \file ir_optimization.h
27 *
28 * Prototypes for optimization passes to be called by the compiler and drivers.
29 */
30
31 /* Operations for lower_instructions() */
/* Each flag below selects one expression lowering; callers OR them together
 * to form the what_to_lower bitmask passed to lower_instructions().
 */
32 #define SUB_TO_ADD_NEG 0x01
33 #define DIV_TO_MUL_RCP 0x02
34 #define EXP_TO_EXP2 0x04
35 #define POW_TO_EXP2 0x08
36 #define LOG_TO_LOG2 0x10
37 #define MOD_TO_FLOOR 0x20
38 #define INT_DIV_TO_MUL_RCP 0x40
39 #define LDEXP_TO_ARITH 0x80
40 #define CARRY_TO_ARITH 0x100
41 #define BORROW_TO_ARITH 0x200
42 #define SAT_TO_CLAMP 0x400
43 #define DOPS_TO_DFRAC 0x800
44 #define DFREXP_DLDEXP_TO_ARITH 0x1000
45 #define BIT_COUNT_TO_MATH 0x02000
46 #define EXTRACT_TO_SHIFTS 0x04000
47 #define INSERT_TO_SHIFTS 0x08000
48 #define REVERSE_TO_SHIFTS 0x10000
49 #define FIND_LSB_TO_FLOAT_CAST 0x20000
50 #define FIND_MSB_TO_FLOAT_CAST 0x40000
51 #define IMUL_HIGH_TO_MUL 0x80000
52
53 /* Operations for lower_64bit_integer_instructions() */
/* Separate bitmask space from the lower_instructions() flags above; these
 * form the what_to_lower argument of lower_64bit_integer_instructions().
 */
54 #define MUL64 (1U << 0)
55 #define SIGN64 (1U << 1)
56 #define DIV64 (1U << 2)
57
58 /**
59 * \see class lower_packing_builtins_visitor
60 */
58 /**
59 * Bit-flag selectors for lowering GLSL pack/unpack built-in functions.
60 *
61 * Callers OR these together to form the op_mask argument of
62 * lower_packing_builtins().  The LOWER_PACK_USE_BFI / LOWER_PACK_USE_BFE
63 * flags presumably choose bitfieldInsert/bitfieldExtract-based expansions
64 * rather than selecting a specific builtin to lower -- TODO confirm against
65 * the visitor implementation.
66 *
67 * \see class lower_packing_builtins_visitor
68 */
61 enum lower_packing_builtins_op {
62 LOWER_PACK_UNPACK_NONE = 0x0000,
63
64 LOWER_PACK_SNORM_2x16 = 0x0001,
65 LOWER_UNPACK_SNORM_2x16 = 0x0002,
66
67 LOWER_PACK_UNORM_2x16 = 0x0004,
68 LOWER_UNPACK_UNORM_2x16 = 0x0008,
69
70 LOWER_PACK_HALF_2x16 = 0x0010,
71 LOWER_UNPACK_HALF_2x16 = 0x0020,
72
73 LOWER_PACK_SNORM_4x8 = 0x0040,
74 LOWER_UNPACK_SNORM_4x8 = 0x0080,
75
76 LOWER_PACK_UNORM_4x8 = 0x0100,
77 LOWER_UNPACK_UNORM_4x8 = 0x0200,
78
79 LOWER_PACK_USE_BFI = 0x0400,
80 LOWER_PACK_USE_BFE = 0x0800,
81 };
82
83 bool do_common_optimization(exec_list *ir, bool linked,
84 bool uniform_locations_assigned,
85 const struct gl_shader_compiler_options *options,
86 bool native_integers);
87
88 bool ir_constant_fold(ir_rvalue **rvalue);
89
90 bool do_rebalance_tree(exec_list *instructions);
91 bool do_algebraic(exec_list *instructions, bool native_integers,
92 const struct gl_shader_compiler_options *options);
93 bool opt_conditional_discard(exec_list *instructions);
94 bool do_constant_folding(exec_list *instructions);
95 bool do_constant_variable(exec_list *instructions);
96 bool do_constant_variable_unlinked(exec_list *instructions);
97 bool do_copy_propagation(exec_list *instructions);
98 bool do_copy_propagation_elements(exec_list *instructions);
99 bool do_constant_propagation(exec_list *instructions);
100 void do_dead_builtin_varyings(struct gl_context *ctx,
101 gl_linked_shader *producer,
102 gl_linked_shader *consumer,
103 unsigned num_tfeedback_decls,
104 class tfeedback_decl *tfeedback_decls);
105 bool do_dead_code(exec_list *instructions, bool uniform_locations_assigned);
106 bool do_dead_code_local(exec_list *instructions);
107 bool do_dead_code_unlinked(exec_list *instructions);
108 bool do_dead_functions(exec_list *instructions);
109 bool opt_flip_matrices(exec_list *instructions);
110 bool do_function_inlining(exec_list *instructions);
111 bool do_lower_jumps(exec_list *instructions, bool pull_out_jumps = true, bool lower_sub_return = true, bool lower_main_return = false, bool lower_continue = false, bool lower_break = false);
112 bool do_lower_texture_projection(exec_list *instructions);
113 bool do_if_simplification(exec_list *instructions);
114 bool opt_flatten_nested_if_blocks(exec_list *instructions);
115 bool do_discard_simplification(exec_list *instructions);
116 bool lower_if_to_cond_assign(gl_shader_stage stage, exec_list *instructions,
117 unsigned max_depth = 0, unsigned min_branch_cost = 0);
118 bool do_mat_op_to_vec(exec_list *instructions);
119 bool do_minmax_prune(exec_list *instructions);
120 bool do_noop_swizzle(exec_list *instructions);
121 bool do_structure_splitting(exec_list *instructions);
122 bool do_swizzle_swizzle(exec_list *instructions);
123 bool do_vectorize(exec_list *instructions);
124 bool do_tree_grafting(exec_list *instructions);
125 bool do_vec_index_to_cond_assign(exec_list *instructions);
126 bool do_vec_index_to_swizzle(exec_list *instructions);
127 bool lower_discard(exec_list *instructions);
128 void lower_discard_flow(exec_list *instructions);
129 bool lower_instructions(exec_list *instructions, unsigned what_to_lower);
130 bool lower_noise(exec_list *instructions);
131 bool lower_variable_index_to_cond_assign(gl_shader_stage stage,
132 exec_list *instructions, bool lower_input, bool lower_output,
133 bool lower_temp, bool lower_uniform);
134 bool lower_quadop_vector(exec_list *instructions, bool dont_lower_swz);
135 bool lower_const_arrays_to_uniforms(exec_list *instructions, unsigned stage);
136 bool lower_clip_cull_distance(struct gl_shader_program *prog,
137 gl_linked_shader *shader);
138 void lower_output_reads(unsigned stage, exec_list *instructions);
139 bool lower_packing_builtins(exec_list *instructions, int op_mask);
140 void lower_shared_reference(struct gl_linked_shader *shader,
141 unsigned *shared_size);
142 void lower_ubo_reference(struct gl_linked_shader *shader,
143 bool clamp_block_indices);
144 void lower_packed_varyings(void *mem_ctx,
145 unsigned locations_used,
146 const uint8_t *components,
147 ir_variable_mode mode,
148 unsigned gs_input_vertices,
149 gl_linked_shader *shader,
150 bool disable_varying_packing, bool xfb_enabled);
151 bool lower_vector_insert(exec_list *instructions, bool lower_nonconstant_index);
152 bool lower_vector_derefs(gl_linked_shader *shader);
153 void lower_named_interface_blocks(void *mem_ctx, gl_linked_shader *shader);
154 bool optimize_redundant_jumps(exec_list *instructions);
155 bool optimize_split_arrays(exec_list *instructions, bool linked);
156 bool lower_offset_arrays(exec_list *instructions);
157 void optimize_dead_builtin_variables(exec_list *instructions,
158 enum ir_variable_mode other);
159 bool lower_tess_level(gl_linked_shader *shader);
160
161 bool lower_vertex_id(gl_linked_shader *shader);
162 bool lower_blend_equation_advanced(gl_linked_shader *shader);
163
164 bool lower_subroutine(exec_list *instructions, struct _mesa_glsl_parse_state *state);
165 void propagate_invariance(exec_list *instructions);
166
167 ir_rvalue *
168 compare_index_block(exec_list *instructions, ir_variable *index,
169 unsigned base, unsigned components, void *mem_ctx);
170
171 bool lower_64bit_integer_instructions(exec_list *instructions,
172 unsigned what_to_lower);