Revert "i965: use nir_lower_indirect_derefs() for GLSL"
[mesa.git] / src / mesa / drivers / dri / i965 / brw_link.cpp
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_context.h"
25 #include "brw_shader.h"
26 #include "brw_fs.h"
27 #include "brw_nir.h"
28 #include "brw_program.h"
29 #include "compiler/glsl/ir.h"
30 #include "compiler/glsl/ir_optimization.h"
31 #include "compiler/glsl/program.h"
32 #include "program/program.h"
33 #include "main/shaderapi.h"
34 #include "main/shaderobj.h"
35 #include "main/uniforms.h"
36
37 /**
38 * Performs a compile of the shader stages even when we don't know
39 * what non-orthogonal state will be set, in the hope that it reflects
40 * the eventual NOS used, and thus allows us to produce link failures.
41 */
42 static bool
43 brw_shader_precompile(struct gl_context *ctx,
44 struct gl_shader_program *sh_prog)
45 {
46 struct gl_linked_shader *vs = sh_prog->_LinkedShaders[MESA_SHADER_VERTEX];
47 struct gl_linked_shader *tcs = sh_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
48 struct gl_linked_shader *tes = sh_prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
49 struct gl_linked_shader *gs = sh_prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
50 struct gl_linked_shader *fs = sh_prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
51 struct gl_linked_shader *cs = sh_prog->_LinkedShaders[MESA_SHADER_COMPUTE];
52
53 if (fs && !brw_fs_precompile(ctx, sh_prog, fs->Program))
54 return false;
55
56 if (gs && !brw_gs_precompile(ctx, sh_prog, gs->Program))
57 return false;
58
59 if (tes && !brw_tes_precompile(ctx, sh_prog, tes->Program))
60 return false;
61
62 if (tcs && !brw_tcs_precompile(ctx, sh_prog, tcs->Program))
63 return false;
64
65 if (vs && !brw_vs_precompile(ctx, sh_prog, vs->Program))
66 return false;
67
68 if (cs && !brw_cs_precompile(ctx, sh_prog, cs->Program))
69 return false;
70
71 return true;
72 }
73
74 static void
75 brw_lower_packing_builtins(struct brw_context *brw,
76 exec_list *ir)
77 {
78 /* Gens < 7 don't have instructions to convert to or from half-precision,
79 * and Gens < 6 don't expose that functionality.
80 */
81 if (brw->gen != 6)
82 return;
83
84 lower_packing_builtins(ir, LOWER_PACK_HALF_2x16 | LOWER_UNPACK_HALF_2x16);
85 }
86
/**
 * Runs the i965-specific GLSL IR lowering and optimization passes on a
 * linked shader, in a strict order (several passes depend on the output
 * form of earlier ones), then dumps the resulting IR if GLSL_DUMP is set.
 *
 * New IR created during lowering lives in a temporary ralloc context;
 * whatever survives is reparented back onto shader->ir at the end so the
 * optimized-away junk can be freed in one shot.
 */
static void
process_glsl_ir(struct brw_context *brw,
                struct gl_shader_program *shader_prog,
                struct gl_linked_shader *shader)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->screen->compiler;
   const struct gl_shader_compiler_options *options =
      &ctx->Const.ShaderCompilerOptions[shader->Stage];

   /* Temporary memory context for any new IR. */
   void *mem_ctx = ralloc_context(NULL);

   ralloc_adopt(mem_ctx, shader->ir);

   lower_blend_equation_advanced(shader);

   /* lower_packing_builtins() inserts arithmetic instructions, so it
    * must precede lower_instructions().
    */
   brw_lower_packing_builtins(brw, shader->ir);
   do_mat_op_to_vec(shader->ir);

   unsigned instructions_to_lower = (DIV_TO_MUL_RCP |
                                     SUB_TO_ADD_NEG |
                                     EXP_TO_EXP2 |
                                     LOG_TO_LOG2 |
                                     DFREXP_DLDEXP_TO_ARITH);
   if (brw->gen < 7) {
      /* Pre-gen7 hardware lacks the bitfield instructions these map to,
       * so lower them to math/shift sequences instead.
       */
      instructions_to_lower |= BIT_COUNT_TO_MATH |
                               EXTRACT_TO_SHIFTS |
                               INSERT_TO_SHIFTS |
                               REVERSE_TO_SHIFTS;
   }

   lower_instructions(shader->ir, instructions_to_lower);

   /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
    * if-statements need to be flattened.
    */
   if (brw->gen < 6)
      lower_if_to_cond_assign(shader->Stage, shader->ir, 16);

   do_lower_texture_projection(shader->ir);
   brw_lower_texture_gradients(brw, shader->ir);
   do_vec_index_to_cond_assign(shader->ir);
   lower_vector_insert(shader->ir, true);
   lower_offset_arrays(shader->ir);
   lower_noise(shader->ir);
   lower_quadop_vector(shader->ir, false);

   do_copy_propagation(shader->ir);

   /* Turn indirect addressing the backend can't handle (per the per-stage
    * options) into chains of conditional assignments.  This is slow code,
    * hence the perf_debug below when it actually fires.
    */
   bool lowered_variable_indexing =
      lower_variable_index_to_cond_assign(shader->Stage, shader->ir,
                                          options->EmitNoIndirectInput,
                                          options->EmitNoIndirectOutput,
                                          options->EmitNoIndirectTemp,
                                          options->EmitNoIndirectUniform);

   if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
      perf_debug("Unsupported form of variable indexing in %s; falling "
                 "back to very inefficient code generation\n",
                 _mesa_shader_stage_to_abbrev(shader->Stage));
   }

   /* Iterate lowering + common optimization to a fixed point; each pass
    * can expose new opportunities for the others.
    */
   bool progress;
   do {
      progress = false;

      if (compiler->scalar_stage[shader->Stage]) {
         if (shader->Stage == MESA_SHADER_VERTEX ||
             shader->Stage == MESA_SHADER_FRAGMENT)
            brw_do_channel_expressions(shader->ir);
         brw_do_vector_splitting(shader->ir);
      }

      progress = do_lower_jumps(shader->ir, true, true,
                                true, /* main return */
                                false, /* continue */
                                false /* loops */
                                ) || progress;

      progress = do_common_optimization(shader->ir, true, true,
                                        options, ctx->Const.NativeIntegers) || progress;
   } while (progress);

   validate_ir_tree(shader->ir);

   /* Now that we've finished altering the linked IR, reparent any live IR back
    * to the permanent memory context, and free the temporary one (discarding any
    * junk we optimized away).
    */
   reparent_ir(shader->ir, shader->ir);
   ralloc_free(mem_ctx);

   if (ctx->_Shader->Flags & GLSL_DUMP) {
      fprintf(stderr, "\n");
      if (shader->ir) {
         fprintf(stderr, "GLSL IR for linked %s program %d:\n",
                 _mesa_shader_stage_to_string(shader->Stage),
                 shader_prog->Name);
         _mesa_print_ir(stderr, shader->ir, NULL);
      } else {
         fprintf(stderr, "No GLSL IR for linked %s program %d (shader may be "
                 "from cache)\n", _mesa_shader_stage_to_string(shader->Stage),
                 shader_prog->Name);
      }
      fprintf(stderr, "\n");
   }
}
198
199 extern "C" struct gl_linked_shader *
200 brw_new_shader(gl_shader_stage stage)
201 {
202 struct brw_shader *shader;
203
204 shader = rzalloc(NULL, struct brw_shader);
205 if (shader) {
206 shader->base.Stage = stage;
207 }
208
209 return &shader->base;
210 }
211
/**
 * i965 LinkShader driver hook: lowers the linked GLSL IR of each stage,
 * records built-in uniform state references and sampler usage, converts
 * each stage to NIR, optionally precompiles, then frees the GLSL IR.
 *
 * Returns false (link failure) only if a requested precompile fails.
 */
extern "C" GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;
   unsigned int stage;

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct gl_linked_shader *shader = shProg->_LinkedShaders[stage];
      if (!shader)
         continue;

      struct gl_program *prog = shader->Program;
      prog->Parameters = _mesa_new_parameter_list();

      process_glsl_ir(brw, shProg, shader);

      _mesa_copy_linked_program_data(shProg, shader);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_in_list(ir_instruction, node, shader->ir) {
         ir_variable *var = node->as_variable();

         /* Only built-in uniforms ("gl_*") carry state slots. */
         if ((var == NULL) || (var->data.mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->get_state_slots();
         assert(slots != NULL);

         for (unsigned int i = 0; i < var->get_num_state_slots(); i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      prog->SamplersUsed = shader->active_samplers;
      prog->ShadowSamplers = shader->shadow_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      brw_add_texrect_params(prog);

      bool debug_enabled =
         (INTEL_DEBUG & intel_debug_flag_for_shader_stage(shader->Stage));

      if (debug_enabled && shader->ir) {
         fprintf(stderr, "GLSL IR for native %s shader %d:\n",
                 _mesa_shader_stage_to_string(shader->Stage), shProg->Name);
         _mesa_print_ir(stderr, shader->ir, NULL);
         fprintf(stderr, "\n\n");
      }

      /* Convert the lowered GLSL IR to NIR; this is what code generation
       * consumes later, so it must exist before precompile below.
       */
      prog->nir = brw_create_nir(brw, shProg, prog, (gl_shader_stage) stage,
                                 compiler->scalar_stage[stage]);
   }

   /* Name == 0 is the fixed-function program; don't dump its source. */
   if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
                 _mesa_shader_stage_to_string(sh->Stage),
                 i, shProg->Name);
         fprintf(stderr, "%s", sh->Source);
         fprintf(stderr, "\n");
      }
   }

   if (brw->precompile && !brw_shader_precompile(ctx, shProg))
      return false;

   build_program_resource_list(ctx, shProg);

   /* Second pass: everything downstream works from NIR now, so the GLSL IR
    * can be released.
    */
   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct gl_linked_shader *shader = shProg->_LinkedShaders[stage];
      if (!shader)
         continue;

      /* The GLSL IR won't be needed anymore. */
      ralloc_free(shader->ir);
      shader->ir = NULL;
   }

   return true;
}