nir: Embed the shader_info in the nir_shader again
[mesa.git] / src / mesa / drivers / dri / i965 / brw_link.cpp
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_context.h"
25 #include "compiler/brw_nir.h"
26 #include "brw_program.h"
27 #include "compiler/glsl/ir.h"
28 #include "compiler/glsl/ir_optimization.h"
29 #include "compiler/glsl/program.h"
30 #include "program/program.h"
31 #include "main/shaderapi.h"
32 #include "main/shaderobj.h"
33 #include "main/uniforms.h"
34
35 /**
36 * Performs a compile of the shader stages even when we don't know
37 * what non-orthogonal state will be set, in the hope that it reflects
38 * the eventual NOS used, and thus allows us to produce link failures.
39 */
40 static bool
41 brw_shader_precompile(struct gl_context *ctx,
42 struct gl_shader_program *sh_prog)
43 {
44 struct gl_linked_shader *vs = sh_prog->_LinkedShaders[MESA_SHADER_VERTEX];
45 struct gl_linked_shader *tcs = sh_prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
46 struct gl_linked_shader *tes = sh_prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
47 struct gl_linked_shader *gs = sh_prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
48 struct gl_linked_shader *fs = sh_prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
49 struct gl_linked_shader *cs = sh_prog->_LinkedShaders[MESA_SHADER_COMPUTE];
50
51 if (fs && !brw_fs_precompile(ctx, fs->Program))
52 return false;
53
54 if (gs && !brw_gs_precompile(ctx, gs->Program))
55 return false;
56
57 if (tes && !brw_tes_precompile(ctx, sh_prog, tes->Program))
58 return false;
59
60 if (tcs && !brw_tcs_precompile(ctx, sh_prog, tcs->Program))
61 return false;
62
63 if (vs && !brw_vs_precompile(ctx, vs->Program))
64 return false;
65
66 if (cs && !brw_cs_precompile(ctx, cs->Program))
67 return false;
68
69 return true;
70 }
71
72 static void
73 brw_lower_packing_builtins(struct brw_context *brw,
74 exec_list *ir)
75 {
76 /* Gens < 7 don't have instructions to convert to or from half-precision,
77 * and Gens < 6 don't expose that functionality.
78 */
79 if (brw->gen != 6)
80 return;
81
82 lower_packing_builtins(ir, LOWER_PACK_HALF_2x16 | LOWER_UNPACK_HALF_2x16);
83 }
84
/**
 * Run the i965-specific GLSL IR lowering passes on one linked shader stage.
 *
 * The passes execute in a deliberate order (see inline comments); the IR is
 * mutated in place, temporarily reparented into a scratch ralloc context so
 * that IR nodes discarded by lowering are freed in one shot at the end.
 */
static void
process_glsl_ir(struct brw_context *brw,
                struct gl_shader_program *shader_prog,
                struct gl_linked_shader *shader)
{
   struct gl_context *ctx = &brw->ctx;

   /* Temporary memory context for any new IR. */
   void *mem_ctx = ralloc_context(NULL);

   /* Move the existing IR under the scratch context so anything the passes
    * orphan gets freed with it below.
    */
   ralloc_adopt(mem_ctx, shader->ir);

   lower_blend_equation_advanced(shader);

   /* lower_packing_builtins() inserts arithmetic instructions, so it
    * must precede lower_instructions().
    */
   brw_lower_packing_builtins(brw, shader->ir);
   do_mat_op_to_vec(shader->ir);

   /* Operations lowered on all gens. */
   unsigned instructions_to_lower = (DIV_TO_MUL_RCP |
                                     SUB_TO_ADD_NEG |
                                     EXP_TO_EXP2 |
                                     LOG_TO_LOG2 |
                                     DFREXP_DLDEXP_TO_ARITH);
   /* Pre-Gen7 additionally lacks bit-manipulation instructions, so lower
    * those builtins to math/shift sequences.
    */
   if (brw->gen < 7) {
      instructions_to_lower |= BIT_COUNT_TO_MATH |
                               EXTRACT_TO_SHIFTS |
                               INSERT_TO_SHIFTS |
                               REVERSE_TO_SHIFTS;
   }

   lower_instructions(shader->ir, instructions_to_lower);

   /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
    * if-statements need to be flattened.
    */
   if (brw->gen < 6)
      lower_if_to_cond_assign(shader->Stage, shader->ir, 16);

   do_lower_texture_projection(shader->ir);
   do_vec_index_to_cond_assign(shader->ir);
   lower_vector_insert(shader->ir, true);
   lower_offset_arrays(shader->ir);
   lower_noise(shader->ir);
   lower_quadop_vector(shader->ir, false);

   /* Sanity-check the IR after all the lowering above. */
   validate_ir_tree(shader->ir);

   /* Now that we've finished altering the linked IR, reparent any live IR back
    * to the permanent memory context, and free the temporary one (discarding any
    * junk we optimized away).
    */
   reparent_ir(shader->ir, shader->ir);
   ralloc_free(mem_ctx);

   /* Optionally dump the post-lowering IR for debugging. */
   if (ctx->_Shader->Flags & GLSL_DUMP) {
      fprintf(stderr, "\n");
      if (shader->ir) {
         fprintf(stderr, "GLSL IR for linked %s program %d:\n",
                 _mesa_shader_stage_to_string(shader->Stage),
                 shader_prog->Name);
         _mesa_print_ir(stderr, shader->ir, NULL);
      } else {
         fprintf(stderr, "No GLSL IR for linked %s program %d (shader may be "
                 "from cache)\n", _mesa_shader_stage_to_string(shader->Stage),
                 shader_prog->Name);
      }
      fprintf(stderr, "\n");
   }
}
156
157 static void
158 unify_interfaces(struct shader_info **infos)
159 {
160 struct shader_info *prev_info = NULL;
161
162 for (unsigned i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
163 if (!infos[i])
164 continue;
165
166 if (prev_info) {
167 prev_info->outputs_written |= infos[i]->inputs_read &
168 ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
169 infos[i]->inputs_read |= prev_info->outputs_written &
170 ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
171
172 prev_info->patch_outputs_written |= infos[i]->patch_inputs_read;
173 infos[i]->patch_inputs_read |= prev_info->patch_outputs_written;
174 }
175 prev_info = infos[i];
176 }
177 }
178
/**
 * Driver link hook for the i965 driver.
 *
 * For every linked stage: runs the GLSL IR lowering passes, copies the
 * linked-program data into the gl_program, converts the stage to NIR, and
 * registers state references for built-in ("gl_*") uniforms.  Then it
 * unifies the inter-stage interfaces (non-separate programs only),
 * optionally dumps shader sources, precompiles the stages if requested,
 * builds the program resource list, and finally frees the GLSL IR, which is
 * no longer needed once NIR exists.
 *
 * Returns false (link failure) only when precompilation fails.
 */
extern "C" GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->screen->compiler;
   unsigned int stage;
   /* Per-stage shader_info pointers, collected for unify_interfaces();
    * entries stay NULL for unused stages.
    */
   struct shader_info *infos[MESA_SHADER_STAGES] = { 0, };

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct gl_linked_shader *shader = shProg->_LinkedShaders[stage];
      if (!shader)
         continue;

      struct gl_program *prog = shader->Program;
      /* Fresh parameter list; state references are added to it below. */
      prog->Parameters = _mesa_new_parameter_list();

      /* i965-specific GLSL IR lowering (must happen before NIR creation). */
      process_glsl_ir(brw, shProg, shader);

      _mesa_copy_linked_program_data(shProg, shader);

      prog->ShadowSamplers = shader->shadow_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      bool debug_enabled =
         (INTEL_DEBUG & intel_debug_flag_for_shader_stage(shader->Stage));

      if (debug_enabled && shader->ir) {
         fprintf(stderr, "GLSL IR for native %s shader %d:\n",
                 _mesa_shader_stage_to_string(shader->Stage), shProg->Name);
         _mesa_print_ir(stderr, shader->ir, NULL);
         fprintf(stderr, "\n\n");
      }

      /* Translate to NIR, scalar or vec4 per the compiler's stage choice. */
      prog->nir = brw_create_nir(brw, shProg, prog, (gl_shader_stage) stage,
                                 compiler->scalar_stage[stage]);
      infos[stage] = &prog->nir->info;

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      nir_foreach_variable(var, &prog->nir->uniforms) {
         if (strncmp(var->name, "gl_", 3) == 0) {
            const nir_state_slot *const slots = var->state_slots;
            /* Built-in uniforms must carry their state-slot tokens. */
            assert(var->state_slots != NULL);

            for (unsigned int i = 0; i < var->num_state_slots; i++) {
               _mesa_add_state_reference(prog->Parameters,
                                         (gl_state_index *)slots[i].tokens);
            }
         }
      }
   }

   /* The linker tries to dead code eliminate unused varying components,
    * and make sure interfaces match.  But it isn't able to do so in all
    * cases.  So, explicitly make the interfaces match by OR'ing together
    * the inputs_read/outputs_written bitfields of adjacent stages.
    */
   if (!shProg->SeparateShader)
      unify_interfaces(infos);

   /* Dump shader sources when GLSL_DUMP is set (skip the name-0 fixed
    * function program).
    */
   if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
                 _mesa_shader_stage_to_string(sh->Stage),
                 i, shProg->Name);
         fprintf(stderr, "%s", sh->Source);
         fprintf(stderr, "\n");
      }
   }

   /* Precompile failures are surfaced as link failures. */
   if (brw->precompile && !brw_shader_precompile(ctx, shProg))
      return false;

   build_program_resource_list(ctx, shProg);

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct gl_linked_shader *shader = shProg->_LinkedShaders[stage];
      if (!shader)
         continue;

      /* The GLSL IR won't be needed anymore. */
      ralloc_free(shader->ir);
      shader->ir = NULL;
   }

   return true;
}