i965: Add tessellation evaluation shaders
[mesa.git] / src / mesa / drivers / dri / i965 / brw_link.cpp
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_context.h"
25 #include "brw_shader.h"
26 #include "brw_fs.h"
27 #include "brw_nir.h"
28 #include "brw_program.h"
29 #include "glsl/ir_optimization.h"
30 #include "glsl/glsl_parser_extras.h"
31 #include "program/program.h"
32 #include "main/shaderapi.h"
33 #include "main/uniforms.h"
34
35 /**
36 * Performs a compile of the shader stages even when we don't know
37 * what non-orthogonal state will be set, in the hope that it reflects
38 * the eventual NOS used, and thus allows us to produce link failures.
39 */
40 static bool
41 brw_shader_precompile(struct gl_context *ctx,
42 struct gl_shader_program *sh_prog)
43 {
44 struct gl_shader *vs = sh_prog->_LinkedShaders[MESA_SHADER_VERTEX];
45 struct gl_shader *tes = sh_prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
46 struct gl_shader *gs = sh_prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
47 struct gl_shader *fs = sh_prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
48 struct gl_shader *cs = sh_prog->_LinkedShaders[MESA_SHADER_COMPUTE];
49
50 if (fs && !brw_fs_precompile(ctx, sh_prog, fs->Program))
51 return false;
52
53 if (gs && !brw_gs_precompile(ctx, sh_prog, gs->Program))
54 return false;
55
56 if (tes && !brw_tes_precompile(ctx, sh_prog, tes->Program))
57 return false;
58
59 if (vs && !brw_vs_precompile(ctx, sh_prog, vs->Program))
60 return false;
61
62 if (cs && !brw_cs_precompile(ctx, sh_prog, cs->Program))
63 return false;
64
65 return true;
66 }
67
68 static void
69 brw_lower_packing_builtins(struct brw_context *brw,
70 gl_shader_stage shader_type,
71 exec_list *ir)
72 {
73 const struct brw_compiler *compiler = brw->intelScreen->compiler;
74
75 int ops = LOWER_PACK_SNORM_2x16
76 | LOWER_UNPACK_SNORM_2x16
77 | LOWER_PACK_UNORM_2x16
78 | LOWER_UNPACK_UNORM_2x16;
79
80 if (compiler->scalar_stage[shader_type]) {
81 ops |= LOWER_UNPACK_UNORM_4x8
82 | LOWER_UNPACK_SNORM_4x8
83 | LOWER_PACK_UNORM_4x8
84 | LOWER_PACK_SNORM_4x8;
85 }
86
87 if (brw->gen >= 7) {
88 /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
89 * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
90 * lowering is needed. For SOA code, the Half2x16 ops must be
91 * scalarized.
92 */
93 if (compiler->scalar_stage[shader_type]) {
94 ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
95 | LOWER_UNPACK_HALF_2x16_TO_SPLIT;
96 }
97 } else {
98 ops |= LOWER_PACK_HALF_2x16
99 | LOWER_UNPACK_HALF_2x16;
100 }
101
102 lower_packing_builtins(ir, ops);
103 }
104
/**
 * Run the i965 driver's GLSL IR lowering and optimization pipeline over one
 * linked shader stage.
 *
 * NOTE(review): the order of the lowering passes below is significant — at
 * minimum, packing-builtin lowering must precede lower_instructions() (see
 * comment below), and the channel-expression/vector-splitting passes feed
 * the common-optimization loop.  Do not reorder without verification.
 */
static void
process_glsl_ir(gl_shader_stage stage,
                struct brw_context *brw,
                struct gl_shader_program *shader_prog,
                struct gl_shader *shader)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_compiler *compiler = brw->intelScreen->compiler;
   const struct gl_shader_compiler_options *options =
      &ctx->Const.ShaderCompilerOptions[shader->Stage];

   /* Temporary memory context for any new IR. */
   void *mem_ctx = ralloc_context(NULL);

   /* Move the existing IR under the temporary context so that everything
    * (old and newly created nodes) can be reparented or freed together at
    * the end of this function.
    */
   ralloc_adopt(mem_ctx, shader->ir);

   /* lower_packing_builtins() inserts arithmetic instructions, so it
    * must precede lower_instructions().
    */
   brw_lower_packing_builtins(brw, shader->Stage, shader->ir);
   do_mat_op_to_vec(shader->ir);
   /* BFM/BFI lowering is only useful where the instructions exist (Gen7+). */
   const int bitfield_insert = brw->gen >= 7 ? BITFIELD_INSERT_TO_BFM_BFI : 0;
   lower_instructions(shader->ir,
                      MOD_TO_FLOOR |
                      DIV_TO_MUL_RCP |
                      SUB_TO_ADD_NEG |
                      EXP_TO_EXP2 |
                      LOG_TO_LOG2 |
                      bitfield_insert |
                      LDEXP_TO_ARITH |
                      CARRY_TO_ARITH |
                      BORROW_TO_ARITH);

   /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
    * if-statements need to be flattened.
    */
   if (brw->gen < 6)
      lower_if_to_cond_assign(shader->ir, 16);

   do_lower_texture_projection(shader->ir);
   brw_lower_texture_gradients(brw, shader->ir);
   do_vec_index_to_cond_assign(shader->ir);
   lower_vector_insert(shader->ir, true);
   lower_offset_arrays(shader->ir);
   brw_do_lower_unnormalized_offset(shader->ir);
   lower_noise(shader->ir);
   lower_quadop_vector(shader->ir, false);

   /* Lower variable indexing for the storage classes this stage's compiler
    * options say cannot be indirectly addressed; returns whether anything
    * actually had to be lowered (used only for the perf warning below).
    */
   bool lowered_variable_indexing =
      lower_variable_index_to_cond_assign((gl_shader_stage)stage,
                                          shader->ir,
                                          options->EmitNoIndirectInput,
                                          options->EmitNoIndirectOutput,
                                          options->EmitNoIndirectTemp,
                                          options->EmitNoIndirectUniform);

   if (unlikely(brw->perf_debug && lowered_variable_indexing)) {
      perf_debug("Unsupported form of variable indexing in %s; falling "
                 "back to very inefficient code generation\n",
                 _mesa_shader_stage_to_abbrev(shader->Stage));
   }

   /* Iterate lowering + common optimizations to a fixed point.  Note that
    * the channel-expression/vector-splitting passes don't report progress;
    * only do_lower_jumps() and do_common_optimization() drive the loop.
    */
   bool progress;
   do {
      progress = false;

      if (compiler->scalar_stage[shader->Stage]) {
         brw_do_channel_expressions(shader->ir);
         brw_do_vector_splitting(shader->ir);
      }

      progress = do_lower_jumps(shader->ir, true, true,
                                true, /* main return */
                                false, /* continue */
                                false /* loops */
                                ) || progress;

      progress = do_common_optimization(shader->ir, true, true,
                                        options, ctx->Const.NativeIntegers) || progress;
   } while (progress);

   validate_ir_tree(shader->ir);

   /* Now that we've finished altering the linked IR, reparent any live IR back
    * to the permanent memory context, and free the temporary one (discarding any
    * junk we optimized away).
    */
   reparent_ir(shader->ir, shader->ir);
   ralloc_free(mem_ctx);

   /* Optionally dump the final IR when GLSL_DUMP debugging is enabled. */
   if (ctx->_Shader->Flags & GLSL_DUMP) {
      fprintf(stderr, "\n");
      fprintf(stderr, "GLSL IR for linked %s program %d:\n",
              _mesa_shader_stage_to_string(shader->Stage),
              shader_prog->Name);
      _mesa_print_ir(stderr, shader->ir, NULL);
      fprintf(stderr, "\n");
   }
}
204
/**
 * i965 implementation of the LinkShader driver hook.
 *
 * For each linked stage: creates a fresh gl_program, runs the GLSL IR
 * lowering pipeline (process_glsl_ir), records built-in uniform state
 * references and sampler usage, attaches the program to the shader, and
 * translates the IR to NIR.  Optionally dumps sources and precompiles.
 *
 * Returns false on program-allocation or precompile failure.
 */
extern "C" GLboolean
brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->intelScreen->compiler;
   unsigned int stage;

   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
      struct gl_shader *shader = shProg->_LinkedShaders[stage];
      if (!shader)
         continue;

      struct gl_program *prog =
         ctx->Driver.NewProgram(ctx, _mesa_shader_stage_to_program(stage),
                                shader->Name);
      if (!prog)
         return false;
      prog->Parameters = _mesa_new_parameter_list();

      _mesa_copy_linked_program_data((gl_shader_stage) stage, shProg, prog);

      process_glsl_ir((gl_shader_stage) stage, brw, shProg, shader);

      /* Make a pass over the IR to add state references for any built-in
       * uniforms that are used.  This has to be done now (during linking).
       * Code generation doesn't happen until the first time this shader is
       * used for rendering.  Waiting until then to generate the parameters is
       * too late.  At that point, the values for the built-in uniforms won't
       * get sent to the shader.
       */
      foreach_in_list(ir_instruction, node, shader->ir) {
         ir_variable *var = node->as_variable();

         /* Only uniforms whose names start with "gl_" (built-ins) carry
          * state slots that need parameter-list entries.
          */
         if ((var == NULL) || (var->data.mode != ir_var_uniform)
             || (strncmp(var->name, "gl_", 3) != 0))
            continue;

         const ir_state_slot *const slots = var->get_state_slots();
         assert(slots != NULL);

         for (unsigned int i = 0; i < var->get_num_state_slots(); i++) {
            _mesa_add_state_reference(prog->Parameters,
                                      (gl_state_index *) slots[i].tokens);
         }
      }

      do_set_program_inouts(shader->ir, prog, shader->Stage);

      prog->SamplersUsed = shader->active_samplers;
      prog->ShadowSamplers = shader->shadow_samplers;
      _mesa_update_shader_textures_used(shProg, prog);

      /* Hand ownership of prog to the shader via the refcounting helper... */
      _mesa_reference_program(ctx, &shader->Program, prog);

      brw_add_texrect_params(prog);

      prog->nir = brw_create_nir(brw, shProg, prog, (gl_shader_stage) stage,
                                 compiler->scalar_stage[stage]);

      /* ...then drop this function's local reference; the shader's
       * reference keeps prog alive.
       */
      _mesa_reference_program(ctx, &prog, NULL);
   }

   /* Dump shader sources when GLSL_DUMP is set, but never for the fixed
    * function program (Name == 0).
    */
   if ((ctx->_Shader->Flags & GLSL_DUMP) && shProg->Name != 0) {
      for (unsigned i = 0; i < shProg->NumShaders; i++) {
         const struct gl_shader *sh = shProg->Shaders[i];
         if (!sh)
            continue;

         fprintf(stderr, "GLSL %s shader %d source for linked program %d:\n",
                 _mesa_shader_stage_to_string(sh->Stage),
                 i, shProg->Name);
         fprintf(stderr, "%s", sh->Source);
         fprintf(stderr, "\n");
      }
   }

   /* Eagerly compile all stages now if precompilation is enabled, so NOS
    * related failures surface at link time (see brw_shader_precompile).
    */
   if (brw->precompile && !brw_shader_precompile(ctx, shProg))
      return false;

   return true;
}