/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "brw_context.h"
25 #include "compiler/brw_nir.h"
26 #include "brw_program.h"
27 #include "compiler/glsl/gl_nir.h"
28 #include "compiler/glsl/gl_nir_linker.h"
29 #include "compiler/glsl/ir.h"
30 #include "compiler/glsl/ir_optimization.h"
31 #include "compiler/glsl/program.h"
32 #include "compiler/nir/nir_serialize.h"
33 #include "program/program.h"
34 #include "main/glspirv.h"
35 #include "main/mtypes.h"
36 #include "main/shaderapi.h"
37 #include "main/shaderobj.h"
38 #include "main/uniforms.h"
41 * Performs a compile of the shader stages even when we don't know
42 * what non-orthogonal state will be set, in the hope that it reflects
43 * the eventual NOS used, and thus allows us to produce link failures.
46 brw_shader_precompile(struct gl_context
*ctx
,
47 struct gl_shader_program
*sh_prog
)
49 struct gl_linked_shader
*vs
= sh_prog
->_LinkedShaders
[MESA_SHADER_VERTEX
];
50 struct gl_linked_shader
*tcs
= sh_prog
->_LinkedShaders
[MESA_SHADER_TESS_CTRL
];
51 struct gl_linked_shader
*tes
= sh_prog
->_LinkedShaders
[MESA_SHADER_TESS_EVAL
];
52 struct gl_linked_shader
*gs
= sh_prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
];
53 struct gl_linked_shader
*fs
= sh_prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
];
54 struct gl_linked_shader
*cs
= sh_prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
];
56 if (fs
&& !brw_fs_precompile(ctx
, fs
->Program
))
59 if (gs
&& !brw_gs_precompile(ctx
, gs
->Program
))
62 if (tes
&& !brw_tes_precompile(ctx
, sh_prog
, tes
->Program
))
65 if (tcs
&& !brw_tcs_precompile(ctx
, sh_prog
, tcs
->Program
))
68 if (vs
&& !brw_vs_precompile(ctx
, vs
->Program
))
71 if (cs
&& !brw_cs_precompile(ctx
, cs
->Program
))
78 brw_lower_packing_builtins(struct brw_context
*brw
,
81 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
83 /* Gens < 7 don't have instructions to convert to or from half-precision,
84 * and Gens < 6 don't expose that functionality.
86 if (devinfo
->gen
!= 6)
89 lower_packing_builtins(ir
, LOWER_PACK_HALF_2x16
| LOWER_UNPACK_HALF_2x16
);
93 process_glsl_ir(struct brw_context
*brw
,
94 struct gl_shader_program
*shader_prog
,
95 struct gl_linked_shader
*shader
)
97 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
98 struct gl_context
*ctx
= &brw
->ctx
;
100 /* Temporary memory context for any new IR. */
101 void *mem_ctx
= ralloc_context(NULL
);
103 ralloc_adopt(mem_ctx
, shader
->ir
);
105 lower_blend_equation_advanced(
106 shader
, ctx
->Extensions
.KHR_blend_equation_advanced_coherent
);
108 /* lower_packing_builtins() inserts arithmetic instructions, so it
109 * must precede lower_instructions().
111 brw_lower_packing_builtins(brw
, shader
->ir
);
112 do_mat_op_to_vec(shader
->ir
);
114 unsigned instructions_to_lower
= (DIV_TO_MUL_RCP
|
118 DFREXP_DLDEXP_TO_ARITH
);
119 if (devinfo
->gen
< 7) {
120 instructions_to_lower
|= BIT_COUNT_TO_MATH
|
126 lower_instructions(shader
->ir
, instructions_to_lower
);
128 /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
129 * if-statements need to be flattened.
131 if (devinfo
->gen
< 6)
132 lower_if_to_cond_assign(shader
->Stage
, shader
->ir
, 16);
134 do_lower_texture_projection(shader
->ir
);
135 do_vec_index_to_cond_assign(shader
->ir
);
136 lower_vector_insert(shader
->ir
, true);
137 lower_offset_arrays(shader
->ir
);
138 lower_noise(shader
->ir
);
139 lower_quadop_vector(shader
->ir
, false);
141 validate_ir_tree(shader
->ir
);
143 /* Now that we've finished altering the linked IR, reparent any live IR back
144 * to the permanent memory context, and free the temporary one (discarding any
145 * junk we optimized away).
147 reparent_ir(shader
->ir
, shader
->ir
);
148 ralloc_free(mem_ctx
);
150 if (ctx
->_Shader
->Flags
& GLSL_DUMP
) {
151 fprintf(stderr
, "\n");
153 fprintf(stderr
, "GLSL IR for linked %s program %d:\n",
154 _mesa_shader_stage_to_string(shader
->Stage
),
156 _mesa_print_ir(stderr
, shader
->ir
, NULL
);
158 fprintf(stderr
, "No GLSL IR for linked %s program %d (shader may be "
159 "from cache)\n", _mesa_shader_stage_to_string(shader
->Stage
),
162 fprintf(stderr
, "\n");
167 unify_interfaces(struct shader_info
**infos
)
169 struct shader_info
*prev_info
= NULL
;
171 for (unsigned i
= MESA_SHADER_VERTEX
; i
< MESA_SHADER_FRAGMENT
; i
++) {
176 prev_info
->outputs_written
|= infos
[i
]->inputs_read
&
177 ~(VARYING_BIT_TESS_LEVEL_INNER
| VARYING_BIT_TESS_LEVEL_OUTER
);
178 infos
[i
]->inputs_read
|= prev_info
->outputs_written
&
179 ~(VARYING_BIT_TESS_LEVEL_INNER
| VARYING_BIT_TESS_LEVEL_OUTER
);
181 prev_info
->patch_outputs_written
|= infos
[i
]->patch_inputs_read
;
182 infos
[i
]->patch_inputs_read
|= prev_info
->patch_outputs_written
;
184 prev_info
= infos
[i
];
189 update_xfb_info(struct gl_transform_feedback_info
*xfb_info
,
190 struct shader_info
*info
)
195 for (unsigned i
= 0; i
< xfb_info
->NumOutputs
; i
++) {
196 struct gl_transform_feedback_output
*output
= &xfb_info
->Outputs
[i
];
198 /* The VUE header contains three scalar fields packed together:
199 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
200 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
201 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
203 switch (output
->OutputRegister
) {
204 case VARYING_SLOT_LAYER
:
205 assert(output
->NumComponents
== 1);
206 output
->OutputRegister
= VARYING_SLOT_PSIZ
;
207 output
->ComponentOffset
= 1;
209 case VARYING_SLOT_VIEWPORT
:
210 assert(output
->NumComponents
== 1);
211 output
->OutputRegister
= VARYING_SLOT_PSIZ
;
212 output
->ComponentOffset
= 2;
214 case VARYING_SLOT_PSIZ
:
215 assert(output
->NumComponents
== 1);
216 output
->ComponentOffset
= 3;
220 info
->outputs_written
|= 1ull << output
->OutputRegister
;
225 brw_link_shader(struct gl_context
*ctx
, struct gl_shader_program
*shProg
)
227 struct brw_context
*brw
= brw_context(ctx
);
228 const struct brw_compiler
*compiler
= brw
->screen
->compiler
;
230 struct shader_info
*infos
[MESA_SHADER_STAGES
] = { 0, };
232 if (shProg
->data
->LinkStatus
== LINKING_SKIPPED
)
235 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
236 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
240 struct gl_program
*prog
= shader
->Program
;
241 prog
->Parameters
= _mesa_new_parameter_list();
243 if (!shader
->spirv_data
)
244 process_glsl_ir(brw
, shProg
, shader
);
246 _mesa_copy_linked_program_data(shProg
, shader
);
248 prog
->ShadowSamplers
= shader
->shadow_samplers
;
251 (INTEL_DEBUG
& intel_debug_flag_for_shader_stage(shader
->Stage
));
253 if (debug_enabled
&& shader
->ir
) {
254 fprintf(stderr
, "GLSL IR for native %s shader %d:\n",
255 _mesa_shader_stage_to_string(shader
->Stage
), shProg
->Name
);
256 _mesa_print_ir(stderr
, shader
->ir
, NULL
);
257 fprintf(stderr
, "\n\n");
260 prog
->nir
= brw_create_nir(brw
, shProg
, prog
, (gl_shader_stage
) stage
,
261 compiler
->scalar_stage
[stage
]);
264 /* SPIR-V programs use a NIR linker */
265 if (shProg
->data
->spirv
) {
266 if (!gl_nir_link_uniforms(ctx
, shProg
))
269 gl_nir_link_assign_atomic_counter_resources(ctx
, shProg
);
272 /* Determine first and last stage. */
273 unsigned first
= MESA_SHADER_STAGES
;
275 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
276 if (!shProg
->_LinkedShaders
[i
])
278 if (first
== MESA_SHADER_STAGES
)
283 /* Linking the stages in the opposite order (from fragment to vertex)
284 * ensures that inter-shader outputs written to in an earlier stage
285 * are eliminated if they are (transitively) not used in a later
288 * TODO: Look into Shadow of Mordor regressions on HSW and enable this for
289 * all platforms. See: https://bugs.freedesktop.org/show_bug.cgi?id=103537
291 if (first
!= last
&& brw
->screen
->devinfo
.gen
>= 8) {
293 for (int i
= next
- 1; i
>= 0; i
--) {
294 if (shProg
->_LinkedShaders
[i
] == NULL
)
297 brw_nir_link_shaders(compiler
,
298 &shProg
->_LinkedShaders
[i
]->Program
->nir
,
299 &shProg
->_LinkedShaders
[next
]->Program
->nir
);
304 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
305 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
309 struct gl_program
*prog
= shader
->Program
;
311 _mesa_update_shader_textures_used(shProg
, prog
);
313 brw_shader_gather_info(prog
->nir
, prog
);
315 NIR_PASS_V(prog
->nir
, gl_nir_lower_samplers
, shProg
);
316 NIR_PASS_V(prog
->nir
, gl_nir_lower_atomics
, shProg
, false);
317 NIR_PASS_V(prog
->nir
, nir_lower_atomics_to_ssbo
,
318 prog
->nir
->info
.num_abos
);
320 infos
[stage
] = &prog
->nir
->info
;
322 update_xfb_info(prog
->sh
.LinkedTransformFeedback
, infos
[stage
]);
324 /* Make a pass over the IR to add state references for any built-in
325 * uniforms that are used. This has to be done now (during linking).
326 * Code generation doesn't happen until the first time this shader is
327 * used for rendering. Waiting until then to generate the parameters is
328 * too late. At that point, the values for the built-in uniforms won't
329 * get sent to the shader.
331 nir_foreach_variable(var
, &prog
->nir
->uniforms
) {
332 const nir_state_slot
*const slots
= var
->state_slots
;
333 for (unsigned int i
= 0; i
< var
->num_state_slots
; i
++) {
334 assert(slots
!= NULL
);
335 _mesa_add_state_reference(prog
->Parameters
, slots
[i
].tokens
);
340 /* The linker tries to dead code eliminate unused varying components,
341 * and make sure interfaces match. But it isn't able to do so in all
342 * cases. So, explicitly make the interfaces match by OR'ing together
343 * the inputs_read/outputs_written bitfields of adjacent stages.
345 if (!shProg
->SeparateShader
)
346 unify_interfaces(infos
);
348 if ((ctx
->_Shader
->Flags
& GLSL_DUMP
) && shProg
->Name
!= 0) {
349 for (unsigned i
= 0; i
< shProg
->NumShaders
; i
++) {
350 const struct gl_shader
*sh
= shProg
->Shaders
[i
];
354 fprintf(stderr
, "GLSL %s shader %d source for linked program %d:\n",
355 _mesa_shader_stage_to_string(sh
->Stage
),
357 fprintf(stderr
, "%s", sh
->Source
);
358 fprintf(stderr
, "\n");
362 if (brw
->ctx
.Cache
) {
363 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
364 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
368 struct gl_program
*prog
= shader
->Program
;
369 brw_program_serialize_nir(ctx
, prog
);
373 if (brw
->precompile
&& !brw_shader_precompile(ctx
, shProg
))
376 /* SPIR-V programs build its resource list from linked NIR shaders. */
377 if (!shProg
->data
->spirv
)
378 build_program_resource_list(ctx
, shProg
);
380 nir_build_program_resource_list(ctx
, shProg
);
382 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
383 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
387 /* The GLSL IR won't be needed anymore. */
388 ralloc_free(shader
->ir
);