/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdio.h>

#include "brw_context.h"
#include "compiler/brw_nir.h"
#include "brw_program.h"
#include "compiler/glsl/gl_nir.h"
#include "compiler/glsl/ir.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/program.h"
#include "compiler/nir/nir_serialize.h"
#include "program/program.h"
#include "main/mtypes.h"
#include "main/shaderapi.h"
#include "main/shaderobj.h"
#include "main/uniforms.h"
39 * Performs a compile of the shader stages even when we don't know
40 * what non-orthogonal state will be set, in the hope that it reflects
41 * the eventual NOS used, and thus allows us to produce link failures.
44 brw_shader_precompile(struct gl_context
*ctx
,
45 struct gl_shader_program
*sh_prog
)
47 struct gl_linked_shader
*vs
= sh_prog
->_LinkedShaders
[MESA_SHADER_VERTEX
];
48 struct gl_linked_shader
*tcs
= sh_prog
->_LinkedShaders
[MESA_SHADER_TESS_CTRL
];
49 struct gl_linked_shader
*tes
= sh_prog
->_LinkedShaders
[MESA_SHADER_TESS_EVAL
];
50 struct gl_linked_shader
*gs
= sh_prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
];
51 struct gl_linked_shader
*fs
= sh_prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
];
52 struct gl_linked_shader
*cs
= sh_prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
];
54 if (fs
&& !brw_fs_precompile(ctx
, fs
->Program
))
57 if (gs
&& !brw_gs_precompile(ctx
, gs
->Program
))
60 if (tes
&& !brw_tes_precompile(ctx
, sh_prog
, tes
->Program
))
63 if (tcs
&& !brw_tcs_precompile(ctx
, sh_prog
, tcs
->Program
))
66 if (vs
&& !brw_vs_precompile(ctx
, vs
->Program
))
69 if (cs
&& !brw_cs_precompile(ctx
, cs
->Program
))
76 brw_lower_packing_builtins(struct brw_context
*brw
,
79 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
81 /* Gens < 7 don't have instructions to convert to or from half-precision,
82 * and Gens < 6 don't expose that functionality.
84 if (devinfo
->gen
!= 6)
87 lower_packing_builtins(ir
, LOWER_PACK_HALF_2x16
| LOWER_UNPACK_HALF_2x16
);
91 process_glsl_ir(struct brw_context
*brw
,
92 struct gl_shader_program
*shader_prog
,
93 struct gl_linked_shader
*shader
)
95 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
96 struct gl_context
*ctx
= &brw
->ctx
;
98 /* Temporary memory context for any new IR. */
99 void *mem_ctx
= ralloc_context(NULL
);
101 ralloc_adopt(mem_ctx
, shader
->ir
);
103 lower_blend_equation_advanced(
104 shader
, ctx
->Extensions
.KHR_blend_equation_advanced_coherent
);
106 /* lower_packing_builtins() inserts arithmetic instructions, so it
107 * must precede lower_instructions().
109 brw_lower_packing_builtins(brw
, shader
->ir
);
110 do_mat_op_to_vec(shader
->ir
);
112 unsigned instructions_to_lower
= (DIV_TO_MUL_RCP
|
116 DFREXP_DLDEXP_TO_ARITH
);
117 if (devinfo
->gen
< 7) {
118 instructions_to_lower
|= BIT_COUNT_TO_MATH
|
124 lower_instructions(shader
->ir
, instructions_to_lower
);
126 /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
127 * if-statements need to be flattened.
129 if (devinfo
->gen
< 6)
130 lower_if_to_cond_assign(shader
->Stage
, shader
->ir
, 16);
132 do_lower_texture_projection(shader
->ir
);
133 do_vec_index_to_cond_assign(shader
->ir
);
134 lower_vector_insert(shader
->ir
, true);
135 lower_offset_arrays(shader
->ir
);
136 lower_noise(shader
->ir
);
137 lower_quadop_vector(shader
->ir
, false);
139 validate_ir_tree(shader
->ir
);
141 /* Now that we've finished altering the linked IR, reparent any live IR back
142 * to the permanent memory context, and free the temporary one (discarding any
143 * junk we optimized away).
145 reparent_ir(shader
->ir
, shader
->ir
);
146 ralloc_free(mem_ctx
);
148 if (ctx
->_Shader
->Flags
& GLSL_DUMP
) {
149 fprintf(stderr
, "\n");
151 fprintf(stderr
, "GLSL IR for linked %s program %d:\n",
152 _mesa_shader_stage_to_string(shader
->Stage
),
154 _mesa_print_ir(stderr
, shader
->ir
, NULL
);
156 fprintf(stderr
, "No GLSL IR for linked %s program %d (shader may be "
157 "from cache)\n", _mesa_shader_stage_to_string(shader
->Stage
),
160 fprintf(stderr
, "\n");
165 unify_interfaces(struct shader_info
**infos
)
167 struct shader_info
*prev_info
= NULL
;
169 for (unsigned i
= MESA_SHADER_VERTEX
; i
< MESA_SHADER_FRAGMENT
; i
++) {
174 prev_info
->outputs_written
|= infos
[i
]->inputs_read
&
175 ~(VARYING_BIT_TESS_LEVEL_INNER
| VARYING_BIT_TESS_LEVEL_OUTER
);
176 infos
[i
]->inputs_read
|= prev_info
->outputs_written
&
177 ~(VARYING_BIT_TESS_LEVEL_INNER
| VARYING_BIT_TESS_LEVEL_OUTER
);
179 prev_info
->patch_outputs_written
|= infos
[i
]->patch_inputs_read
;
180 infos
[i
]->patch_inputs_read
|= prev_info
->patch_outputs_written
;
182 prev_info
= infos
[i
];
187 update_xfb_info(struct gl_transform_feedback_info
*xfb_info
,
188 struct shader_info
*info
)
193 for (unsigned i
= 0; i
< xfb_info
->NumOutputs
; i
++) {
194 struct gl_transform_feedback_output
*output
= &xfb_info
->Outputs
[i
];
196 /* The VUE header contains three scalar fields packed together:
197 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
198 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
199 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
201 switch (output
->OutputRegister
) {
202 case VARYING_SLOT_LAYER
:
203 assert(output
->NumComponents
== 1);
204 output
->OutputRegister
= VARYING_SLOT_PSIZ
;
205 output
->ComponentOffset
= 1;
207 case VARYING_SLOT_VIEWPORT
:
208 assert(output
->NumComponents
== 1);
209 output
->OutputRegister
= VARYING_SLOT_PSIZ
;
210 output
->ComponentOffset
= 2;
212 case VARYING_SLOT_PSIZ
:
213 assert(output
->NumComponents
== 1);
214 output
->ComponentOffset
= 3;
218 info
->outputs_written
|= 1ull << output
->OutputRegister
;
223 brw_link_shader(struct gl_context
*ctx
, struct gl_shader_program
*shProg
)
225 struct brw_context
*brw
= brw_context(ctx
);
226 const struct brw_compiler
*compiler
= brw
->screen
->compiler
;
228 struct shader_info
*infos
[MESA_SHADER_STAGES
] = { 0, };
230 if (shProg
->data
->LinkStatus
== LINKING_SKIPPED
)
233 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
234 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
238 struct gl_program
*prog
= shader
->Program
;
239 prog
->Parameters
= _mesa_new_parameter_list();
241 if (!shader
->spirv_data
)
242 process_glsl_ir(brw
, shProg
, shader
);
244 _mesa_copy_linked_program_data(shProg
, shader
);
246 prog
->ShadowSamplers
= shader
->shadow_samplers
;
247 _mesa_update_shader_textures_used(shProg
, prog
);
250 (INTEL_DEBUG
& intel_debug_flag_for_shader_stage(shader
->Stage
));
252 if (debug_enabled
&& shader
->ir
) {
253 fprintf(stderr
, "GLSL IR for native %s shader %d:\n",
254 _mesa_shader_stage_to_string(shader
->Stage
), shProg
->Name
);
255 _mesa_print_ir(stderr
, shader
->ir
, NULL
);
256 fprintf(stderr
, "\n\n");
259 prog
->nir
= brw_create_nir(brw
, shProg
, prog
, (gl_shader_stage
) stage
,
260 compiler
->scalar_stage
[stage
]);
263 /* Determine first and last stage. */
264 unsigned first
= MESA_SHADER_STAGES
;
266 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
267 if (!shProg
->_LinkedShaders
[i
])
269 if (first
== MESA_SHADER_STAGES
)
274 /* Linking the stages in the opposite order (from fragment to vertex)
275 * ensures that inter-shader outputs written to in an earlier stage
276 * are eliminated if they are (transitively) not used in a later
279 * TODO: Look into Shadow of Mordor regressions on HSW and enable this for
280 * all platforms. See: https://bugs.freedesktop.org/show_bug.cgi?id=103537
282 if (first
!= last
&& brw
->screen
->devinfo
.gen
>= 8) {
284 for (int i
= next
- 1; i
>= 0; i
--) {
285 if (shProg
->_LinkedShaders
[i
] == NULL
)
288 brw_nir_link_shaders(compiler
,
289 &shProg
->_LinkedShaders
[i
]->Program
->nir
,
290 &shProg
->_LinkedShaders
[next
]->Program
->nir
);
295 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
296 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
300 struct gl_program
*prog
= shader
->Program
;
301 brw_shader_gather_info(prog
->nir
, prog
);
303 NIR_PASS_V(prog
->nir
, gl_nir_lower_samplers
, shProg
);
304 NIR_PASS_V(prog
->nir
, gl_nir_lower_atomics
, shProg
, false);
305 NIR_PASS_V(prog
->nir
, nir_lower_atomics_to_ssbo
,
306 prog
->nir
->info
.num_abos
);
308 infos
[stage
] = &prog
->nir
->info
;
310 update_xfb_info(prog
->sh
.LinkedTransformFeedback
, infos
[stage
]);
312 /* Make a pass over the IR to add state references for any built-in
313 * uniforms that are used. This has to be done now (during linking).
314 * Code generation doesn't happen until the first time this shader is
315 * used for rendering. Waiting until then to generate the parameters is
316 * too late. At that point, the values for the built-in uniforms won't
317 * get sent to the shader.
319 nir_foreach_variable(var
, &prog
->nir
->uniforms
) {
320 if (strncmp(var
->name
, "gl_", 3) == 0) {
321 const nir_state_slot
*const slots
= var
->state_slots
;
322 assert(var
->state_slots
!= NULL
);
324 for (unsigned int i
= 0; i
< var
->num_state_slots
; i
++) {
325 _mesa_add_state_reference(prog
->Parameters
, slots
[i
].tokens
);
331 /* The linker tries to dead code eliminate unused varying components,
332 * and make sure interfaces match. But it isn't able to do so in all
333 * cases. So, explicitly make the interfaces match by OR'ing together
334 * the inputs_read/outputs_written bitfields of adjacent stages.
336 if (!shProg
->SeparateShader
)
337 unify_interfaces(infos
);
339 if ((ctx
->_Shader
->Flags
& GLSL_DUMP
) && shProg
->Name
!= 0) {
340 for (unsigned i
= 0; i
< shProg
->NumShaders
; i
++) {
341 const struct gl_shader
*sh
= shProg
->Shaders
[i
];
345 fprintf(stderr
, "GLSL %s shader %d source for linked program %d:\n",
346 _mesa_shader_stage_to_string(sh
->Stage
),
348 fprintf(stderr
, "%s", sh
->Source
);
349 fprintf(stderr
, "\n");
353 if (brw
->ctx
.Cache
) {
354 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
355 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
359 struct gl_program
*prog
= shader
->Program
;
360 brw_program_serialize_nir(ctx
, prog
);
364 if (brw
->precompile
&& !brw_shader_precompile(ctx
, shProg
))
367 build_program_resource_list(ctx
, shProg
);
369 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
370 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
374 /* The GLSL IR won't be needed anymore. */
375 ralloc_free(shader
->ir
);