/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "brw_context.h"
25 #include "compiler/brw_nir.h"
26 #include "brw_program.h"
27 #include "compiler/glsl/ir.h"
28 #include "compiler/glsl/ir_optimization.h"
29 #include "compiler/glsl/program.h"
30 #include "program/program.h"
31 #include "main/shaderapi.h"
32 #include "main/shaderobj.h"
33 #include "main/uniforms.h"
36 * Performs a compile of the shader stages even when we don't know
37 * what non-orthogonal state will be set, in the hope that it reflects
38 * the eventual NOS used, and thus allows us to produce link failures.
41 brw_shader_precompile(struct gl_context
*ctx
,
42 struct gl_shader_program
*sh_prog
)
44 struct gl_linked_shader
*vs
= sh_prog
->_LinkedShaders
[MESA_SHADER_VERTEX
];
45 struct gl_linked_shader
*tcs
= sh_prog
->_LinkedShaders
[MESA_SHADER_TESS_CTRL
];
46 struct gl_linked_shader
*tes
= sh_prog
->_LinkedShaders
[MESA_SHADER_TESS_EVAL
];
47 struct gl_linked_shader
*gs
= sh_prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
];
48 struct gl_linked_shader
*fs
= sh_prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
];
49 struct gl_linked_shader
*cs
= sh_prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
];
51 if (fs
&& !brw_fs_precompile(ctx
, fs
->Program
))
54 if (gs
&& !brw_gs_precompile(ctx
, gs
->Program
))
57 if (tes
&& !brw_tes_precompile(ctx
, sh_prog
, tes
->Program
))
60 if (tcs
&& !brw_tcs_precompile(ctx
, sh_prog
, tcs
->Program
))
63 if (vs
&& !brw_vs_precompile(ctx
, vs
->Program
))
66 if (cs
&& !brw_cs_precompile(ctx
, cs
->Program
))
73 brw_lower_packing_builtins(struct brw_context
*brw
,
76 /* Gens < 7 don't have instructions to convert to or from half-precision,
77 * and Gens < 6 don't expose that functionality.
82 lower_packing_builtins(ir
, LOWER_PACK_HALF_2x16
| LOWER_UNPACK_HALF_2x16
);
86 process_glsl_ir(struct brw_context
*brw
,
87 struct gl_shader_program
*shader_prog
,
88 struct gl_linked_shader
*shader
)
90 struct gl_context
*ctx
= &brw
->ctx
;
91 const struct brw_compiler
*compiler
= brw
->screen
->compiler
;
92 const struct gl_shader_compiler_options
*options
=
93 &ctx
->Const
.ShaderCompilerOptions
[shader
->Stage
];
95 /* Temporary memory context for any new IR. */
96 void *mem_ctx
= ralloc_context(NULL
);
98 ralloc_adopt(mem_ctx
, shader
->ir
);
100 lower_blend_equation_advanced(shader
);
102 /* lower_packing_builtins() inserts arithmetic instructions, so it
103 * must precede lower_instructions().
105 brw_lower_packing_builtins(brw
, shader
->ir
);
106 do_mat_op_to_vec(shader
->ir
);
108 unsigned instructions_to_lower
= (DIV_TO_MUL_RCP
|
112 DFREXP_DLDEXP_TO_ARITH
);
114 instructions_to_lower
|= BIT_COUNT_TO_MATH
|
120 lower_instructions(shader
->ir
, instructions_to_lower
);
122 /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
123 * if-statements need to be flattened.
126 lower_if_to_cond_assign(shader
->Stage
, shader
->ir
, 16);
128 do_lower_texture_projection(shader
->ir
);
129 do_vec_index_to_cond_assign(shader
->ir
);
130 lower_vector_insert(shader
->ir
, true);
131 lower_offset_arrays(shader
->ir
);
132 lower_noise(shader
->ir
);
133 lower_quadop_vector(shader
->ir
, false);
139 if (compiler
->scalar_stage
[shader
->Stage
]) {
140 if (shader
->Stage
== MESA_SHADER_VERTEX
||
141 shader
->Stage
== MESA_SHADER_FRAGMENT
)
142 brw_do_channel_expressions(shader
->ir
);
143 brw_do_vector_splitting(shader
->ir
);
146 progress
= do_common_optimization(shader
->ir
, true, true,
147 options
, ctx
->Const
.NativeIntegers
) || progress
;
150 validate_ir_tree(shader
->ir
);
152 /* Now that we've finished altering the linked IR, reparent any live IR back
153 * to the permanent memory context, and free the temporary one (discarding any
154 * junk we optimized away).
156 reparent_ir(shader
->ir
, shader
->ir
);
157 ralloc_free(mem_ctx
);
159 if (ctx
->_Shader
->Flags
& GLSL_DUMP
) {
160 fprintf(stderr
, "\n");
162 fprintf(stderr
, "GLSL IR for linked %s program %d:\n",
163 _mesa_shader_stage_to_string(shader
->Stage
),
165 _mesa_print_ir(stderr
, shader
->ir
, NULL
);
167 fprintf(stderr
, "No GLSL IR for linked %s program %d (shader may be "
168 "from cache)\n", _mesa_shader_stage_to_string(shader
->Stage
),
171 fprintf(stderr
, "\n");
176 unify_interfaces(struct shader_info
**infos
)
178 struct shader_info
*prev_info
= NULL
;
180 for (unsigned i
= MESA_SHADER_VERTEX
; i
< MESA_SHADER_FRAGMENT
; i
++) {
185 prev_info
->outputs_written
|= infos
[i
]->inputs_read
&
186 ~(VARYING_BIT_TESS_LEVEL_INNER
| VARYING_BIT_TESS_LEVEL_OUTER
);
187 infos
[i
]->inputs_read
|= prev_info
->outputs_written
&
188 ~(VARYING_BIT_TESS_LEVEL_INNER
| VARYING_BIT_TESS_LEVEL_OUTER
);
190 prev_info
->patch_outputs_written
|= infos
[i
]->patch_inputs_read
;
191 infos
[i
]->patch_inputs_read
|= prev_info
->patch_outputs_written
;
193 prev_info
= infos
[i
];
198 brw_link_shader(struct gl_context
*ctx
, struct gl_shader_program
*shProg
)
200 struct brw_context
*brw
= brw_context(ctx
);
201 const struct brw_compiler
*compiler
= brw
->screen
->compiler
;
203 struct shader_info
*infos
[MESA_SHADER_STAGES
] = { 0, };
205 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
206 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
210 struct gl_program
*prog
= shader
->Program
;
211 prog
->Parameters
= _mesa_new_parameter_list();
213 process_glsl_ir(brw
, shProg
, shader
);
215 _mesa_copy_linked_program_data(shProg
, shader
);
217 prog
->ShadowSamplers
= shader
->shadow_samplers
;
218 _mesa_update_shader_textures_used(shProg
, prog
);
221 (INTEL_DEBUG
& intel_debug_flag_for_shader_stage(shader
->Stage
));
223 if (debug_enabled
&& shader
->ir
) {
224 fprintf(stderr
, "GLSL IR for native %s shader %d:\n",
225 _mesa_shader_stage_to_string(shader
->Stage
), shProg
->Name
);
226 _mesa_print_ir(stderr
, shader
->ir
, NULL
);
227 fprintf(stderr
, "\n\n");
230 prog
->nir
= brw_create_nir(brw
, shProg
, prog
, (gl_shader_stage
) stage
,
231 compiler
->scalar_stage
[stage
]);
232 infos
[stage
] = prog
->nir
->info
;
234 /* Make a pass over the IR to add state references for any built-in
235 * uniforms that are used. This has to be done now (during linking).
236 * Code generation doesn't happen until the first time this shader is
237 * used for rendering. Waiting until then to generate the parameters is
238 * too late. At that point, the values for the built-in uniforms won't
239 * get sent to the shader.
241 nir_foreach_variable(var
, &prog
->nir
->uniforms
) {
242 if (strncmp(var
->name
, "gl_", 3) == 0) {
243 const nir_state_slot
*const slots
= var
->state_slots
;
244 assert(var
->state_slots
!= NULL
);
246 for (unsigned int i
= 0; i
< var
->num_state_slots
; i
++) {
247 _mesa_add_state_reference(prog
->Parameters
,
248 (gl_state_index
*)slots
[i
].tokens
);
254 /* The linker tries to dead code eliminate unused varying components,
255 * and make sure interfaces match. But it isn't able to do so in all
256 * cases. So, explicitly make the interfaces match by OR'ing together
257 * the inputs_read/outputs_written bitfields of adjacent stages.
259 if (!shProg
->SeparateShader
)
260 unify_interfaces(infos
);
262 if ((ctx
->_Shader
->Flags
& GLSL_DUMP
) && shProg
->Name
!= 0) {
263 for (unsigned i
= 0; i
< shProg
->NumShaders
; i
++) {
264 const struct gl_shader
*sh
= shProg
->Shaders
[i
];
268 fprintf(stderr
, "GLSL %s shader %d source for linked program %d:\n",
269 _mesa_shader_stage_to_string(sh
->Stage
),
271 fprintf(stderr
, "%s", sh
->Source
);
272 fprintf(stderr
, "\n");
276 if (brw
->precompile
&& !brw_shader_precompile(ctx
, shProg
))
279 build_program_resource_list(ctx
, shProg
);
281 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
282 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
286 /* The GLSL IR won't be needed anymore. */
287 ralloc_free(shader
->ir
);