/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "brw_context.h"
25 #include "compiler/brw_nir.h"
26 #include "brw_program.h"
27 #include "compiler/glsl/ir.h"
28 #include "compiler/glsl/ir_optimization.h"
29 #include "compiler/glsl/program.h"
30 #include "program/program.h"
31 #include "main/mtypes.h"
32 #include "main/shaderapi.h"
33 #include "main/shaderobj.h"
34 #include "main/uniforms.h"
37 * Performs a compile of the shader stages even when we don't know
38 * what non-orthogonal state will be set, in the hope that it reflects
39 * the eventual NOS used, and thus allows us to produce link failures.
42 brw_shader_precompile(struct gl_context
*ctx
,
43 struct gl_shader_program
*sh_prog
)
45 struct gl_linked_shader
*vs
= sh_prog
->_LinkedShaders
[MESA_SHADER_VERTEX
];
46 struct gl_linked_shader
*tcs
= sh_prog
->_LinkedShaders
[MESA_SHADER_TESS_CTRL
];
47 struct gl_linked_shader
*tes
= sh_prog
->_LinkedShaders
[MESA_SHADER_TESS_EVAL
];
48 struct gl_linked_shader
*gs
= sh_prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
];
49 struct gl_linked_shader
*fs
= sh_prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
];
50 struct gl_linked_shader
*cs
= sh_prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
];
52 if (fs
&& !brw_fs_precompile(ctx
, fs
->Program
))
55 if (gs
&& !brw_gs_precompile(ctx
, gs
->Program
))
58 if (tes
&& !brw_tes_precompile(ctx
, sh_prog
, tes
->Program
))
61 if (tcs
&& !brw_tcs_precompile(ctx
, sh_prog
, tcs
->Program
))
64 if (vs
&& !brw_vs_precompile(ctx
, vs
->Program
))
67 if (cs
&& !brw_cs_precompile(ctx
, cs
->Program
))
74 brw_lower_packing_builtins(struct brw_context
*brw
,
77 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
79 /* Gens < 7 don't have instructions to convert to or from half-precision,
80 * and Gens < 6 don't expose that functionality.
82 if (devinfo
->gen
!= 6)
85 lower_packing_builtins(ir
, LOWER_PACK_HALF_2x16
| LOWER_UNPACK_HALF_2x16
);
89 process_glsl_ir(struct brw_context
*brw
,
90 struct gl_shader_program
*shader_prog
,
91 struct gl_linked_shader
*shader
)
93 const struct gen_device_info
*devinfo
= &brw
->screen
->devinfo
;
94 struct gl_context
*ctx
= &brw
->ctx
;
96 /* Temporary memory context for any new IR. */
97 void *mem_ctx
= ralloc_context(NULL
);
99 ralloc_adopt(mem_ctx
, shader
->ir
);
101 lower_blend_equation_advanced(shader
);
103 /* lower_packing_builtins() inserts arithmetic instructions, so it
104 * must precede lower_instructions().
106 brw_lower_packing_builtins(brw
, shader
->ir
);
107 do_mat_op_to_vec(shader
->ir
);
109 unsigned instructions_to_lower
= (DIV_TO_MUL_RCP
|
113 DFREXP_DLDEXP_TO_ARITH
);
114 if (devinfo
->gen
< 7) {
115 instructions_to_lower
|= BIT_COUNT_TO_MATH
|
121 lower_instructions(shader
->ir
, instructions_to_lower
);
123 /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
124 * if-statements need to be flattened.
126 if (devinfo
->gen
< 6)
127 lower_if_to_cond_assign(shader
->Stage
, shader
->ir
, 16);
129 do_lower_texture_projection(shader
->ir
);
130 do_vec_index_to_cond_assign(shader
->ir
);
131 lower_vector_insert(shader
->ir
, true);
132 lower_offset_arrays(shader
->ir
);
133 lower_noise(shader
->ir
);
134 lower_quadop_vector(shader
->ir
, false);
136 validate_ir_tree(shader
->ir
);
138 /* Now that we've finished altering the linked IR, reparent any live IR back
139 * to the permanent memory context, and free the temporary one (discarding any
140 * junk we optimized away).
142 reparent_ir(shader
->ir
, shader
->ir
);
143 ralloc_free(mem_ctx
);
145 if (ctx
->_Shader
->Flags
& GLSL_DUMP
) {
146 fprintf(stderr
, "\n");
148 fprintf(stderr
, "GLSL IR for linked %s program %d:\n",
149 _mesa_shader_stage_to_string(shader
->Stage
),
151 _mesa_print_ir(stderr
, shader
->ir
, NULL
);
153 fprintf(stderr
, "No GLSL IR for linked %s program %d (shader may be "
154 "from cache)\n", _mesa_shader_stage_to_string(shader
->Stage
),
157 fprintf(stderr
, "\n");
162 unify_interfaces(struct shader_info
**infos
)
164 struct shader_info
*prev_info
= NULL
;
166 for (unsigned i
= MESA_SHADER_VERTEX
; i
< MESA_SHADER_FRAGMENT
; i
++) {
171 prev_info
->outputs_written
|= infos
[i
]->inputs_read
&
172 ~(VARYING_BIT_TESS_LEVEL_INNER
| VARYING_BIT_TESS_LEVEL_OUTER
);
173 infos
[i
]->inputs_read
|= prev_info
->outputs_written
&
174 ~(VARYING_BIT_TESS_LEVEL_INNER
| VARYING_BIT_TESS_LEVEL_OUTER
);
176 prev_info
->patch_outputs_written
|= infos
[i
]->patch_inputs_read
;
177 infos
[i
]->patch_inputs_read
|= prev_info
->patch_outputs_written
;
179 prev_info
= infos
[i
];
184 update_xfb_info(struct gl_transform_feedback_info
*xfb_info
)
189 for (unsigned i
= 0; i
< xfb_info
->NumOutputs
; i
++) {
190 struct gl_transform_feedback_output
*output
= &xfb_info
->Outputs
[i
];
192 /* The VUE header contains three scalar fields packed together:
193 * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
194 * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
195 * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
197 switch (output
->OutputRegister
) {
198 case VARYING_SLOT_LAYER
:
199 assert(output
->NumComponents
== 1);
200 output
->OutputRegister
= VARYING_SLOT_PSIZ
;
201 output
->ComponentOffset
= 1;
203 case VARYING_SLOT_VIEWPORT
:
204 assert(output
->NumComponents
== 1);
205 output
->OutputRegister
= VARYING_SLOT_PSIZ
;
206 output
->ComponentOffset
= 2;
208 case VARYING_SLOT_PSIZ
:
209 assert(output
->NumComponents
== 1);
210 output
->ComponentOffset
= 3;
217 brw_link_shader(struct gl_context
*ctx
, struct gl_shader_program
*shProg
)
219 struct brw_context
*brw
= brw_context(ctx
);
220 const struct brw_compiler
*compiler
= brw
->screen
->compiler
;
222 struct shader_info
*infos
[MESA_SHADER_STAGES
] = { 0, };
224 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
225 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
229 struct gl_program
*prog
= shader
->Program
;
230 prog
->Parameters
= _mesa_new_parameter_list();
232 process_glsl_ir(brw
, shProg
, shader
);
234 _mesa_copy_linked_program_data(shProg
, shader
);
236 prog
->ShadowSamplers
= shader
->shadow_samplers
;
237 _mesa_update_shader_textures_used(shProg
, prog
);
239 update_xfb_info(prog
->sh
.LinkedTransformFeedback
);
242 (INTEL_DEBUG
& intel_debug_flag_for_shader_stage(shader
->Stage
));
244 if (debug_enabled
&& shader
->ir
) {
245 fprintf(stderr
, "GLSL IR for native %s shader %d:\n",
246 _mesa_shader_stage_to_string(shader
->Stage
), shProg
->Name
);
247 _mesa_print_ir(stderr
, shader
->ir
, NULL
);
248 fprintf(stderr
, "\n\n");
251 prog
->nir
= brw_create_nir(brw
, shProg
, prog
, (gl_shader_stage
) stage
,
252 compiler
->scalar_stage
[stage
]);
253 infos
[stage
] = &prog
->nir
->info
;
255 /* Make a pass over the IR to add state references for any built-in
256 * uniforms that are used. This has to be done now (during linking).
257 * Code generation doesn't happen until the first time this shader is
258 * used for rendering. Waiting until then to generate the parameters is
259 * too late. At that point, the values for the built-in uniforms won't
260 * get sent to the shader.
262 nir_foreach_variable(var
, &prog
->nir
->uniforms
) {
263 if (strncmp(var
->name
, "gl_", 3) == 0) {
264 const nir_state_slot
*const slots
= var
->state_slots
;
265 assert(var
->state_slots
!= NULL
);
267 for (unsigned int i
= 0; i
< var
->num_state_slots
; i
++) {
268 _mesa_add_state_reference(prog
->Parameters
,
269 (gl_state_index
*)slots
[i
].tokens
);
275 /* The linker tries to dead code eliminate unused varying components,
276 * and make sure interfaces match. But it isn't able to do so in all
277 * cases. So, explicitly make the interfaces match by OR'ing together
278 * the inputs_read/outputs_written bitfields of adjacent stages.
280 if (!shProg
->SeparateShader
)
281 unify_interfaces(infos
);
283 if ((ctx
->_Shader
->Flags
& GLSL_DUMP
) && shProg
->Name
!= 0) {
284 for (unsigned i
= 0; i
< shProg
->NumShaders
; i
++) {
285 const struct gl_shader
*sh
= shProg
->Shaders
[i
];
289 fprintf(stderr
, "GLSL %s shader %d source for linked program %d:\n",
290 _mesa_shader_stage_to_string(sh
->Stage
),
292 fprintf(stderr
, "%s", sh
->Source
);
293 fprintf(stderr
, "\n");
297 if (brw
->precompile
&& !brw_shader_precompile(ctx
, shProg
))
300 build_program_resource_list(ctx
, shProg
);
302 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
303 struct gl_linked_shader
*shader
= shProg
->_LinkedShaders
[stage
];
307 /* The GLSL IR won't be needed anymore. */
308 ralloc_free(shader
->ir
);