/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "main/macros.h"
25 #include "brw_context.h"
31 #include "glsl/ir_optimization.h"
32 #include "glsl/glsl_parser_extras.h"
33 #include "main/shaderapi.h"
/**
 * Performs a compile of the shader stages even when we don't know
 * what non-orthogonal state will be set, in the hope that it reflects
 * the eventual NOS used, and thus allows us to produce link failures.
 */
41 brw_shader_precompile(struct gl_context
*ctx
,
42 struct gl_shader_program
*sh_prog
)
44 struct gl_shader
*vs
= sh_prog
->_LinkedShaders
[MESA_SHADER_VERTEX
];
45 struct gl_shader
*gs
= sh_prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
];
46 struct gl_shader
*fs
= sh_prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
];
47 struct gl_shader
*cs
= sh_prog
->_LinkedShaders
[MESA_SHADER_COMPUTE
];
49 if (fs
&& !brw_fs_precompile(ctx
, sh_prog
, fs
->Program
))
52 if (gs
&& !brw_gs_precompile(ctx
, sh_prog
, gs
->Program
))
55 if (vs
&& !brw_vs_precompile(ctx
, sh_prog
, vs
->Program
))
58 if (cs
&& !brw_cs_precompile(ctx
, sh_prog
, cs
->Program
))
65 brw_lower_packing_builtins(struct brw_context
*brw
,
66 gl_shader_stage shader_type
,
69 int ops
= LOWER_PACK_SNORM_2x16
70 | LOWER_UNPACK_SNORM_2x16
71 | LOWER_PACK_UNORM_2x16
72 | LOWER_UNPACK_UNORM_2x16
;
74 if (is_scalar_shader_stage(brw
->intelScreen
->compiler
, shader_type
)) {
75 ops
|= LOWER_UNPACK_UNORM_4x8
76 | LOWER_UNPACK_SNORM_4x8
77 | LOWER_PACK_UNORM_4x8
78 | LOWER_PACK_SNORM_4x8
;
82 /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
83 * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
84 * lowering is needed. For SOA code, the Half2x16 ops must be
87 if (is_scalar_shader_stage(brw
->intelScreen
->compiler
, shader_type
)) {
88 ops
|= LOWER_PACK_HALF_2x16_TO_SPLIT
89 | LOWER_UNPACK_HALF_2x16_TO_SPLIT
;
92 ops
|= LOWER_PACK_HALF_2x16
93 | LOWER_UNPACK_HALF_2x16
;
96 lower_packing_builtins(ir
, ops
);
100 process_glsl_ir(gl_shader_stage stage
,
101 struct brw_context
*brw
,
102 struct gl_shader_program
*shader_prog
,
103 struct gl_shader
*shader
)
105 struct gl_context
*ctx
= &brw
->ctx
;
106 const struct gl_shader_compiler_options
*options
=
107 &ctx
->Const
.ShaderCompilerOptions
[shader
->Stage
];
109 /* Temporary memory context for any new IR. */
110 void *mem_ctx
= ralloc_context(NULL
);
112 ralloc_adopt(mem_ctx
, shader
->ir
);
114 /* lower_packing_builtins() inserts arithmetic instructions, so it
115 * must precede lower_instructions().
117 brw_lower_packing_builtins(brw
, shader
->Stage
, shader
->ir
);
118 do_mat_op_to_vec(shader
->ir
);
119 const int bitfield_insert
= brw
->gen
>= 7 ? BITFIELD_INSERT_TO_BFM_BFI
: 0;
120 lower_instructions(shader
->ir
,
131 /* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
132 * if-statements need to be flattened.
135 lower_if_to_cond_assign(shader
->ir
, 16);
137 do_lower_texture_projection(shader
->ir
);
138 brw_lower_texture_gradients(brw
, shader
->ir
);
139 do_vec_index_to_cond_assign(shader
->ir
);
140 lower_vector_insert(shader
->ir
, true);
141 lower_offset_arrays(shader
->ir
);
142 brw_do_lower_unnormalized_offset(shader
->ir
);
143 lower_noise(shader
->ir
);
144 lower_quadop_vector(shader
->ir
, false);
146 bool lowered_variable_indexing
=
147 lower_variable_index_to_cond_assign((gl_shader_stage
)stage
,
149 options
->EmitNoIndirectInput
,
150 options
->EmitNoIndirectOutput
,
151 options
->EmitNoIndirectTemp
,
152 options
->EmitNoIndirectUniform
);
154 if (unlikely(brw
->perf_debug
&& lowered_variable_indexing
)) {
155 perf_debug("Unsupported form of variable indexing in %s; falling "
156 "back to very inefficient code generation\n",
157 _mesa_shader_stage_to_abbrev(shader
->Stage
));
164 if (is_scalar_shader_stage(brw
->intelScreen
->compiler
, shader
->Stage
)) {
165 brw_do_channel_expressions(shader
->ir
);
166 brw_do_vector_splitting(shader
->ir
);
169 progress
= do_lower_jumps(shader
->ir
, true, true,
170 true, /* main return */
171 false, /* continue */
175 progress
= do_common_optimization(shader
->ir
, true, true,
176 options
, ctx
->Const
.NativeIntegers
) || progress
;
179 validate_ir_tree(shader
->ir
);
181 /* Now that we've finished altering the linked IR, reparent any live IR back
182 * to the permanent memory context, and free the temporary one (discarding any
183 * junk we optimized away).
185 reparent_ir(shader
->ir
, shader
->ir
);
186 ralloc_free(mem_ctx
);
188 if (ctx
->_Shader
->Flags
& GLSL_DUMP
) {
189 fprintf(stderr
, "\n");
190 fprintf(stderr
, "GLSL IR for linked %s program %d:\n",
191 _mesa_shader_stage_to_string(shader
->Stage
),
193 _mesa_print_ir(stderr
, shader
->ir
, NULL
);
194 fprintf(stderr
, "\n");
199 brw_link_shader(struct gl_context
*ctx
, struct gl_shader_program
*shProg
)
201 struct brw_context
*brw
= brw_context(ctx
);
202 const struct brw_compiler
*compiler
= brw
->intelScreen
->compiler
;
205 for (stage
= 0; stage
< ARRAY_SIZE(shProg
->_LinkedShaders
); stage
++) {
206 struct gl_shader
*shader
= shProg
->_LinkedShaders
[stage
];
210 struct gl_program
*prog
=
211 ctx
->Driver
.NewProgram(ctx
, _mesa_shader_stage_to_program(stage
),
215 prog
->Parameters
= _mesa_new_parameter_list();
217 _mesa_copy_linked_program_data((gl_shader_stage
) stage
, shProg
, prog
);
219 process_glsl_ir((gl_shader_stage
) stage
, brw
, shProg
, shader
);
221 /* Make a pass over the IR to add state references for any built-in
222 * uniforms that are used. This has to be done now (during linking).
223 * Code generation doesn't happen until the first time this shader is
224 * used for rendering. Waiting until then to generate the parameters is
225 * too late. At that point, the values for the built-in uniforms won't
226 * get sent to the shader.
228 foreach_in_list(ir_instruction
, node
, shader
->ir
) {
229 ir_variable
*var
= node
->as_variable();
231 if ((var
== NULL
) || (var
->data
.mode
!= ir_var_uniform
)
232 || (strncmp(var
->name
, "gl_", 3) != 0))
235 const ir_state_slot
*const slots
= var
->get_state_slots();
236 assert(slots
!= NULL
);
238 for (unsigned int i
= 0; i
< var
->get_num_state_slots(); i
++) {
239 _mesa_add_state_reference(prog
->Parameters
,
240 (gl_state_index
*) slots
[i
].tokens
);
244 do_set_program_inouts(shader
->ir
, prog
, shader
->Stage
);
246 prog
->SamplersUsed
= shader
->active_samplers
;
247 prog
->ShadowSamplers
= shader
->shadow_samplers
;
248 _mesa_update_shader_textures_used(shProg
, prog
);
250 _mesa_reference_program(ctx
, &shader
->Program
, prog
);
252 brw_add_texrect_params(prog
);
254 prog
->nir
= brw_create_nir(brw
, shProg
, prog
, (gl_shader_stage
) stage
,
255 is_scalar_shader_stage(compiler
, stage
));
257 _mesa_reference_program(ctx
, &prog
, NULL
);
260 if ((ctx
->_Shader
->Flags
& GLSL_DUMP
) && shProg
->Name
!= 0) {
261 for (unsigned i
= 0; i
< shProg
->NumShaders
; i
++) {
262 const struct gl_shader
*sh
= shProg
->Shaders
[i
];
266 fprintf(stderr
, "GLSL %s shader %d source for linked program %d:\n",
267 _mesa_shader_stage_to_string(sh
->Stage
),
269 fprintf(stderr
, "%s", sh
->Source
);
270 fprintf(stderr
, "\n");
274 if (brw
->precompile
&& !brw_shader_precompile(ctx
, shProg
))