/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_nir.h"
#include "brw_shader.h"
#include "glsl/nir/glsl_to_nir.h"
#include "glsl/nir/nir_builder.h"
#include "program/prog_to_nir.h"
struct remap_vs_attrs_state {
   nir_builder b;
   uint64_t inputs_read;
};

static bool
remap_vs_attrs(nir_block *block, void *void_state)
{
   struct remap_vs_attrs_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input) {
         /* Attributes come in a contiguous block, ordered by their
          * gl_vert_attrib value.  That means we can compute the slot
          * number for an attribute by masking out the enabled attributes
          * before it and counting the bits.
          */
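         /* Illustrative example (numbers assumed for this note, not taken
          * from the original source): with inputs_read = 0xd (attributes
          * 0, 2, and 3 enabled) and attr = 3, BITFIELD64_MASK(3) = 0x7, so
          * slot = _mesa_bitcount_64(0xd & 0x7) = 2.
          */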
         nir_const_value *const_offset = nir_src_as_const_value(intrin->src[0]);

         /* We set EmitNoIndirect for VS inputs, so there are no indirects. */
         assert(const_offset);

         int attr = intrin->const_index[0] + const_offset->u[0];
         int slot = _mesa_bitcount_64(state->inputs_read &
                                      BITFIELD64_MASK(attr));

         /* The NIR -> FS pass will just add the base and offset together, so
          * there's no reason to keep them separate.  Just put it all in
          * const_index[0] and set the offset src[0] to load_const(0).
          */
         intrin->const_index[0] = 4 * slot;

         state->b.cursor = nir_before_instr(&intrin->instr);
         nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                               nir_src_for_ssa(nir_imm_int(&state->b, 0)));
      }
   }
   return true;
}
static void
brw_nir_lower_inputs(nir_shader *nir,
                     const struct brw_device_info *devinfo,
                     bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
      /* Start with the location of the variable's base. */
      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
         var->data.driver_location = var->data.location;
      }

      /* Now use nir_lower_io to walk dereference chains.  Attribute arrays
       * are loaded as one vec4 per element (or matrix column), so we use
       * type_size_vec4 here.
       */
      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);

      if (is_scalar) {
         /* Finally, translate VERT_ATTRIB_* values into the actual registers.
          *
          * Note that we can use nir->info.inputs_read instead of
          * key->inputs_read since the two are identical aside from Gen4-5
          * edge flag differences.
          */
         struct remap_vs_attrs_state remap_state = {
            .inputs_read = nir->info.inputs_read,
         };

         /* This pass needs actual constants */
         nir_opt_constant_folding(nir);

         nir_foreach_overload(nir, overload) {
            if (overload->impl) {
               nir_builder_init(&remap_state.b, overload->impl);
               nir_foreach_block(overload->impl, remap_vs_attrs, &remap_state);
            }
         }
      }
      break;
   case MESA_SHADER_GEOMETRY: {
      if (!is_scalar) {
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            var->data.driver_location = var->data.location;
         }
      } else {
         /* The GLSL linker will have already matched up GS inputs and
          * the outputs of prior stages.  The driver does extend VS outputs
          * in some cases, but only for legacy OpenGL or Gen4-5 hardware,
          * neither of which offer geometry shader support.  So we can
          * safely ignore that.
          *
          * For SSO pipelines, we use a fixed VUE map layout based on variable
          * locations, so we can rely on rendezvous-by-location to make this
          * work.
          *
          * However, we need to ignore VARYING_SLOT_PRIMITIVE_ID, as it's not
          * written by previous stages and shows up via payload magic.
          */
         struct brw_vue_map input_vue_map;
         GLbitfield64 inputs_read =
            nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
         brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
                             nir->info.separate_shader);

         /* Start with the slot for the variable's base. */
         foreach_list_typed(nir_variable, var, node, &nir->inputs) {
            assert(input_vue_map.varying_to_slot[var->data.location] != -1);
            var->data.driver_location =
               input_vue_map.varying_to_slot[var->data.location];
         }

         /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
         nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
      }
      break;
   }
   case MESA_SHADER_FRAGMENT:
      assert(is_scalar);
      nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no inputs. */
      assert(exec_list_is_empty(&nir->inputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}
static void
brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
{
   switch (nir->stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_GEOMETRY:
      if (is_scalar) {
         nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                  type_size_vec4_times_4);
         nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
      } else {
         nir_foreach_variable(var, &nir->outputs)
            var->data.driver_location = var->data.location;
      }
      break;
   case MESA_SHADER_FRAGMENT:
      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                               type_size_scalar);
      break;
   case MESA_SHADER_COMPUTE:
      /* Compute shaders have no outputs. */
      assert(exec_list_is_empty(&nir->outputs));
      break;
   default:
      unreachable("unsupported shader stage");
   }
}
static int
type_size_scalar_bytes(const struct glsl_type *type)
{
   return type_size_scalar(type) * 4;
}

static int
type_size_vec4_bytes(const struct glsl_type *type)
{
   return type_size_vec4(type) * 16;
}
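/* Note that uniform locations are counted in bytes here, not in components
 * or vec4 slots: a 32-bit scalar component occupies 4 bytes and a vec4 slot
 * occupies 16, hence the scaling in the two helpers above.
 */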
static void
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_scalar_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
   } else {
      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
                               type_size_vec4_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
   }
}
#include "util/debug.h"

static bool
should_clone_nir()
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}
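/* The macros below run every pass through this test hook: when the
 * NIR_TEST_CLONE environment variable is set, the shader is cloned and the
 * original freed after each pass, so any field that nir_shader_clone()
 * fails to copy shows up as a breakage almost immediately.
 */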
#define _OPT(do_pass) (({                                            \
   bool this_progress = true;                                        \
   do_pass                                                           \
   nir_validate_shader(nir);                                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      ralloc_free(nir);                                              \
      nir = clone;                                                   \
   }                                                                 \
   this_progress;                                                    \
}))

#define OPT(pass, ...) _OPT(                   \
   nir_metadata_set_validation_flag(nir);      \
   this_progress = pass(nir, ##__VA_ARGS__);   \
   if (this_progress) {                        \
      progress = true;                         \
      nir_metadata_check_validation_flag(nir); \
   }                                           \
)

#define OPT_V(pass, ...) _OPT(                 \
   pass(nir, ##__VA_ARGS__);                   \
)
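/* Usage sketch (the pattern used throughout this file): the enclosing
 * function declares locals named "nir" and "progress", then wraps each pass:
 *
 *    bool progress = false;
 *    OPT(nir_opt_algebraic);       <- bool-returning pass; sets "progress"
 *    OPT_V(nir_lower_var_copies);  <- void pass; "progress" is untouched
 *
 * Both macros validate the shader after the pass and, when NIR_TEST_CLONE
 * is set, replace "nir" with a clone of itself (see _OPT above).
 */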
static nir_shader *
nir_optimize(nir_shader *nir, bool is_scalar)
{
   bool progress;
   do {
      progress = false;
      OPT_V(nir_lower_vars_to_ssa);

      if (is_scalar) {
         OPT_V(nir_lower_alu_to_scalar);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT_V(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_peephole_select);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
   } while (progress);

   return nir;
}
/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
nir_shader *
brw_preprocess_nir(nir_shader *nir, bool is_scalar)
{
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;
   if (nir->stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);

   nir = nir_optimize(nir, is_scalar);

   /* Lower a bunch of stuff */
   OPT_V(nir_lower_var_copies);

   /* Get rid of split copies */
   nir = nir_optimize(nir, is_scalar);

   OPT(nir_remove_dead_variables);

   return nir;
}
/* Lowers inputs, outputs, uniforms, and samplers for i965
 *
 * This function does all of the standard lowering prior to post-processing.
 * The lowering done is highly gen, stage, and backend-specific.  The
 * shader_prog parameter is optional and is used only for lowering sampler
 * derefs and atomics for GLSL shaders.
 */
nir_shader *
brw_lower_nir(nir_shader *nir,
              const struct brw_device_info *devinfo,
              const struct gl_shader_program *shader_prog,
              bool is_scalar)
{
   bool progress; /* Written by OPT and OPT_V */
   (void)progress;

   OPT_V(brw_nir_lower_inputs, devinfo, is_scalar);
   OPT_V(brw_nir_lower_outputs, is_scalar);
   OPT_V(brw_nir_lower_uniforms, is_scalar);
   OPT_V(nir_lower_io, nir_var_all, is_scalar ? type_size_scalar : type_size_vec4);

   if (shader_prog) {
      OPT_V(nir_lower_samplers, shader_prog);
   }

   OPT(nir_lower_system_values);

   if (shader_prog) {
      OPT_V(nir_lower_atomics, shader_prog);
   }

   return nir_optimize(nir, is_scalar);
}
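/* Taken together, the usual flow in this file is: brw_create_nir() below
 * translates the shader and runs brw_preprocess_nir() followed by
 * brw_lower_nir(); the backend then calls brw_postprocess_nir() right
 * before code generation.
 */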
/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
nir_shader *
brw_postprocess_nir(nir_shader *nir,
                    const struct brw_device_info *devinfo,
                    bool is_scalar)
{
   bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));

   bool progress; /* Written by OPT and OPT_V */
   (void)progress;
   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT(nir_lower_locals_to_regs);

   OPT_V(nir_lower_to_source_mods);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_overload(nir, overload) {
         if (overload->impl)
            nir_index_ssa_defs(overload->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   OPT_V(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT_V(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}
nir_shader *
brw_create_nir(struct brw_context *brw,
               const struct gl_shader_program *shader_prog,
               const struct gl_program *prog,
               gl_shader_stage stage,
               bool is_scalar)
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_device_info *devinfo = brw->intelScreen->devinfo;
   const nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[stage].NirOptions;
   bool progress;

   nir_shader *nir;

   /* First, lower the GLSL IR or Mesa IR to NIR */
   if (shader_prog) {
      nir = glsl_to_nir(shader_prog, stage, options);
   } else {
      nir = prog_to_nir(prog, options);
      OPT_V(nir_convert_to_ssa); /* turn registers into SSA */
   }
   nir_validate_shader(nir);

   (void)progress;

   nir = brw_preprocess_nir(nir, is_scalar);
   nir = brw_lower_nir(nir, devinfo, shader_prog, is_scalar);

   return nir;
}
nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_device_info *devinfo,
                          const struct brw_sampler_prog_key_data *key_tex,
                          bool is_scalar)
{
   nir_lower_tex_options tex_options = { 0 };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   if (nir_lower_tex(nir, &tex_options)) {
      nir_validate_shader(nir);
      nir = nir_optimize(nir, is_scalar);
   }

   return nir;
}
enum brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}
/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
      return GLSL_TYPE_FLOAT;

   case nir_type_int:
      return GLSL_TYPE_INT;

   case nir_type_unsigned:
      return GLSL_TYPE_UINT;

   default:
      unreachable("bad type");
   }
}