/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "brw_nir.h"
#include "brw_shader.h"
#include "compiler/glsl_types.h"
#include "compiler/nir/nir_builder.h"
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input;
}
static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
/**
 * In many cases, we just add the base and offset together, so there's no
 * reason to keep them separate.  Sometimes, combining them is essential:
 * if a shader only accesses part of a compound variable (such as a matrix
 * or array), the variable's base may not actually exist in the VUE map.
 *
 * This pass adds constant offsets to instr->const_index[0], and resets
 * the offset source to 0.  Non-constant offsets remain unchanged - since
 * we don't know what part of a compound variable is accessed, we allocate
 * storage for the entire thing.
 */
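/* Illustrative example (not from the original source): a load_input with
 * base 5 and a constant offset of 2 becomes a load_input with base 7 and
 * a constant offset of 0, so the backend only ever sees the combined slot.
 */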
static void
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);
         nir_const_value *const_offset = nir_src_as_const_value(*offset);

         if (const_offset) {
            intrin->const_index[0] += const_offset->u32[0];
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
         }
      }
   }
}
static void
add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }
}
static void
remap_vs_attrs(nir_block *block, shader_info *nir_info)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input) {
         /* Attributes come in a contiguous block, ordered by their
          * gl_vert_attrib value.  That means we can compute the slot
          * number for an attribute by masking out the enabled attributes
          * before it and counting the bits.
          */
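         /* Worked example (illustrative values): if inputs_read is 0x9
          * (attributes 0 and 3 enabled) and attr is 3, then
          * BITFIELD64_MASK(3) is 0x7, the masked value is 0x1, and the
          * popcount gives slot 1, so const_index[0] becomes 4.
          */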
         int attr = intrin->const_index[0];
         int slot = _mesa_bitcount_64(nir_info->inputs_read &
                                      BITFIELD64_MASK(attr));
         intrin->const_index[0] = 4 * slot;
      }
   }
}
static void
remap_inputs_with_vue_map(nir_block *block, const struct brw_vue_map *vue_map)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if (intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;
      }
   }
}
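/* Summary of the patch-header DWord layout implemented below (added for
 * clarity; derived from the per-case comments in the function):
 *
 *    Domain      gl_TessLevelInner   gl_TessLevelOuter
 *    QUADS       DWords 3-2          DWords 7-4
 *    TRIANGLES   DWord 4             DWords 7-5
 *    ISOLINES    (unused)            DWords 6-7
 *
 * Returns false if the intrinsic is not a tessellation level access.
 */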
static bool
remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
                  GLenum primitive_mode)
{
   const int location = nir_intrinsic_base(intr);
   const unsigned component = nir_intrinsic_component(intr);
   bool out_of_bounds;

   if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
      switch (primitive_mode) {
      case GL_QUADS:
         /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
         nir_intrinsic_set_base(intr, 0);
         nir_intrinsic_set_component(intr, 3 - component);
         out_of_bounds = false;
         break;
      case GL_TRIANGLES:
         /* gl_TessLevelInner[0] lives at DWord 4. */
         nir_intrinsic_set_base(intr, 1);
         out_of_bounds = component > 0;
         break;
      case GL_ISOLINES:
         out_of_bounds = true;
         break;
      default:
         unreachable("Bogus tessellation domain");
      }
   } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
      if (primitive_mode == GL_ISOLINES) {
         /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 2 + nir_intrinsic_component(intr));
         out_of_bounds = component > 1;
      } else {
         /* Triangles use DWords 7-5 (reversed); Quads use 7-4 (reversed) */
         nir_intrinsic_set_base(intr, 1);
         nir_intrinsic_set_component(intr, 3 - nir_intrinsic_component(intr));
         out_of_bounds = component == 3 && primitive_mode == GL_TRIANGLES;
      }
   } else {
      return false;
   }

   if (out_of_bounds) {
      if (nir_intrinsic_infos[intr->intrinsic].has_dest) {
         b->cursor = nir_before_instr(&intr->instr);
         nir_ssa_def *undef = nir_ssa_undef(b, 1, 32);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(undef));
      }
      nir_instr_remove(&intr->instr);
   }

   return true;
}
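/* Note (added for clarity): for per-vertex TCS/TES accesses, the final URB
 * slot computed below is
 *
 *    vue_map->varying_to_slot[location] + vertex * num_per_vertex_slots
 *
 * folded into const_index[0] when the vertex index is a compile-time
 * constant, and emitted as imul/iadd instructions on the indirect offset
 * otherwise.
 */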
static void
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
                        const struct brw_vue_map *vue_map,
                        GLenum tes_primitive_mode)
{
   const bool is_passthrough_tcs = b->shader->info->name &&
      strcmp(b->shader->info->name, "passthrough") == 0;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      gl_shader_stage stage = b->shader->stage;

      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {

         if (!is_passthrough_tcs &&
             remap_tess_levels(b, intrin, tes_primitive_mode))
            continue;

         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
         assert(vue_slot != -1);
         intrin->const_index[0] = vue_slot;

         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
         if (vertex) {
            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
            if (const_vertex) {
               intrin->const_index[0] += const_vertex->u32[0] *
                                         vue_map->num_per_vertex_slots;
            } else {
               b->cursor = nir_before_instr(&intrin->instr);

               /* Multiply by the number of per-vertex slots. */
               nir_ssa_def *vertex_offset =
                  nir_imul(b,
                           nir_ssa_for_src(b, *vertex, 1),
                           nir_imm_int(b,
                                       vue_map->num_per_vertex_slots));

               /* Add it to the existing offset */
               nir_src *offset = nir_get_io_offset_src(intrin);
               nir_ssa_def *total_offset =
                  nir_iadd(b, vertex_offset,
                           nir_ssa_for_src(b, *offset, 1));

               nir_instr_rewrite_src(&intrin->instr, offset,
                                     nir_src_for_ssa(total_offset));
            }
         }
      }
   }
}
void
brw_nir_lower_vs_inputs(nir_shader *nir,
                        bool is_scalar,
                        bool use_legacy_snorm_formula,
                        const uint8_t *vs_attrib_wa_flags)
{
   /* Start with the location of the variable's base. */
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
    * loaded as one vec4 or dvec4 per element (or matrix column), depending on
    * whether it is a double-precision type or not.
    */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   brw_nir_apply_attribute_workarounds(nir, use_legacy_snorm_formula,
                                       vs_attrib_wa_flags);

   if (is_scalar) {
      /* Finally, translate VERT_ATTRIB_* values into the actual registers. */
      nir_foreach_function(function, nir) {
         if (function->impl) {
            nir_foreach_block(block, function->impl) {
               remap_vs_attrs(block, nir->info);
            }
         }
      }
   }
}
void
brw_nir_lower_vue_inputs(nir_shader *nir, bool is_scalar,
                         const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) {
      /* This pass needs actual constants */
      nir_opt_constant_folding(nir);

      add_const_offset_to_base(nir, nir_var_shader_in);

      nir_foreach_function(function, nir) {
         if (function->impl) {
            nir_foreach_block(block, function->impl) {
               remap_inputs_with_vue_map(block, vue_map);
            }
         }
      }
   }
}
void
brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map,
                                    nir->info->tess.primitive_mode);
         }
      }
   }
}
void
brw_nir_lower_fs_inputs(nir_shader *nir,
                        const struct gen_device_info *devinfo,
                        const struct brw_wm_prog_key *key)
{
   foreach_list_typed(nir_variable, var, node, &nir->inputs) {
      var->data.driver_location = var->data.location;

      /* Apply default interpolation mode.
       *
       * Everything defaults to smooth except for the legacy GL color
       * built-in variables, which might be flat depending on API state.
       */
      if (var->data.interpolation == INTERP_MODE_NONE) {
         const bool flat = key->flat_shade &&
            (var->data.location == VARYING_SLOT_COL0 ||
             var->data.location == VARYING_SLOT_COL1);

         var->data.interpolation = flat ? INTERP_MODE_FLAT
                                        : INTERP_MODE_SMOOTH;
      }

      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      if (devinfo->gen < 6) {
         var->data.centroid = false;
         var->data.sample = false;
      }
   }

   nir_lower_io_options lower_io_options = 0;
   if (key->persample_interp)
      lower_io_options |= nir_lower_io_force_sample_interpolation;

   nir_lower_io(nir, nir_var_shader_in, type_size_vec4, lower_io_options);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_in);
}
void
brw_nir_lower_vue_outputs(nir_shader *nir,
                          bool is_scalar)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
}
void
brw_nir_lower_tcs_outputs(nir_shader *nir, const struct brw_vue_map *vue_map,
                          GLenum tes_primitive_mode)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location = var->data.location;
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   add_const_offset_to_base(nir, nir_var_shader_out);

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
         }
      }
   }
}
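/* Note (added for clarity): FS outputs pack both the dual-source blend
 * index (var->data.index) and the location into driver_location via
 * SET_FIELD, so the backend can recover each field independently later.
 */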
void
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_foreach_variable(var, &nir->outputs) {
      var->data.driver_location =
         SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
         SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
   }

   nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}
void
brw_nir_lower_cs_shared(nir_shader *nir)
{
   nir_assign_var_locations(&nir->shared, &nir->num_shared,
                            type_size_scalar_bytes);
   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0);
}
#define OPT(pass, ...) ({                                  \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   if (this_progress)                                      \
      progress = true;                                     \
   this_progress;                                          \
})

#define OPT_V(pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
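/* Note (added for clarity): OPT() reports whether the pass made progress
 * into the local "progress" flag, which drives the do/while fixed-point
 * loop in nir_optimize() below; OPT_V() runs a pass for its side effects
 * only, without progress tracking.
 */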
static nir_shader *
nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
             bool is_scalar)
{
   nir_variable_mode indirect_mask = 0;
   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput)
      indirect_mask |= nir_var_shader_out;
   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

   bool progress;
   do {
      progress = false;
      OPT_V(nir_lower_vars_to_ssa);
      OPT(nir_opt_copy_prop_vars);

      if (is_scalar) {
         OPT(nir_lower_alu_to_scalar);
      }

      OPT(nir_copy_prop);

      if (is_scalar) {
         OPT(nir_lower_phis_to_scalar);
      }

      OPT(nir_copy_prop);
      OPT(nir_opt_dce);
      OPT(nir_opt_cse);
      OPT(nir_opt_peephole_select, 0);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);
      OPT(nir_opt_dead_cf);
      if (OPT(nir_opt_trivial_continues)) {
         /* If nir_opt_trivial_continues makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(nir_copy_prop);
         OPT(nir_opt_dce);
      }
      OPT(nir_opt_if);
      if (nir->options->max_unroll_iterations != 0) {
         OPT(nir_opt_loop_unroll, indirect_mask);
      }
      OPT(nir_opt_remove_phis);
      OPT(nir_opt_undef);
      OPT_V(nir_lower_doubles, nir_lower_drcp |
                               nir_lower_dsqrt |
                               nir_lower_drsq |
                               nir_lower_dtrunc |
                               nir_lower_dfloor |
                               nir_lower_dceil |
                               nir_lower_dfract |
                               nir_lower_dround_even |
                               nir_lower_dmod);
      OPT_V(nir_lower_double_pack);
   } while (progress);

   return nir;
}
/* Does some simple lowering and runs the standard suite of optimizations
 *
 * This is intended to be called more-or-less directly after you get the
 * shader out of GLSL or some other source.  While it is geared towards i965,
 * it is not at all generator-specific except for the is_scalar flag.  Even
 * there, it is safe to call with is_scalar = false for a shader that is
 * intended for the FS backend as long as nir_optimize is called again with
 * is_scalar = true to scalarize everything prior to code gen.
 */
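/* Typical call sequence (illustrative):
 *
 *    nir = brw_preprocess_nir(compiler, nir);
 *    ...stage-specific lowering, e.g. brw_nir_lower_vs_inputs()...
 *    nir = brw_postprocess_nir(nir, compiler, is_scalar);
 */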
nir_shader *
brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   bool progress; /* Written by OPT and OPT_V */

   const bool is_scalar = compiler->scalar_stage[nir->stage];

   if (nir->stage == MESA_SHADER_GEOMETRY)
      OPT(nir_lower_gs_intrinsics);

   /* See also brw_nir_trig_workarounds.py */
   if (compiler->precise_trig &&
       !(devinfo->gen >= 10 || devinfo->is_kabylake))
      OPT(brw_nir_apply_trig_workarounds);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
      .lower_txf_offset = true,
      .lower_rect_offset = true,
      .lower_txd_cube_map = true,
   };

   OPT(nir_lower_tex, &tex_options);
   OPT(nir_normalize_cubemap_coords);

   OPT(nir_lower_global_vars_to_local);

   OPT(nir_split_var_copies);

   nir = nir_optimize(nir, compiler, is_scalar);

   if (is_scalar) {
      OPT_V(nir_lower_load_const_to_scalar);
   }

   /* Lower a bunch of stuff */
   OPT_V(nir_lower_var_copies);

   OPT_V(nir_lower_clip_cull_distance_arrays);

   nir_variable_mode indirect_mask = 0;
   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectOutput)
      indirect_mask |= nir_var_shader_out;
   if (compiler->glsl_compiler_options[nir->stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

   nir_lower_indirect_derefs(nir, indirect_mask);

   /* Get rid of split copies */
   nir = nir_optimize(nir, compiler, is_scalar);

   OPT(nir_remove_dead_variables, nir_var_local);

   return nir;
}
/* Prepare the given shader for codegen
 *
 * This function is intended to be called right before going into the actual
 * backend and is highly backend-specific.  Also, once this function has been
 * called on a shader, it will no longer be in SSA form so most optimizations
 * will not work.
 */
nir_shader *
brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
                    bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   bool debug_enabled =
      (INTEL_DEBUG & intel_debug_flag_for_shader_stage(nir->stage));

   bool progress; /* Written by OPT and OPT_V */

   nir = nir_optimize(nir, compiler, is_scalar);

   if (devinfo->gen >= 6) {
      /* Try and fuse multiply-adds */
      OPT(brw_nir_opt_peephole_ffma);
   }

   OPT(nir_opt_algebraic_late);

   OPT_V(nir_lower_to_source_mods);
   OPT(nir_copy_prop);
   OPT(nir_opt_dce);
   OPT(nir_opt_move_comparisons);

   OPT(nir_lower_locals_to_regs);

   if (unlikely(debug_enabled)) {
      /* Re-index SSA defs so we print more sensible numbers. */
      nir_foreach_function(function, nir) {
         if (function->impl)
            nir_index_ssa_defs(function->impl);
      }

      fprintf(stderr, "NIR (SSA form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   OPT_V(nir_convert_from_ssa, true);

   if (!is_scalar) {
      OPT_V(nir_move_vec_src_uses_to_dest);
      OPT(nir_lower_vec_to_movs);
   }

   /* This is the last pass we run before we start emitting stuff.  It
    * determines when we need to insert boolean resolves on Gen <= 5.  We
    * run it last because it stashes data in instr->pass_flags and we don't
    * want that to be squashed by other NIR passes.
    */
   if (devinfo->gen <= 5)
      brw_nir_analyze_boolean_resolves(nir);

   nir_sweep(nir);

   if (unlikely(debug_enabled)) {
      fprintf(stderr, "NIR (final form) for %s shader:\n",
              _mesa_shader_stage_to_string(nir->stage));
      nir_print_shader(nir, stderr);
   }

   return nir;
}
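/* Example of the swizzle faking below (illustrative): key_tex->swizzles[s]
 * equal to SWIZZLE_NOOP (x, y, z, w) is skipped entirely, while a
 * luminance-style swizzle such as (x, x, x, 1) marks sampler s in
 * swizzle_result and records one channel selector per component for
 * nir_lower_tex to apply.
 */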
nir_shader *
brw_nir_apply_sampler_key(nir_shader *nir,
                          const struct brw_compiler *compiler,
                          const struct brw_sampler_prog_key_data *key_tex,
                          bool is_scalar)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   nir_lower_tex_options tex_options = { 0 };

   /* Iron Lake and prior require lowering of all rectangle textures */
   if (devinfo->gen < 6)
      tex_options.lower_rect = true;

   /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
   if (devinfo->gen < 8) {
      tex_options.saturate_s = key_tex->gl_clamp_mask[0];
      tex_options.saturate_t = key_tex->gl_clamp_mask[1];
      tex_options.saturate_r = key_tex->gl_clamp_mask[2];
   }

   /* Prior to Haswell, we have to fake texture swizzle */
   for (unsigned s = 0; s < MAX_SAMPLERS; s++) {
      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }

   /* Prior to Haswell, we have to lower gradients on shadow samplers */
   tex_options.lower_txd_shadow = devinfo->gen < 8 && !devinfo->is_haswell;

   tex_options.lower_y_uv_external = key_tex->y_uv_image_mask;
   tex_options.lower_y_u_v_external = key_tex->y_u_v_image_mask;
   tex_options.lower_yx_xuxv_external = key_tex->yx_xuxv_image_mask;

   if (nir_lower_tex(nir, &tex_options)) {
      nir_validate_shader(nir);
      nir = nir_optimize(nir, compiler, is_scalar);
   }

   return nir;
}
enum brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_uint:
   case nir_type_uint32:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
   case nir_type_bool32:
   case nir_type_int32:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
   case nir_type_float32:
      return BRW_REGISTER_TYPE_F;
   case nir_type_float64:
      return BRW_REGISTER_TYPE_DF;
   case nir_type_int64:
      return BRW_REGISTER_TYPE_Q;
   case nir_type_uint64:
      return BRW_REGISTER_TYPE_UQ;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}
/* Returns the glsl_base_type corresponding to a nir_alu_type.
 * This is used by both brw_vec4_nir and brw_fs_nir.
 */
enum glsl_base_type
brw_glsl_base_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_float:
   case nir_type_float32:
      return GLSL_TYPE_FLOAT;

   case nir_type_float64:
      return GLSL_TYPE_DOUBLE;

   case nir_type_int:
   case nir_type_int32:
      return GLSL_TYPE_INT;

   case nir_type_uint:
   case nir_type_uint32:
      return GLSL_TYPE_UINT;

   default:
      unreachable("bad type");
   }
}