/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */

#include "float64_glsl.h"
#include "glsl_to_nir.h"
#include "ir_visitor.h"
#include "ir_hierarchical_visitor.h"
#include "ir_optimization.h"
#include "compiler/nir/nir_control_flow.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_builtin_builder.h"
#include "compiler/nir/nir_deref.h"
#include "main/errors.h"
#include "main/mtypes.h"
#include "main/shaderobj.h"
#include "util/u_math.h"

/*
 * pass to lower GLSL IR to NIR
 *
 * This will lower variable dereferences to loads/stores of corresponding
 * variables in NIR - the variables will be converted to registers in a later
 * pass.
 */

namespace {

class nir_visitor : public ir_visitor
{
public:
   nir_visitor(gl_context *ctx, nir_shader *shader);
   ~nir_visitor();

   virtual void visit(ir_variable *);
   virtual void visit(ir_function *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_if *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_demote *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_return *);
   virtual void visit(ir_call *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_barrier *);

   void create_function(ir_function_signature *ir);

private:
   void add_instr(nir_instr *instr, unsigned num_components, unsigned bit_size);
   nir_ssa_def *evaluate_rvalue(ir_rvalue *ir);

   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def **srcs);
   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1);
   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1,
                       nir_ssa_def *src2);
   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1,
                       nir_ssa_def *src2, nir_ssa_def *src3);

   bool supports_std430;

   nir_shader *shader;
   nir_function_impl *impl;
   nir_builder b;
   nir_ssa_def *result; /* result of the expression tree last visited */

   nir_deref_instr *evaluate_deref(ir_instruction *ir);

   nir_constant *constant_copy(ir_constant *ir, void *mem_ctx);

   /* most recent deref instruction created */
   nir_deref_instr *deref;

   /* whether the IR we're operating on is per-function or global */
   bool is_global;

   ir_function_signature *sig;

   /* map of ir_variable -> nir_variable */
   struct hash_table *var_table;

   /* map of ir_function_signature -> nir_function_overload */
   struct hash_table *overload_table;
};

/* This visitor runs before the main visitor, calling create_function() for
 * each function so that the main visitor can resolve forward references in
 * calls.
 */
class nir_function_visitor : public ir_hierarchical_visitor
{
public:
   nir_function_visitor(nir_visitor *v) : visitor(v)
   {
   }
   virtual ir_visitor_status visit_enter(ir_function *);

private:
   nir_visitor *visitor;
};

/* glsl_to_nir can only handle converting certain function parameters
 * to NIR. This visitor checks for parameters it can't currently handle.
 */
class ir_function_param_visitor : public ir_hierarchical_visitor
{
public:
   ir_function_param_visitor()
      : unsupported(false)
   {
   }

   virtual ir_visitor_status visit_enter(ir_function_signature *ir)
   {
      if (ir->is_intrinsic())
         return visit_continue;

      foreach_in_list(ir_variable, param, &ir->parameters) {
         if (!param->type->is_vector() || !param->type->is_scalar()) {
            unsupported = true;
            return visit_stop;
         }

         if (param->data.mode == ir_var_function_inout) {
            unsupported = true;
            return visit_stop;
         }
      }

      return visit_continue;
   }

   bool unsupported;
};

} /* end of anonymous namespace */

static bool
has_unsupported_function_param(exec_list *ir)
{
   ir_function_param_visitor visitor;
   visit_list_elements(&visitor, ir);
   return visitor.unsupported;
}

nir_shader *
glsl_to_nir(struct gl_context *ctx,
            const struct gl_shader_program *shader_prog,
            gl_shader_stage stage,
            const nir_shader_compiler_options *options)
{
   struct gl_linked_shader *sh = shader_prog->_LinkedShaders[stage];

   const struct gl_shader_compiler_options *gl_options =
      &ctx->Const.ShaderCompilerOptions[stage];

   /* glsl_to_nir can only handle converting certain function parameters
    * to NIR. If we find something we can't handle then we get the GLSL IR
    * opts to remove it before we continue on.
    *
    * TODO: add missing glsl ir to nir support and remove this loop.
    */
   while (has_unsupported_function_param(sh->ir)) {
      do_common_optimization(sh->ir, true, true, gl_options,
                             ctx->Const.NativeIntegers);
   }

   nir_shader *shader = nir_shader_create(NULL, stage, options,
                                          &sh->Program->info);

   nir_visitor v1(ctx, shader);
   nir_function_visitor v2(&v1);
   v2.run(sh->ir);
   visit_exec_list(sh->ir, &v1);

   nir_validate_shader(shader, "after glsl to nir, before function inline");

   /* We have to lower away local constant initializers right before we
    * inline functions. That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   nir_lower_variable_initializers(shader, (nir_variable_mode)~0);
   nir_lower_returns(shader);
   nir_inline_functions(shader);
   nir_opt_deref(shader);

   nir_validate_shader(shader, "after function inlining and return lowering");

   /* Now that we have inlined everything remove all of the functions except
    * main().
    */
   foreach_list_typed_safe(nir_function, function, node, &(shader)->functions) {
      if (strcmp("main", function->name) != 0) {
         exec_node_remove(&function->node);
      }
   }

   shader->info.name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
   if (shader_prog->Label)
      shader->info.label = ralloc_strdup(shader, shader_prog->Label);

   /* Check for transform feedback varyings specified via the API */
   shader->info.has_transform_feedback_varyings =
      shader_prog->TransformFeedback.NumVarying > 0;

   /* Check for transform feedback varyings specified in the Shader */
   if (shader_prog->last_vert_prog)
      shader->info.has_transform_feedback_varyings |=
         shader_prog->last_vert_prog->sh.LinkedTransformFeedback->NumVarying > 0;

   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      shader->info.fs.pixel_center_integer = sh->Program->info.fs.pixel_center_integer;
      shader->info.fs.origin_upper_left = sh->Program->info.fs.origin_upper_left;
   }

   return shader;
}

nir_visitor::nir_visitor(gl_context *ctx, nir_shader *shader)
{
   this->supports_std430 = ctx->Const.UseSTD430AsDefaultPacking;
   this->shader = shader;
   this->is_global = true;
   this->var_table = _mesa_pointer_hash_table_create(NULL);
   this->overload_table = _mesa_pointer_hash_table_create(NULL);

   memset(&this->b, 0, sizeof(this->b));
}

nir_visitor::~nir_visitor()
{
   _mesa_hash_table_destroy(this->var_table, NULL);
   _mesa_hash_table_destroy(this->overload_table, NULL);
}

nir_deref_instr *
nir_visitor::evaluate_deref(ir_instruction *ir)
{
   ir->accept(this);
   return this->deref;
}

nir_constant *
nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
{
   if (ir == NULL)
      return NULL;

   nir_constant *ret = rzalloc(mem_ctx, nir_constant);

   const unsigned rows = ir->type->vector_elements;
   const unsigned cols = ir->type->matrix_columns;

   ret->num_elements = 0;
   switch (ir->type->base_type) {
   case GLSL_TYPE_UINT:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u32 = ir->value.u[r];
      break;

   case GLSL_TYPE_UINT16:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u16 = ir->value.u16[r];
      break;

   case GLSL_TYPE_INT:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i32 = ir->value.i[r];
      break;

   case GLSL_TYPE_INT16:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i16 = ir->value.i16[r];
      break;

   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
      if (cols > 1) {
         ret->elements = ralloc_array(mem_ctx, nir_constant *, cols);
         ret->num_elements = cols;
         for (unsigned c = 0; c < cols; c++) {
            nir_constant *col_const = rzalloc(mem_ctx, nir_constant);
            col_const->num_elements = 0;
            switch (ir->type->base_type) {
            case GLSL_TYPE_FLOAT:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].f32 = ir->value.f[c * rows + r];
               break;

            case GLSL_TYPE_FLOAT16:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].u16 = ir->value.f16[c * rows + r];
               break;

            case GLSL_TYPE_DOUBLE:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].f64 = ir->value.d[c * rows + r];
               break;

            default:
               unreachable("Cannot get here from the first level switch");
            }
            ret->elements[c] = col_const;
         }
      } else {
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].f32 = ir->value.f[r];
            break;

         case GLSL_TYPE_FLOAT16:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].u16 = ir->value.f16[r];
            break;

         case GLSL_TYPE_DOUBLE:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].f64 = ir->value.d[r];
            break;

         default:
            unreachable("Cannot get here from the first level switch");
         }
      }
      break;

   case GLSL_TYPE_UINT64:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u64 = ir->value.u64[r];
      break;

   case GLSL_TYPE_INT64:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i64 = ir->value.i64[r];
      break;

   case GLSL_TYPE_BOOL:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].b = ir->value.b[r];
      break;

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_ARRAY:
      ret->elements = ralloc_array(mem_ctx, nir_constant *,
                                   ir->type->length);
      ret->num_elements = ir->type->length;

      for (unsigned i = 0; i < ir->type->length; i++)
         ret->elements[i] = constant_copy(ir->const_elements[i], mem_ctx);
      break;

   default:
      unreachable("not reached");
   }

   return ret;
}

static const glsl_type *
wrap_type_in_array(const glsl_type *elem_type, const glsl_type *array_type)
{
   if (!array_type->is_array())
      return elem_type;

   elem_type = wrap_type_in_array(elem_type, array_type->fields.array);
   return glsl_type::get_array_instance(elem_type, array_type->length);
}

static unsigned
get_nir_how_declared(unsigned how_declared)
{
   if (how_declared == ir_var_hidden)
      return nir_var_hidden;

   return nir_var_declared_normally;
}

void
nir_visitor::visit(ir_variable *ir)
{
   /* TODO: In future we should switch to using the NIR lowering pass but for
    * now just ignore these variables as GLSL IR should have lowered them.
    * Anything remaining are just dead vars that weren't cleaned up.
    */
   if (ir->data.mode == ir_var_shader_shared)
      return;

   /* FINISHME: inout parameters */
   assert(ir->data.mode != ir_var_function_inout);

   if (ir->data.mode == ir_var_function_out)
      return;

   nir_variable *var = rzalloc(shader, nir_variable);
   var->type = ir->type;
   var->name = ralloc_strdup(var, ir->name);

   var->data.always_active_io = ir->data.always_active_io;
   var->data.read_only = ir->data.read_only;
   var->data.centroid = ir->data.centroid;
   var->data.sample = ir->data.sample;
   var->data.patch = ir->data.patch;
   var->data.how_declared = get_nir_how_declared(ir->data.how_declared);
   var->data.invariant = ir->data.invariant;
   var->data.location = ir->data.location;
   var->data.stream = ir->data.stream;
   if (ir->data.stream & (1u << 31))
      var->data.stream |= NIR_STREAM_PACKED;

   var->data.precision = ir->data.precision;
   var->data.explicit_location = ir->data.explicit_location;
   var->data.matrix_layout = ir->data.matrix_layout;
   var->data.from_named_ifc_block = ir->data.from_named_ifc_block;
   var->data.compact = false;

   switch(ir->data.mode) {
   case ir_var_auto:
   case ir_var_temporary:
      if (is_global)
         var->data.mode = nir_var_shader_temp;
      else
         var->data.mode = nir_var_function_temp;
      break;

   case ir_var_function_in:
   case ir_var_const_in:
      var->data.mode = nir_var_function_temp;
      break;

   case ir_var_shader_in:
      if (shader->info.stage == MESA_SHADER_GEOMETRY &&
          ir->data.location == VARYING_SLOT_PRIMITIVE_ID) {
         /* For whatever reason, GLSL IR makes gl_PrimitiveIDIn an input */
         var->data.location = SYSTEM_VALUE_PRIMITIVE_ID;
         var->data.mode = nir_var_system_value;
      } else {
         var->data.mode = nir_var_shader_in;

         if (shader->info.stage == MESA_SHADER_TESS_EVAL &&
             (ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
              ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
            var->data.compact = ir->type->without_array()->is_scalar();
         }

         if (shader->info.stage > MESA_SHADER_VERTEX &&
             ir->data.location >= VARYING_SLOT_CLIP_DIST0 &&
             ir->data.location <= VARYING_SLOT_CULL_DIST1) {
            var->data.compact = ir->type->without_array()->is_scalar();
         }
      }
      break;

   case ir_var_shader_out:
      var->data.mode = nir_var_shader_out;
      if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
          (ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
           ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
         var->data.compact = ir->type->without_array()->is_scalar();
      }

      if (shader->info.stage <= MESA_SHADER_GEOMETRY &&
          ir->data.location >= VARYING_SLOT_CLIP_DIST0 &&
          ir->data.location <= VARYING_SLOT_CULL_DIST1) {
         var->data.compact = ir->type->without_array()->is_scalar();
      }
      break;

   case ir_var_uniform:
      if (ir->get_interface_type())
         var->data.mode = nir_var_mem_ubo;
      else
         var->data.mode = nir_var_uniform;
      break;

   case ir_var_shader_storage:
      var->data.mode = nir_var_mem_ssbo;
      break;

   case ir_var_system_value:
      var->data.mode = nir_var_system_value;
      break;

   default:
      unreachable("not reached");
   }

   unsigned mem_access = 0;
   if (ir->data.memory_read_only)
      mem_access |= ACCESS_NON_WRITEABLE;
   if (ir->data.memory_write_only)
      mem_access |= ACCESS_NON_READABLE;
   if (ir->data.memory_coherent)
      mem_access |= ACCESS_COHERENT;
   if (ir->data.memory_volatile)
      mem_access |= ACCESS_VOLATILE;
   if (ir->data.memory_restrict)
      mem_access |= ACCESS_RESTRICT;

   var->interface_type = ir->get_interface_type();

   /* For UBO and SSBO variables, we need explicit types */
   if (var->data.mode & (nir_var_mem_ubo | nir_var_mem_ssbo)) {
      const glsl_type *explicit_ifc_type =
         ir->get_interface_type()->get_explicit_interface_type(supports_std430);

      var->interface_type = explicit_ifc_type;

      if (ir->type->without_array()->is_interface()) {
         /* If the type contains the interface, wrap the explicit type in the
          * right number of arrays.
          */
         var->type = wrap_type_in_array(explicit_ifc_type, ir->type);
      } else {
         /* Otherwise, this variable is one entry in the interface */
         UNUSED bool found = false;
         for (unsigned i = 0; i < explicit_ifc_type->length; i++) {
            const glsl_struct_field *field =
               &explicit_ifc_type->fields.structure[i];
            if (strcmp(ir->name, field->name) != 0)
               continue;

            var->type = field->type;
            if (field->memory_read_only)
               mem_access |= ACCESS_NON_WRITEABLE;
            if (field->memory_write_only)
               mem_access |= ACCESS_NON_READABLE;
            if (field->memory_coherent)
               mem_access |= ACCESS_COHERENT;
            if (field->memory_volatile)
               mem_access |= ACCESS_VOLATILE;
            if (field->memory_restrict)
               mem_access |= ACCESS_RESTRICT;

            found = true;
            break;
         }
         assert(found);
      }
   }

   var->data.interpolation = ir->data.interpolation;
   var->data.location_frac = ir->data.location_frac;

   switch (ir->data.depth_layout) {
   case ir_depth_layout_none:
      var->data.depth_layout = nir_depth_layout_none;
      break;
   case ir_depth_layout_any:
      var->data.depth_layout = nir_depth_layout_any;
      break;
   case ir_depth_layout_greater:
      var->data.depth_layout = nir_depth_layout_greater;
      break;
   case ir_depth_layout_less:
      var->data.depth_layout = nir_depth_layout_less;
      break;
   case ir_depth_layout_unchanged:
      var->data.depth_layout = nir_depth_layout_unchanged;
      break;
   default:
      unreachable("not reached");
   }

   var->data.index = ir->data.index;
   var->data.descriptor_set = 0;
   var->data.binding = ir->data.binding;
   var->data.explicit_binding = ir->data.explicit_binding;
   var->data.bindless = ir->data.bindless;
   var->data.offset = ir->data.offset;
   var->data.access = (gl_access_qualifier)mem_access;

   if (var->type->without_array()->is_image()) {
      var->data.image.format = ir->data.image_format;
   } else if (var->data.mode == nir_var_shader_out) {
      var->data.xfb.buffer = ir->data.xfb_buffer;
      var->data.xfb.stride = ir->data.xfb_stride;
   }

   var->data.fb_fetch_output = ir->data.fb_fetch_output;
   var->data.explicit_xfb_buffer = ir->data.explicit_xfb_buffer;
   var->data.explicit_xfb_stride = ir->data.explicit_xfb_stride;

   var->num_state_slots = ir->get_num_state_slots();
   if (var->num_state_slots > 0) {
      var->state_slots = rzalloc_array(var, nir_state_slot,
                                       var->num_state_slots);

      ir_state_slot *state_slots = ir->get_state_slots();
      for (unsigned i = 0; i < var->num_state_slots; i++) {
         for (unsigned j = 0; j < 5; j++)
            var->state_slots[i].tokens[j] = state_slots[i].tokens[j];
         var->state_slots[i].swizzle = state_slots[i].swizzle;
      }
   } else {
      var->state_slots = NULL;
   }

   var->constant_initializer = constant_copy(ir->constant_initializer, var);

   if (var->data.mode == nir_var_function_temp)
      nir_function_impl_add_variable(impl, var);
   else
      nir_shader_add_variable(shader, var);

   _mesa_hash_table_insert(var_table, ir, var);
}

ir_visitor_status
nir_function_visitor::visit_enter(ir_function *ir)
{
   foreach_in_list(ir_function_signature, sig, &ir->signatures) {
      visitor->create_function(sig);
   }
   return visit_continue_with_parent;
}

void
nir_visitor::create_function(ir_function_signature *ir)
{
   if (ir->is_intrinsic())
      return;

   nir_function *func = nir_function_create(shader, ir->function_name());
   if (strcmp(ir->function_name(), "main") == 0)
      func->is_entrypoint = true;

   func->num_params = ir->parameters.length() +
                      (ir->return_type != glsl_type::void_type);
   func->params = ralloc_array(shader, nir_parameter, func->num_params);

   unsigned np = 0;

   if (ir->return_type != glsl_type::void_type) {
      /* The return value is a variable deref (basically an out parameter) */
      func->params[np].num_components = 1;
      func->params[np].bit_size = 32;
      np++;
   }

   foreach_in_list(ir_variable, param, &ir->parameters) {
      /* FINISHME: pass arrays, structs, etc by reference? */
      assert(param->type->is_vector() || param->type->is_scalar());

      if (param->data.mode == ir_var_function_in) {
         func->params[np].num_components = param->type->vector_elements;
         func->params[np].bit_size = glsl_get_bit_size(param->type);
      } else {
         func->params[np].num_components = 1;
         func->params[np].bit_size = 32;
      }
      np++;
   }
   assert(np == func->num_params);

   _mesa_hash_table_insert(this->overload_table, ir, func);
}

void
nir_visitor::visit(ir_function *ir)
{
   foreach_in_list(ir_function_signature, sig, &ir->signatures)
      sig->accept(this);
}

void
nir_visitor::visit(ir_function_signature *ir)
{
   if (ir->is_intrinsic())
      return;

   struct hash_entry *entry =
      _mesa_hash_table_search(this->overload_table, ir);

   assert(entry);
   nir_function *func = (nir_function *) entry->data;

   if (ir->is_defined) {
      nir_function_impl *impl = nir_function_impl_create(func);
      this->impl = impl;

      this->is_global = false;

      nir_builder_init(&b, impl);
      b.cursor = nir_after_cf_list(&impl->body);

      unsigned i = (ir->return_type != glsl_type::void_type) ? 1 : 0;

      foreach_in_list(ir_variable, param, &ir->parameters) {
         nir_variable *var =
            nir_local_variable_create(impl, param->type, param->name);

         if (param->data.mode == ir_var_function_in) {
            nir_store_var(&b, var, nir_load_param(&b, i), ~0);
         }

         _mesa_hash_table_insert(var_table, param, var);
         i++;
      }

      visit_exec_list(&ir->body, this);

      this->is_global = true;
   }
}

void
nir_visitor::visit(ir_loop *ir)
{
   nir_push_loop(&b);
   visit_exec_list(&ir->body_instructions, this);
   nir_pop_loop(&b, NULL);
}

void
nir_visitor::visit(ir_if *ir)
{
   nir_push_if(&b, evaluate_rvalue(ir->condition));
   visit_exec_list(&ir->then_instructions, this);
   nir_push_else(&b, NULL);
   visit_exec_list(&ir->else_instructions, this);
   nir_pop_if(&b, NULL);
}

void
nir_visitor::visit(ir_discard *ir)
{
   /*
    * discards aren't treated as control flow, because before we lower them
    * they can appear anywhere in the shader and the stuff after them may still
    * be executed (yay, crazy GLSL rules!). However, after lowering, all the
    * discards will be immediately followed by a return.
    */

   nir_intrinsic_instr *discard;
   if (ir->condition) {
      discard = nir_intrinsic_instr_create(this->shader,
                                           nir_intrinsic_discard_if);
      discard->src[0] =
         nir_src_for_ssa(evaluate_rvalue(ir->condition));
   } else {
      discard = nir_intrinsic_instr_create(this->shader, nir_intrinsic_discard);
   }

   nir_builder_instr_insert(&b, &discard->instr);
}

void
nir_visitor::visit(ir_demote *ir)
{
   nir_intrinsic_instr *demote =
      nir_intrinsic_instr_create(this->shader, nir_intrinsic_demote);

   nir_builder_instr_insert(&b, &demote->instr);
}

void
nir_visitor::visit(ir_emit_vertex *ir)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(this->shader, nir_intrinsic_emit_vertex);
   nir_intrinsic_set_stream_id(instr, ir->stream_id());
   nir_builder_instr_insert(&b, &instr->instr);
}

void
nir_visitor::visit(ir_end_primitive *ir)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(this->shader, nir_intrinsic_end_primitive);
   nir_intrinsic_set_stream_id(instr, ir->stream_id());
   nir_builder_instr_insert(&b, &instr->instr);
}

void
nir_visitor::visit(ir_loop_jump *ir)
{
   nir_jump_type type;
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      type = nir_jump_break;
      break;
   case ir_loop_jump::jump_continue:
      type = nir_jump_continue;
      break;
   default:
      unreachable("not reached");
   }

   nir_jump_instr *instr = nir_jump_instr_create(this->shader, type);
   nir_builder_instr_insert(&b, &instr->instr);
}

void
nir_visitor::visit(ir_return *ir)
{
   if (ir->value != NULL) {
      nir_deref_instr *ret_deref =
         nir_build_deref_cast(&b, nir_load_param(&b, 0),
                              nir_var_function_temp, ir->value->type, 0);

      nir_ssa_def *val = evaluate_rvalue(ir->value);
      nir_store_deref(&b, ret_deref, val, ~0);
   }

   nir_jump_instr *instr = nir_jump_instr_create(this->shader, nir_jump_return);
   nir_builder_instr_insert(&b, &instr->instr);
}

static void
intrinsic_set_std430_align(nir_intrinsic_instr *intrin, const glsl_type *type)
{
   unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
   unsigned pow2_components = util_next_power_of_two(type->vector_elements);
   nir_intrinsic_set_align(intrin, (bit_size / 8) * pow2_components, 0);
}

/* Accumulate any qualifiers along the deref chain to get the actual
 * load/store qualifier.
 */
static enum gl_access_qualifier
deref_get_qualifier(nir_deref_instr *deref)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   unsigned qualifiers = path.path[0]->var->data.access;

   const glsl_type *parent_type = path.path[0]->type;
   for (nir_deref_instr **cur_ptr = &path.path[1]; *cur_ptr; cur_ptr++) {
      nir_deref_instr *cur = *cur_ptr;

      if (parent_type->is_interface()) {
         const struct glsl_struct_field *field =
            &parent_type->fields.structure[cur->strct.index];
         if (field->memory_read_only)
            qualifiers |= ACCESS_NON_WRITEABLE;
         if (field->memory_write_only)
            qualifiers |= ACCESS_NON_READABLE;
         if (field->memory_coherent)
            qualifiers |= ACCESS_COHERENT;
         if (field->memory_volatile)
            qualifiers |= ACCESS_VOLATILE;
         if (field->memory_restrict)
            qualifiers |= ACCESS_RESTRICT;
      }

      parent_type = cur->type;
   }

   nir_deref_path_finish(&path);

   return (gl_access_qualifier) qualifiers;
}

void
nir_visitor::visit(ir_call *ir)
{
   if (ir->callee->is_intrinsic()) {
      nir_intrinsic_op op;

      switch (ir->callee->intrinsic_id) {
      case ir_intrinsic_generic_atomic_add:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_deref_atomic_add : nir_intrinsic_deref_atomic_fadd;
         break;
      case ir_intrinsic_generic_atomic_and:
         op = nir_intrinsic_deref_atomic_and;
         break;
      case ir_intrinsic_generic_atomic_or:
         op = nir_intrinsic_deref_atomic_or;
         break;
      case ir_intrinsic_generic_atomic_xor:
         op = nir_intrinsic_deref_atomic_xor;
         break;
      case ir_intrinsic_generic_atomic_min:
         assert(ir->return_deref);
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_deref_atomic_imin;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_deref_atomic_umin;
         else if (ir->return_deref->type == glsl_type::float_type)
            op = nir_intrinsic_deref_atomic_fmin;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_generic_atomic_max:
         assert(ir->return_deref);
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_deref_atomic_imax;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_deref_atomic_umax;
         else if (ir->return_deref->type == glsl_type::float_type)
            op = nir_intrinsic_deref_atomic_fmax;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_generic_atomic_exchange:
         op = nir_intrinsic_deref_atomic_exchange;
         break;
      case ir_intrinsic_generic_atomic_comp_swap:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_deref_atomic_comp_swap
            : nir_intrinsic_deref_atomic_fcomp_swap;
         break;
      case ir_intrinsic_atomic_counter_read:
         op = nir_intrinsic_atomic_counter_read_deref;
         break;
      case ir_intrinsic_atomic_counter_increment:
         op = nir_intrinsic_atomic_counter_inc_deref;
         break;
      case ir_intrinsic_atomic_counter_predecrement:
         op = nir_intrinsic_atomic_counter_pre_dec_deref;
         break;
      case ir_intrinsic_atomic_counter_add:
         op = nir_intrinsic_atomic_counter_add_deref;
         break;
      case ir_intrinsic_atomic_counter_and:
         op = nir_intrinsic_atomic_counter_and_deref;
         break;
      case ir_intrinsic_atomic_counter_or:
         op = nir_intrinsic_atomic_counter_or_deref;
         break;
      case ir_intrinsic_atomic_counter_xor:
         op = nir_intrinsic_atomic_counter_xor_deref;
         break;
      case ir_intrinsic_atomic_counter_min:
         op = nir_intrinsic_atomic_counter_min_deref;
         break;
      case ir_intrinsic_atomic_counter_max:
         op = nir_intrinsic_atomic_counter_max_deref;
         break;
      case ir_intrinsic_atomic_counter_exchange:
         op = nir_intrinsic_atomic_counter_exchange_deref;
         break;
      case ir_intrinsic_atomic_counter_comp_swap:
         op = nir_intrinsic_atomic_counter_comp_swap_deref;
         break;
      case ir_intrinsic_image_load:
         op = nir_intrinsic_image_deref_load;
         break;
      case ir_intrinsic_image_store:
         op = nir_intrinsic_image_deref_store;
         break;
      case ir_intrinsic_image_atomic_add:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_image_deref_atomic_add
            : nir_intrinsic_image_deref_atomic_fadd;
         break;
      case ir_intrinsic_image_atomic_min:
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_image_deref_atomic_imin;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_image_deref_atomic_umin;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_image_atomic_max:
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_image_deref_atomic_imax;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_image_deref_atomic_umax;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_image_atomic_and:
         op = nir_intrinsic_image_deref_atomic_and;
         break;
      case ir_intrinsic_image_atomic_or:
         op = nir_intrinsic_image_deref_atomic_or;
         break;
      case ir_intrinsic_image_atomic_xor:
         op = nir_intrinsic_image_deref_atomic_xor;
         break;
      case ir_intrinsic_image_atomic_exchange:
         op = nir_intrinsic_image_deref_atomic_exchange;
         break;
      case ir_intrinsic_image_atomic_comp_swap:
         op = nir_intrinsic_image_deref_atomic_comp_swap;
         break;
      case ir_intrinsic_image_atomic_inc_wrap:
         op = nir_intrinsic_image_deref_atomic_inc_wrap;
         break;
      case ir_intrinsic_image_atomic_dec_wrap:
         op = nir_intrinsic_image_deref_atomic_dec_wrap;
         break;
      case ir_intrinsic_memory_barrier:
         op = nir_intrinsic_memory_barrier;
         break;
      case ir_intrinsic_image_size:
         op = nir_intrinsic_image_deref_size;
         break;
      case ir_intrinsic_image_samples:
         op = nir_intrinsic_image_deref_samples;
         break;
      case ir_intrinsic_ssbo_store:
      case ir_intrinsic_ssbo_load:
      case ir_intrinsic_ssbo_atomic_add:
      case ir_intrinsic_ssbo_atomic_and:
      case ir_intrinsic_ssbo_atomic_or:
      case ir_intrinsic_ssbo_atomic_xor:
      case ir_intrinsic_ssbo_atomic_min:
      case ir_intrinsic_ssbo_atomic_max:
      case ir_intrinsic_ssbo_atomic_exchange:
      case ir_intrinsic_ssbo_atomic_comp_swap:
         /* SSBO store/loads should only have been lowered in GLSL IR for
          * non-nir drivers, NIR drivers make use of gl_nir_lower_buffers()
          * instead.
          */
         unreachable("Invalid operation nir doesn't want lowered ssbo "
                     "loads and stores");
      case ir_intrinsic_shader_clock:
         op = nir_intrinsic_shader_clock;
         break;
      case ir_intrinsic_begin_invocation_interlock:
         op = nir_intrinsic_begin_invocation_interlock;
         break;
      case ir_intrinsic_end_invocation_interlock:
         op = nir_intrinsic_end_invocation_interlock;
         break;
      case ir_intrinsic_group_memory_barrier:
         op = nir_intrinsic_group_memory_barrier;
         break;
      case ir_intrinsic_memory_barrier_atomic_counter:
         op = nir_intrinsic_memory_barrier_atomic_counter;
         break;
      case ir_intrinsic_memory_barrier_buffer:
         op = nir_intrinsic_memory_barrier_buffer;
         break;
      case ir_intrinsic_memory_barrier_image:
         op = nir_intrinsic_memory_barrier_image;
         break;
      case ir_intrinsic_memory_barrier_shared:
         op = nir_intrinsic_memory_barrier_shared;
         break;
      case ir_intrinsic_shared_load:
         op = nir_intrinsic_load_shared;
         break;
      case ir_intrinsic_shared_store:
         op = nir_intrinsic_store_shared;
         break;
      case ir_intrinsic_shared_atomic_add:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_shared_atomic_add
            : nir_intrinsic_shared_atomic_fadd;
         break;
      case ir_intrinsic_shared_atomic_and:
         op = nir_intrinsic_shared_atomic_and;
         break;
      case ir_intrinsic_shared_atomic_or:
         op = nir_intrinsic_shared_atomic_or;
         break;
      case ir_intrinsic_shared_atomic_xor:
         op = nir_intrinsic_shared_atomic_xor;
         break;
      case ir_intrinsic_shared_atomic_min:
         assert(ir->return_deref);
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_shared_atomic_imin;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_shared_atomic_umin;
         else if (ir->return_deref->type == glsl_type::float_type)
            op = nir_intrinsic_shared_atomic_fmin;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_shared_atomic_max:
         assert(ir->return_deref);
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_shared_atomic_imax;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_shared_atomic_umax;
         else if (ir->return_deref->type == glsl_type::float_type)
            op = nir_intrinsic_shared_atomic_fmax;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_shared_atomic_exchange:
         op = nir_intrinsic_shared_atomic_exchange;
         break;
      case ir_intrinsic_shared_atomic_comp_swap:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_shared_atomic_comp_swap
            : nir_intrinsic_shared_atomic_fcomp_swap;
         break;
      case ir_intrinsic_vote_any:
         op = nir_intrinsic_vote_any;
         break;
      case ir_intrinsic_vote_all:
         op = nir_intrinsic_vote_all;
         break;
      case ir_intrinsic_vote_eq:
         op = nir_intrinsic_vote_ieq;
         break;
      case ir_intrinsic_ballot:
         op = nir_intrinsic_ballot;
         break;
      case ir_intrinsic_read_invocation:
         op = nir_intrinsic_read_invocation;
         break;
      case ir_intrinsic_read_first_invocation:
         op = nir_intrinsic_read_first_invocation;
         break;
      case ir_intrinsic_helper_invocation:
         op = nir_intrinsic_is_helper_invocation;
         break;
      default:
         unreachable("not reached");
      }

      nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
      nir_ssa_def *ret = &instr->dest.ssa;

      switch (op) {
      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap: {
         int param_count = ir->actual_parameters.length();
         assert(param_count == 2 || param_count == 3);

         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *rvalue = (ir_rvalue *) param;
         ir_dereference *deref = rvalue->as_dereference();
         ir_swizzle *swizzle = NULL;
         if (!deref) {
            /* We may have a swizzle to pick off a single vec4 component */
            swizzle = rvalue->as_swizzle();
            assert(swizzle && swizzle->type->vector_elements == 1);
            deref = swizzle->val->as_dereference();
            assert(deref);
         }

         nir_deref_instr *nir_deref = evaluate_deref(deref);
         if (swizzle) {
            nir_deref = nir_build_deref_array_imm(&b, nir_deref,
                                                  swizzle->mask.x);
         }
         instr->src[0] = nir_src_for_ssa(&nir_deref->dest.ssa);

         nir_intrinsic_set_access(instr, deref_get_qualifier(nir_deref));

         /* data1 parameter (this is always present) */
         param = param->get_next();
         ir_instruction *inst = (ir_instruction *) param;
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));

         /* data2 parameter (only with atomic_comp_swap) */
         if (param_count == 3) {
            assert(op == nir_intrinsic_deref_atomic_comp_swap ||
                   op == nir_intrinsic_deref_atomic_fcomp_swap);
            param = param->get_next();
            inst = (ir_instruction *) param;
            instr->src[2] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
         }

         assert(ir->return_deref);
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements, 32, NULL);
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_atomic_counter_read_deref:
      case nir_intrinsic_atomic_counter_inc_deref:
      case nir_intrinsic_atomic_counter_pre_dec_deref:
      case nir_intrinsic_atomic_counter_add_deref:
      case nir_intrinsic_atomic_counter_min_deref:
      case nir_intrinsic_atomic_counter_max_deref:
      case nir_intrinsic_atomic_counter_and_deref:
      case nir_intrinsic_atomic_counter_or_deref:
      case nir_intrinsic_atomic_counter_xor_deref:
      case nir_intrinsic_atomic_counter_exchange_deref:
      case nir_intrinsic_atomic_counter_comp_swap_deref: {
         /* Set the counter variable dereference. */
         exec_node *param = ir->actual_parameters.get_head();
         ir_dereference *counter = (ir_dereference *)param;

         instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->dest.ssa);
         param = param->get_next();

         /* Set the intrinsic destination. */
         if (ir->return_deref) {
            nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
         }

         /* Set the intrinsic parameters. */
         if (!param->is_tail_sentinel()) {
            instr->src[1] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         }

         if (!param->is_tail_sentinel()) {
            instr->src[2] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         }

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_image_deref_load:
      case nir_intrinsic_image_deref_store:
      case nir_intrinsic_image_deref_atomic_add:
      case nir_intrinsic_image_deref_atomic_imin:
      case nir_intrinsic_image_deref_atomic_umin:
      case nir_intrinsic_image_deref_atomic_imax:
      case nir_intrinsic_image_deref_atomic_umax:
      case nir_intrinsic_image_deref_atomic_and:
      case nir_intrinsic_image_deref_atomic_or:
      case nir_intrinsic_image_deref_atomic_xor:
      case nir_intrinsic_image_deref_atomic_exchange:
      case nir_intrinsic_image_deref_atomic_comp_swap:
      case nir_intrinsic_image_deref_atomic_fadd:
      case nir_intrinsic_image_deref_samples:
      case nir_intrinsic_image_deref_size:
      case nir_intrinsic_image_deref_atomic_inc_wrap:
      case nir_intrinsic_image_deref_atomic_dec_wrap: {
         nir_ssa_undef_instr *instr_undef =
            nir_ssa_undef_instr_create(shader, 1, 32);
         nir_builder_instr_insert(&b, &instr_undef->instr);

         /* Set the image variable dereference. */
         exec_node *param = ir->actual_parameters.get_head();
         ir_dereference *image = (ir_dereference *)param;
         nir_deref_instr *deref = evaluate_deref(image);
         const glsl_type *type = deref->type;

         nir_intrinsic_set_access(instr, deref_get_qualifier(deref));

         instr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
         param = param->get_next();

         /* Set the intrinsic destination. */
         if (ir->return_deref) {
            unsigned num_components = ir->return_deref->type->vector_elements;
            nir_ssa_dest_init(&instr->instr, &instr->dest,
                              num_components, 32, NULL);
         }

         if (op == nir_intrinsic_image_deref_size) {
            instr->num_components = instr->dest.ssa.num_components;
         } else if (op == nir_intrinsic_image_deref_load ||
                    op == nir_intrinsic_image_deref_store) {
            instr->num_components = 4;
         }

         if (op == nir_intrinsic_image_deref_size ||
             op == nir_intrinsic_image_deref_samples) {
            nir_builder_instr_insert(&b, &instr->instr);
            break;
         }

         /* Set the address argument, extending the coordinate vector to four
          * components.
          */
         nir_ssa_def *src_addr =
            evaluate_rvalue((ir_dereference *)param);
         nir_ssa_def *srcs[4];

         for (int i = 0; i < 4; i++) {
            if (i < type->coordinate_components())
               srcs[i] = nir_channel(&b, src_addr, i);
            else
               srcs[i] = &instr_undef->def;
         }

         instr->src[1] = nir_src_for_ssa(nir_vec(&b, srcs, 4));
         param = param->get_next();

         /* Set the sample argument, which is undefined for single-sample
          * images.
          */
         if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
            instr->src[2] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         } else {
            instr->src[2] = nir_src_for_ssa(&instr_undef->def);
         }

         /* Set the intrinsic parameters. */
         if (!param->is_tail_sentinel()) {
            instr->src[3] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         } else if (op == nir_intrinsic_image_deref_load) {
            instr->src[3] = nir_src_for_ssa(nir_imm_int(&b, 0)); /* LOD */
         }

         if (!param->is_tail_sentinel()) {
            instr->src[4] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         } else if (op == nir_intrinsic_image_deref_store) {
            instr->src[4] = nir_src_for_ssa(nir_imm_int(&b, 0)); /* LOD */
         }

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_memory_barrier:
      case nir_intrinsic_group_memory_barrier:
      case nir_intrinsic_memory_barrier_atomic_counter:
      case nir_intrinsic_memory_barrier_buffer:
      case nir_intrinsic_memory_barrier_image:
      case nir_intrinsic_memory_barrier_shared:
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      case nir_intrinsic_shader_clock:
         nir_ssa_dest_init(&instr->instr, &instr->dest, 2, 32, NULL);
         instr->num_components = 2;
         nir_intrinsic_set_memory_scope(instr, NIR_SCOPE_SUBGROUP);
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      case nir_intrinsic_begin_invocation_interlock:
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      case nir_intrinsic_end_invocation_interlock:
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      case nir_intrinsic_store_ssbo: {
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_constant *write_mask = ((ir_instruction *)param)->as_constant();

         nir_ssa_def *nir_val = evaluate_rvalue(val);
         if (val->type->is_boolean())
            nir_val = nir_b2i32(&b, nir_val);

         instr->src[0] = nir_src_for_ssa(nir_val);
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(block));
         instr->src[2] = nir_src_for_ssa(evaluate_rvalue(offset));
         intrinsic_set_std430_align(instr, val->type);
         nir_intrinsic_set_write_mask(instr, write_mask->value.u[0]);
         instr->num_components = val->type->vector_elements;

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_load_shared: {
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

         nir_intrinsic_set_base(instr, 0);
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(offset));

         const glsl_type *type = ir->return_deref->var->type;
         instr->num_components = type->vector_elements;
         intrinsic_set_std430_align(instr, type);

         /* Setup destination register */
         unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           type->vector_elements, bit_size, NULL);

         nir_builder_instr_insert(&b, &instr->instr);

         /* The value in shared memory is a 32-bit value */
         if (type->is_boolean())
            ret = nir_b2b1(&b, &instr->dest.ssa);
         break;
      }
      case nir_intrinsic_store_shared: {
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_constant *write_mask = ((ir_instruction *)param)->as_constant();

         nir_intrinsic_set_base(instr, 0);
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(offset));

         nir_intrinsic_set_write_mask(instr, write_mask->value.u[0]);

         nir_ssa_def *nir_val = evaluate_rvalue(val);
         /* The value in shared memory is a 32-bit value */
         if (val->type->is_boolean())
            nir_val = nir_b2b32(&b, nir_val);

         instr->src[0] = nir_src_for_ssa(nir_val);
         instr->num_components = val->type->vector_elements;
         intrinsic_set_std430_align(instr, val->type);

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_shared_atomic_add:
      case nir_intrinsic_shared_atomic_imin:
      case nir_intrinsic_shared_atomic_umin:
      case nir_intrinsic_shared_atomic_imax:
      case nir_intrinsic_shared_atomic_umax:
      case nir_intrinsic_shared_atomic_and:
      case nir_intrinsic_shared_atomic_or:
      case nir_intrinsic_shared_atomic_xor:
      case nir_intrinsic_shared_atomic_exchange:
      case nir_intrinsic_shared_atomic_comp_swap:
      case nir_intrinsic_shared_atomic_fadd:
      case nir_intrinsic_shared_atomic_fmin:
      case nir_intrinsic_shared_atomic_fmax:
      case nir_intrinsic_shared_atomic_fcomp_swap: {
         int param_count = ir->actual_parameters.length();
         assert(param_count == 2 || param_count == 3);

         exec_node *param = ir->actual_parameters.get_head();
         ir_instruction *inst = (ir_instruction *) param;
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));

         /* data1 parameter (this is always present) */
         param = param->get_next();
         inst = (ir_instruction *) param;
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));

         /* data2 parameter (only with atomic_comp_swap) */
         if (param_count == 3) {
            assert(op == nir_intrinsic_shared_atomic_comp_swap ||
                   op == nir_intrinsic_shared_atomic_fcomp_swap);
            param = param->get_next();
            inst = (ir_instruction *) param;
            instr->src[2] =
               nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
         }

         assert(ir->return_deref);
         unsigned bit_size = glsl_get_bit_size(ir->return_deref->type);
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements,
                           bit_size, NULL);
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_vote_any:
      case nir_intrinsic_vote_all:
      case nir_intrinsic_vote_ieq: {
         nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
         instr->num_components = 1;

         ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }

      case nir_intrinsic_ballot: {
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements, 64, NULL);
         instr->num_components = ir->return_deref->type->vector_elements;

         ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_read_invocation: {
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements, 32, NULL);
         instr->num_components = ir->return_deref->type->vector_elements;

         ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

         ir_rvalue *invocation = (ir_rvalue *) ir->actual_parameters.get_head()->next;
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(invocation));

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_read_first_invocation: {
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements, 32, NULL);
         instr->num_components = ir->return_deref->type->vector_elements;

         ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_is_helper_invocation: {
         nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
         instr->num_components = 1;
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      default:
         unreachable("not reached");
      }

      if (ir->return_deref)
         nir_store_deref(&b, evaluate_deref(ir->return_deref), ret, ~0);

      return;
   }

   struct hash_entry *entry =
      _mesa_hash_table_search(this->overload_table, ir->callee);
   assert(entry);
   nir_function *callee = (nir_function *) entry->data;

   nir_call_instr *call = nir_call_instr_create(this->shader, callee);

   unsigned i = 0;
   nir_deref_instr *ret_deref = NULL;
   if (ir->return_deref) {
      nir_variable *ret_tmp =
         nir_local_variable_create(this->impl, ir->return_deref->type,
                                   "return_tmp");
      ret_deref = nir_build_deref_var(&b, ret_tmp);
      call->params[i++] = nir_src_for_ssa(&ret_deref->dest.ssa);
   }

   foreach_two_lists(formal_node, &ir->callee->parameters,
                     actual_node, &ir->actual_parameters) {
      ir_rvalue *param_rvalue = (ir_rvalue *) actual_node;
      ir_variable *sig_param = (ir_variable *) formal_node;

      if (sig_param->data.mode == ir_var_function_out) {
         nir_deref_instr *out_deref = evaluate_deref(param_rvalue);
         call->params[i] = nir_src_for_ssa(&out_deref->dest.ssa);
      } else if (sig_param->data.mode == ir_var_function_in) {
         nir_ssa_def *val = evaluate_rvalue(param_rvalue);
         nir_src src = nir_src_for_ssa(val);

         nir_src_copy(&call->params[i], &src, call);
      } else if (sig_param->data.mode == ir_var_function_inout) {
         unreachable("unimplemented: inout parameters");
      }

      i++;
   }

   nir_builder_instr_insert(&b, &call->instr);

   if (ir->return_deref)
      nir_store_deref(&b, evaluate_deref(ir->return_deref),
                      nir_load_deref(&b, ret_deref), ~0);
}

void
nir_visitor::visit(ir_assignment *ir)
{
   unsigned num_components = ir->lhs->type->vector_elements;

   b.exact = ir->lhs->variable_referenced()->data.invariant ||
             ir->lhs->variable_referenced()->data.precise;

   if ((ir->rhs->as_dereference() || ir->rhs->as_constant()) &&
       (ir->write_mask == (1 << num_components) - 1 || ir->write_mask == 0)) {
      nir_deref_instr *lhs = evaluate_deref(ir->lhs);
      nir_deref_instr *rhs = evaluate_deref(ir->rhs);
      enum gl_access_qualifier lhs_qualifiers = deref_get_qualifier(lhs);
      enum gl_access_qualifier rhs_qualifiers = deref_get_qualifier(rhs);
      if (ir->condition) {
         nir_push_if(&b, evaluate_rvalue(ir->condition));
         nir_copy_deref_with_access(&b, lhs, rhs, lhs_qualifiers,
                                    rhs_qualifiers);
         nir_pop_if(&b, NULL);
      } else {
         nir_copy_deref_with_access(&b, lhs, rhs, lhs_qualifiers,
                                    rhs_qualifiers);
      }
      return;
   }

   assert(ir->rhs->type->is_scalar() || ir->rhs->type->is_vector());

   ir->lhs->accept(this);
   nir_deref_instr *lhs_deref = this->deref;
   nir_ssa_def *src = evaluate_rvalue(ir->rhs);

   if (ir->write_mask != (1 << num_components) - 1 && ir->write_mask != 0) {
      /* GLSL IR will give us the input to the write-masked assignment in a
       * single packed vector. So, for example, if the writemask is xzw, then
       * we have to swizzle x -> x, y -> z, and z -> w and get the y component
       * from the load.
       */
      unsigned swiz[4];
      unsigned component = 0;
      for (unsigned i = 0; i < 4; i++) {
         swiz[i] = ir->write_mask & (1 << i) ? component++ : 0;
      }
      src = nir_swizzle(&b, src, swiz, num_components);
   }

   enum gl_access_qualifier qualifiers = deref_get_qualifier(lhs_deref);
   if (ir->condition) {
      nir_push_if(&b, evaluate_rvalue(ir->condition));
      nir_store_deref_with_access(&b, lhs_deref, src, ir->write_mask,
                                  qualifiers);
      nir_pop_if(&b, NULL);
   } else {
      nir_store_deref_with_access(&b, lhs_deref, src, ir->write_mask,
                                  qualifiers);
   }
}

/*
 * Given an instruction, returns a pointer to its destination or NULL if there
 * is no destination.
 *
 * Note that this only handles instructions we generate at this level.
 */
static nir_dest *
get_instr_dest(nir_instr *instr)
{
   nir_alu_instr *alu_instr;
   nir_intrinsic_instr *intrinsic_instr;
   nir_tex_instr *tex_instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      alu_instr = nir_instr_as_alu(instr);
      return &alu_instr->dest.dest;

   case nir_instr_type_intrinsic:
      intrinsic_instr = nir_instr_as_intrinsic(instr);
      if (nir_intrinsic_infos[intrinsic_instr->intrinsic].has_dest)
         return &intrinsic_instr->dest;
      else
         return NULL;

   case nir_instr_type_tex:
      tex_instr = nir_instr_as_tex(instr);
      return &tex_instr->dest;

   default:
      unreachable("not reached");
   }

   return NULL;
}

void
nir_visitor::add_instr(nir_instr *instr, unsigned num_components,
                       unsigned bit_size)
{
   nir_dest *dest = get_instr_dest(instr);

   if (dest)
      nir_ssa_dest_init(instr, dest, num_components, bit_size, NULL);

   nir_builder_instr_insert(&b, instr);

   if (dest) {
      assert(dest->is_ssa);
      this->result = &dest->ssa;
   }
}

nir_ssa_def *
nir_visitor::evaluate_rvalue(ir_rvalue* ir)
{
   ir->accept(this);

   if (ir->as_dereference() || ir->as_constant()) {
      /*
       * A dereference is being used on the right hand side, which means we
       * must emit a variable load.
       */

      enum gl_access_qualifier access = deref_get_qualifier(this->deref);
      this->result = nir_load_deref_with_access(&b, this->deref, access);
   }

   return this->result;
}

static bool
type_is_float(glsl_base_type type)
{
   return type == GLSL_TYPE_FLOAT || type == GLSL_TYPE_DOUBLE ||
          type == GLSL_TYPE_FLOAT16;
}

static bool
type_is_signed(glsl_base_type type)
{
   return type == GLSL_TYPE_INT || type == GLSL_TYPE_INT64 ||
          type == GLSL_TYPE_INT16;
}

void
nir_visitor::visit(ir_expression *ir)
{
   /* Some special cases */
   switch (ir->operation) {
   case ir_unop_interpolate_at_centroid:
   case ir_binop_interpolate_at_offset:
   case ir_binop_interpolate_at_sample: {
      ir_dereference *deref = ir->operands[0]->as_dereference();
      ir_swizzle *swizzle = NULL;
      if (!deref) {
         /* the api does not allow a swizzle here, but the varying packing code
          * may have pushed one into here.
          */
         swizzle = ir->operands[0]->as_swizzle();
         assert(swizzle);
         deref = swizzle->val->as_dereference();
         assert(deref);
      }

      deref->accept(this);

      nir_intrinsic_op op;
      if (this->deref->mode == nir_var_shader_in) {
         switch (ir->operation) {
         case ir_unop_interpolate_at_centroid:
            op = nir_intrinsic_interp_deref_at_centroid;
            break;
         case ir_binop_interpolate_at_offset:
            op = nir_intrinsic_interp_deref_at_offset;
            break;
         case ir_binop_interpolate_at_sample:
            op = nir_intrinsic_interp_deref_at_sample;
            break;
         default:
            unreachable("Invalid interpolation intrinsic");
         }
      } else {
         /* This case can happen if the vertex shader does not write the
          * given varying. In this case, the linker will lower it to a
          * global variable. Since interpolating a variable makes no
          * sense, we'll just turn it into a load which will probably
          * eventually end up as an SSA definition.
          */
         assert(this->deref->mode == nir_var_shader_temp);
         op = nir_intrinsic_load_deref;
      }

      nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(shader, op);
      intrin->num_components = deref->type->vector_elements;
      intrin->src[0] = nir_src_for_ssa(&this->deref->dest.ssa);

      if (intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
          intrin->intrinsic == nir_intrinsic_interp_deref_at_sample)
         intrin->src[1] = nir_src_for_ssa(evaluate_rvalue(ir->operands[1]));

      unsigned bit_size = glsl_get_bit_size(deref->type);
      add_instr(&intrin->instr, deref->type->vector_elements, bit_size);

      if (swizzle) {
         unsigned swiz[4] = {
            swizzle->mask.x, swizzle->mask.y, swizzle->mask.z, swizzle->mask.w
         };

         result = nir_swizzle(&b, result, swiz,
                              swizzle->type->vector_elements);
      }

      return;
   }

   case ir_unop_ssbo_unsized_array_length: {
      nir_intrinsic_instr *intrin =
         nir_intrinsic_instr_create(b.shader,
                                    nir_intrinsic_deref_buffer_array_length);

      ir_dereference *deref = ir->operands[0]->as_dereference();
      intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->dest.ssa);

      add_instr(&intrin->instr, 1, 32);
      return;
   }

   case ir_binop_ubo_load:
      /* UBO loads should only have been lowered in GLSL IR for non-nir drivers,
       * NIR drivers make use of gl_nir_lower_buffers() instead.
       */
      unreachable("Invalid operation nir doesn't want lowered ubo loads");

   default:
      break;
   }

   nir_ssa_def *srcs[4];
   for (unsigned i = 0; i < ir->num_operands; i++)
      srcs[i] = evaluate_rvalue(ir->operands[i]);

   glsl_base_type types[4];
   for (unsigned i = 0; i < ir->num_operands; i++)
      types[i] = ir->operands[i]->type->base_type;

   glsl_base_type out_type = ir->type->base_type;

   switch (ir->operation) {
   case ir_unop_bit_not: result = nir_inot(&b, srcs[0]); break;
   case ir_unop_logic_not:
      result = nir_inot(&b, srcs[0]);
      break;
   case ir_unop_neg:
      result = type_is_float(types[0]) ? nir_fneg(&b, srcs[0])
                                       : nir_ineg(&b, srcs[0]);
      break;
   case ir_unop_abs:
      result = type_is_float(types[0]) ? nir_fabs(&b, srcs[0])
                                       : nir_iabs(&b, srcs[0]);
      break;
   case ir_unop_clz:
      result = nir_uclz(&b, srcs[0]);
      break;
   case ir_unop_saturate:
      assert(type_is_float(types[0]));
      result = nir_fsat(&b, srcs[0]);
      break;
   case ir_unop_sign:
      result = type_is_float(types[0]) ? nir_fsign(&b, srcs[0])
                                       : nir_isign(&b, srcs[0]);
      break;
   case ir_unop_rcp:  result = nir_frcp(&b, srcs[0]);  break;
   case ir_unop_rsq:  result = nir_frsq(&b, srcs[0]);  break;
   case ir_unop_sqrt: result = nir_fsqrt(&b, srcs[0]); break;
   case ir_unop_exp:  unreachable("ir_unop_exp should have been lowered");
   case ir_unop_log:  unreachable("ir_unop_log should have been lowered");
   case ir_unop_exp2: result = nir_fexp2(&b, srcs[0]); break;
   case ir_unop_log2: result = nir_flog2(&b, srcs[0]); break;

   case ir_unop_i642u64:
   case ir_unop_u642i64: {
      nir_alu_type src_type = nir_get_nir_type_for_glsl_base_type(types[0]);
      nir_alu_type dst_type = nir_get_nir_type_for_glsl_base_type(out_type);
      result = nir_build_alu(&b, nir_type_conversion_op(src_type, dst_type,
                                 nir_rounding_mode_undef),
                             srcs[0], NULL, NULL, NULL);
      /* b2i and b2f don't have fixed bit-size versions so the builder will
       * just assume 32 and we have to fix it up here.
       */
      result->bit_size = nir_alu_type_get_type_size(dst_type);
      break;
   }

   case ir_unop_f2fmp: {
      result = nir_build_alu(&b, nir_op_f2fmp, srcs[0], NULL, NULL, NULL);
      break;
   }

   case ir_unop_i2imp: {
      result = nir_build_alu(&b, nir_op_i2imp, srcs[0], NULL, NULL, NULL);
      break;
   }

   case ir_unop_u2ump: {
      result = nir_build_alu(&b, nir_op_u2ump, srcs[0], NULL, NULL, NULL);
      break;
   }
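
   /* NIR ALU values are typeless bit patterns, so reinterpreting bits (and
    * subroutine handles) needs no real conversion; a plain mov suffices.
    */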
   case ir_unop_bitcast_i2f:
   case ir_unop_bitcast_f2i:
   case ir_unop_bitcast_u2f:
   case ir_unop_bitcast_f2u:
   case ir_unop_bitcast_i642d:
   case ir_unop_bitcast_d2i64:
   case ir_unop_bitcast_u642d:
   case ir_unop_bitcast_d2u64:
   case ir_unop_subroutine_to_int:
      result = nir_mov(&b, srcs[0]);
      break;
   case ir_unop_trunc: result = nir_ftrunc(&b, srcs[0]); break;
   case ir_unop_ceil:  result = nir_fceil(&b, srcs[0]); break;
   case ir_unop_floor: result = nir_ffloor(&b, srcs[0]); break;
   case ir_unop_fract: result = nir_ffract(&b, srcs[0]); break;
   case ir_unop_frexp_exp: result = nir_frexp_exp(&b, srcs[0]); break;
   case ir_unop_frexp_sig: result = nir_frexp_sig(&b, srcs[0]); break;
   case ir_unop_round_even: result = nir_fround_even(&b, srcs[0]); break;
   case ir_unop_sin:   result = nir_fsin(&b, srcs[0]); break;
   case ir_unop_cos:   result = nir_fcos(&b, srcs[0]); break;
   case ir_unop_dFdx:        result = nir_fddx(&b, srcs[0]); break;
   case ir_unop_dFdy:        result = nir_fddy(&b, srcs[0]); break;
   case ir_unop_dFdx_fine:   result = nir_fddx_fine(&b, srcs[0]); break;
   case ir_unop_dFdy_fine:   result = nir_fddy_fine(&b, srcs[0]); break;
   case ir_unop_dFdx_coarse: result = nir_fddx_coarse(&b, srcs[0]); break;
   case ir_unop_dFdy_coarse: result = nir_fddy_coarse(&b, srcs[0]); break;
   case ir_unop_pack_snorm_2x16:
      result = nir_pack_snorm_2x16(&b, srcs[0]);
      break;
   case ir_unop_pack_snorm_4x8:
      result = nir_pack_snorm_4x8(&b, srcs[0]);
      break;
   case ir_unop_pack_unorm_2x16:
      result = nir_pack_unorm_2x16(&b, srcs[0]);
      break;
   case ir_unop_pack_unorm_4x8:
      result = nir_pack_unorm_4x8(&b, srcs[0]);
      break;
   case ir_unop_pack_half_2x16:
      result = nir_pack_half_2x16(&b, srcs[0]);
      break;
   case ir_unop_unpack_snorm_2x16:
      result = nir_unpack_snorm_2x16(&b, srcs[0]);
      break;
   case ir_unop_unpack_snorm_4x8:
      result = nir_unpack_snorm_4x8(&b, srcs[0]);
      break;
   case ir_unop_unpack_unorm_2x16:
      result = nir_unpack_unorm_2x16(&b, srcs[0]);
      break;
   case ir_unop_unpack_unorm_4x8:
      result = nir_unpack_unorm_4x8(&b, srcs[0]);
      break;
   case ir_unop_unpack_half_2x16:
      result = nir_unpack_half_2x16(&b, srcs[0]);
      break;
   case ir_unop_pack_sampler_2x32:
   case ir_unop_pack_image_2x32:
   case ir_unop_pack_double_2x32:
   case ir_unop_pack_int_2x32:
   case ir_unop_pack_uint_2x32:
      result = nir_pack_64_2x32(&b, srcs[0]);
      break;
   case ir_unop_unpack_sampler_2x32:
   case ir_unop_unpack_image_2x32:
   case ir_unop_unpack_double_2x32:
   case ir_unop_unpack_int_2x32:
   case ir_unop_unpack_uint_2x32:
      result = nir_unpack_64_2x32(&b, srcs[0]);
      break;
   case ir_unop_bitfield_reverse:
      result = nir_bitfield_reverse(&b, srcs[0]);
      break;
   case ir_unop_bit_count:
      result = nir_bit_count(&b, srcs[0]);
      break;
   case ir_unop_find_msb:
      switch (types[0]) {
      case GLSL_TYPE_UINT:
         result = nir_ufind_msb(&b, srcs[0]);
         break;
      case GLSL_TYPE_INT:
         result = nir_ifind_msb(&b, srcs[0]);
         break;
      default:
         unreachable("Invalid type for findMSB()");
      }
      break;
   case ir_unop_find_lsb:
      result = nir_find_lsb(&b, srcs[0]);
      break;

   case ir_unop_get_buffer_size: {
      nir_intrinsic_instr *load = nir_intrinsic_instr_create(
         this->shader,
         nir_intrinsic_get_buffer_size);
      load->num_components = ir->type->vector_elements;
      load->src[0] = nir_src_for_ssa(evaluate_rvalue(ir->operands[0]));
      unsigned bit_size = glsl_get_bit_size(ir->type);
      add_instr(&load->instr, ir->type->vector_elements, bit_size);
      return;
   }

   case ir_unop_atan:
      result = nir_atan(&b, srcs[0]);
      break;
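
   /* The arithmetic binops below pick the float/signed/unsigned builder
    * variant from the result type (out_type).
    */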
   case ir_binop_add:
      result = type_is_float(out_type) ? nir_fadd(&b, srcs[0], srcs[1])
                                       : nir_iadd(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_add_sat:
      result = type_is_signed(out_type) ? nir_iadd_sat(&b, srcs[0], srcs[1])
                                        : nir_uadd_sat(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_sub:
      result = type_is_float(out_type) ? nir_fsub(&b, srcs[0], srcs[1])
                                       : nir_isub(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_sub_sat:
      result = type_is_signed(out_type) ? nir_isub_sat(&b, srcs[0], srcs[1])
                                        : nir_usub_sat(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_abs_sub:
      /* out_type is always unsigned for ir_binop_abs_sub, so we have to key
       * on the type of the sources.
       */
      result = type_is_signed(types[0]) ? nir_uabs_isub(&b, srcs[0], srcs[1])
                                        : nir_uabs_usub(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_avg:
      result = type_is_signed(out_type) ? nir_ihadd(&b, srcs[0], srcs[1])
                                        : nir_uhadd(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_avg_round:
      result = type_is_signed(out_type) ? nir_irhadd(&b, srcs[0], srcs[1])
                                        : nir_urhadd(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_mul_32x16:
      result = type_is_signed(out_type) ? nir_imul_32x16(&b, srcs[0], srcs[1])
                                        : nir_umul_32x16(&b, srcs[0], srcs[1]);
      break;
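
   /* A 64-bit multiply whose sources are still 32-bit integers can use the
    * widening 2x32->64 multiply and keep the full product.
    */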
   case ir_binop_mul:
      if (type_is_float(out_type))
         result = nir_fmul(&b, srcs[0], srcs[1]);
      else if (out_type == GLSL_TYPE_INT64 &&
               (ir->operands[0]->type->base_type == GLSL_TYPE_INT ||
                ir->operands[1]->type->base_type == GLSL_TYPE_INT))
         result = nir_imul_2x32_64(&b, srcs[0], srcs[1]);
      else if (out_type == GLSL_TYPE_UINT64 &&
               (ir->operands[0]->type->base_type == GLSL_TYPE_UINT ||
                ir->operands[1]->type->base_type == GLSL_TYPE_UINT))
         result = nir_umul_2x32_64(&b, srcs[0], srcs[1]);
      else
         result = nir_imul(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_div:
      if (type_is_float(out_type))
         result = nir_fdiv(&b, srcs[0], srcs[1]);
      else if (type_is_signed(out_type))
         result = nir_idiv(&b, srcs[0], srcs[1]);
      else
         result = nir_udiv(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_mod:
      result = type_is_float(out_type) ? nir_fmod(&b, srcs[0], srcs[1])
                                       : nir_umod(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_min:
      if (type_is_float(out_type))
         result = nir_fmin(&b, srcs[0], srcs[1]);
      else if (type_is_signed(out_type))
         result = nir_imin(&b, srcs[0], srcs[1]);
      else
         result = nir_umin(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_max:
      if (type_is_float(out_type))
         result = nir_fmax(&b, srcs[0], srcs[1]);
      else if (type_is_signed(out_type))
         result = nir_imax(&b, srcs[0], srcs[1]);
      else
         result = nir_umax(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_pow: result = nir_fpow(&b, srcs[0], srcs[1]); break;
   case ir_binop_bit_and: result = nir_iand(&b, srcs[0], srcs[1]); break;
   case ir_binop_bit_or: result = nir_ior(&b, srcs[0], srcs[1]); break;
   case ir_binop_bit_xor: result = nir_ixor(&b, srcs[0], srcs[1]); break;
   case ir_binop_logic_and:
      result = nir_iand(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_logic_or:
      result = nir_ior(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_logic_xor:
      result = nir_ixor(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_lshift: result = nir_ishl(&b, srcs[0], srcs[1]); break;
   case ir_binop_rshift:
      result = (type_is_signed(out_type)) ? nir_ishr(&b, srcs[0], srcs[1])
                                          : nir_ushr(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_imul_high:
      result = (out_type == GLSL_TYPE_INT) ? nir_imul_high(&b, srcs[0], srcs[1])
                                           : nir_umul_high(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_carry:  result = nir_uadd_carry(&b, srcs[0], srcs[1]);  break;
   case ir_binop_borrow: result = nir_usub_borrow(&b, srcs[0], srcs[1]); break;
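
   /* Comparisons key on the source type (types[0]) rather than out_type,
    * since their result type is always boolean.
    */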
   case ir_binop_less:
      if (type_is_float(types[0]))
         result = nir_flt(&b, srcs[0], srcs[1]);
      else if (type_is_signed(types[0]))
         result = nir_ilt(&b, srcs[0], srcs[1]);
      else
         result = nir_ult(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_gequal:
      if (type_is_float(types[0]))
         result = nir_fge(&b, srcs[0], srcs[1]);
      else if (type_is_signed(types[0]))
         result = nir_ige(&b, srcs[0], srcs[1]);
      else
         result = nir_uge(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_equal:
      if (type_is_float(types[0]))
         result = nir_feq(&b, srcs[0], srcs[1]);
      else
         result = nir_ieq(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_nequal:
      if (type_is_float(types[0]))
         result = nir_fne(&b, srcs[0], srcs[1]);
      else
         result = nir_ine(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_all_equal:
      if (type_is_float(types[0])) {
         switch (ir->operands[0]->type->vector_elements) {
            case 1: result = nir_feq(&b, srcs[0], srcs[1]); break;
            case 2: result = nir_ball_fequal2(&b, srcs[0], srcs[1]); break;
            case 3: result = nir_ball_fequal3(&b, srcs[0], srcs[1]); break;
            case 4: result = nir_ball_fequal4(&b, srcs[0], srcs[1]); break;
            default:
               unreachable("not reached");
         }
      } else {
         switch (ir->operands[0]->type->vector_elements) {
            case 1: result = nir_ieq(&b, srcs[0], srcs[1]); break;
            case 2: result = nir_ball_iequal2(&b, srcs[0], srcs[1]); break;
            case 3: result = nir_ball_iequal3(&b, srcs[0], srcs[1]); break;
            case 4: result = nir_ball_iequal4(&b, srcs[0], srcs[1]); break;
            default:
               unreachable("not reached");
         }
      }
      break;
   case ir_binop_any_nequal:
      if (type_is_float(types[0])) {
         switch (ir->operands[0]->type->vector_elements) {
            case 1: result = nir_fne(&b, srcs[0], srcs[1]); break;
            case 2: result = nir_bany_fnequal2(&b, srcs[0], srcs[1]); break;
            case 3: result = nir_bany_fnequal3(&b, srcs[0], srcs[1]); break;
            case 4: result = nir_bany_fnequal4(&b, srcs[0], srcs[1]); break;
            default:
               unreachable("not reached");
         }
      } else {
         switch (ir->operands[0]->type->vector_elements) {
            case 1: result = nir_ine(&b, srcs[0], srcs[1]); break;
            case 2: result = nir_bany_inequal2(&b, srcs[0], srcs[1]); break;
            case 3: result = nir_bany_inequal3(&b, srcs[0], srcs[1]); break;
            case 4: result = nir_bany_inequal4(&b, srcs[0], srcs[1]); break;
            default:
               unreachable("not reached");
         }
      }
      break;
   case ir_binop_dot:
      switch (ir->operands[0]->type->vector_elements) {
         case 2: result = nir_fdot2(&b, srcs[0], srcs[1]); break;
         case 3: result = nir_fdot3(&b, srcs[0], srcs[1]); break;
         case 4: result = nir_fdot4(&b, srcs[0], srcs[1]); break;
         default:
            unreachable("not reached");
      }
      break;

   case ir_binop_vector_extract: {
      result = nir_channel(&b, srcs[0], 0);
      for (unsigned i = 1; i < ir->operands[0]->type->vector_elements; i++) {
         nir_ssa_def *swizzled = nir_channel(&b, srcs[0], i);
         result = nir_bcsel(&b, nir_ieq(&b, srcs[1], nir_imm_int(&b, i)),
                            swizzled, result);
      }
      break;
   }

   case ir_binop_atan2:
      result = nir_atan2(&b, srcs[0], srcs[1]);
      break;

   case ir_binop_ldexp: result = nir_ldexp(&b, srcs[0], srcs[1]); break;
   case ir_triop_fma:
      result = nir_ffma(&b, srcs[0], srcs[1], srcs[2]);
      break;
   case ir_triop_lrp:
      result = nir_flrp(&b, srcs[0], srcs[1], srcs[2]);
      break;
   case ir_triop_csel:
      result = nir_bcsel(&b, srcs[0], srcs[1], srcs[2]);
      break;
   case ir_triop_bitfield_extract:
      result = (out_type == GLSL_TYPE_INT) ?
         nir_ibitfield_extract(&b, srcs[0], srcs[1], srcs[2]) :
         nir_ubitfield_extract(&b, srcs[0], srcs[1], srcs[2]);
      break;
   case ir_quadop_bitfield_insert:
      result = nir_bitfield_insert(&b, srcs[0], srcs[1], srcs[2], srcs[3]);
      break;
   case ir_quadop_vector:
      result = nir_vec(&b, srcs, ir->type->vector_elements);
      break;

   default:
      unreachable("not reached");
   }
}

void
nir_visitor::visit(ir_swizzle *ir)
{
   unsigned swizzle[4] = { ir->mask.x, ir->mask.y, ir->mask.z, ir->mask.w };
   result = nir_swizzle(&b, evaluate_rvalue(ir->val), swizzle,
                        ir->type->vector_elements);
}

void
nir_visitor::visit(ir_texture *ir)
{
   unsigned num_srcs;
   nir_texop op;
   switch (ir->op) {
   case ir_tex:
      op = nir_texop_tex;
      num_srcs = 1; /* coordinate */
      break;

   case ir_txb:
   case ir_txl:
      op = (ir->op == ir_txb) ? nir_texop_txb : nir_texop_txl;
      num_srcs = 2; /* coordinate, bias/lod */
      break;

   case ir_txd:
      op = nir_texop_txd; /* coordinate, dPdx, dPdy */
      num_srcs = 3;
      break;

   case ir_txf:
      op = nir_texop_txf;
      if (ir->lod_info.lod != NULL)
         num_srcs = 2; /* coordinate, lod */
      else
         num_srcs = 1; /* coordinate */
      break;

   case ir_txf_ms:
      op = nir_texop_txf_ms;
      num_srcs = 2; /* coordinate, sample_index */
      break;

   case ir_txs:
      op = nir_texop_txs;
      if (ir->lod_info.lod != NULL)
         num_srcs = 1; /* lod */
      else
         num_srcs = 0;
      break;

   case ir_lod:
      op = nir_texop_lod;
      num_srcs = 1; /* coordinate */
      break;

   case ir_tg4:
      op = nir_texop_tg4;
      num_srcs = 1; /* coordinate */
      break;

   case ir_query_levels:
      op = nir_texop_query_levels;
      num_srcs = 0;
      break;

   case ir_texture_samples:
      op = nir_texop_texture_samples;
      num_srcs = 0;
      break;

   case ir_samples_identical:
      op = nir_texop_samples_identical;
      num_srcs = 1; /* coordinate */
      break;

   default:
      unreachable("not reached");
   }

   if (ir->projector != NULL)
      num_srcs++;
   if (ir->shadow_comparator != NULL)
      num_srcs++;
   /* offsets are constants we store inside nir_tex_intrs.offsets */
   if (ir->offset != NULL && !ir->offset->type->is_array())
      num_srcs++;

   /* Add two for the texture and sampler derefs */
   num_srcs += 2;

   nir_tex_instr *instr = nir_tex_instr_create(this->shader, num_srcs);
   instr->op = op;
   instr->sampler_dim =
      (glsl_sampler_dim) ir->sampler->type->sampler_dimensionality;
   instr->is_array = ir->sampler->type->sampler_array;
   instr->is_shadow = ir->sampler->type->sampler_shadow;
   if (instr->is_shadow)
      instr->is_new_style_shadow = (ir->type->vector_elements == 1);
   switch (ir->type->base_type) {
   case GLSL_TYPE_FLOAT:
      instr->dest_type = nir_type_float;
      break;
   case GLSL_TYPE_FLOAT16:
      instr->dest_type = nir_type_float16;
      break;
   case GLSL_TYPE_INT16:
      instr->dest_type = nir_type_int16;
      break;
   case GLSL_TYPE_UINT16:
      instr->dest_type = nir_type_uint16;
      break;
   case GLSL_TYPE_INT:
      instr->dest_type = nir_type_int;
      break;
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_UINT:
      instr->dest_type = nir_type_uint;
      break;
   default:
      unreachable("not reached");
   }
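
   /* src[0] and src[1] always hold the texture and the sampler, either as
    * derefs or as bindless handles; the remaining sources are appended below
    * and counted with src_number.
    */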
   nir_deref_instr *sampler_deref = evaluate_deref(ir->sampler);

   /* check for bindless handles */
   if (sampler_deref->mode != nir_var_uniform ||
       nir_deref_instr_get_variable(sampler_deref)->data.bindless) {
      nir_ssa_def *load = nir_load_deref(&b, sampler_deref);
      instr->src[0].src = nir_src_for_ssa(load);
      instr->src[0].src_type = nir_tex_src_texture_handle;
      instr->src[1].src = nir_src_for_ssa(load);
      instr->src[1].src_type = nir_tex_src_sampler_handle;
   } else {
      instr->src[0].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
      instr->src[0].src_type = nir_tex_src_texture_deref;
      instr->src[1].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
      instr->src[1].src_type = nir_tex_src_sampler_deref;
   }

   unsigned src_number = 2;

   if (ir->coordinate != NULL) {
      instr->coord_components = ir->coordinate->type->vector_elements;
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->coordinate));
      instr->src[src_number].src_type = nir_tex_src_coord;
      src_number++;
   }

   if (ir->projector != NULL) {
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->projector));
      instr->src[src_number].src_type = nir_tex_src_projector;
      src_number++;
   }

   if (ir->shadow_comparator != NULL) {
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->shadow_comparator));
      instr->src[src_number].src_type = nir_tex_src_comparator;
      src_number++;
   }

   if (ir->offset != NULL) {
      if (ir->offset->type->is_array()) {
         for (int i = 0; i < ir->offset->type->array_size(); i++) {
            const ir_constant *c =
               ir->offset->as_constant()->get_array_element(i);

            for (unsigned j = 0; j < 2; ++j) {
               int val = c->get_int_component(j);
               assert(val <= 31 && val >= -32);
               instr->tg4_offsets[i][j] = val;
            }
         }
      } else {
         assert(ir->offset->type->is_vector() || ir->offset->type->is_scalar());

         instr->src[src_number].src =
            nir_src_for_ssa(evaluate_rvalue(ir->offset));
         instr->src[src_number].src_type = nir_tex_src_offset;
         src_number++;
      }
   }

   switch (ir->op) {
   case ir_txb:
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->lod_info.bias));
      instr->src[src_number].src_type = nir_tex_src_bias;
      src_number++;
      break;

   case ir_txl:
   case ir_txf:
   case ir_txs:
      if (ir->lod_info.lod != NULL) {
         instr->src[src_number].src =
            nir_src_for_ssa(evaluate_rvalue(ir->lod_info.lod));
         instr->src[src_number].src_type = nir_tex_src_lod;
         src_number++;
      }
      break;

   case ir_txd:
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->lod_info.grad.dPdx));
      instr->src[src_number].src_type = nir_tex_src_ddx;
      src_number++;
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->lod_info.grad.dPdy));
      instr->src[src_number].src_type = nir_tex_src_ddy;
      src_number++;
      break;

   case ir_txf_ms:
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->lod_info.sample_index));
      instr->src[src_number].src_type = nir_tex_src_ms_index;
      src_number++;
      break;

   case ir_tg4:
      instr->component = ir->lod_info.component->as_constant()->value.u[0];
      break;

   default:
      break;
   }

   assert(src_number == num_srcs);

   unsigned bit_size = glsl_get_bit_size(ir->type);
   add_instr(&instr->instr, nir_tex_instr_dest_size(instr), bit_size);
}

void
nir_visitor::visit(ir_constant *ir)
{
   /*
    * We don't know if this variable is an array or struct that gets
    * dereferenced, so do the safe thing and make it a variable with a
    * constant initializer and return a dereference.
    */

   nir_variable *var =
      nir_local_variable_create(this->impl, ir->type, "const_temp");
   var->data.read_only = true;
   var->constant_initializer = constant_copy(ir, var);

   this->deref = nir_build_deref_var(&b, var);
}
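
/* Function "out" parameters are passed as NIR function parameters; the index
 * skips slot 0 when the signature returns a value, since that slot carries
 * the return value deref.  Everything else is looked up in var_table.
 */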
void
nir_visitor::visit(ir_dereference_variable *ir)
{
   if (ir->variable_referenced()->data.mode == ir_var_function_out) {
      unsigned i = (sig->return_type != glsl_type::void_type) ? 1 : 0;

      foreach_in_list(ir_variable, param, &sig->parameters) {
         if (param == ir->variable_referenced()) {
            break;
         }
         i++;
      }

      this->deref = nir_build_deref_cast(&b, nir_load_param(&b, i),
                                         nir_var_function_temp, ir->type, 0);
      return;
   }

   assert(ir->variable_referenced()->data.mode != ir_var_function_inout);

   struct hash_entry *entry =
      _mesa_hash_table_search(this->var_table, ir->var);

   nir_variable *var = (nir_variable *) entry->data;

   this->deref = nir_build_deref_var(&b, var);
}

void
nir_visitor::visit(ir_dereference_record *ir)
{
   ir->record->accept(this);

   int field_index = ir->field_idx;
   assert(field_index >= 0);

   this->deref = nir_build_deref_struct(&b, this->deref, field_index);
}

void
nir_visitor::visit(ir_dereference_array *ir)
{
   nir_ssa_def *index = evaluate_rvalue(ir->array_index);

   ir->array->accept(this);

   this->deref = nir_build_deref_array(&b, this->deref, index);
}

void
nir_visitor::visit(ir_barrier *)
{
   if (shader->info.stage == MESA_SHADER_COMPUTE) {
      nir_intrinsic_instr *shared_barrier =
         nir_intrinsic_instr_create(this->shader,
                                    nir_intrinsic_memory_barrier_shared);
      nir_builder_instr_insert(&b, &shared_barrier->instr);
   } else if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
      nir_intrinsic_instr *patch_barrier =
         nir_intrinsic_instr_create(this->shader,
                                    nir_intrinsic_memory_barrier_tcs_patch);
      nir_builder_instr_insert(&b, &patch_barrier->instr);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(this->shader, nir_intrinsic_control_barrier);
   nir_builder_instr_insert(&b, &instr->instr);
}
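
/* Build a NIR shader containing the GLSL software-fp64 helper functions from
 * float64_glsl.h, so drivers without native double support can link and
 * inline them.
 */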
nir_shader *
glsl_float64_funcs_to_nir(struct gl_context *ctx,
                          const nir_shader_compiler_options *options)
{
   /* We pretend it's a vertex shader.  Ultimately, the stage shouldn't
    * matter because we're not optimizing anything here.
    */
   struct gl_shader *sh = _mesa_new_shader(-1, MESA_SHADER_VERTEX);
   sh->Source = float64_source;
   sh->CompileStatus = COMPILE_FAILURE;
   _mesa_glsl_compile_shader(ctx, sh, false, false, true);

   if (!sh->CompileStatus) {
      _mesa_problem(ctx,
                    "fp64 software impl compile failed:\n%s\nsource:\n%s\n",
                    sh->InfoLog, float64_source);
      return NULL;
   }

   nir_shader *nir = nir_shader_create(NULL, MESA_SHADER_VERTEX, options, NULL);

   nir_visitor v1(ctx, nir);
   nir_function_visitor v2(&v1);
   v2.run(sh->ir);
   visit_exec_list(sh->ir, &v1);

   /* _mesa_delete_shader will try to free sh->Source but it's static const */
   sh->Source = NULL;
   _mesa_delete_shader(ctx, sh);

   nir_validate_shader(nir, "float64_funcs_to_nir");
   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Do some optimizations to clean up the shader now.  By optimizing the
    * functions in the library, we avoid having to re-do that work every
    * time we inline a copy of a function.  Reducing basic blocks also helps
    * with compile times.
    */
   NIR_PASS_V(nir, nir_lower_vars_to_ssa);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, nir_opt_cse);
   NIR_PASS_V(nir, nir_opt_gcm, true);
   NIR_PASS_V(nir, nir_opt_peephole_select, 1, false, false);
   NIR_PASS_V(nir, nir_opt_dce);