/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "float64_glsl.h"
#include "glsl_to_nir.h"
#include "ir_visitor.h"
#include "ir_hierarchical_visitor.h"
#include "ir.h"
#include "ir_optimization.h"
#include "program.h"
#include "compiler/nir/nir_control_flow.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_builtin_builder.h"
#include "compiler/nir/nir_deref.h"
#include "main/errors.h"
#include "main/mtypes.h"
#include "main/shaderobj.h"
#include "util/u_math.h"

/*
 * pass to lower GLSL IR to NIR
 *
 * This will lower variable dereferences to loads/stores of corresponding
 * variables in NIR - the variables will be converted to registers in a later
 * pass.
 */
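
/* The conversion runs in two visitor passes: nir_function_visitor below
 * first registers a nir_function for every GLSL IR function signature (so
 * calls, including forward references, can be resolved), then nir_visitor
 * emits the actual NIR instructions.  glsl_to_nir() finishes by lowering
 * returns, inlining everything and discarding all functions except main().
 */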

namespace {

class nir_visitor : public ir_visitor
{
public:
   nir_visitor(gl_context *ctx, nir_shader *shader);
   ~nir_visitor();

   virtual void visit(ir_variable *);
   virtual void visit(ir_function *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_if *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_demote *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_return *);
   virtual void visit(ir_call *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_barrier *);

   void create_function(ir_function_signature *ir);

private:
   void add_instr(nir_instr *instr, unsigned num_components, unsigned bit_size);
   nir_ssa_def *evaluate_rvalue(ir_rvalue *ir);

   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def **srcs);
   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1);
   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1,
                       nir_ssa_def *src2);
   nir_alu_instr *emit(nir_op op, unsigned dest_size, nir_ssa_def *src1,
                       nir_ssa_def *src2, nir_ssa_def *src3);

   bool supports_std430;

   nir_shader *shader;
   nir_function_impl *impl;
   nir_builder b;
   nir_ssa_def *result; /* result of the expression tree last visited */

   nir_deref_instr *evaluate_deref(ir_instruction *ir);

   nir_constant *constant_copy(ir_constant *ir, void *mem_ctx);

   /* most recent deref instruction created */
   nir_deref_instr *deref;

   /* whether the IR we're operating on is per-function or global */
   bool is_global;

   ir_function_signature *sig;

   /* map of ir_variable -> nir_variable */
   struct hash_table *var_table;

   /* map of ir_function_signature -> nir_function_overload */
   struct hash_table *overload_table;
};

/*
 * This visitor runs before the main visitor, calling create_function() for
 * each function so that the main visitor can resolve forward references in
 * calls.
 */

class nir_function_visitor : public ir_hierarchical_visitor
{
public:
   nir_function_visitor(nir_visitor *v) : visitor(v)
   {
   }
   virtual ir_visitor_status visit_enter(ir_function *);

private:
   nir_visitor *visitor;
};

/* glsl_to_nir can only handle converting certain function parameters
 * to NIR. This visitor checks for parameters it can't currently handle.
 */
class ir_function_param_visitor : public ir_hierarchical_visitor
{
public:
   ir_function_param_visitor()
      : unsupported(false)
   {
   }

   virtual ir_visitor_status visit_enter(ir_function_signature *ir)
   {

      if (ir->is_intrinsic())
         return visit_continue;

      foreach_in_list(ir_variable, param, &ir->parameters) {
         if (!param->type->is_vector() && !param->type->is_scalar()) {
            unsupported = true;
            return visit_stop;
         }

         if (param->data.mode == ir_var_function_inout) {
            unsupported = true;
            return visit_stop;
         }
      }

      if (!glsl_type_is_vector_or_scalar(ir->return_type) &&
          !ir->return_type->is_void()) {
         unsupported = true;
         return visit_stop;
      }

      return visit_continue;
   }

   bool unsupported;
};

} /* end of anonymous namespace */
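
/* Returns true if any function in the IR list takes a parameter or returns
 * a value that the visitor above flags as not directly convertible to NIR. */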
static bool
has_unsupported_function_param(exec_list *ir)
{
   ir_function_param_visitor visitor;
   visit_list_elements(&visitor, ir);
   return visitor.unsupported;
}

nir_shader *
glsl_to_nir(struct gl_context *ctx,
            const struct gl_shader_program *shader_prog,
            gl_shader_stage stage,
            const nir_shader_compiler_options *options)
{
   struct gl_linked_shader *sh = shader_prog->_LinkedShaders[stage];

   const struct gl_shader_compiler_options *gl_options =
      &ctx->Const.ShaderCompilerOptions[stage];

   /* glsl_to_nir can only handle converting certain function parameters
    * to NIR. If we find something we can't handle then we get the GLSL IR
    * opts to remove it before we continue on.
    *
    * TODO: add missing glsl ir to nir support and remove this loop.
    */
   while (has_unsupported_function_param(sh->ir)) {
      do_common_optimization(sh->ir, true, true, gl_options,
                             ctx->Const.NativeIntegers);
   }

   nir_shader *shader = nir_shader_create(NULL, stage, options,
                                          &sh->Program->info);

   nir_visitor v1(ctx, shader);
   nir_function_visitor v2(&v1);
   v2.run(sh->ir);
   visit_exec_list(sh->ir, &v1);

   nir_validate_shader(shader, "after glsl to nir, before function inline");

   /* We have to lower away local constant initializers right before we
    * inline functions.  That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   nir_lower_variable_initializers(shader, (nir_variable_mode)~0);
   nir_lower_returns(shader);
   nir_inline_functions(shader);
   nir_opt_deref(shader);

   nir_validate_shader(shader, "after function inlining and return lowering");

   /* Now that we have inlined everything remove all of the functions except
    * main().
    */
   foreach_list_typed_safe(nir_function, function, node, &shader->functions) {
      if (strcmp("main", function->name) != 0) {
         exec_node_remove(&function->node);
      }
   }

   shader->info.name = ralloc_asprintf(shader, "GLSL%d", shader_prog->Name);
   if (shader_prog->Label)
      shader->info.label = ralloc_strdup(shader, shader_prog->Label);

   /* Check for transform feedback varyings specified via the API */
   shader->info.has_transform_feedback_varyings =
      shader_prog->TransformFeedback.NumVarying > 0;

   /* Check for transform feedback varyings specified in the Shader */
   if (shader_prog->last_vert_prog)
      shader->info.has_transform_feedback_varyings |=
         shader_prog->last_vert_prog->sh.LinkedTransformFeedback->NumVarying > 0;

   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      shader->info.fs.pixel_center_integer = sh->Program->info.fs.pixel_center_integer;
      shader->info.fs.origin_upper_left = sh->Program->info.fs.origin_upper_left;
   }

   return shader;
}

nir_visitor::nir_visitor(gl_context *ctx, nir_shader *shader)
{
   this->supports_std430 = ctx->Const.UseSTD430AsDefaultPacking;
   this->shader = shader;
   this->is_global = true;
   this->var_table = _mesa_pointer_hash_table_create(NULL);
   this->overload_table = _mesa_pointer_hash_table_create(NULL);
   this->result = NULL;
   this->impl = NULL;
   this->deref = NULL;
   this->sig = NULL;

   memset(&this->b, 0, sizeof(this->b));
}

nir_visitor::~nir_visitor()
{
   _mesa_hash_table_destroy(this->var_table, NULL);
   _mesa_hash_table_destroy(this->overload_table, NULL);
}

nir_deref_instr *
nir_visitor::evaluate_deref(ir_instruction *ir)
{
   ir->accept(this);
   return this->deref;
}

nir_constant *
nir_visitor::constant_copy(ir_constant *ir, void *mem_ctx)
{
   if (ir == NULL)
      return NULL;

   nir_constant *ret = rzalloc(mem_ctx, nir_constant);

   const unsigned rows = ir->type->vector_elements;
   const unsigned cols = ir->type->matrix_columns;
   unsigned i;

   ret->num_elements = 0;
   switch (ir->type->base_type) {
   case GLSL_TYPE_UINT:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u32 = ir->value.u[r];

      break;

   case GLSL_TYPE_UINT16:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u16 = ir->value.u16[r];
      break;

   case GLSL_TYPE_INT:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i32 = ir->value.i[r];

      break;

   case GLSL_TYPE_INT16:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i16 = ir->value.i16[r];
      break;

   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
      if (cols > 1) {
         ret->elements = ralloc_array(mem_ctx, nir_constant *, cols);
         ret->num_elements = cols;
         for (unsigned c = 0; c < cols; c++) {
            nir_constant *col_const = rzalloc(mem_ctx, nir_constant);
            col_const->num_elements = 0;
            switch (ir->type->base_type) {
            case GLSL_TYPE_FLOAT:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].f32 = ir->value.f[c * rows + r];
               break;

            case GLSL_TYPE_FLOAT16:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].u16 = ir->value.f16[c * rows + r];
               break;

            case GLSL_TYPE_DOUBLE:
               for (unsigned r = 0; r < rows; r++)
                  col_const->values[r].f64 = ir->value.d[c * rows + r];
               break;

            default:
               unreachable("Cannot get here from the first level switch");
            }
            ret->elements[c] = col_const;
         }
      } else {
         switch (ir->type->base_type) {
         case GLSL_TYPE_FLOAT:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].f32 = ir->value.f[r];
            break;

         case GLSL_TYPE_FLOAT16:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].u16 = ir->value.f16[r];
            break;

         case GLSL_TYPE_DOUBLE:
            for (unsigned r = 0; r < rows; r++)
               ret->values[r].f64 = ir->value.d[r];
            break;

         default:
            unreachable("Cannot get here from the first level switch");
         }
      }
      break;

   case GLSL_TYPE_UINT64:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].u64 = ir->value.u64[r];
      break;

   case GLSL_TYPE_INT64:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].i64 = ir->value.i64[r];
      break;

   case GLSL_TYPE_BOOL:
      /* Only float base types can be matrices. */
      assert(cols == 1);

      for (unsigned r = 0; r < rows; r++)
         ret->values[r].b = ir->value.b[r];

      break;

   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_ARRAY:
      ret->elements = ralloc_array(mem_ctx, nir_constant *,
                                   ir->type->length);
      ret->num_elements = ir->type->length;

      for (i = 0; i < ir->type->length; i++)
         ret->elements[i] = constant_copy(ir->const_elements[i], mem_ctx);
      break;

   default:
      unreachable("not reached");
   }

   return ret;
}
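
/* Copies the array dimensions of array_type onto elem_type recursively, so
 * e.g. wrapping vec4 in the array type block[3][2] yields vec4[3][2]. */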
static const glsl_type *
wrap_type_in_array(const glsl_type *elem_type, const glsl_type *array_type)
{
   if (!array_type->is_array())
      return elem_type;

   elem_type = wrap_type_in_array(elem_type, array_type->fields.array);
   return glsl_type::get_array_instance(elem_type, array_type->length);
}

static enum nir_var_declaration_type
get_nir_how_declared(unsigned how_declared)
{
   if (how_declared == ir_var_hidden)
      return nir_var_hidden;

   return nir_var_declared_normally;
}

void
nir_visitor::visit(ir_variable *ir)
{
   /* TODO: In future we should switch to using the NIR lowering pass but for
    * now just ignore these variables as GLSL IR should have lowered them.
    * Anything remaining are just dead vars that weren't cleaned up.
    */
   if (ir->data.mode == ir_var_shader_shared)
      return;

   /* FINISHME: inout parameters */
   assert(ir->data.mode != ir_var_function_inout);

   if (ir->data.mode == ir_var_function_out)
      return;

   nir_variable *var = rzalloc(shader, nir_variable);
   var->type = ir->type;
   var->name = ralloc_strdup(var, ir->name);

   var->data.always_active_io = ir->data.always_active_io;
   var->data.read_only = ir->data.read_only;
   var->data.centroid = ir->data.centroid;
   var->data.sample = ir->data.sample;
   var->data.patch = ir->data.patch;
   var->data.how_declared = get_nir_how_declared(ir->data.how_declared);
   var->data.invariant = ir->data.invariant;
   var->data.location = ir->data.location;
   var->data.stream = ir->data.stream;
   if (ir->data.stream & (1u << 31))
      var->data.stream |= NIR_STREAM_PACKED;

   var->data.precision = ir->data.precision;
   var->data.explicit_location = ir->data.explicit_location;
   var->data.matrix_layout = ir->data.matrix_layout;
   var->data.from_named_ifc_block = ir->data.from_named_ifc_block;
   var->data.compact = false;
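
   /* Note: "compact" (set for the tess levels and clip/cull distances below)
    * marks a scalar array that NIR packs into consecutive components of one
    * vec4 slot rather than giving each element its own location. */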
   switch(ir->data.mode) {
   case ir_var_auto:
   case ir_var_temporary:
      if (is_global)
         var->data.mode = nir_var_shader_temp;
      else
         var->data.mode = nir_var_function_temp;
      break;

   case ir_var_function_in:
   case ir_var_const_in:
      var->data.mode = nir_var_function_temp;
      break;

   case ir_var_shader_in:
      if (shader->info.stage == MESA_SHADER_GEOMETRY &&
          ir->data.location == VARYING_SLOT_PRIMITIVE_ID) {
         /* For whatever reason, GLSL IR makes gl_PrimitiveIDIn an input */
         var->data.location = SYSTEM_VALUE_PRIMITIVE_ID;
         var->data.mode = nir_var_system_value;
      } else {
         var->data.mode = nir_var_shader_in;

         if (shader->info.stage == MESA_SHADER_TESS_EVAL &&
             (ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
              ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
            var->data.compact = ir->type->without_array()->is_scalar();
         }

         if (shader->info.stage > MESA_SHADER_VERTEX &&
             ir->data.location >= VARYING_SLOT_CLIP_DIST0 &&
             ir->data.location <= VARYING_SLOT_CULL_DIST1) {
            var->data.compact = ir->type->without_array()->is_scalar();
         }
      }
      break;

   case ir_var_shader_out:
      var->data.mode = nir_var_shader_out;
      if (shader->info.stage == MESA_SHADER_TESS_CTRL &&
          (ir->data.location == VARYING_SLOT_TESS_LEVEL_INNER ||
           ir->data.location == VARYING_SLOT_TESS_LEVEL_OUTER)) {
         var->data.compact = ir->type->without_array()->is_scalar();
      }

      if (shader->info.stage <= MESA_SHADER_GEOMETRY &&
          ir->data.location >= VARYING_SLOT_CLIP_DIST0 &&
          ir->data.location <= VARYING_SLOT_CULL_DIST1) {
         var->data.compact = ir->type->without_array()->is_scalar();
      }
      break;

   case ir_var_uniform:
      if (ir->get_interface_type())
         var->data.mode = nir_var_mem_ubo;
      else
         var->data.mode = nir_var_uniform;
      break;

   case ir_var_shader_storage:
      var->data.mode = nir_var_mem_ssbo;
      break;

   case ir_var_system_value:
      var->data.mode = nir_var_system_value;
      break;

   default:
      unreachable("not reached");
   }

   unsigned mem_access = 0;
   if (ir->data.memory_read_only)
      mem_access |= ACCESS_NON_WRITEABLE;
   if (ir->data.memory_write_only)
      mem_access |= ACCESS_NON_READABLE;
   if (ir->data.memory_coherent)
      mem_access |= ACCESS_COHERENT;
   if (ir->data.memory_volatile)
      mem_access |= ACCESS_VOLATILE;
   if (ir->data.memory_restrict)
      mem_access |= ACCESS_RESTRICT;

   var->interface_type = ir->get_interface_type();

   /* For UBO and SSBO variables, we need explicit types */
   if (var->data.mode & (nir_var_mem_ubo | nir_var_mem_ssbo)) {
      const glsl_type *explicit_ifc_type =
         ir->get_interface_type()->get_explicit_interface_type(supports_std430);

      var->interface_type = explicit_ifc_type;

      if (ir->type->without_array()->is_interface()) {
         /* If the type contains the interface, wrap the explicit type in the
          * right number of arrays.
          */
         var->type = wrap_type_in_array(explicit_ifc_type, ir->type);
      } else {
         /* Otherwise, this variable is one entry in the interface */
         UNUSED bool found = false;
         for (unsigned i = 0; i < explicit_ifc_type->length; i++) {
            const glsl_struct_field *field =
               &explicit_ifc_type->fields.structure[i];
            if (strcmp(ir->name, field->name) != 0)
               continue;

            var->type = field->type;
            if (field->memory_read_only)
               mem_access |= ACCESS_NON_WRITEABLE;
            if (field->memory_write_only)
               mem_access |= ACCESS_NON_READABLE;
            if (field->memory_coherent)
               mem_access |= ACCESS_COHERENT;
            if (field->memory_volatile)
               mem_access |= ACCESS_VOLATILE;
            if (field->memory_restrict)
               mem_access |= ACCESS_RESTRICT;

            found = true;
            break;
         }
         assert(found);
      }
   }

   var->data.interpolation = ir->data.interpolation;
   var->data.location_frac = ir->data.location_frac;

   switch (ir->data.depth_layout) {
   case ir_depth_layout_none:
      var->data.depth_layout = nir_depth_layout_none;
      break;
   case ir_depth_layout_any:
      var->data.depth_layout = nir_depth_layout_any;
      break;
   case ir_depth_layout_greater:
      var->data.depth_layout = nir_depth_layout_greater;
      break;
   case ir_depth_layout_less:
      var->data.depth_layout = nir_depth_layout_less;
      break;
   case ir_depth_layout_unchanged:
      var->data.depth_layout = nir_depth_layout_unchanged;
      break;
   default:
      unreachable("not reached");
   }

   var->data.index = ir->data.index;
   var->data.descriptor_set = 0;
   var->data.binding = ir->data.binding;
   var->data.explicit_binding = ir->data.explicit_binding;
   var->data.bindless = ir->data.bindless;
   var->data.offset = ir->data.offset;
   var->data.access = (gl_access_qualifier)mem_access;

   if (var->type->without_array()->is_image()) {
      var->data.image.format = ir->data.image_format;
   } else if (var->data.mode == nir_var_shader_out) {
      var->data.xfb.buffer = ir->data.xfb_buffer;
      var->data.xfb.stride = ir->data.xfb_stride;
   }

   var->data.fb_fetch_output = ir->data.fb_fetch_output;
   var->data.explicit_xfb_buffer = ir->data.explicit_xfb_buffer;
   var->data.explicit_xfb_stride = ir->data.explicit_xfb_stride;

   var->num_state_slots = ir->get_num_state_slots();
   if (var->num_state_slots > 0) {
      var->state_slots = rzalloc_array(var, nir_state_slot,
                                       var->num_state_slots);

      ir_state_slot *state_slots = ir->get_state_slots();
      for (unsigned i = 0; i < var->num_state_slots; i++) {
         for (unsigned j = 0; j < 5; j++)
            var->state_slots[i].tokens[j] = state_slots[i].tokens[j];
         var->state_slots[i].swizzle = state_slots[i].swizzle;
      }
   } else {
      var->state_slots = NULL;
   }

   var->constant_initializer = constant_copy(ir->constant_initializer, var);

   if (var->data.mode == nir_var_function_temp)
      nir_function_impl_add_variable(impl, var);
   else
      nir_shader_add_variable(shader, var);

   _mesa_hash_table_insert(var_table, ir, var);
}

ir_visitor_status
nir_function_visitor::visit_enter(ir_function *ir)
{
   foreach_in_list(ir_function_signature, sig, &ir->signatures) {
      visitor->create_function(sig);
   }
   return visit_continue_with_parent;
}

void
nir_visitor::create_function(ir_function_signature *ir)
{
   if (ir->is_intrinsic())
      return;

   nir_function *func = nir_function_create(shader, ir->function_name());
   if (strcmp(ir->function_name(), "main") == 0)
      func->is_entrypoint = true;

   func->num_params = ir->parameters.length() +
                      (ir->return_type != glsl_type::void_type);
   func->params = ralloc_array(shader, nir_parameter, func->num_params);

   unsigned np = 0;

   if (ir->return_type != glsl_type::void_type) {
      /* The return value is a variable deref (basically an out parameter) */
      func->params[np].num_components = 1;
      func->params[np].bit_size = 32;
      np++;
   }

   foreach_in_list(ir_variable, param, &ir->parameters) {
      /* FINISHME: pass arrays, structs, etc by reference? */
      assert(param->type->is_vector() || param->type->is_scalar());

      if (param->data.mode == ir_var_function_in) {
         func->params[np].num_components = param->type->vector_elements;
         func->params[np].bit_size = glsl_get_bit_size(param->type);
      } else {
         func->params[np].num_components = 1;
         func->params[np].bit_size = 32;
      }
      np++;
   }
   assert(np == func->num_params);

   _mesa_hash_table_insert(this->overload_table, ir, func);
}

void
nir_visitor::visit(ir_function *ir)
{
   foreach_in_list(ir_function_signature, sig, &ir->signatures)
      sig->accept(this);
}

void
nir_visitor::visit(ir_function_signature *ir)
{
   if (ir->is_intrinsic())
      return;

   this->sig = ir;

   struct hash_entry *entry =
      _mesa_hash_table_search(this->overload_table, ir);

   assert(entry);
   nir_function *func = (nir_function *) entry->data;

   if (ir->is_defined) {
      nir_function_impl *impl = nir_function_impl_create(func);
      this->impl = impl;

      this->is_global = false;

      nir_builder_init(&b, impl);
      b.cursor = nir_after_cf_list(&impl->body);

      unsigned i = (ir->return_type != glsl_type::void_type) ? 1 : 0;

      foreach_in_list(ir_variable, param, &ir->parameters) {
         nir_variable *var =
            nir_local_variable_create(impl, param->type, param->name);

         if (param->data.mode == ir_var_function_in) {
            nir_store_var(&b, var, nir_load_param(&b, i), ~0);
         }

         _mesa_hash_table_insert(var_table, param, var);
         i++;
      }

      visit_exec_list(&ir->body, this);

      this->is_global = true;
   }
}

void
nir_visitor::visit(ir_loop *ir)
{
   nir_push_loop(&b);
   visit_exec_list(&ir->body_instructions, this);
   nir_pop_loop(&b, NULL);
}

void
nir_visitor::visit(ir_if *ir)
{
   nir_push_if(&b, evaluate_rvalue(ir->condition));
   visit_exec_list(&ir->then_instructions, this);
   nir_push_else(&b, NULL);
   visit_exec_list(&ir->else_instructions, this);
   nir_pop_if(&b, NULL);
}

void
nir_visitor::visit(ir_discard *ir)
{
   /*
    * discards aren't treated as control flow, because before we lower them
    * they can appear anywhere in the shader and the stuff after them may still
    * be executed (yay, crazy GLSL rules!). However, after lowering, all the
    * discards will be immediately followed by a return.
    */

   nir_intrinsic_instr *discard;
   if (ir->condition) {
      discard = nir_intrinsic_instr_create(this->shader,
                                           nir_intrinsic_discard_if);
      discard->src[0] =
         nir_src_for_ssa(evaluate_rvalue(ir->condition));
   } else {
      discard = nir_intrinsic_instr_create(this->shader, nir_intrinsic_discard);
   }

   nir_builder_instr_insert(&b, &discard->instr);
}

void
nir_visitor::visit(ir_demote *ir)
{
   nir_intrinsic_instr *demote =
      nir_intrinsic_instr_create(this->shader, nir_intrinsic_demote);

   nir_builder_instr_insert(&b, &demote->instr);
}

void
nir_visitor::visit(ir_emit_vertex *ir)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(this->shader, nir_intrinsic_emit_vertex);
   nir_intrinsic_set_stream_id(instr, ir->stream_id());
   nir_builder_instr_insert(&b, &instr->instr);
}

void
nir_visitor::visit(ir_end_primitive *ir)
{
   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(this->shader, nir_intrinsic_end_primitive);
   nir_intrinsic_set_stream_id(instr, ir->stream_id());
   nir_builder_instr_insert(&b, &instr->instr);
}

void
nir_visitor::visit(ir_loop_jump *ir)
{
   nir_jump_type type;
   switch (ir->mode) {
   case ir_loop_jump::jump_break:
      type = nir_jump_break;
      break;
   case ir_loop_jump::jump_continue:
      type = nir_jump_continue;
      break;
   default:
      unreachable("not reached");
   }

   nir_jump_instr *instr = nir_jump_instr_create(this->shader, type);
   nir_builder_instr_insert(&b, &instr->instr);
}

void
nir_visitor::visit(ir_return *ir)
{
   if (ir->value != NULL) {
      nir_deref_instr *ret_deref =
         nir_build_deref_cast(&b, nir_load_param(&b, 0),
                              nir_var_function_temp, ir->value->type, 0);

      nir_ssa_def *val = evaluate_rvalue(ir->value);
      nir_store_deref(&b, ret_deref, val, ~0);
   }

   nir_jump_instr *instr = nir_jump_instr_create(this->shader, nir_jump_return);
   nir_builder_instr_insert(&b, &instr->instr);
}
static void
intrinsic_set_std430_align(nir_intrinsic_instr *intrin, const glsl_type *type)
{
   unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
   unsigned pow2_components = util_next_power_of_two(type->vector_elements);
   nir_intrinsic_set_align(intrin, (bit_size / 8) * pow2_components, 0);
}

/* Accumulate any qualifiers along the deref chain to get the actual
 * load/store qualifier.
 */

static enum gl_access_qualifier
deref_get_qualifier(nir_deref_instr *deref)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   unsigned qualifiers = path.path[0]->var->data.access;

   const glsl_type *parent_type = path.path[0]->type;
   for (nir_deref_instr **cur_ptr = &path.path[1]; *cur_ptr; cur_ptr++) {
      nir_deref_instr *cur = *cur_ptr;

      if (parent_type->is_interface()) {
         const struct glsl_struct_field *field =
            &parent_type->fields.structure[cur->strct.index];
         if (field->memory_read_only)
            qualifiers |= ACCESS_NON_WRITEABLE;
         if (field->memory_write_only)
            qualifiers |= ACCESS_NON_READABLE;
         if (field->memory_coherent)
            qualifiers |= ACCESS_COHERENT;
         if (field->memory_volatile)
            qualifiers |= ACCESS_VOLATILE;
         if (field->memory_restrict)
            qualifiers |= ACCESS_RESTRICT;
      }

      parent_type = cur->type;
   }

   nir_deref_path_finish(&path);

   return (gl_access_qualifier) qualifiers;
}

void
nir_visitor::visit(ir_call *ir)
{
   if (ir->callee->is_intrinsic()) {
      nir_intrinsic_op op;

      switch (ir->callee->intrinsic_id) {
      case ir_intrinsic_generic_atomic_add:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_deref_atomic_add : nir_intrinsic_deref_atomic_fadd;
         break;
      case ir_intrinsic_generic_atomic_and:
         op = nir_intrinsic_deref_atomic_and;
         break;
      case ir_intrinsic_generic_atomic_or:
         op = nir_intrinsic_deref_atomic_or;
         break;
      case ir_intrinsic_generic_atomic_xor:
         op = nir_intrinsic_deref_atomic_xor;
         break;
      case ir_intrinsic_generic_atomic_min:
         assert(ir->return_deref);
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_deref_atomic_imin;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_deref_atomic_umin;
         else if (ir->return_deref->type == glsl_type::float_type)
            op = nir_intrinsic_deref_atomic_fmin;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_generic_atomic_max:
         assert(ir->return_deref);
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_deref_atomic_imax;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_deref_atomic_umax;
         else if (ir->return_deref->type == glsl_type::float_type)
            op = nir_intrinsic_deref_atomic_fmax;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_generic_atomic_exchange:
         op = nir_intrinsic_deref_atomic_exchange;
         break;
      case ir_intrinsic_generic_atomic_comp_swap:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_deref_atomic_comp_swap
            : nir_intrinsic_deref_atomic_fcomp_swap;
         break;
      case ir_intrinsic_atomic_counter_read:
         op = nir_intrinsic_atomic_counter_read_deref;
         break;
      case ir_intrinsic_atomic_counter_increment:
         op = nir_intrinsic_atomic_counter_inc_deref;
         break;
      case ir_intrinsic_atomic_counter_predecrement:
         op = nir_intrinsic_atomic_counter_pre_dec_deref;
         break;
      case ir_intrinsic_atomic_counter_add:
         op = nir_intrinsic_atomic_counter_add_deref;
         break;
      case ir_intrinsic_atomic_counter_and:
         op = nir_intrinsic_atomic_counter_and_deref;
         break;
      case ir_intrinsic_atomic_counter_or:
         op = nir_intrinsic_atomic_counter_or_deref;
         break;
      case ir_intrinsic_atomic_counter_xor:
         op = nir_intrinsic_atomic_counter_xor_deref;
         break;
      case ir_intrinsic_atomic_counter_min:
         op = nir_intrinsic_atomic_counter_min_deref;
         break;
      case ir_intrinsic_atomic_counter_max:
         op = nir_intrinsic_atomic_counter_max_deref;
         break;
      case ir_intrinsic_atomic_counter_exchange:
         op = nir_intrinsic_atomic_counter_exchange_deref;
         break;
      case ir_intrinsic_atomic_counter_comp_swap:
         op = nir_intrinsic_atomic_counter_comp_swap_deref;
         break;
      case ir_intrinsic_image_load:
         op = nir_intrinsic_image_deref_load;
         break;
      case ir_intrinsic_image_store:
         op = nir_intrinsic_image_deref_store;
         break;
      case ir_intrinsic_image_atomic_add:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_image_deref_atomic_add
            : nir_intrinsic_image_deref_atomic_fadd;
         break;
      case ir_intrinsic_image_atomic_min:
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_image_deref_atomic_imin;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_image_deref_atomic_umin;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_image_atomic_max:
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_image_deref_atomic_imax;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_image_deref_atomic_umax;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_image_atomic_and:
         op = nir_intrinsic_image_deref_atomic_and;
         break;
      case ir_intrinsic_image_atomic_or:
         op = nir_intrinsic_image_deref_atomic_or;
         break;
      case ir_intrinsic_image_atomic_xor:
         op = nir_intrinsic_image_deref_atomic_xor;
         break;
      case ir_intrinsic_image_atomic_exchange:
         op = nir_intrinsic_image_deref_atomic_exchange;
         break;
      case ir_intrinsic_image_atomic_comp_swap:
         op = nir_intrinsic_image_deref_atomic_comp_swap;
         break;
      case ir_intrinsic_image_atomic_inc_wrap:
         op = nir_intrinsic_image_deref_atomic_inc_wrap;
         break;
      case ir_intrinsic_image_atomic_dec_wrap:
         op = nir_intrinsic_image_deref_atomic_dec_wrap;
         break;
      case ir_intrinsic_memory_barrier:
         op = nir_intrinsic_memory_barrier;
         break;
      case ir_intrinsic_image_size:
         op = nir_intrinsic_image_deref_size;
         break;
      case ir_intrinsic_image_samples:
         op = nir_intrinsic_image_deref_samples;
         break;
      case ir_intrinsic_ssbo_store:
      case ir_intrinsic_ssbo_load:
      case ir_intrinsic_ssbo_atomic_add:
      case ir_intrinsic_ssbo_atomic_and:
      case ir_intrinsic_ssbo_atomic_or:
      case ir_intrinsic_ssbo_atomic_xor:
      case ir_intrinsic_ssbo_atomic_min:
      case ir_intrinsic_ssbo_atomic_max:
      case ir_intrinsic_ssbo_atomic_exchange:
      case ir_intrinsic_ssbo_atomic_comp_swap:
         /* SSBO store/loads should only have been lowered in GLSL IR for
          * non-nir drivers, NIR drivers make use of gl_nir_lower_buffers()
          * instead.
          */
         unreachable("Invalid operation nir doesn't want lowered ssbo "
                     "store/loads");
      case ir_intrinsic_shader_clock:
         op = nir_intrinsic_shader_clock;
         break;
      case ir_intrinsic_begin_invocation_interlock:
         op = nir_intrinsic_begin_invocation_interlock;
         break;
      case ir_intrinsic_end_invocation_interlock:
         op = nir_intrinsic_end_invocation_interlock;
         break;
      case ir_intrinsic_group_memory_barrier:
         op = nir_intrinsic_group_memory_barrier;
         break;
      case ir_intrinsic_memory_barrier_atomic_counter:
         op = nir_intrinsic_memory_barrier_atomic_counter;
         break;
      case ir_intrinsic_memory_barrier_buffer:
         op = nir_intrinsic_memory_barrier_buffer;
         break;
      case ir_intrinsic_memory_barrier_image:
         op = nir_intrinsic_memory_barrier_image;
         break;
      case ir_intrinsic_memory_barrier_shared:
         op = nir_intrinsic_memory_barrier_shared;
         break;
      case ir_intrinsic_shared_load:
         op = nir_intrinsic_load_shared;
         break;
      case ir_intrinsic_shared_store:
         op = nir_intrinsic_store_shared;
         break;
      case ir_intrinsic_shared_atomic_add:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_shared_atomic_add
            : nir_intrinsic_shared_atomic_fadd;
         break;
      case ir_intrinsic_shared_atomic_and:
         op = nir_intrinsic_shared_atomic_and;
         break;
      case ir_intrinsic_shared_atomic_or:
         op = nir_intrinsic_shared_atomic_or;
         break;
      case ir_intrinsic_shared_atomic_xor:
         op = nir_intrinsic_shared_atomic_xor;
         break;
      case ir_intrinsic_shared_atomic_min:
         assert(ir->return_deref);
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_shared_atomic_imin;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_shared_atomic_umin;
         else if (ir->return_deref->type == glsl_type::float_type)
            op = nir_intrinsic_shared_atomic_fmin;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_shared_atomic_max:
         assert(ir->return_deref);
         if (ir->return_deref->type == glsl_type::int_type)
            op = nir_intrinsic_shared_atomic_imax;
         else if (ir->return_deref->type == glsl_type::uint_type)
            op = nir_intrinsic_shared_atomic_umax;
         else if (ir->return_deref->type == glsl_type::float_type)
            op = nir_intrinsic_shared_atomic_fmax;
         else
            unreachable("Invalid type");
         break;
      case ir_intrinsic_shared_atomic_exchange:
         op = nir_intrinsic_shared_atomic_exchange;
         break;
      case ir_intrinsic_shared_atomic_comp_swap:
         op = ir->return_deref->type->is_integer_32_64()
            ? nir_intrinsic_shared_atomic_comp_swap
            : nir_intrinsic_shared_atomic_fcomp_swap;
         break;
      case ir_intrinsic_vote_any:
         op = nir_intrinsic_vote_any;
         break;
      case ir_intrinsic_vote_all:
         op = nir_intrinsic_vote_all;
         break;
      case ir_intrinsic_vote_eq:
         op = nir_intrinsic_vote_ieq;
         break;
      case ir_intrinsic_ballot:
         op = nir_intrinsic_ballot;
         break;
      case ir_intrinsic_read_invocation:
         op = nir_intrinsic_read_invocation;
         break;
      case ir_intrinsic_read_first_invocation:
         op = nir_intrinsic_read_first_invocation;
         break;
      case ir_intrinsic_helper_invocation:
         op = nir_intrinsic_is_helper_invocation;
         break;
      default:
         unreachable("not reached");
      }

      nir_intrinsic_instr *instr = nir_intrinsic_instr_create(shader, op);
      nir_ssa_def *ret = &instr->dest.ssa;

      switch (op) {
      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap: {
         int param_count = ir->actual_parameters.length();
         assert(param_count == 2 || param_count == 3);

         /* deref */
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *rvalue = (ir_rvalue *) param;
         ir_dereference *deref = rvalue->as_dereference();
         ir_swizzle *swizzle = NULL;
         if (!deref) {
            /* We may have a swizzle to pick off a single vec4 component */
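            /* e.g. an atomic on one component of a uvec4 SSBO member reaches
             * us as a single-component swizzle of the vector dereference;
             * the selected component becomes an array deref below. */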
            swizzle = rvalue->as_swizzle();
            assert(swizzle && swizzle->type->vector_elements == 1);
            deref = swizzle->val->as_dereference();
            assert(deref);
         }

         nir_deref_instr *nir_deref = evaluate_deref(deref);
         if (swizzle) {
            nir_deref = nir_build_deref_array_imm(&b, nir_deref,
                                                  swizzle->mask.x);
         }
         instr->src[0] = nir_src_for_ssa(&nir_deref->dest.ssa);

         nir_intrinsic_set_access(instr, deref_get_qualifier(nir_deref));

         /* data1 parameter (this is always present) */
         param = param->get_next();
         ir_instruction *inst = (ir_instruction *) param;
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));

         /* data2 parameter (only with atomic_comp_swap) */
         if (param_count == 3) {
            assert(op == nir_intrinsic_deref_atomic_comp_swap ||
                   op == nir_intrinsic_deref_atomic_fcomp_swap);
            param = param->get_next();
            inst = (ir_instruction *) param;
            instr->src[2] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
         }

         /* Atomic result */
         assert(ir->return_deref);
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements, 32, NULL);
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_atomic_counter_read_deref:
      case nir_intrinsic_atomic_counter_inc_deref:
      case nir_intrinsic_atomic_counter_pre_dec_deref:
      case nir_intrinsic_atomic_counter_add_deref:
      case nir_intrinsic_atomic_counter_min_deref:
      case nir_intrinsic_atomic_counter_max_deref:
      case nir_intrinsic_atomic_counter_and_deref:
      case nir_intrinsic_atomic_counter_or_deref:
      case nir_intrinsic_atomic_counter_xor_deref:
      case nir_intrinsic_atomic_counter_exchange_deref:
      case nir_intrinsic_atomic_counter_comp_swap_deref: {
         /* Set the counter variable dereference. */
         exec_node *param = ir->actual_parameters.get_head();
         ir_dereference *counter = (ir_dereference *)param;

         instr->src[0] = nir_src_for_ssa(&evaluate_deref(counter)->dest.ssa);
         param = param->get_next();

         /* Set the intrinsic destination. */
         if (ir->return_deref) {
            nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
         }

         /* Set the intrinsic parameters. */
         if (!param->is_tail_sentinel()) {
            instr->src[1] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         }

         if (!param->is_tail_sentinel()) {
            instr->src[2] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         }

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_image_deref_load:
      case nir_intrinsic_image_deref_store:
      case nir_intrinsic_image_deref_atomic_add:
      case nir_intrinsic_image_deref_atomic_imin:
      case nir_intrinsic_image_deref_atomic_umin:
      case nir_intrinsic_image_deref_atomic_imax:
      case nir_intrinsic_image_deref_atomic_umax:
      case nir_intrinsic_image_deref_atomic_and:
      case nir_intrinsic_image_deref_atomic_or:
      case nir_intrinsic_image_deref_atomic_xor:
      case nir_intrinsic_image_deref_atomic_exchange:
      case nir_intrinsic_image_deref_atomic_comp_swap:
      case nir_intrinsic_image_deref_atomic_fadd:
      case nir_intrinsic_image_deref_samples:
      case nir_intrinsic_image_deref_size:
      case nir_intrinsic_image_deref_atomic_inc_wrap:
      case nir_intrinsic_image_deref_atomic_dec_wrap: {
         nir_ssa_undef_instr *instr_undef =
            nir_ssa_undef_instr_create(shader, 1, 32);
         nir_builder_instr_insert(&b, &instr_undef->instr);

         /* Set the image variable dereference. */
         exec_node *param = ir->actual_parameters.get_head();
         ir_dereference *image = (ir_dereference *)param;
         nir_deref_instr *deref = evaluate_deref(image);
         const glsl_type *type = deref->type;

         nir_intrinsic_set_access(instr, deref_get_qualifier(deref));

         instr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
         param = param->get_next();

         /* Set the intrinsic destination. */
         if (ir->return_deref) {
            unsigned num_components = ir->return_deref->type->vector_elements;
            nir_ssa_dest_init(&instr->instr, &instr->dest,
                              num_components, 32, NULL);
         }

         if (op == nir_intrinsic_image_deref_size) {
            instr->num_components = instr->dest.ssa.num_components;
         } else if (op == nir_intrinsic_image_deref_load ||
                    op == nir_intrinsic_image_deref_store) {
            instr->num_components = 4;
         }

         if (op == nir_intrinsic_image_deref_size ||
             op == nir_intrinsic_image_deref_samples) {
            nir_builder_instr_insert(&b, &instr->instr);
            break;
         }

         /* Set the address argument, extending the coordinate vector to four
          * components.
          */
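         /* e.g. a 2D image coordinate (x, y) becomes (x, y, undef, undef). */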
         nir_ssa_def *src_addr =
            evaluate_rvalue((ir_dereference *)param);
         nir_ssa_def *srcs[4];

         for (int i = 0; i < 4; i++) {
            if (i < type->coordinate_components())
               srcs[i] = nir_channel(&b, src_addr, i);
            else
               srcs[i] = &instr_undef->def;
         }

         instr->src[1] = nir_src_for_ssa(nir_vec(&b, srcs, 4));
         param = param->get_next();

         /* Set the sample argument, which is undefined for single-sample
          * images.
          */
         if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
            instr->src[2] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         } else {
            instr->src[2] = nir_src_for_ssa(&instr_undef->def);
         }

         /* Set the intrinsic parameters. */
         if (!param->is_tail_sentinel()) {
            instr->src[3] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         } else if (op == nir_intrinsic_image_deref_load) {
            instr->src[3] = nir_src_for_ssa(nir_imm_int(&b, 0)); /* LOD */
         }

         if (!param->is_tail_sentinel()) {
            instr->src[4] =
               nir_src_for_ssa(evaluate_rvalue((ir_dereference *)param));
            param = param->get_next();
         } else if (op == nir_intrinsic_image_deref_store) {
            instr->src[4] = nir_src_for_ssa(nir_imm_int(&b, 0)); /* LOD */
         }

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_memory_barrier:
      case nir_intrinsic_group_memory_barrier:
      case nir_intrinsic_memory_barrier_atomic_counter:
      case nir_intrinsic_memory_barrier_buffer:
      case nir_intrinsic_memory_barrier_image:
      case nir_intrinsic_memory_barrier_shared:
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      case nir_intrinsic_shader_clock:
         nir_ssa_dest_init(&instr->instr, &instr->dest, 2, 32, NULL);
         instr->num_components = 2;
         nir_intrinsic_set_memory_scope(instr, NIR_SCOPE_SUBGROUP);
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      case nir_intrinsic_begin_invocation_interlock:
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      case nir_intrinsic_end_invocation_interlock:
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      case nir_intrinsic_store_ssbo: {
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *block = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
         assert(write_mask);

         nir_ssa_def *nir_val = evaluate_rvalue(val);
         if (val->type->is_boolean())
            nir_val = nir_b2i32(&b, nir_val);

         instr->src[0] = nir_src_for_ssa(nir_val);
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(block));
         instr->src[2] = nir_src_for_ssa(evaluate_rvalue(offset));
         intrinsic_set_std430_align(instr, val->type);
         nir_intrinsic_set_write_mask(instr, write_mask->value.u[0]);
         instr->num_components = val->type->vector_elements;

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_load_shared: {
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

         nir_intrinsic_set_base(instr, 0);
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(offset));

         const glsl_type *type = ir->return_deref->var->type;
         instr->num_components = type->vector_elements;
         intrinsic_set_std430_align(instr, type);

         /* Setup destination register */
         unsigned bit_size = type->is_boolean() ? 32 : glsl_get_bit_size(type);
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           type->vector_elements, bit_size, NULL);

         nir_builder_instr_insert(&b, &instr->instr);

         /* The value in shared memory is a 32-bit value */
         if (type->is_boolean())
            ret = nir_b2b1(&b, &instr->dest.ssa);
         break;
      }
      case nir_intrinsic_store_shared: {
         exec_node *param = ir->actual_parameters.get_head();
         ir_rvalue *offset = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_rvalue *val = ((ir_instruction *)param)->as_rvalue();

         param = param->get_next();
         ir_constant *write_mask = ((ir_instruction *)param)->as_constant();
         assert(write_mask);

         nir_intrinsic_set_base(instr, 0);
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(offset));

         nir_intrinsic_set_write_mask(instr, write_mask->value.u[0]);

         nir_ssa_def *nir_val = evaluate_rvalue(val);
         /* The value in shared memory is a 32-bit value */
         if (val->type->is_boolean())
            nir_val = nir_b2b32(&b, nir_val);

         instr->src[0] = nir_src_for_ssa(nir_val);
         instr->num_components = val->type->vector_elements;
         intrinsic_set_std430_align(instr, val->type);

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_shared_atomic_add:
      case nir_intrinsic_shared_atomic_imin:
      case nir_intrinsic_shared_atomic_umin:
      case nir_intrinsic_shared_atomic_imax:
      case nir_intrinsic_shared_atomic_umax:
      case nir_intrinsic_shared_atomic_and:
      case nir_intrinsic_shared_atomic_or:
      case nir_intrinsic_shared_atomic_xor:
      case nir_intrinsic_shared_atomic_exchange:
      case nir_intrinsic_shared_atomic_comp_swap:
      case nir_intrinsic_shared_atomic_fadd:
      case nir_intrinsic_shared_atomic_fmin:
      case nir_intrinsic_shared_atomic_fmax:
      case nir_intrinsic_shared_atomic_fcomp_swap: {
         int param_count = ir->actual_parameters.length();
         assert(param_count == 2 || param_count == 3);

         /* offset */
         exec_node *param = ir->actual_parameters.get_head();
         ir_instruction *inst = (ir_instruction *) param;
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));

         /* data1 parameter (this is always present) */
         param = param->get_next();
         inst = (ir_instruction *) param;
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));

         /* data2 parameter (only with atomic_comp_swap) */
         if (param_count == 3) {
            assert(op == nir_intrinsic_shared_atomic_comp_swap ||
                   op == nir_intrinsic_shared_atomic_fcomp_swap);
            param = param->get_next();
            inst = (ir_instruction *) param;
            instr->src[2] =
               nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
         }

         /* Atomic result */
         assert(ir->return_deref);
         unsigned bit_size = glsl_get_bit_size(ir->return_deref->type);
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements,
                           bit_size, NULL);
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_vote_any:
      case nir_intrinsic_vote_all:
      case nir_intrinsic_vote_ieq: {
         nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
         instr->num_components = 1;

         ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }

      case nir_intrinsic_ballot: {
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements, 64, NULL);
         instr->num_components = ir->return_deref->type->vector_elements;

         ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_read_invocation: {
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements, 32, NULL);
         instr->num_components = ir->return_deref->type->vector_elements;

         ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

         ir_rvalue *invocation = (ir_rvalue *) ir->actual_parameters.get_head()->next;
         instr->src[1] = nir_src_for_ssa(evaluate_rvalue(invocation));

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_read_first_invocation: {
         nir_ssa_dest_init(&instr->instr, &instr->dest,
                           ir->return_deref->type->vector_elements, 32, NULL);
         instr->num_components = ir->return_deref->type->vector_elements;

         ir_rvalue *value = (ir_rvalue *) ir->actual_parameters.get_head();
         instr->src[0] = nir_src_for_ssa(evaluate_rvalue(value));

         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      case nir_intrinsic_is_helper_invocation: {
         nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 1, NULL);
         instr->num_components = 1;
         nir_builder_instr_insert(&b, &instr->instr);
         break;
      }
      default:
         unreachable("not reached");
      }

      if (ir->return_deref)
         nir_store_deref(&b, evaluate_deref(ir->return_deref), ret, ~0);

      return;
   }

   struct hash_entry *entry =
      _mesa_hash_table_search(this->overload_table, ir->callee);
   assert(entry);
   nir_function *callee = (nir_function *) entry->data;

   nir_call_instr *call = nir_call_instr_create(this->shader, callee);

   unsigned i = 0;
   nir_deref_instr *ret_deref = NULL;
   if (ir->return_deref) {
      nir_variable *ret_tmp =
         nir_local_variable_create(this->impl, ir->return_deref->type,
                                   "return_tmp");
      ret_deref = nir_build_deref_var(&b, ret_tmp);
      call->params[i++] = nir_src_for_ssa(&ret_deref->dest.ssa);
   }

   foreach_two_lists(formal_node, &ir->callee->parameters,
                     actual_node, &ir->actual_parameters) {
      ir_rvalue *param_rvalue = (ir_rvalue *) actual_node;
      ir_variable *sig_param = (ir_variable *) formal_node;

      if (sig_param->data.mode == ir_var_function_out) {
         nir_deref_instr *out_deref = evaluate_deref(param_rvalue);
         call->params[i] = nir_src_for_ssa(&out_deref->dest.ssa);
      } else if (sig_param->data.mode == ir_var_function_in) {
         nir_ssa_def *val = evaluate_rvalue(param_rvalue);
         nir_src src = nir_src_for_ssa(val);

         nir_src_copy(&call->params[i], &src, call);
      } else if (sig_param->data.mode == ir_var_function_inout) {
         unreachable("unimplemented: inout parameters");
      }

      i++;
   }

   nir_builder_instr_insert(&b, &call->instr);

   if (ir->return_deref)
      nir_store_deref(&b, evaluate_deref(ir->return_deref),
                      nir_load_deref(&b, ret_deref), ~0);
}

void
nir_visitor::visit(ir_assignment *ir)
{
   unsigned num_components = ir->lhs->type->vector_elements;

   b.exact = ir->lhs->variable_referenced()->data.invariant ||
             ir->lhs->variable_referenced()->data.precise;

   if ((ir->rhs->as_dereference() || ir->rhs->as_constant()) &&
       (ir->write_mask == (1 << num_components) - 1 || ir->write_mask == 0)) {
      nir_deref_instr *lhs = evaluate_deref(ir->lhs);
      nir_deref_instr *rhs = evaluate_deref(ir->rhs);
      enum gl_access_qualifier lhs_qualifiers = deref_get_qualifier(lhs);
      enum gl_access_qualifier rhs_qualifiers = deref_get_qualifier(rhs);
      if (ir->condition) {
         nir_push_if(&b, evaluate_rvalue(ir->condition));
         nir_copy_deref_with_access(&b, lhs, rhs, lhs_qualifiers,
                                    rhs_qualifiers);
         nir_pop_if(&b, NULL);
      } else {
         nir_copy_deref_with_access(&b, lhs, rhs, lhs_qualifiers,
                                    rhs_qualifiers);
      }
      return;
   }

   assert(ir->rhs->type->is_scalar() || ir->rhs->type->is_vector());

   ir->lhs->accept(this);
   nir_deref_instr *lhs_deref = this->deref;
   nir_ssa_def *src = evaluate_rvalue(ir->rhs);

   if (ir->write_mask != (1 << num_components) - 1 && ir->write_mask != 0) {
      /* GLSL IR will give us the input to the write-masked assignment in a
       * single packed vector.  So, for example, if the writemask is xzw, then
       * we have to swizzle x -> x, y -> z, and z -> w and get the y component
       * from the load.
       */
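      /* Concretely, writemask xzw on a vec4 yields swiz = {0, 0, 1, 2}:
       * packed source components 0, 1 and 2 land in destination lanes x, z
       * and w, and swiz[1] is irrelevant since lane y is never written. */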
      unsigned swiz[4];
      unsigned component = 0;
      for (unsigned i = 0; i < 4; i++) {
         swiz[i] = ir->write_mask & (1 << i) ? component++ : 0;
      }

      src = nir_swizzle(&b, src, swiz, num_components);
   }

   enum gl_access_qualifier qualifiers = deref_get_qualifier(lhs_deref);
   if (ir->condition) {
      nir_push_if(&b, evaluate_rvalue(ir->condition));
      nir_store_deref_with_access(&b, lhs_deref, src, ir->write_mask,
                                  qualifiers);
      nir_pop_if(&b, NULL);
   } else {
      nir_store_deref_with_access(&b, lhs_deref, src, ir->write_mask,
                                  qualifiers);
   }
}

/*
 * Given an instruction, returns a pointer to its destination or NULL if there
 * is no destination.
 *
 * Note that this only handles instructions we generate at this level.
 */
static nir_dest *
get_instr_dest(nir_instr *instr)
{
   nir_alu_instr *alu_instr;
   nir_intrinsic_instr *intrinsic_instr;
   nir_tex_instr *tex_instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      alu_instr = nir_instr_as_alu(instr);
      return &alu_instr->dest.dest;

   case nir_instr_type_intrinsic:
      intrinsic_instr = nir_instr_as_intrinsic(instr);
      if (nir_intrinsic_infos[intrinsic_instr->intrinsic].has_dest)
         return &intrinsic_instr->dest;
      else
         return NULL;

   case nir_instr_type_tex:
      tex_instr = nir_instr_as_tex(instr);
      return &tex_instr->dest;

   default:
      unreachable("not reached");
   }

   return NULL;
}

void
nir_visitor::add_instr(nir_instr *instr, unsigned num_components,
                       unsigned bit_size)
{
   nir_dest *dest = get_instr_dest(instr);

   if (dest)
      nir_ssa_dest_init(instr, dest, num_components, bit_size, NULL);

   nir_builder_instr_insert(&b, instr);

   if (dest) {
      assert(dest->is_ssa);
      this->result = &dest->ssa;
   }
}
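
/* Evaluate an rvalue to an SSA value: the expression visitors leave their
 * result in this->result directly, while a bare dereference or constant only
 * sets this->deref, so an explicit load is emitted for it here. */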
nir_ssa_def *
nir_visitor::evaluate_rvalue(ir_rvalue *ir)
{
   ir->accept(this);
   if (ir->as_dereference() || ir->as_constant()) {
      /*
       * A dereference is being used on the right hand side, which means we
       * must emit a variable load.
       */

      enum gl_access_qualifier access = deref_get_qualifier(this->deref);
      this->result = nir_load_deref_with_access(&b, this->deref, access);
   }

   return this->result;
}

static bool
type_is_float(glsl_base_type type)
{
   return type == GLSL_TYPE_FLOAT || type == GLSL_TYPE_DOUBLE ||
          type == GLSL_TYPE_FLOAT16;
}

static bool
type_is_signed(glsl_base_type type)
{
   return type == GLSL_TYPE_INT || type == GLSL_TYPE_INT64 ||
          type == GLSL_TYPE_INT16;
}
1789 nir_visitor::visit(ir_expression
*ir
)
1791 /* Some special cases */
1792 switch (ir
->operation
) {
1793 case ir_unop_interpolate_at_centroid
:
1794 case ir_binop_interpolate_at_offset
:
1795 case ir_binop_interpolate_at_sample
: {
1796 ir_dereference
*deref
= ir
->operands
[0]->as_dereference();
1797 ir_swizzle
*swizzle
= NULL
;
1799 /* the api does not allow a swizzle here, but the varying packing code
1800 * may have pushed one into here.
1802 swizzle
= ir
->operands
[0]->as_swizzle();
1804 deref
= swizzle
->val
->as_dereference();
1808 deref
->accept(this);
1810 nir_intrinsic_op op
;
1811 if (this->deref
->mode
== nir_var_shader_in
) {
1812 switch (ir
->operation
) {
1813 case ir_unop_interpolate_at_centroid
:
1814 op
= nir_intrinsic_interp_deref_at_centroid
;
1816 case ir_binop_interpolate_at_offset
:
1817 op
= nir_intrinsic_interp_deref_at_offset
;
1819 case ir_binop_interpolate_at_sample
:
1820 op
= nir_intrinsic_interp_deref_at_sample
;
1823 unreachable("Invalid interpolation intrinsic");
1826 /* This case can happen if the vertex shader does not write the
1827 * given varying. In this case, the linker will lower it to a
1828 * global variable. Since interpolating a variable makes no
1829 * sense, we'll just turn it into a load which will probably
1830 * eventually end up as an SSA definition.
1832 assert(this->deref
->mode
== nir_var_shader_temp
);
1833 op
= nir_intrinsic_load_deref
;
1836 nir_intrinsic_instr
*intrin
= nir_intrinsic_instr_create(shader
, op
);
1837 intrin
->num_components
= deref
->type
->vector_elements
;
1838 intrin
->src
[0] = nir_src_for_ssa(&this->deref
->dest
.ssa
);
1840 if (intrin
->intrinsic
== nir_intrinsic_interp_deref_at_offset
||
1841 intrin
->intrinsic
== nir_intrinsic_interp_deref_at_sample
)
1842 intrin
->src
[1] = nir_src_for_ssa(evaluate_rvalue(ir
->operands
[1]));
1844 unsigned bit_size
= glsl_get_bit_size(deref
->type
);
1845 add_instr(&intrin
->instr
, deref
->type
->vector_elements
, bit_size
);
1848 unsigned swiz
[4] = {
1849 swizzle
->mask
.x
, swizzle
->mask
.y
, swizzle
->mask
.z
, swizzle
->mask
.w
1852 result
= nir_swizzle(&b
, result
, swiz
,
1853 swizzle
->type
->vector_elements
);
1859 case ir_unop_ssbo_unsized_array_length
: {
1860 nir_intrinsic_instr
*intrin
=
1861 nir_intrinsic_instr_create(b
.shader
,
1862 nir_intrinsic_deref_buffer_array_length
);
1864 ir_dereference
*deref
= ir
->operands
[0]->as_dereference();
1865 intrin
->src
[0] = nir_src_for_ssa(&evaluate_deref(deref
)->dest
.ssa
);
1867 add_instr(&intrin
->instr
, 1, 32);
1871 case ir_binop_ubo_load
:
1872 /* UBO loads should only have been lowered in GLSL IR for non-nir drivers,
1873 * NIR drivers make use of gl_nir_lower_buffers() instead.
1875 unreachable("Invalid operation nir doesn't want lowered ubo loads");
1880 nir_ssa_def
*srcs
[4];
1881 for (unsigned i
= 0; i
< ir
->num_operands
; i
++)
1882 srcs
[i
] = evaluate_rvalue(ir
->operands
[i
]);
1884 glsl_base_type types
[4];
1885 for (unsigned i
= 0; i
< ir
->num_operands
; i
++)
1886 types
[i
] = ir
->operands
[i
]->type
->base_type
;
1888 glsl_base_type out_type
= ir
->type
->base_type
;
   switch (ir->operation) {
   case ir_unop_bit_not: result = nir_inot(&b, srcs[0]); break;
   case ir_unop_logic_not:
      result = nir_inot(&b, srcs[0]);
      break;
   case ir_unop_neg:
      result = type_is_float(types[0]) ? nir_fneg(&b, srcs[0])
                                       : nir_ineg(&b, srcs[0]);
      break;
   case ir_unop_abs:
      result = type_is_float(types[0]) ? nir_fabs(&b, srcs[0])
                                       : nir_iabs(&b, srcs[0]);
      break;
   case ir_unop_clz:
      result = nir_uclz(&b, srcs[0]);
      break;
   case ir_unop_saturate:
      assert(type_is_float(types[0]));
      result = nir_fsat(&b, srcs[0]);
      break;
   case ir_unop_sign:
      result = type_is_float(types[0]) ? nir_fsign(&b, srcs[0])
                                       : nir_isign(&b, srcs[0]);
      break;
   case ir_unop_rcp:  result = nir_frcp(&b, srcs[0]); break;
   case ir_unop_rsq:  result = nir_frsq(&b, srcs[0]); break;
   case ir_unop_sqrt: result = nir_fsqrt(&b, srcs[0]); break;
   case ir_unop_exp:  unreachable("ir_unop_exp should have been lowered");
   case ir_unop_log:  unreachable("ir_unop_log should have been lowered");
   case ir_unop_exp2: result = nir_fexp2(&b, srcs[0]); break;
   case ir_unop_log2: result = nir_flog2(&b, srcs[0]); break;
   case ir_unop_i642u64:
   case ir_unop_u642i64: {
      nir_alu_type src_type = nir_get_nir_type_for_glsl_base_type(types[0]);
      nir_alu_type dst_type = nir_get_nir_type_for_glsl_base_type(out_type);
      result = nir_build_alu(&b, nir_type_conversion_op(src_type, dst_type,
                                 nir_rounding_mode_undef),
                                 srcs[0], NULL, NULL, NULL);
      /* b2i and b2f don't have fixed bit-size versions so the builder will
       * just assume 32 and we have to fix it up here.
       */
      result->bit_size = nir_alu_type_get_type_size(dst_type);
      break;
   }
   case ir_unop_f2fmp: {
      result = nir_build_alu(&b, nir_op_f2fmp, srcs[0], NULL, NULL, NULL);
      break;
   }
   case ir_unop_i2imp: {
      result = nir_build_alu(&b, nir_op_i2imp, srcs[0], NULL, NULL, NULL);
      break;
   }
   case ir_unop_u2ump: {
      result = nir_build_alu(&b, nir_op_u2ump, srcs[0], NULL, NULL, NULL);
      break;
   }
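
   /* NIR SSA values are typeless bit vectors, so a reinterpreting cast such
    * as floatBitsToInt(x) needs no conversion opcode at all; the bitcasts
    * below lower to a plain move.
    */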
   case ir_unop_bitcast_i2f:
   case ir_unop_bitcast_f2i:
   case ir_unop_bitcast_u2f:
   case ir_unop_bitcast_f2u:
   case ir_unop_bitcast_i642d:
   case ir_unop_bitcast_d2i64:
   case ir_unop_bitcast_u642d:
   case ir_unop_bitcast_d2u64:
   case ir_unop_subroutine_to_int:
      /* no-op */
      result = nir_mov(&b, srcs[0]);
      break;
   case ir_unop_trunc: result = nir_ftrunc(&b, srcs[0]); break;
   case ir_unop_ceil:  result = nir_fceil(&b, srcs[0]); break;
   case ir_unop_floor: result = nir_ffloor(&b, srcs[0]); break;
   case ir_unop_fract: result = nir_ffract(&b, srcs[0]); break;
   case ir_unop_frexp_exp: result = nir_frexp_exp(&b, srcs[0]); break;
   case ir_unop_frexp_sig: result = nir_frexp_sig(&b, srcs[0]); break;
   case ir_unop_round_even: result = nir_fround_even(&b, srcs[0]); break;
   case ir_unop_sin:  result = nir_fsin(&b, srcs[0]); break;
   case ir_unop_cos:  result = nir_fcos(&b, srcs[0]); break;
   case ir_unop_dFdx:        result = nir_fddx(&b, srcs[0]); break;
   case ir_unop_dFdy:        result = nir_fddy(&b, srcs[0]); break;
   case ir_unop_dFdx_fine:   result = nir_fddx_fine(&b, srcs[0]); break;
   case ir_unop_dFdy_fine:   result = nir_fddy_fine(&b, srcs[0]); break;
   case ir_unop_dFdx_coarse: result = nir_fddx_coarse(&b, srcs[0]); break;
   case ir_unop_dFdy_coarse: result = nir_fddy_coarse(&b, srcs[0]); break;
   case ir_unop_pack_snorm_2x16:
      result = nir_pack_snorm_2x16(&b, srcs[0]);
      break;
   case ir_unop_pack_snorm_4x8:
      result = nir_pack_snorm_4x8(&b, srcs[0]);
      break;
   case ir_unop_pack_unorm_2x16:
      result = nir_pack_unorm_2x16(&b, srcs[0]);
      break;
   case ir_unop_pack_unorm_4x8:
      result = nir_pack_unorm_4x8(&b, srcs[0]);
      break;
   case ir_unop_pack_half_2x16:
      result = nir_pack_half_2x16(&b, srcs[0]);
      break;
   case ir_unop_unpack_snorm_2x16:
      result = nir_unpack_snorm_2x16(&b, srcs[0]);
      break;
   case ir_unop_unpack_snorm_4x8:
      result = nir_unpack_snorm_4x8(&b, srcs[0]);
      break;
   case ir_unop_unpack_unorm_2x16:
      result = nir_unpack_unorm_2x16(&b, srcs[0]);
      break;
   case ir_unop_unpack_unorm_4x8:
      result = nir_unpack_unorm_4x8(&b, srcs[0]);
      break;
   case ir_unop_unpack_half_2x16:
      result = nir_unpack_half_2x16(&b, srcs[0]);
      break;
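
   /* Each of the following views a 64-bit scalar as a uvec2 (or vice
    * versa), so a single pack/unpack opcode pair covers samplers, images,
    * doubles and 64-bit integers.
    */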
   case ir_unop_pack_sampler_2x32:
   case ir_unop_pack_image_2x32:
   case ir_unop_pack_double_2x32:
   case ir_unop_pack_int_2x32:
   case ir_unop_pack_uint_2x32:
      result = nir_pack_64_2x32(&b, srcs[0]);
      break;
   case ir_unop_unpack_sampler_2x32:
   case ir_unop_unpack_image_2x32:
   case ir_unop_unpack_double_2x32:
   case ir_unop_unpack_int_2x32:
   case ir_unop_unpack_uint_2x32:
      result = nir_unpack_64_2x32(&b, srcs[0]);
      break;
   case ir_unop_bitfield_reverse:
      result = nir_bitfield_reverse(&b, srcs[0]);
      break;
   case ir_unop_bit_count:
      result = nir_bit_count(&b, srcs[0]);
      break;
   case ir_unop_find_msb:
      switch (types[0]) {
      case GLSL_TYPE_UINT:
         result = nir_ufind_msb(&b, srcs[0]);
         break;
      case GLSL_TYPE_INT:
         result = nir_ifind_msb(&b, srcs[0]);
         break;
      default:
         unreachable("Invalid type for findMSB()");
      }
      break;
   case ir_unop_find_lsb:
      result = nir_find_lsb(&b, srcs[0]);
      break;
   case ir_unop_get_buffer_size: {
      nir_intrinsic_instr *load = nir_intrinsic_instr_create(
         this->shader,
         nir_intrinsic_get_buffer_size);
      load->num_components = ir->type->vector_elements;
      load->src[0] = nir_src_for_ssa(evaluate_rvalue(ir->operands[0]));
      unsigned bit_size = glsl_get_bit_size(ir->type);
      add_instr(&load->instr, ir->type->vector_elements, bit_size);
      return;
   }

   case ir_unop_atan:
      result = nir_atan(&b, srcs[0]);
      break;
   case ir_binop_add:
      result = type_is_float(out_type) ? nir_fadd(&b, srcs[0], srcs[1])
                                       : nir_iadd(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_add_sat:
      result = type_is_signed(out_type) ? nir_iadd_sat(&b, srcs[0], srcs[1])
                                        : nir_uadd_sat(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_sub:
      result = type_is_float(out_type) ? nir_fsub(&b, srcs[0], srcs[1])
                                       : nir_isub(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_sub_sat:
      result = type_is_signed(out_type) ? nir_isub_sat(&b, srcs[0], srcs[1])
                                        : nir_usub_sat(&b, srcs[0], srcs[1]);
      break;
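
   /* Note: ir_binop_abs_sub comes from absoluteDifference() in
    * GL_INTEL_shader_integer_functions2, whose result type is unsigned
    * regardless of the operand signedness.
    */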
   case ir_binop_abs_sub:
      /* out_type is always unsigned for ir_binop_abs_sub, so we have to key
       * on the type of the sources.
       */
      result = type_is_signed(types[0]) ? nir_uabs_isub(&b, srcs[0], srcs[1])
                                        : nir_uabs_usub(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_avg:
      result = type_is_signed(out_type) ? nir_ihadd(&b, srcs[0], srcs[1])
                                        : nir_uhadd(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_avg_round:
      result = type_is_signed(out_type) ? nir_irhadd(&b, srcs[0], srcs[1])
                                        : nir_urhadd(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_mul_32x16:
      result = type_is_signed(out_type) ? nir_imul_32x16(&b, srcs[0], srcs[1])
                                        : nir_umul_32x16(&b, srcs[0], srcs[1]);
      break;
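
   /* For multiplies whose 64-bit result comes from two 32-bit operands,
    * the cheaper widening opcodes nir_imul_2x32_64 / nir_umul_2x32_64 are
    * picked below instead of a full 64x64 multiply.
    */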
   case ir_binop_mul:
      if (type_is_float(out_type))
         result = nir_fmul(&b, srcs[0], srcs[1]);
      else if (out_type == GLSL_TYPE_INT64 &&
               (ir->operands[0]->type->base_type == GLSL_TYPE_INT ||
                ir->operands[1]->type->base_type == GLSL_TYPE_INT))
         result = nir_imul_2x32_64(&b, srcs[0], srcs[1]);
      else if (out_type == GLSL_TYPE_UINT64 &&
               (ir->operands[0]->type->base_type == GLSL_TYPE_UINT ||
                ir->operands[1]->type->base_type == GLSL_TYPE_UINT))
         result = nir_umul_2x32_64(&b, srcs[0], srcs[1]);
      else
         result = nir_imul(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_div:
      if (type_is_float(out_type))
         result = nir_fdiv(&b, srcs[0], srcs[1]);
      else if (type_is_signed(out_type))
         result = nir_idiv(&b, srcs[0], srcs[1]);
      else
         result = nir_udiv(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_mod:
      result = type_is_float(out_type) ? nir_fmod(&b, srcs[0], srcs[1])
                                       : nir_umod(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_min:
      if (type_is_float(out_type))
         result = nir_fmin(&b, srcs[0], srcs[1]);
      else if (type_is_signed(out_type))
         result = nir_imin(&b, srcs[0], srcs[1]);
      else
         result = nir_umin(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_max:
      if (type_is_float(out_type))
         result = nir_fmax(&b, srcs[0], srcs[1]);
      else if (type_is_signed(out_type))
         result = nir_imax(&b, srcs[0], srcs[1]);
      else
         result = nir_umax(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_pow: result = nir_fpow(&b, srcs[0], srcs[1]); break;
   case ir_binop_bit_and: result = nir_iand(&b, srcs[0], srcs[1]); break;
   case ir_binop_bit_or:  result = nir_ior(&b, srcs[0], srcs[1]); break;
   case ir_binop_bit_xor: result = nir_ixor(&b, srcs[0], srcs[1]); break;
   case ir_binop_logic_and:
      result = nir_iand(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_logic_or:
      result = nir_ior(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_logic_xor:
      result = nir_ixor(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_lshift: result = nir_ishl(&b, srcs[0], srcs[1]); break;
   case ir_binop_rshift:
      result = (type_is_signed(out_type)) ? nir_ishr(&b, srcs[0], srcs[1])
                                          : nir_ushr(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_imul_high:
      result = (out_type == GLSL_TYPE_INT) ? nir_imul_high(&b, srcs[0], srcs[1])
                                           : nir_umul_high(&b, srcs[0], srcs[1]);
      break;
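
   /* GLSL's uaddCarry()/usubBorrow() return the carry/borrow bit through an
    * out parameter; GLSL IR splits that into ir_binop_carry/ir_binop_borrow,
    * which map directly onto the NIR opcodes below.
    */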
   case ir_binop_carry:  result = nir_uadd_carry(&b, srcs[0], srcs[1]); break;
   case ir_binop_borrow: result = nir_usub_borrow(&b, srcs[0], srcs[1]); break;
   case ir_binop_less:
      if (type_is_float(types[0]))
         result = nir_flt(&b, srcs[0], srcs[1]);
      else if (type_is_signed(types[0]))
         result = nir_ilt(&b, srcs[0], srcs[1]);
      else
         result = nir_ult(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_gequal:
      if (type_is_float(types[0]))
         result = nir_fge(&b, srcs[0], srcs[1]);
      else if (type_is_signed(types[0]))
         result = nir_ige(&b, srcs[0], srcs[1]);
      else
         result = nir_uge(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_equal:
      if (type_is_float(types[0]))
         result = nir_feq(&b, srcs[0], srcs[1]);
      else
         result = nir_ieq(&b, srcs[0], srcs[1]);
      break;
   case ir_binop_nequal:
      if (type_is_float(types[0]))
         result = nir_fne(&b, srcs[0], srcs[1]);
      else
         result = nir_ine(&b, srcs[0], srcs[1]);
      break;
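
   /* Unlike the component-wise comparisons above, all_equal/any_nequal
    * reduce a whole-vector comparison to a single bool, e.g.
    * "ivec3(a) == ivec3(b)" becomes nir_ball_iequal3.
    */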
   case ir_binop_all_equal:
      if (type_is_float(types[0])) {
         switch (ir->operands[0]->type->vector_elements) {
            case 1: result = nir_feq(&b, srcs[0], srcs[1]); break;
            case 2: result = nir_ball_fequal2(&b, srcs[0], srcs[1]); break;
            case 3: result = nir_ball_fequal3(&b, srcs[0], srcs[1]); break;
            case 4: result = nir_ball_fequal4(&b, srcs[0], srcs[1]); break;
            default:
               unreachable("not reached");
         }
      } else {
         switch (ir->operands[0]->type->vector_elements) {
            case 1: result = nir_ieq(&b, srcs[0], srcs[1]); break;
            case 2: result = nir_ball_iequal2(&b, srcs[0], srcs[1]); break;
            case 3: result = nir_ball_iequal3(&b, srcs[0], srcs[1]); break;
            case 4: result = nir_ball_iequal4(&b, srcs[0], srcs[1]); break;
            default:
               unreachable("not reached");
         }
      }
      break;
   case ir_binop_any_nequal:
      if (type_is_float(types[0])) {
         switch (ir->operands[0]->type->vector_elements) {
            case 1: result = nir_fne(&b, srcs[0], srcs[1]); break;
            case 2: result = nir_bany_fnequal2(&b, srcs[0], srcs[1]); break;
            case 3: result = nir_bany_fnequal3(&b, srcs[0], srcs[1]); break;
            case 4: result = nir_bany_fnequal4(&b, srcs[0], srcs[1]); break;
            default:
               unreachable("not reached");
         }
      } else {
         switch (ir->operands[0]->type->vector_elements) {
            case 1: result = nir_ine(&b, srcs[0], srcs[1]); break;
            case 2: result = nir_bany_inequal2(&b, srcs[0], srcs[1]); break;
            case 3: result = nir_bany_inequal3(&b, srcs[0], srcs[1]); break;
            case 4: result = nir_bany_inequal4(&b, srcs[0], srcs[1]); break;
            default:
               unreachable("not reached");
         }
      }
      break;
   case ir_binop_dot:
      switch (ir->operands[0]->type->vector_elements) {
         case 2: result = nir_fdot2(&b, srcs[0], srcs[1]); break;
         case 3: result = nir_fdot3(&b, srcs[0], srcs[1]); break;
         case 4: result = nir_fdot4(&b, srcs[0], srcs[1]); break;
         default:
            unreachable("not reached");
      }
      break;
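
   /* A dynamically indexed v[i] cannot become a static swizzle, so it is
    * lowered below to a bcsel chain: start from channel 0 and conditionally
    * replace the result whenever the index matches channel i (three
    * nir_bcsel instructions for a vec4).
    */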
   case ir_binop_vector_extract: {
      result = nir_channel(&b, srcs[0], 0);
      for (unsigned i = 1; i < ir->operands[0]->type->vector_elements; i++) {
         nir_ssa_def *swizzled = nir_channel(&b, srcs[0], i);
         result = nir_bcsel(&b, nir_ieq(&b, srcs[1], nir_imm_int(&b, i)),
                            swizzled, result);
      }
      break;
   }
   case ir_binop_atan2:
      result = nir_atan2(&b, srcs[0], srcs[1]);
      break;

   case ir_binop_ldexp: result = nir_ldexp(&b, srcs[0], srcs[1]); break;
   case ir_triop_fma:
      result = nir_ffma(&b, srcs[0], srcs[1], srcs[2]);
      break;
   case ir_triop_lrp:
      result = nir_flrp(&b, srcs[0], srcs[1], srcs[2]);
      break;
   case ir_triop_csel:
      result = nir_bcsel(&b, srcs[0], srcs[1], srcs[2]);
      break;
   case ir_triop_bitfield_extract:
      result = (out_type == GLSL_TYPE_INT) ?
         nir_ibitfield_extract(&b, srcs[0], srcs[1], srcs[2]) :
         nir_ubitfield_extract(&b, srcs[0], srcs[1], srcs[2]);
      break;
   case ir_quadop_bitfield_insert:
      result = nir_bitfield_insert(&b, srcs[0], srcs[1], srcs[2], srcs[3]);
      break;
   case ir_quadop_vector:
      result = nir_vec(&b, srcs, ir->type->vector_elements);
      break;

   default:
      unreachable("not reached");
   }
}
void
nir_visitor::visit(ir_swizzle *ir)
{
   unsigned swizzle[4] = { ir->mask.x, ir->mask.y, ir->mask.z, ir->mask.w };
   result = nir_swizzle(&b, evaluate_rvalue(ir->val), swizzle,
                        ir->type->vector_elements);
}
void
nir_visitor::visit(ir_texture *ir)
{
   unsigned num_srcs;
   nir_texop op;
   switch (ir->op) {
   case ir_tex:
      op = nir_texop_tex;
      num_srcs = 1; /* coordinate */
      break;

   case ir_txb:
   case ir_txl:
      op = (ir->op == ir_txb) ? nir_texop_txb : nir_texop_txl;
      num_srcs = 2; /* coordinate, bias/lod */
      break;

   case ir_txd:
      op = nir_texop_txd; /* coordinate, dPdx, dPdy */
      num_srcs = 3;
      break;

   case ir_txf:
      op = nir_texop_txf;
      if (ir->lod_info.lod != NULL)
         num_srcs = 2; /* coordinate, lod */
      else
         num_srcs = 1; /* coordinate */
      break;

   case ir_txf_ms:
      op = nir_texop_txf_ms;
      num_srcs = 2; /* coordinate, sample_index */
      break;

   case ir_txs:
      op = nir_texop_txs;
      if (ir->lod_info.lod != NULL)
         num_srcs = 1; /* lod */
      else
         num_srcs = 0;
      break;

   case ir_lod:
      op = nir_texop_lod;
      num_srcs = 1; /* coordinate */
      break;

   case ir_tg4:
      op = nir_texop_tg4;
      num_srcs = 1; /* coordinate */
      break;

   case ir_query_levels:
      op = nir_texop_query_levels;
      num_srcs = 0;
      break;

   case ir_texture_samples:
      op = nir_texop_texture_samples;
      num_srcs = 0;
      break;

   case ir_samples_identical:
      op = nir_texop_samples_identical;
      num_srcs = 1; /* coordinate */
      break;

   default:
      unreachable("not reached");
   }
   if (ir->projector != NULL)
      num_srcs++;
   if (ir->shadow_comparator != NULL)
      num_srcs++;
   /* offsets are constants we store inside nir_tex_intrs.offsets */
   if (ir->offset != NULL && !ir->offset->type->is_array())
      num_srcs++;
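
   /* For instance (hypothetical call), a shadow-sampler textureProjOffset()
    * would add projector, comparator and offset sources on top of the base
    * sources counted in the switch above.
    */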
   /* Add two for the texture and sampler derefs */
   num_srcs += 2;

   nir_tex_instr *instr = nir_tex_instr_create(this->shader, num_srcs);
   instr->op = op;
   instr->sampler_dim =
      (glsl_sampler_dim) ir->sampler->type->sampler_dimensionality;
   instr->is_array = ir->sampler->type->sampler_array;
   instr->is_shadow = ir->sampler->type->sampler_shadow;
   if (instr->is_shadow)
      instr->is_new_style_shadow = (ir->type->vector_elements == 1);
   switch (ir->type->base_type) {
   case GLSL_TYPE_FLOAT:
      instr->dest_type = nir_type_float;
      break;
   case GLSL_TYPE_FLOAT16:
      instr->dest_type = nir_type_float16;
      break;
   case GLSL_TYPE_INT16:
      instr->dest_type = nir_type_int16;
      break;
   case GLSL_TYPE_UINT16:
      instr->dest_type = nir_type_uint16;
      break;
   case GLSL_TYPE_INT:
      instr->dest_type = nir_type_int;
      break;
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_UINT:
      instr->dest_type = nir_type_uint;
      break;
   default:
      unreachable("not reached");
   }
   nir_deref_instr *sampler_deref = evaluate_deref(ir->sampler);
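
   /* With ARB_bindless_texture the sampler variable holds a 64-bit handle
    * instead of naming a bound texture unit, so the handle is loaded once
    * and used for both the texture and sampler sources below.
    */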
   /* check for bindless handles */
   if (sampler_deref->mode != nir_var_uniform ||
       nir_deref_instr_get_variable(sampler_deref)->data.bindless) {
      nir_ssa_def *load = nir_load_deref(&b, sampler_deref);
      instr->src[0].src = nir_src_for_ssa(load);
      instr->src[0].src_type = nir_tex_src_texture_handle;
      instr->src[1].src = nir_src_for_ssa(load);
      instr->src[1].src_type = nir_tex_src_sampler_handle;
   } else {
      instr->src[0].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
      instr->src[0].src_type = nir_tex_src_texture_deref;
      instr->src[1].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
      instr->src[1].src_type = nir_tex_src_sampler_deref;
   }
   unsigned src_number = 2;

   if (ir->coordinate != NULL) {
      instr->coord_components = ir->coordinate->type->vector_elements;
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->coordinate));
      instr->src[src_number].src_type = nir_tex_src_coord;
      src_number++;
   }

   if (ir->projector != NULL) {
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->projector));
      instr->src[src_number].src_type = nir_tex_src_projector;
      src_number++;
   }

   if (ir->shadow_comparator != NULL) {
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->shadow_comparator));
      instr->src[src_number].src_type = nir_tex_src_comparator;
      src_number++;
   }
   if (ir->offset != NULL) {
      if (ir->offset->type->is_array()) {
         for (int i = 0; i < ir->offset->type->array_size(); i++) {
            const ir_constant *c =
               ir->offset->as_constant()->get_array_element(i);

            for (unsigned j = 0; j < 2; ++j) {
               int val = c->get_int_component(j);
               assert(val <= 31 && val >= -32);
               instr->tg4_offsets[i][j] = val;
            }
         }
      } else {
         assert(ir->offset->type->is_vector() || ir->offset->type->is_scalar());

         instr->src[src_number].src =
            nir_src_for_ssa(evaluate_rvalue(ir->offset));
         instr->src[src_number].src_type = nir_tex_src_offset;
         src_number++;
      }
   }
   switch (ir->op) {
   case ir_txb:
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->lod_info.bias));
      instr->src[src_number].src_type = nir_tex_src_bias;
      src_number++;
      break;

   case ir_txl:
   case ir_txf:
   case ir_txs:
      if (ir->lod_info.lod != NULL) {
         instr->src[src_number].src =
            nir_src_for_ssa(evaluate_rvalue(ir->lod_info.lod));
         instr->src[src_number].src_type = nir_tex_src_lod;
         src_number++;
      }
      break;

   case ir_txd:
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->lod_info.grad.dPdx));
      instr->src[src_number].src_type = nir_tex_src_ddx;
      src_number++;
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->lod_info.grad.dPdy));
      instr->src[src_number].src_type = nir_tex_src_ddy;
      src_number++;
      break;

   case ir_txf_ms:
      instr->src[src_number].src =
         nir_src_for_ssa(evaluate_rvalue(ir->lod_info.sample_index));
      instr->src[src_number].src_type = nir_tex_src_ms_index;
      src_number++;
      break;

   case ir_tg4:
      instr->component = ir->lod_info.component->as_constant()->value.u[0];
      break;

   default:
      break;
   }
   assert(src_number == num_srcs);

   unsigned bit_size = glsl_get_bit_size(ir->type);
   add_instr(&instr->instr, nir_tex_instr_dest_size(instr), bit_size);
}
void
nir_visitor::visit(ir_constant *ir)
{
   /*
    * We don't know if this variable is an array or struct that gets
    * dereferenced, so do the safe thing and make it a variable with a
    * constant initializer and return a dereference.
    */

   nir_variable *var =
      nir_local_variable_create(this->impl, ir->type, "const_temp");
   var->data.read_only = true;
   var->constant_initializer = constant_copy(ir, var);

   this->deref = nir_build_deref_var(&b, var);
}
void
nir_visitor::visit(ir_dereference_variable *ir)
{
   if (ir->variable_referenced()->data.mode == ir_var_function_out) {
      unsigned i = (sig->return_type != glsl_type::void_type) ? 1 : 0;

      foreach_in_list(ir_variable, param, &sig->parameters) {
         if (param == ir->variable_referenced()) {
            break;
         }
         i++;
      }

      this->deref = nir_build_deref_cast(&b, nir_load_param(&b, i),
                                         nir_var_function_temp, ir->type, 0);
      return;
   }

   assert(ir->variable_referenced()->data.mode != ir_var_function_inout);

   struct hash_entry *entry =
      _mesa_hash_table_search(this->var_table, ir->var);
   assert(entry);

   nir_variable *var = (nir_variable *) entry->data;

   this->deref = nir_build_deref_var(&b, var);
}
void
nir_visitor::visit(ir_dereference_record *ir)
{
   ir->record->accept(this);

   int field_index = ir->field_idx;
   assert(field_index >= 0);

   this->deref = nir_build_deref_struct(&b, this->deref, field_index);
}
void
nir_visitor::visit(ir_dereference_array *ir)
{
   nir_ssa_def *index = evaluate_rvalue(ir->array_index);

   ir->array->accept(this);

   this->deref = nir_build_deref_array(&b, this->deref, index);
}
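
/* GLSL barrier() also orders shared-memory accesses in compute shaders and
 * patch-output accesses in tessellation control shaders, which is why a
 * memory barrier is emitted alongside the control barrier below.
 */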
void
nir_visitor::visit(ir_barrier *)
{
   if (shader->info.stage == MESA_SHADER_COMPUTE) {
      nir_intrinsic_instr *shared_barrier =
         nir_intrinsic_instr_create(this->shader,
                                    nir_intrinsic_memory_barrier_shared);
      nir_builder_instr_insert(&b, &shared_barrier->instr);
   } else if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
      nir_intrinsic_instr *patch_barrier =
         nir_intrinsic_instr_create(this->shader,
                                    nir_intrinsic_memory_barrier_tcs_patch);
      nir_builder_instr_insert(&b, &patch_barrier->instr);
   }

   nir_intrinsic_instr *instr =
      nir_intrinsic_instr_create(this->shader, nir_intrinsic_control_barrier);
   nir_builder_instr_insert(&b, &instr->instr);
}
nir_shader *
glsl_float64_funcs_to_nir(struct gl_context *ctx,
                          const nir_shader_compiler_options *options)
{
   /* We pretend it's a vertex shader. Ultimately, the stage shouldn't
    * matter because we're not optimizing anything here.
    */
   struct gl_shader *sh = _mesa_new_shader(-1, MESA_SHADER_VERTEX);
   sh->Source = float64_source;
   sh->CompileStatus = COMPILE_FAILURE;
   _mesa_glsl_compile_shader(ctx, sh, false, false, true);

   if (!sh->CompileStatus) {
      if (sh->InfoLog) {
         _mesa_problem(ctx,
                       "fp64 software impl compile failed:\n%s\nsource:\n%s\n",
                       sh->InfoLog, float64_source);
      }
      return NULL;
   }

   nir_shader *nir = nir_shader_create(NULL, MESA_SHADER_VERTEX, options, NULL);

   nir_visitor v1(ctx, nir);
   nir_function_visitor v2(&v1);
   v2.run(sh->ir);
   visit_exec_list(sh->ir, &v1);

   /* _mesa_delete_shader will try to free sh->Source but it's static const */
   sh->Source = NULL;
   _mesa_delete_shader(ctx, sh);

   nir_validate_shader(nir, "float64_funcs_to_nir");

   NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_opt_deref);

   /* Do some optimizations to clean up the shader now. By optimizing the
    * functions in the library, we avoid having to re-do that work every
    * time we inline a copy of a function. Reducing basic blocks also helps
    * with compile times.
    */
   NIR_PASS_V(nir, nir_lower_vars_to_ssa);
   NIR_PASS_V(nir, nir_copy_prop);
   NIR_PASS_V(nir, nir_opt_dce);
   NIR_PASS_V(nir, nir_opt_cse);
   NIR_PASS_V(nir, nir_opt_gcm, true);
   NIR_PASS_V(nir, nir_opt_peephole_select, 1, false, false);
   NIR_PASS_V(nir, nir_opt_dce);

   return nir;
}