2 * Copyright © 2019 Google, Inc
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * \file lower_precision.cpp
28 #include "main/macros.h"
29 #include "main/mtypes.h"
30 #include "compiler/glsl_types.h"
32 #include "ir_builder.h"
33 #include "ir_optimization.h"
34 #include "ir_rvalue_visitor.h"
35 #include "util/half_float.h"
37 #include "util/hash_table.h"
42 class find_precision_visitor
: public ir_rvalue_enter_visitor
{
44 find_precision_visitor(const struct gl_shader_compiler_options
*options
);
45 ~find_precision_visitor();
47 virtual void handle_rvalue(ir_rvalue
**rvalue
);
48 virtual ir_visitor_status
visit_enter(ir_call
*ir
);
50 ir_function_signature
*map_builtin(ir_function_signature
*sig
);
52 /* Set of rvalues that can be lowered. This will be filled in by
53 * find_lowerable_rvalues_visitor. Only the root node of a lowerable section
54 * will be added to this set.
56 struct set
*lowerable_rvalues
;
59 * A mapping of builtin signature functions to lowered versions. This is
60 * filled in lazily when a lowered version is needed.
62 struct hash_table
*lowered_builtins
;
64 * A temporary hash table only used in order to clone functions.
66 struct hash_table
*clone_ht
;
68 void *lowered_builtin_mem_ctx
;
70 const struct gl_shader_compiler_options
*options
;
73 class find_lowerable_rvalues_visitor
: public ir_hierarchical_visitor
{
75 enum can_lower_state
{
81 enum parent_relation
{
82 /* The parent performs a further operation involving the result from the
83 * child and can be lowered along with it.
86 /* The parent instruction’s operation is independent of the child type so
87 * the child should be lowered separately.
89 INDEPENDENT_OPERATION
,
93 ir_instruction
*instr
;
94 enum can_lower_state state
;
95 /* List of child rvalues that can be lowered. When this stack entry is
96 * popped, if this node itself can’t be lowered than all of the children
97 * are root nodes to lower so we will add them to lowerable_rvalues.
98 * Otherwise if this node can also be lowered then we won’t add the
99 * children because we only want to add the topmost lowerable nodes to
100 * lowerable_rvalues and the children will be lowered as part of lowering
103 std::vector
<ir_instruction
*> lowerable_children
;
106 find_lowerable_rvalues_visitor(struct set
*result
,
107 const struct gl_shader_compiler_options
*options
);
109 static void stack_enter(class ir_instruction
*ir
, void *data
);
110 static void stack_leave(class ir_instruction
*ir
, void *data
);
112 virtual ir_visitor_status
visit(ir_constant
*ir
);
113 virtual ir_visitor_status
visit(ir_dereference_variable
*ir
);
115 virtual ir_visitor_status
visit_enter(ir_dereference_record
*ir
);
116 virtual ir_visitor_status
visit_enter(ir_dereference_array
*ir
);
117 virtual ir_visitor_status
visit_enter(ir_texture
*ir
);
118 virtual ir_visitor_status
visit_enter(ir_expression
*ir
);
120 virtual ir_visitor_status
visit_leave(ir_assignment
*ir
);
121 virtual ir_visitor_status
visit_leave(ir_call
*ir
);
123 can_lower_state
handle_precision(const glsl_type
*type
,
124 int precision
) const;
126 static parent_relation
get_parent_relation(ir_instruction
*parent
,
127 ir_instruction
*child
);
129 std::vector
<stack_entry
> stack
;
130 struct set
*lowerable_rvalues
;
131 const struct gl_shader_compiler_options
*options
;
133 void pop_stack_entry();
134 void add_lowerable_children(const stack_entry
&entry
);
137 class lower_precision_visitor
: public ir_rvalue_visitor
{
139 virtual void handle_rvalue(ir_rvalue
**rvalue
);
140 virtual ir_visitor_status
visit_enter(ir_dereference_array
*);
141 virtual ir_visitor_status
visit_enter(ir_dereference_record
*);
142 virtual ir_visitor_status
visit_enter(ir_call
*ir
);
143 virtual ir_visitor_status
visit_enter(ir_texture
*ir
);
144 virtual ir_visitor_status
visit_leave(ir_expression
*);
148 can_lower_type(const struct gl_shader_compiler_options
*options
,
149 const glsl_type
*type
)
151 /* Don’t lower any expressions involving non-float types except bool and
152 * texture samplers. This will rule out operations that change the type such
153 * as conversion to ints. Instead it will end up lowering the arguments
154 * instead and adding a final conversion to float32. We want to handle
155 * boolean types so that it will do comparisons as 16-bit.
158 switch (type
->without_array()->base_type
) {
159 /* TODO: should we do anything for these two with regard to Int16 vs FP16
163 case GLSL_TYPE_SAMPLER
:
164 case GLSL_TYPE_IMAGE
:
167 case GLSL_TYPE_FLOAT
:
168 return options
->LowerPrecisionFloat16
;
172 return options
->LowerPrecisionInt16
;
179 find_lowerable_rvalues_visitor::find_lowerable_rvalues_visitor(struct set
*res
,
180 const struct gl_shader_compiler_options
*opts
)
182 lowerable_rvalues
= res
;
184 callback_enter
= stack_enter
;
185 callback_leave
= stack_leave
;
191 find_lowerable_rvalues_visitor::stack_enter(class ir_instruction
*ir
,
194 find_lowerable_rvalues_visitor
*state
=
195 (find_lowerable_rvalues_visitor
*) data
;
197 /* Add a new stack entry for this instruction */
201 entry
.state
= state
->in_assignee
? CANT_LOWER
: UNKNOWN
;
203 state
->stack
.push_back(entry
);
207 find_lowerable_rvalues_visitor::add_lowerable_children(const stack_entry
&entry
)
209 /* We can’t lower this node so if there were any pending children then they
210 * are all root lowerable nodes and we should add them to the set.
212 for (auto &it
: entry
.lowerable_children
)
213 _mesa_set_add(lowerable_rvalues
, it
);
217 find_lowerable_rvalues_visitor::pop_stack_entry()
219 const stack_entry
&entry
= stack
.back();
221 if (stack
.size() >= 2) {
222 /* Combine this state into the parent state, unless the parent operation
223 * doesn’t have any relation to the child operations
225 stack_entry
&parent
= stack
.end()[-2];
226 parent_relation rel
= get_parent_relation(parent
.instr
, entry
.instr
);
228 if (rel
== COMBINED_OPERATION
) {
229 switch (entry
.state
) {
231 parent
.state
= CANT_LOWER
;
234 if (parent
.state
== UNKNOWN
)
235 parent
.state
= SHOULD_LOWER
;
243 if (entry
.state
== SHOULD_LOWER
) {
244 ir_rvalue
*rv
= entry
.instr
->as_rvalue();
247 add_lowerable_children(entry
);
248 } else if (stack
.size() >= 2) {
249 stack_entry
&parent
= stack
.end()[-2];
251 switch (get_parent_relation(parent
.instr
, rv
)) {
252 case COMBINED_OPERATION
:
253 /* We only want to add the toplevel lowerable instructions to the
254 * lowerable set. Therefore if there is a parent then instead of
255 * adding this instruction to the set we will queue depending on
256 * the result of the parent instruction.
258 parent
.lowerable_children
.push_back(entry
.instr
);
260 case INDEPENDENT_OPERATION
:
261 _mesa_set_add(lowerable_rvalues
, rv
);
265 /* This is a toplevel node so add it directly to the lowerable
268 _mesa_set_add(lowerable_rvalues
, rv
);
270 } else if (entry
.state
== CANT_LOWER
) {
271 add_lowerable_children(entry
);
278 find_lowerable_rvalues_visitor::stack_leave(class ir_instruction
*ir
,
281 find_lowerable_rvalues_visitor
*state
=
282 (find_lowerable_rvalues_visitor
*) data
;
284 state
->pop_stack_entry();
287 enum find_lowerable_rvalues_visitor::can_lower_state
288 find_lowerable_rvalues_visitor::handle_precision(const glsl_type
*type
,
291 if (!can_lower_type(options
, type
))
295 case GLSL_PRECISION_NONE
:
297 case GLSL_PRECISION_HIGH
:
299 case GLSL_PRECISION_MEDIUM
:
300 case GLSL_PRECISION_LOW
:
307 enum find_lowerable_rvalues_visitor::parent_relation
308 find_lowerable_rvalues_visitor::get_parent_relation(ir_instruction
*parent
,
309 ir_instruction
*child
)
311 /* If the parent is a dereference instruction then the only child could be
312 * for example an array dereference and that should be lowered independently
315 if (parent
->as_dereference())
316 return INDEPENDENT_OPERATION
;
318 /* The precision of texture sampling depend on the precision of the sampler.
319 * The rest of the arguments don’t matter so we can treat it as an
320 * independent operation.
322 if (parent
->as_texture())
323 return INDEPENDENT_OPERATION
;
325 return COMBINED_OPERATION
;
329 find_lowerable_rvalues_visitor::visit(ir_constant
*ir
)
331 stack_enter(ir
, this);
333 if (!can_lower_type(options
, ir
->type
))
334 stack
.back().state
= CANT_LOWER
;
336 stack_leave(ir
, this);
338 return visit_continue
;
342 find_lowerable_rvalues_visitor::visit(ir_dereference_variable
*ir
)
344 stack_enter(ir
, this);
346 if (stack
.back().state
== UNKNOWN
)
347 stack
.back().state
= handle_precision(ir
->type
, ir
->precision());
349 stack_leave(ir
, this);
351 return visit_continue
;
355 find_lowerable_rvalues_visitor::visit_enter(ir_dereference_record
*ir
)
357 ir_hierarchical_visitor::visit_enter(ir
);
359 if (stack
.back().state
== UNKNOWN
)
360 stack
.back().state
= handle_precision(ir
->type
, ir
->precision());
362 return visit_continue
;
366 find_lowerable_rvalues_visitor::visit_enter(ir_dereference_array
*ir
)
368 ir_hierarchical_visitor::visit_enter(ir
);
370 if (stack
.back().state
== UNKNOWN
)
371 stack
.back().state
= handle_precision(ir
->type
, ir
->precision());
373 return visit_continue
;
377 find_lowerable_rvalues_visitor::visit_enter(ir_texture
*ir
)
379 ir_hierarchical_visitor::visit_enter(ir
);
381 /* The precision of the sample value depends on the precision of the
384 stack
.back().state
= handle_precision(ir
->type
,
385 ir
->sampler
->precision());
386 return visit_continue
;
390 find_lowerable_rvalues_visitor::visit_enter(ir_expression
*ir
)
392 ir_hierarchical_visitor::visit_enter(ir
);
394 if (!can_lower_type(options
, ir
->type
))
395 stack
.back().state
= CANT_LOWER
;
397 /* Don't lower precision for derivative calculations */
398 if (!options
->LowerPrecisionDerivatives
&&
399 (ir
->operation
== ir_unop_dFdx
||
400 ir
->operation
== ir_unop_dFdx_coarse
||
401 ir
->operation
== ir_unop_dFdx_fine
||
402 ir
->operation
== ir_unop_dFdy
||
403 ir
->operation
== ir_unop_dFdy_coarse
||
404 ir
->operation
== ir_unop_dFdy_fine
)) {
405 stack
.back().state
= CANT_LOWER
;
408 return visit_continue
;
/* Returns true for builtins whose return value is always mediump or lowp,
 * independent of the precision of their arguments.
 */
static bool
function_always_returns_mediump_or_lowp(const char *name)
{
   return !strcmp(name, "bitCount") ||
          !strcmp(name, "findLSB") ||
          !strcmp(name, "findMSB") ||
          !strcmp(name, "unpackHalf2x16") ||
          !strcmp(name, "unpackUnorm4x8") ||
          !strcmp(name, "unpackSnorm4x8");
}
423 is_lowerable_builtin(ir_call
*ir
,
424 const struct set
*lowerable_rvalues
)
426 /* The intrinsic call is inside the wrapper imageLoad function that will
427 * be inlined. We have to handle both of them.
429 if (ir
->callee
->intrinsic_id
== ir_intrinsic_image_load
||
430 (ir
->callee
->is_builtin() &&
431 !strcmp(ir
->callee_name(), "imageLoad"))) {
432 ir_rvalue
*param
= (ir_rvalue
*)ir
->actual_parameters
.get_head();
433 ir_variable
*resource
= param
->variable_referenced();
435 assert(ir
->callee
->return_precision
== GLSL_PRECISION_NONE
);
436 assert(resource
->type
->without_array()->is_image());
438 /* GLSL ES 3.20 requires that images have a precision modifier, but if
439 * you set one, it doesn't do anything, because all intrinsics are
440 * defined with highp. This seems to be a spec bug.
442 * In theory we could set the return value to mediump if the image
443 * format has a lower precision. This appears to be the most sensible
446 const struct util_format_description
*desc
=
447 util_format_description(resource
->data
.image_format
);
449 util_format_get_first_non_void_channel(resource
->data
.image_format
);
452 if (desc
->channel
[i
].pure_integer
||
453 desc
->channel
[i
].type
== UTIL_FORMAT_TYPE_FLOAT
)
454 return desc
->channel
[i
].size
<= 16;
456 return desc
->channel
[i
].size
<= 10; /* unorm/snorm */
459 /* Handle special calls. */
460 if (ir
->callee
->is_builtin() && ir
->actual_parameters
.length()) {
461 ir_rvalue
*param
= (ir_rvalue
*)ir
->actual_parameters
.get_head();
462 ir_variable
*var
= param
->variable_referenced();
464 /* Handle builtin wrappers around ir_texture opcodes. These wrappers will
465 * be inlined by lower_precision() if we return true here, so that we can
466 * get to ir_texture later and do proper lowering.
468 * We should lower the type of the return value if the sampler type
469 * uses lower precision. The function parameters don't matter.
471 if (var
&& var
->type
->without_array()->is_sampler()) {
472 /* textureSize always returns highp. */
473 if (!strcmp(ir
->callee_name(), "textureSize"))
476 return var
->data
.precision
== GLSL_PRECISION_MEDIUM
||
477 var
->data
.precision
== GLSL_PRECISION_LOW
;
481 if (!ir
->callee
->is_builtin() ||
482 /* Parameters are always highp: */
483 !strcmp(ir
->callee_name(), "floatBitsToInt") ||
484 !strcmp(ir
->callee_name(), "floatBitsToUint") ||
485 !strcmp(ir
->callee_name(), "intBitsToFloat") ||
486 !strcmp(ir
->callee_name(), "uintBitsToFloat") ||
487 !strcmp(ir
->callee_name(), "bitfieldReverse") ||
488 !strcmp(ir
->callee_name(), "frexp") ||
489 !strcmp(ir
->callee_name(), "ldexp") ||
490 /* Parameters and outputs are always highp: */
491 /* TODO: The operations are highp, but carry and borrow outputs are lowp. */
492 !strcmp(ir
->callee_name(), "uaddCarry") ||
493 !strcmp(ir
->callee_name(), "usubBorrow") ||
494 !strcmp(ir
->callee_name(), "imulExtended") ||
495 !strcmp(ir
->callee_name(), "umulExtended") ||
496 !strcmp(ir
->callee_name(), "unpackUnorm2x16") ||
497 !strcmp(ir
->callee_name(), "unpackSnorm2x16") ||
498 /* Outputs are highp: */
499 !strcmp(ir
->callee_name(), "packUnorm2x16") ||
500 !strcmp(ir
->callee_name(), "packSnorm2x16") ||
501 /* Parameters are mediump and outputs are highp. The parameters should
502 * be optimized in NIR, not here, e.g:
503 * - packHalf2x16 can just be a bitcast from f16vec2 to uint32
504 * - Other opcodes don't have to convert parameters to highp if the hw
505 * has f16 versions. Optimize in NIR accordingly.
507 !strcmp(ir
->callee_name(), "packHalf2x16") ||
508 !strcmp(ir
->callee_name(), "packUnorm4x8") ||
509 !strcmp(ir
->callee_name(), "packSnorm4x8"))
512 assert(ir
->callee
->return_precision
== GLSL_PRECISION_NONE
);
514 /* Number of parameters to check if they are lowerable. */
515 unsigned check_parameters
= ir
->actual_parameters
.length();
517 /* Interpolation functions only consider the precision of the interpolant. */
518 /* Bitfield functions ignore the precision of "offset" and "bits". */
519 if (!strcmp(ir
->callee_name(), "interpolateAtOffset") ||
520 !strcmp(ir
->callee_name(), "interpolateAtSample") ||
521 !strcmp(ir
->callee_name(), "bitfieldExtract")) {
522 check_parameters
= 1;
523 } else if (!strcmp(ir
->callee_name(), "bitfieldInsert")) {
524 check_parameters
= 2;
525 } if (function_always_returns_mediump_or_lowp(ir
->callee_name())) {
526 /* These only lower the return value. Parameters keep their precision,
527 * which is preserved in map_builtin.
529 check_parameters
= 0;
532 foreach_in_list(ir_rvalue
, param
, &ir
->actual_parameters
) {
533 if (!check_parameters
)
536 if (!param
->as_constant() &&
537 _mesa_set_search(lowerable_rvalues
, param
) == NULL
)
547 find_lowerable_rvalues_visitor::visit_leave(ir_call
*ir
)
549 ir_hierarchical_visitor::visit_leave(ir
);
551 /* Special case for handling temporary variables generated by the compiler
552 * for function calls. If we assign to one of these using a function call
553 * that has a lowerable return type then we can assume the temporary
554 * variable should have a medium precision too.
557 /* Do nothing if the return type is void. */
558 if (!ir
->return_deref
)
559 return visit_continue
;
561 ir_variable
*var
= ir
->return_deref
->variable_referenced();
563 assert(var
->data
.mode
== ir_var_temporary
);
565 unsigned return_precision
= ir
->callee
->return_precision
;
567 /* If the call is to a builtin, then the function won’t have a return
568 * precision and we should determine it from the precision of the arguments.
570 if (is_lowerable_builtin(ir
, lowerable_rvalues
))
571 return_precision
= GLSL_PRECISION_MEDIUM
;
573 can_lower_state lower_state
=
574 handle_precision(var
->type
, return_precision
);
576 if (lower_state
== SHOULD_LOWER
) {
577 /* There probably shouldn’t be any situations where multiple ir_call
578 * instructions write to the same temporary?
580 assert(var
->data
.precision
== GLSL_PRECISION_NONE
);
581 var
->data
.precision
= GLSL_PRECISION_MEDIUM
;
583 var
->data
.precision
= GLSL_PRECISION_HIGH
;
586 return visit_continue
;
590 find_lowerable_rvalues_visitor::visit_leave(ir_assignment
*ir
)
592 ir_hierarchical_visitor::visit_leave(ir
);
594 /* Special case for handling temporary variables generated by the compiler.
595 * If we assign to one of these using a lowered precision then we can assume
596 * the temporary variable should have a medium precision too.
598 ir_variable
*var
= ir
->lhs
->variable_referenced();
600 if (var
->data
.mode
== ir_var_temporary
) {
601 if (_mesa_set_search(lowerable_rvalues
, ir
->rhs
)) {
602 /* Only override the precision if this is the first assignment. For
603 * temporaries such as the ones generated for the ?: operator there
604 * can be multiple assignments with different precisions. This way we
605 * get the highest precision of all of the assignments.
607 if (var
->data
.precision
== GLSL_PRECISION_NONE
)
608 var
->data
.precision
= GLSL_PRECISION_MEDIUM
;
609 } else if (!ir
->rhs
->as_constant()) {
610 var
->data
.precision
= GLSL_PRECISION_HIGH
;
614 return visit_continue
;
618 find_lowerable_rvalues(const struct gl_shader_compiler_options
*options
,
619 exec_list
*instructions
,
622 find_lowerable_rvalues_visitor
v(result
, options
);
624 visit_list_elements(&v
, instructions
);
626 assert(v
.stack
.empty());
629 static const glsl_type
*
630 convert_type(bool up
, const glsl_type
*type
)
632 if (type
->is_array()) {
633 return glsl_type::get_array_instance(convert_type(up
, type
->fields
.array
),
635 type
->explicit_stride
);
638 glsl_base_type new_base_type
;
641 switch (type
->base_type
) {
642 case GLSL_TYPE_FLOAT16
:
643 new_base_type
= GLSL_TYPE_FLOAT
;
645 case GLSL_TYPE_INT16
:
646 new_base_type
= GLSL_TYPE_INT
;
648 case GLSL_TYPE_UINT16
:
649 new_base_type
= GLSL_TYPE_UINT
;
652 unreachable("invalid type");
656 switch (type
->base_type
) {
657 case GLSL_TYPE_FLOAT
:
658 new_base_type
= GLSL_TYPE_FLOAT16
;
661 new_base_type
= GLSL_TYPE_INT16
;
664 new_base_type
= GLSL_TYPE_UINT16
;
667 unreachable("invalid type");
672 return glsl_type::get_instance(new_base_type
,
673 type
->vector_elements
,
674 type
->matrix_columns
,
675 type
->explicit_stride
,
676 type
->interface_row_major
);
679 static const glsl_type
*
680 lower_glsl_type(const glsl_type
*type
)
682 return convert_type(false, type
);
686 convert_precision(bool up
, ir_rvalue
*ir
)
691 switch (ir
->type
->without_array()->base_type
) {
692 case GLSL_TYPE_FLOAT16
:
695 case GLSL_TYPE_INT16
:
698 case GLSL_TYPE_UINT16
:
702 unreachable("invalid type");
706 switch (ir
->type
->without_array()->base_type
) {
707 case GLSL_TYPE_FLOAT
:
717 unreachable("invalid type");
722 const glsl_type
*desired_type
= convert_type(up
, ir
->type
);
723 void *mem_ctx
= ralloc_parent(ir
);
724 return new(mem_ctx
) ir_expression(op
, desired_type
, ir
, NULL
);
728 lower_precision_visitor::handle_rvalue(ir_rvalue
**rvalue
)
730 ir_rvalue
*ir
= *rvalue
;
735 if (ir
->as_dereference()) {
736 if (!ir
->type
->is_boolean())
737 *rvalue
= convert_precision(false, ir
);
738 } else if (ir
->type
->is_32bit()) {
739 ir
->type
= lower_glsl_type(ir
->type
);
741 ir_constant
*const_ir
= ir
->as_constant();
744 ir_constant_data value
;
746 if (ir
->type
->base_type
== GLSL_TYPE_FLOAT16
) {
747 for (unsigned i
= 0; i
< ARRAY_SIZE(value
.f16
); i
++)
748 value
.f16
[i
] = _mesa_float_to_half(const_ir
->value
.f
[i
]);
749 } else if (ir
->type
->base_type
== GLSL_TYPE_INT16
) {
750 for (unsigned i
= 0; i
< ARRAY_SIZE(value
.i16
); i
++)
751 value
.i16
[i
] = const_ir
->value
.i
[i
];
752 } else if (ir
->type
->base_type
== GLSL_TYPE_UINT16
) {
753 for (unsigned i
= 0; i
< ARRAY_SIZE(value
.u16
); i
++)
754 value
.u16
[i
] = const_ir
->value
.u
[i
];
756 unreachable("invalid type");
759 const_ir
->value
= value
;
765 lower_precision_visitor::visit_enter(ir_dereference_record
*ir
)
767 /* We don’t want to lower the variable */
768 return visit_continue_with_parent
;
772 lower_precision_visitor::visit_enter(ir_dereference_array
*ir
)
774 /* We don’t want to convert the array index or the variable. If the array
775 * index itself is lowerable that will be handled separately.
777 return visit_continue_with_parent
;
781 lower_precision_visitor::visit_enter(ir_call
*ir
)
783 /* We don’t want to convert the arguments. These will be handled separately.
785 return visit_continue_with_parent
;
789 lower_precision_visitor::visit_enter(ir_texture
*ir
)
791 /* We don’t want to convert the arguments. These will be handled separately.
793 return visit_continue_with_parent
;
797 lower_precision_visitor::visit_leave(ir_expression
*ir
)
799 ir_rvalue_visitor::visit_leave(ir
);
801 /* If the expression is a conversion operation to or from bool then fix the
804 switch (ir
->operation
) {
806 ir
->operation
= ir_unop_b2f16
;
809 ir
->operation
= ir_unop_f162b
;
813 /* Nothing to do - they both support int16. */
819 return visit_continue
;
823 find_precision_visitor::handle_rvalue(ir_rvalue
**rvalue
)
825 /* Checking the precision of rvalue can be lowered first throughout
826 * find_lowerable_rvalues_visitor.
827 * Once it found the precision of rvalue can be lowered, then we can
828 * add conversion f2fmp, etc. through lower_precision_visitor.
833 struct set_entry
*entry
= _mesa_set_search(lowerable_rvalues
, *rvalue
);
838 _mesa_set_remove(lowerable_rvalues
, entry
);
840 /* If the entire expression is just a variable dereference then trying to
841 * lower it will just directly add pointless to and from conversions without
842 * any actual operation in-between. Although these will eventually get
843 * optimised out, avoiding generating them here also avoids breaking inout
844 * parameters to functions.
846 if ((*rvalue
)->as_dereference())
849 lower_precision_visitor v
;
851 (*rvalue
)->accept(&v
);
852 v
.handle_rvalue(rvalue
);
854 /* We don’t need to add the final conversion if the final type has been
857 if ((*rvalue
)->type
->base_type
!= GLSL_TYPE_BOOL
) {
858 *rvalue
= convert_precision(true, *rvalue
);
863 find_precision_visitor::visit_enter(ir_call
*ir
)
865 ir_rvalue_enter_visitor::visit_enter(ir
);
867 ir_variable
*return_var
=
868 ir
->return_deref
? ir
->return_deref
->variable_referenced() : NULL
;
870 /* Don't do anything for image_load here. We have only changed the return
871 * value to mediump/lowp, so that following instructions can use reduced
874 * The return value type of the intrinsic itself isn't changed here, but
875 * can be changed in NIR if all users use the *2*mp opcode.
877 if (ir
->callee
->intrinsic_id
== ir_intrinsic_image_load
)
878 return visit_continue
;
880 /* If this is a call to a builtin and the find_lowerable_rvalues_visitor
881 * overrode the precision of the temporary return variable, then we can
882 * replace the builtin implementation with a lowered version.
885 if (!ir
->callee
->is_builtin() ||
886 ir
->callee
->is_intrinsic() ||
887 return_var
== NULL
||
888 (return_var
->data
.precision
!= GLSL_PRECISION_MEDIUM
&&
889 return_var
->data
.precision
!= GLSL_PRECISION_LOW
))
890 return visit_continue
;
892 ir
->callee
= map_builtin(ir
->callee
);
893 ir
->generate_inline(ir
);
896 return visit_continue_with_parent
;
899 ir_function_signature
*
900 find_precision_visitor::map_builtin(ir_function_signature
*sig
)
902 if (lowered_builtins
== NULL
) {
903 lowered_builtins
= _mesa_pointer_hash_table_create(NULL
);
904 clone_ht
=_mesa_pointer_hash_table_create(NULL
);
905 lowered_builtin_mem_ctx
= ralloc_context(NULL
);
907 struct hash_entry
*entry
= _mesa_hash_table_search(lowered_builtins
, sig
);
909 return (ir_function_signature
*) entry
->data
;
912 ir_function_signature
*lowered_sig
=
913 sig
->clone(lowered_builtin_mem_ctx
, clone_ht
);
915 /* Functions that always return mediump or lowp should keep their
916 * parameters intact, because they can be highp. NIR can lower
917 * the up-conversion for parameters if needed.
919 if (!function_always_returns_mediump_or_lowp(sig
->function_name())) {
920 foreach_in_list(ir_variable
, param
, &lowered_sig
->parameters
) {
921 param
->data
.precision
= GLSL_PRECISION_MEDIUM
;
925 lower_precision(options
, &lowered_sig
->body
);
927 _mesa_hash_table_clear(clone_ht
, NULL
);
929 _mesa_hash_table_insert(lowered_builtins
, sig
, lowered_sig
);
934 find_precision_visitor::find_precision_visitor(const struct gl_shader_compiler_options
*options
)
935 : lowerable_rvalues(_mesa_pointer_set_create(NULL
)),
936 lowered_builtins(NULL
),
938 lowered_builtin_mem_ctx(NULL
),
943 find_precision_visitor::~find_precision_visitor()
945 _mesa_set_destroy(lowerable_rvalues
, NULL
);
947 if (lowered_builtins
) {
948 _mesa_hash_table_destroy(lowered_builtins
, NULL
);
949 _mesa_hash_table_destroy(clone_ht
, NULL
);
950 ralloc_free(lowered_builtin_mem_ctx
);
954 /* Lowering opcodes to 16 bits is not enough for programs with control flow
955 * (and the ?: operator, which is represented by if-then-else in the IR),
956 * because temporary variables, which are used for passing values between
957 * code blocks, are not lowered, resulting in 32-bit phis in NIR.
959 * First change the variable types to 16 bits, then change all ir_dereference
962 class lower_variables_visitor
: public ir_rvalue_enter_visitor
{
964 lower_variables_visitor(const struct gl_shader_compiler_options
*options
)
966 lower_vars
= _mesa_pointer_set_create(NULL
);
969 virtual ~lower_variables_visitor()
971 _mesa_set_destroy(lower_vars
, NULL
);
974 virtual ir_visitor_status
visit(ir_variable
*var
);
975 virtual ir_visitor_status
visit_enter(ir_assignment
*ir
);
976 virtual ir_visitor_status
visit_enter(ir_return
*ir
);
977 virtual ir_visitor_status
visit_enter(ir_call
*ir
);
978 virtual void handle_rvalue(ir_rvalue
**rvalue
);
980 void fix_types_in_deref_chain(ir_dereference
*ir
);
981 void convert_split_assignment(ir_dereference
*lhs
, ir_rvalue
*rhs
,
984 const struct gl_shader_compiler_options
*options
;
989 lower_constant(ir_constant
*ir
)
991 if (ir
->type
->is_array()) {
992 for (int i
= 0; i
< ir
->type
->array_size(); i
++)
993 lower_constant(ir
->get_array_element(i
));
995 ir
->type
= lower_glsl_type(ir
->type
);
999 ir
->type
= lower_glsl_type(ir
->type
);
1000 ir_constant_data value
;
1002 if (ir
->type
->base_type
== GLSL_TYPE_FLOAT16
) {
1003 for (unsigned i
= 0; i
< ARRAY_SIZE(value
.f16
); i
++)
1004 value
.f16
[i
] = _mesa_float_to_half(ir
->value
.f
[i
]);
1005 } else if (ir
->type
->base_type
== GLSL_TYPE_INT16
) {
1006 for (unsigned i
= 0; i
< ARRAY_SIZE(value
.i16
); i
++)
1007 value
.i16
[i
] = ir
->value
.i
[i
];
1008 } else if (ir
->type
->base_type
== GLSL_TYPE_UINT16
) {
1009 for (unsigned i
= 0; i
< ARRAY_SIZE(value
.u16
); i
++)
1010 value
.u16
[i
] = ir
->value
.u
[i
];
1012 unreachable("invalid type");
1019 lower_variables_visitor::visit(ir_variable
*var
)
1021 if ((var
->data
.mode
!= ir_var_temporary
&&
1022 var
->data
.mode
!= ir_var_auto
) ||
1023 !var
->type
->without_array()->is_32bit() ||
1024 (var
->data
.precision
!= GLSL_PRECISION_MEDIUM
&&
1025 var
->data
.precision
!= GLSL_PRECISION_LOW
) ||
1026 !can_lower_type(options
, var
->type
))
1027 return visit_continue
;
1029 /* Lower constant initializers. */
1030 if (var
->constant_value
&&
1031 var
->type
== var
->constant_value
->type
) {
1032 if (!options
->LowerPrecisionConstants
)
1033 return visit_continue
;
1034 var
->constant_value
=
1035 var
->constant_value
->clone(ralloc_parent(var
), NULL
);
1036 lower_constant(var
->constant_value
);
1039 if (var
->constant_initializer
&&
1040 var
->type
== var
->constant_initializer
->type
) {
1041 if (!options
->LowerPrecisionConstants
)
1042 return visit_continue
;
1043 var
->constant_initializer
=
1044 var
->constant_initializer
->clone(ralloc_parent(var
), NULL
);
1045 lower_constant(var
->constant_initializer
);
1048 var
->type
= lower_glsl_type(var
->type
);
1049 _mesa_set_add(lower_vars
, var
);
1051 return visit_continue
;
1055 lower_variables_visitor::fix_types_in_deref_chain(ir_dereference
*ir
)
1057 assert(ir
->type
->without_array()->is_32bit());
1058 assert(_mesa_set_search(lower_vars
, ir
->variable_referenced()));
1060 /* Fix the type in the dereference node. */
1061 ir
->type
= lower_glsl_type(ir
->type
);
1063 /* If it's an array, fix the types in the whole dereference chain. */
1064 for (ir_dereference_array
*deref_array
= ir
->as_dereference_array();
1066 deref_array
= deref_array
->array
->as_dereference_array()) {
1067 assert(deref_array
->array
->type
->without_array()->is_32bit());
1068 deref_array
->array
->type
= lower_glsl_type(deref_array
->array
->type
);
1073 lower_variables_visitor::convert_split_assignment(ir_dereference
*lhs
,
1077 void *mem_ctx
= ralloc_parent(lhs
);
1079 if (lhs
->type
->is_array()) {
1080 for (unsigned i
= 0; i
< lhs
->type
->length
; i
++) {
1081 ir_dereference
*l
, *r
;
1083 l
= new(mem_ctx
) ir_dereference_array(lhs
->clone(mem_ctx
, NULL
),
1084 new(mem_ctx
) ir_constant(i
));
1085 r
= new(mem_ctx
) ir_dereference_array(rhs
->clone(mem_ctx
, NULL
),
1086 new(mem_ctx
) ir_constant(i
));
1087 convert_split_assignment(l
, r
, insert_before
);
1092 assert(lhs
->type
->is_16bit() || lhs
->type
->is_32bit());
1093 assert(rhs
->type
->is_16bit() || rhs
->type
->is_32bit());
1094 assert(lhs
->type
->is_16bit() != rhs
->type
->is_16bit());
1096 ir_assignment
*assign
=
1097 new(mem_ctx
) ir_assignment(lhs
, convert_precision(lhs
->type
->is_32bit(), rhs
));
1100 base_ir
->insert_before(assign
);
1102 base_ir
->insert_after(assign
);
1106 lower_variables_visitor::visit_enter(ir_assignment
*ir
)
1108 ir_dereference
*lhs
= ir
->lhs
;
1109 ir_variable
*var
= lhs
->variable_referenced();
1110 ir_dereference
*rhs_deref
= ir
->rhs
->as_dereference();
1111 ir_variable
*rhs_var
= rhs_deref
? rhs_deref
->variable_referenced() : NULL
;
1112 ir_constant
*rhs_const
= ir
->rhs
->as_constant();
1114 /* Legalize array assignments between lowered and non-lowered variables. */
1115 if (lhs
->type
->is_array() &&
1116 (rhs_var
|| rhs_const
) &&
1119 var
->type
->without_array()->is_16bit() !=
1120 rhs_var
->type
->without_array()->is_16bit())) &&
1123 var
->type
->without_array()->is_16bit() &&
1124 rhs_const
->type
->without_array()->is_32bit()))) {
1125 assert(ir
->rhs
->type
->is_array());
1127 /* Fix array assignments from lowered to non-lowered. */
1128 if (rhs_var
&& _mesa_set_search(lower_vars
, rhs_var
)) {
1129 fix_types_in_deref_chain(rhs_deref
);
1130 /* Convert to 32 bits for LHS. */
1131 convert_split_assignment(lhs
, rhs_deref
, true);
1133 return visit_continue
;
1136 /* Fix array assignments from non-lowered to lowered. */
1138 _mesa_set_search(lower_vars
, var
) &&
1139 ir
->rhs
->type
->without_array()->is_32bit()) {
1140 fix_types_in_deref_chain(lhs
);
1141 /* Convert to 16 bits for LHS. */
1142 convert_split_assignment(lhs
, ir
->rhs
, true);
1144 return visit_continue
;
1148 /* Fix assignment types. */
1150 _mesa_set_search(lower_vars
, var
)) {
1151 /* Fix the LHS type. */
1152 if (lhs
->type
->without_array()->is_32bit())
1153 fix_types_in_deref_chain(lhs
);
1155 /* Fix the RHS type if it's a lowered variable. */
1157 _mesa_set_search(lower_vars
, rhs_var
) &&
1158 rhs_deref
->type
->without_array()->is_32bit())
1159 fix_types_in_deref_chain(rhs_deref
);
1161 /* Fix the RHS type if it's a non-array expression. */
1162 if (ir
->rhs
->type
->is_32bit()) {
1163 ir_expression
*expr
= ir
->rhs
->as_expression();
1165 /* Convert the RHS to the LHS type. */
1167 (expr
->operation
== ir_unop_f162f
||
1168 expr
->operation
== ir_unop_i2i
||
1169 expr
->operation
== ir_unop_u2u
) &&
1170 expr
->operands
[0]->type
->is_16bit()) {
1171 /* If there is an "up" conversion, just remove it.
1172 * This is optional. We could as well execute the else statement and
1173 * let NIR eliminate the up+down conversions.
1175 ir
->rhs
= expr
->operands
[0];
1177 /* Add a "down" conversion operation to fix the type of RHS. */
1178 ir
->rhs
= convert_precision(false, ir
->rhs
);
1183 return ir_rvalue_enter_visitor::visit_enter(ir
);
1187 lower_variables_visitor::visit_enter(ir_return
*ir
)
1189 void *mem_ctx
= ralloc_parent(ir
);
1191 ir_dereference
*deref
= ir
->value
? ir
->value
->as_dereference() : NULL
;
1193 ir_variable
*var
= deref
->variable_referenced();
1195 /* Fix the type of the return value. */
1197 _mesa_set_search(lower_vars
, var
) &&
1198 deref
->type
->without_array()->is_32bit()) {
1199 /* Create a 32-bit temporary variable. */
1200 ir_variable
*new_var
=
1201 new(mem_ctx
) ir_variable(deref
->type
, "lowerp", ir_var_temporary
);
1202 base_ir
->insert_before(new_var
);
1204 /* Fix types in dereferences. */
1205 fix_types_in_deref_chain(deref
);
1207 /* Convert to 32 bits for the return value. */
1208 convert_split_assignment(new(mem_ctx
) ir_dereference_variable(new_var
),
1210 ir
->value
= new(mem_ctx
) ir_dereference_variable(new_var
);
1214 return ir_rvalue_enter_visitor::visit_enter(ir
);
1217 void lower_variables_visitor::handle_rvalue(ir_rvalue
**rvalue
)
1219 ir_rvalue
*ir
= *rvalue
;
1221 if (in_assignee
|| ir
== NULL
)
1224 ir_expression
*expr
= ir
->as_expression();
1225 ir_dereference
*expr_op0_deref
= expr
? expr
->operands
[0]->as_dereference() : NULL
;
1227 /* Remove f2fmp(float16). Same for int16 and uint16. */
1230 (expr
->operation
== ir_unop_f2fmp
||
1231 expr
->operation
== ir_unop_i2imp
||
1232 expr
->operation
== ir_unop_u2ump
||
1233 expr
->operation
== ir_unop_f2f16
||
1234 expr
->operation
== ir_unop_i2i
||
1235 expr
->operation
== ir_unop_u2u
) &&
1236 expr
->type
->without_array()->is_16bit() &&
1237 expr_op0_deref
->type
->without_array()->is_32bit() &&
1238 expr_op0_deref
->variable_referenced() &&
1239 _mesa_set_search(lower_vars
, expr_op0_deref
->variable_referenced())) {
1240 fix_types_in_deref_chain(expr_op0_deref
);
1242 /* Remove f2fmp/i2imp/u2ump. */
1243 *rvalue
= expr_op0_deref
;
1247 ir_dereference
*deref
= ir
->as_dereference();
1250 ir_variable
*var
= deref
->variable_referenced();
1252 /* var can be NULL if we are dereferencing ir_constant. */
1254 _mesa_set_search(lower_vars
, var
) &&
1255 deref
->type
->without_array()->is_32bit()) {
1256 fix_types_in_deref_chain(deref
);
1258 /* Then convert the type up. Optimizations should eliminate this. */
1259 *rvalue
= convert_precision(true, deref
);
1265 lower_variables_visitor::visit_enter(ir_call
*ir
)
1267 void *mem_ctx
= ralloc_parent(ir
);
1269 /* We can't pass 16-bit variables as 32-bit inout/out parameters. */
1270 foreach_two_lists(formal_node
, &ir
->callee
->parameters
,
1271 actual_node
, &ir
->actual_parameters
) {
1272 ir_dereference
*param_deref
=
1273 ((ir_rvalue
*)actual_node
)->as_dereference();
1274 ir_variable
*param
= (ir_variable
*)formal_node
;
1279 ir_variable
*var
= param_deref
->variable_referenced();
1281 /* var can be NULL if we are dereferencing ir_constant. */
1283 _mesa_set_search(lower_vars
, var
) &&
1284 param
->type
->without_array()->is_32bit()) {
1285 fix_types_in_deref_chain(param_deref
);
1287 /* Create a 32-bit temporary variable for the parameter. */
1288 ir_variable
*new_var
=
1289 new(mem_ctx
) ir_variable(param
->type
, "lowerp", ir_var_temporary
);
1290 base_ir
->insert_before(new_var
);
1292 /* Replace the parameter. */
1293 actual_node
->replace_with(new(mem_ctx
) ir_dereference_variable(new_var
));
1295 if (param
->data
.mode
== ir_var_function_in
||
1296 param
->data
.mode
== ir_var_function_inout
) {
1297 /* Convert to 32 bits for passing in. */
1298 convert_split_assignment(new(mem_ctx
) ir_dereference_variable(new_var
),
1299 param_deref
->clone(mem_ctx
, NULL
), true);
1301 if (param
->data
.mode
== ir_var_function_out
||
1302 param
->data
.mode
== ir_var_function_inout
) {
1303 /* Convert to 16 bits after returning. */
1304 convert_split_assignment(param_deref
,
1305 new(mem_ctx
) ir_dereference_variable(new_var
),
1311 /* Fix the type of return value dereferencies. */
1312 ir_dereference_variable
*ret_deref
= ir
->return_deref
;
1313 ir_variable
*ret_var
= ret_deref
? ret_deref
->variable_referenced() : NULL
;
1316 _mesa_set_search(lower_vars
, ret_var
) &&
1317 ret_deref
->type
->without_array()->is_32bit()) {
1318 /* Create a 32-bit temporary variable. */
1319 ir_variable
*new_var
=
1320 new(mem_ctx
) ir_variable(ir
->callee
->return_type
, "lowerp",
1322 base_ir
->insert_before(new_var
);
1324 /* Replace the return variable. */
1325 ret_deref
->var
= new_var
;
1327 /* Convert to 16 bits after returning. */
1328 convert_split_assignment(new(mem_ctx
) ir_dereference_variable(ret_var
),
1329 new(mem_ctx
) ir_dereference_variable(new_var
),
1333 return ir_rvalue_enter_visitor::visit_enter(ir
);
1339 lower_precision(const struct gl_shader_compiler_options
*options
,
1340 exec_list
*instructions
)
1342 find_precision_visitor
v(options
);
1343 find_lowerable_rvalues(options
, instructions
, v
.lowerable_rvalues
);
1344 visit_list_elements(&v
, instructions
);
1346 lower_variables_visitor
vars(options
);
1347 visit_list_elements(&vars
, instructions
);