2 * Copyright © 2019 Google, Inc
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * \file lower_precision.cpp
#include "main/macros.h"
#include "main/mtypes.h"
#include "compiler/glsl_types.h"
#include "ir.h"
#include "ir_builder.h"
#include "ir_optimization.h"
#include "ir_rvalue_visitor.h"
#include "util/half_float.h"
#include "util/set.h"
#include "util/hash_table.h"
#include <vector>
42 class find_precision_visitor
: public ir_rvalue_enter_visitor
{
44 find_precision_visitor(const struct gl_shader_compiler_options
*options
);
45 ~find_precision_visitor();
47 virtual void handle_rvalue(ir_rvalue
**rvalue
);
48 virtual ir_visitor_status
visit_enter(ir_call
*ir
);
50 ir_function_signature
*map_builtin(ir_function_signature
*sig
);
54 /* Set of rvalues that can be lowered. This will be filled in by
55 * find_lowerable_rvalues_visitor. Only the root node of a lowerable section
56 * will be added to this set.
58 struct set
*lowerable_rvalues
;
61 * A mapping of builtin signature functions to lowered versions. This is
62 * filled in lazily when a lowered version is needed.
64 struct hash_table
*lowered_builtins
;
66 * A temporary hash table only used in order to clone functions.
68 struct hash_table
*clone_ht
;
70 void *lowered_builtin_mem_ctx
;
72 const struct gl_shader_compiler_options
*options
;
75 class find_lowerable_rvalues_visitor
: public ir_hierarchical_visitor
{
77 enum can_lower_state
{
83 enum parent_relation
{
84 /* The parent performs a further operation involving the result from the
85 * child and can be lowered along with it.
88 /* The parent instruction’s operation is independent of the child type so
89 * the child should be lowered separately.
91 INDEPENDENT_OPERATION
,
95 ir_instruction
*instr
;
96 enum can_lower_state state
;
97 /* List of child rvalues that can be lowered. When this stack entry is
98 * popped, if this node itself can’t be lowered than all of the children
99 * are root nodes to lower so we will add them to lowerable_rvalues.
100 * Otherwise if this node can also be lowered then we won’t add the
101 * children because we only want to add the topmost lowerable nodes to
102 * lowerable_rvalues and the children will be lowered as part of lowering
105 std::vector
<ir_instruction
*> lowerable_children
;
108 find_lowerable_rvalues_visitor(struct set
*result
,
109 const struct gl_shader_compiler_options
*options
);
110 bool can_lower_type(const glsl_type
*type
) const;
112 static void stack_enter(class ir_instruction
*ir
, void *data
);
113 static void stack_leave(class ir_instruction
*ir
, void *data
);
115 virtual ir_visitor_status
visit(ir_constant
*ir
);
116 virtual ir_visitor_status
visit(ir_dereference_variable
*ir
);
118 virtual ir_visitor_status
visit_enter(ir_dereference_record
*ir
);
119 virtual ir_visitor_status
visit_enter(ir_dereference_array
*ir
);
120 virtual ir_visitor_status
visit_enter(ir_texture
*ir
);
121 virtual ir_visitor_status
visit_enter(ir_expression
*ir
);
123 virtual ir_visitor_status
visit_leave(ir_assignment
*ir
);
124 virtual ir_visitor_status
visit_leave(ir_call
*ir
);
126 can_lower_state
handle_precision(const glsl_type
*type
,
127 int precision
) const;
129 static parent_relation
get_parent_relation(ir_instruction
*parent
,
130 ir_instruction
*child
);
132 std::vector
<stack_entry
> stack
;
133 struct set
*lowerable_rvalues
;
134 const struct gl_shader_compiler_options
*options
;
136 void pop_stack_entry();
137 void add_lowerable_children(const stack_entry
&entry
);
140 class lower_precision_visitor
: public ir_rvalue_visitor
{
142 virtual void handle_rvalue(ir_rvalue
**rvalue
);
143 virtual ir_visitor_status
visit_enter(ir_dereference_array
*);
144 virtual ir_visitor_status
visit_enter(ir_dereference_record
*);
145 virtual ir_visitor_status
visit_enter(ir_call
*ir
);
146 virtual ir_visitor_status
visit_enter(ir_texture
*ir
);
147 virtual ir_visitor_status
visit_leave(ir_expression
*);
151 find_lowerable_rvalues_visitor::can_lower_type(const glsl_type
*type
) const
153 /* Don’t lower any expressions involving non-float types except bool and
154 * texture samplers. This will rule out operations that change the type such
155 * as conversion to ints. Instead it will end up lowering the arguments
156 * instead and adding a final conversion to float32. We want to handle
157 * boolean types so that it will do comparisons as 16-bit.
160 switch (type
->base_type
) {
161 /* TODO: should we do anything for these two with regard to Int16 vs FP16
165 case GLSL_TYPE_SAMPLER
:
168 case GLSL_TYPE_FLOAT
:
169 return options
->LowerPrecisionFloat16
;
173 return options
->LowerPrecisionInt16
;
180 find_lowerable_rvalues_visitor::find_lowerable_rvalues_visitor(struct set
*res
,
181 const struct gl_shader_compiler_options
*opts
)
183 lowerable_rvalues
= res
;
185 callback_enter
= stack_enter
;
186 callback_leave
= stack_leave
;
192 find_lowerable_rvalues_visitor::stack_enter(class ir_instruction
*ir
,
195 find_lowerable_rvalues_visitor
*state
=
196 (find_lowerable_rvalues_visitor
*) data
;
198 /* Add a new stack entry for this instruction */
202 entry
.state
= state
->in_assignee
? CANT_LOWER
: UNKNOWN
;
204 state
->stack
.push_back(entry
);
208 find_lowerable_rvalues_visitor::add_lowerable_children(const stack_entry
&entry
)
210 /* We can’t lower this node so if there were any pending children then they
211 * are all root lowerable nodes and we should add them to the set.
213 for (auto &it
: entry
.lowerable_children
)
214 _mesa_set_add(lowerable_rvalues
, it
);
218 find_lowerable_rvalues_visitor::pop_stack_entry()
220 const stack_entry
&entry
= stack
.back();
222 if (stack
.size() >= 2) {
223 /* Combine this state into the parent state, unless the parent operation
224 * doesn’t have any relation to the child operations
226 stack_entry
&parent
= stack
.end()[-2];
227 parent_relation rel
= get_parent_relation(parent
.instr
, entry
.instr
);
229 if (rel
== COMBINED_OPERATION
) {
230 switch (entry
.state
) {
232 parent
.state
= CANT_LOWER
;
235 if (parent
.state
== UNKNOWN
)
236 parent
.state
= SHOULD_LOWER
;
244 if (entry
.state
== SHOULD_LOWER
) {
245 ir_rvalue
*rv
= entry
.instr
->as_rvalue();
248 add_lowerable_children(entry
);
249 } else if (stack
.size() >= 2) {
250 stack_entry
&parent
= stack
.end()[-2];
252 switch (get_parent_relation(parent
.instr
, rv
)) {
253 case COMBINED_OPERATION
:
254 /* We only want to add the toplevel lowerable instructions to the
255 * lowerable set. Therefore if there is a parent then instead of
256 * adding this instruction to the set we will queue depending on
257 * the result of the parent instruction.
259 parent
.lowerable_children
.push_back(entry
.instr
);
261 case INDEPENDENT_OPERATION
:
262 _mesa_set_add(lowerable_rvalues
, rv
);
266 /* This is a toplevel node so add it directly to the lowerable
269 _mesa_set_add(lowerable_rvalues
, rv
);
271 } else if (entry
.state
== CANT_LOWER
) {
272 add_lowerable_children(entry
);
279 find_lowerable_rvalues_visitor::stack_leave(class ir_instruction
*ir
,
282 find_lowerable_rvalues_visitor
*state
=
283 (find_lowerable_rvalues_visitor
*) data
;
285 state
->pop_stack_entry();
288 enum find_lowerable_rvalues_visitor::can_lower_state
289 find_lowerable_rvalues_visitor::handle_precision(const glsl_type
*type
,
292 if (!can_lower_type(type
))
296 case GLSL_PRECISION_NONE
:
298 case GLSL_PRECISION_HIGH
:
300 case GLSL_PRECISION_MEDIUM
:
301 case GLSL_PRECISION_LOW
:
308 enum find_lowerable_rvalues_visitor::parent_relation
309 find_lowerable_rvalues_visitor::get_parent_relation(ir_instruction
*parent
,
310 ir_instruction
*child
)
312 /* If the parent is a dereference instruction then the only child could be
313 * for example an array dereference and that should be lowered independently
316 if (parent
->as_dereference())
317 return INDEPENDENT_OPERATION
;
319 /* The precision of texture sampling depend on the precision of the sampler.
320 * The rest of the arguments don’t matter so we can treat it as an
321 * independent operation.
323 if (parent
->as_texture())
324 return INDEPENDENT_OPERATION
;
326 return COMBINED_OPERATION
;
330 find_lowerable_rvalues_visitor::visit(ir_constant
*ir
)
332 stack_enter(ir
, this);
334 if (!can_lower_type(ir
->type
))
335 stack
.back().state
= CANT_LOWER
;
337 stack_leave(ir
, this);
339 return visit_continue
;
343 find_lowerable_rvalues_visitor::visit(ir_dereference_variable
*ir
)
345 stack_enter(ir
, this);
347 if (stack
.back().state
== UNKNOWN
)
348 stack
.back().state
= handle_precision(ir
->type
, ir
->precision());
350 stack_leave(ir
, this);
352 return visit_continue
;
356 find_lowerable_rvalues_visitor::visit_enter(ir_dereference_record
*ir
)
358 ir_hierarchical_visitor::visit_enter(ir
);
360 if (stack
.back().state
== UNKNOWN
)
361 stack
.back().state
= handle_precision(ir
->type
, ir
->precision());
363 return visit_continue
;
367 find_lowerable_rvalues_visitor::visit_enter(ir_dereference_array
*ir
)
369 ir_hierarchical_visitor::visit_enter(ir
);
371 if (stack
.back().state
== UNKNOWN
)
372 stack
.back().state
= handle_precision(ir
->type
, ir
->precision());
374 return visit_continue
;
378 find_lowerable_rvalues_visitor::visit_enter(ir_texture
*ir
)
380 ir_hierarchical_visitor::visit_enter(ir
);
382 if (stack
.back().state
== UNKNOWN
) {
383 /* The precision of the sample value depends on the precision of the
386 stack
.back().state
= handle_precision(ir
->type
,
387 ir
->sampler
->precision());
390 return visit_continue
;
394 find_lowerable_rvalues_visitor::visit_enter(ir_expression
*ir
)
396 ir_hierarchical_visitor::visit_enter(ir
);
398 if (!can_lower_type(ir
->type
))
399 stack
.back().state
= CANT_LOWER
;
401 /* Don't lower precision for derivative calculations */
402 if (ir
->operation
== ir_unop_dFdx
||
403 ir
->operation
== ir_unop_dFdx_coarse
||
404 ir
->operation
== ir_unop_dFdx_fine
||
405 ir
->operation
== ir_unop_dFdy
||
406 ir
->operation
== ir_unop_dFdy_coarse
||
407 ir
->operation
== ir_unop_dFdy_fine
) {
408 stack
.back().state
= CANT_LOWER
;
411 return visit_continue
;
415 is_lowerable_builtin(ir_call
*ir
,
416 const struct set
*lowerable_rvalues
)
418 if (!ir
->callee
->is_builtin())
421 assert(ir
->callee
->return_precision
== GLSL_PRECISION_NONE
);
423 foreach_in_list(ir_rvalue
, param
, &ir
->actual_parameters
) {
424 if (!param
->as_constant() &&
425 _mesa_set_search(lowerable_rvalues
, param
) == NULL
)
433 find_lowerable_rvalues_visitor::visit_leave(ir_call
*ir
)
435 ir_hierarchical_visitor::visit_leave(ir
);
437 /* Special case for handling temporary variables generated by the compiler
438 * for function calls. If we assign to one of these using a function call
439 * that has a lowerable return type then we can assume the temporary
440 * variable should have a medium precision too.
443 /* Do nothing if the return type is void. */
444 if (!ir
->return_deref
)
445 return visit_continue
;
447 ir_variable
*var
= ir
->return_deref
->variable_referenced();
449 assert(var
->data
.mode
== ir_var_temporary
);
451 unsigned return_precision
= ir
->callee
->return_precision
;
453 /* If the call is to a builtin, then the function won’t have a return
454 * precision and we should determine it from the precision of the arguments.
456 if (is_lowerable_builtin(ir
, lowerable_rvalues
))
457 return_precision
= GLSL_PRECISION_MEDIUM
;
459 can_lower_state lower_state
=
460 handle_precision(var
->type
, return_precision
);
462 if (lower_state
== SHOULD_LOWER
) {
463 /* There probably shouldn’t be any situations where multiple ir_call
464 * instructions write to the same temporary?
466 assert(var
->data
.precision
== GLSL_PRECISION_NONE
);
467 var
->data
.precision
= GLSL_PRECISION_MEDIUM
;
469 var
->data
.precision
= GLSL_PRECISION_HIGH
;
472 return visit_continue
;
476 find_lowerable_rvalues_visitor::visit_leave(ir_assignment
*ir
)
478 ir_hierarchical_visitor::visit_leave(ir
);
480 /* Special case for handling temporary variables generated by the compiler.
481 * If we assign to one of these using a lowered precision then we can assume
482 * the temporary variable should have a medium precision too.
484 ir_variable
*var
= ir
->lhs
->variable_referenced();
486 if (var
->data
.mode
== ir_var_temporary
) {
487 if (_mesa_set_search(lowerable_rvalues
, ir
->rhs
)) {
488 /* Only override the precision if this is the first assignment. For
489 * temporaries such as the ones generated for the ?: operator there
490 * can be multiple assignments with different precisions. This way we
491 * get the highest precision of all of the assignments.
493 if (var
->data
.precision
== GLSL_PRECISION_NONE
)
494 var
->data
.precision
= GLSL_PRECISION_MEDIUM
;
495 } else if (!ir
->rhs
->as_constant()) {
496 var
->data
.precision
= GLSL_PRECISION_HIGH
;
500 return visit_continue
;
504 find_lowerable_rvalues(const struct gl_shader_compiler_options
*options
,
505 exec_list
*instructions
,
508 find_lowerable_rvalues_visitor
v(result
, options
);
510 visit_list_elements(&v
, instructions
);
512 assert(v
.stack
.empty());
516 convert_precision(glsl_base_type type
, bool up
, ir_rvalue
*ir
)
518 unsigned new_type
, op
;
522 case GLSL_TYPE_FLOAT16
:
523 new_type
= GLSL_TYPE_FLOAT
;
526 case GLSL_TYPE_INT16
:
527 new_type
= GLSL_TYPE_INT
;
530 case GLSL_TYPE_UINT16
:
531 new_type
= GLSL_TYPE_UINT
;
535 unreachable("invalid type");
540 case GLSL_TYPE_FLOAT
:
541 new_type
= GLSL_TYPE_FLOAT16
;
545 new_type
= GLSL_TYPE_INT16
;
549 new_type
= GLSL_TYPE_UINT16
;
553 unreachable("invalid type");
558 const glsl_type
*desired_type
;
559 desired_type
= glsl_type::get_instance(new_type
,
560 ir
->type
->vector_elements
,
561 ir
->type
->matrix_columns
);
563 void *mem_ctx
= ralloc_parent(ir
);
564 return new(mem_ctx
) ir_expression(op
, desired_type
, ir
, NULL
);
567 static glsl_base_type
568 lower_type(glsl_base_type type
)
571 case GLSL_TYPE_FLOAT
:
572 return GLSL_TYPE_FLOAT16
;
574 return GLSL_TYPE_INT16
;
576 return GLSL_TYPE_UINT16
;
578 unreachable("invalid type");
579 return GLSL_TYPE_ERROR
;;
584 lower_precision_visitor::handle_rvalue(ir_rvalue
**rvalue
)
586 ir_rvalue
*ir
= *rvalue
;
591 if (ir
->as_dereference()) {
592 if (!ir
->type
->is_boolean())
593 *rvalue
= convert_precision(ir
->type
->base_type
, false, ir
);
594 } else if (ir
->type
->base_type
== GLSL_TYPE_FLOAT
||
595 ir
->type
->base_type
== GLSL_TYPE_INT
||
596 ir
->type
->base_type
== GLSL_TYPE_UINT
) {
597 ir
->type
= glsl_type::get_instance(lower_type(ir
->type
->base_type
),
598 ir
->type
->vector_elements
,
599 ir
->type
->matrix_columns
,
600 ir
->type
->explicit_stride
,
601 ir
->type
->interface_row_major
);
603 ir_constant
*const_ir
= ir
->as_constant();
606 ir_constant_data value
;
608 if (ir
->type
->base_type
== GLSL_TYPE_FLOAT16
) {
609 for (unsigned i
= 0; i
< ARRAY_SIZE(value
.f16
); i
++)
610 value
.f16
[i
] = _mesa_float_to_half(const_ir
->value
.f
[i
]);
611 } else if (ir
->type
->base_type
== GLSL_TYPE_INT16
) {
612 for (unsigned i
= 0; i
< ARRAY_SIZE(value
.i16
); i
++)
613 value
.i16
[i
] = const_ir
->value
.i
[i
];
614 } else if (ir
->type
->base_type
== GLSL_TYPE_UINT16
) {
615 for (unsigned i
= 0; i
< ARRAY_SIZE(value
.u16
); i
++)
616 value
.u16
[i
] = const_ir
->value
.u
[i
];
618 unreachable("invalid type");
621 const_ir
->value
= value
;
627 lower_precision_visitor::visit_enter(ir_dereference_record
*ir
)
629 /* We don’t want to lower the variable */
630 return visit_continue_with_parent
;
634 lower_precision_visitor::visit_enter(ir_dereference_array
*ir
)
636 /* We don’t want to convert the array index or the variable. If the array
637 * index itself is lowerable that will be handled separately.
639 return visit_continue_with_parent
;
643 lower_precision_visitor::visit_enter(ir_call
*ir
)
645 /* We don’t want to convert the arguments. These will be handled separately.
647 return visit_continue_with_parent
;
651 lower_precision_visitor::visit_enter(ir_texture
*ir
)
653 /* We don’t want to convert the arguments. These will be handled separately.
655 return visit_continue_with_parent
;
659 lower_precision_visitor::visit_leave(ir_expression
*ir
)
661 ir_rvalue_visitor::visit_leave(ir
);
663 /* If the expression is a conversion operation to or from bool then fix the
666 switch (ir
->operation
) {
668 ir
->operation
= ir_unop_b2f16
;
671 ir
->operation
= ir_unop_f162b
;
675 /* Nothing to do - they both support int16. */
681 return visit_continue
;
685 find_precision_visitor::handle_rvalue(ir_rvalue
**rvalue
)
687 /* Checking the precision of rvalue can be lowered first throughout
688 * find_lowerable_rvalues_visitor.
689 * Once it found the precision of rvalue can be lowered, then we can
690 * add conversion f2fmp, etc. through lower_precision_visitor.
695 struct set_entry
*entry
= _mesa_set_search(lowerable_rvalues
, *rvalue
);
700 _mesa_set_remove(lowerable_rvalues
, entry
);
702 /* If the entire expression is just a variable dereference then trying to
703 * lower it will just directly add pointless to and from conversions without
704 * any actual operation in-between. Although these will eventually get
705 * optimised out, avoiding generating them here also avoids breaking inout
706 * parameters to functions.
708 if ((*rvalue
)->as_dereference())
711 lower_precision_visitor v
;
713 (*rvalue
)->accept(&v
);
714 v
.handle_rvalue(rvalue
);
716 /* We don’t need to add the final conversion if the final type has been
719 if ((*rvalue
)->type
->base_type
!= GLSL_TYPE_BOOL
)
720 *rvalue
= convert_precision((*rvalue
)->type
->base_type
, true, *rvalue
);
726 find_precision_visitor::visit_enter(ir_call
*ir
)
728 ir_rvalue_enter_visitor::visit_enter(ir
);
730 /* If this is a call to a builtin and the find_lowerable_rvalues_visitor
731 * overrode the precision of the temporary return variable, then we can
732 * replace the builtin implementation with a lowered version.
735 if (!ir
->callee
->is_builtin() ||
736 ir
->return_deref
== NULL
||
737 (ir
->return_deref
->variable_referenced()->data
.precision
!=
738 GLSL_PRECISION_MEDIUM
&&
739 ir
->return_deref
->variable_referenced()->data
.precision
!=
741 return visit_continue
;
743 ir
->callee
= map_builtin(ir
->callee
);
744 ir
->generate_inline(ir
);
747 return visit_continue_with_parent
;
750 ir_function_signature
*
751 find_precision_visitor::map_builtin(ir_function_signature
*sig
)
753 if (lowered_builtins
== NULL
) {
754 lowered_builtins
= _mesa_pointer_hash_table_create(NULL
);
755 clone_ht
=_mesa_pointer_hash_table_create(NULL
);
756 lowered_builtin_mem_ctx
= ralloc_context(NULL
);
758 struct hash_entry
*entry
= _mesa_hash_table_search(lowered_builtins
, sig
);
760 return (ir_function_signature
*) entry
->data
;
763 ir_function_signature
*lowered_sig
=
764 sig
->clone(lowered_builtin_mem_ctx
, clone_ht
);
766 foreach_in_list(ir_variable
, param
, &lowered_sig
->parameters
) {
767 param
->data
.precision
= GLSL_PRECISION_MEDIUM
;
770 lower_precision(options
, &lowered_sig
->body
);
772 _mesa_hash_table_clear(clone_ht
, NULL
);
774 _mesa_hash_table_insert(lowered_builtins
, sig
, lowered_sig
);
779 find_precision_visitor::find_precision_visitor(const struct gl_shader_compiler_options
*options
)
781 lowerable_rvalues(_mesa_pointer_set_create(NULL
)),
782 lowered_builtins(NULL
),
784 lowered_builtin_mem_ctx(NULL
),
789 find_precision_visitor::~find_precision_visitor()
791 _mesa_set_destroy(lowerable_rvalues
, NULL
);
793 if (lowered_builtins
) {
794 _mesa_hash_table_destroy(lowered_builtins
, NULL
);
795 _mesa_hash_table_destroy(clone_ht
, NULL
);
796 ralloc_free(lowered_builtin_mem_ctx
);
803 lower_precision(const struct gl_shader_compiler_options
*options
,
804 exec_list
*instructions
)
806 find_precision_visitor
v(options
);
808 find_lowerable_rvalues(options
, instructions
, v
.lowerable_rvalues
);
810 visit_list_elements(&v
, instructions
);