/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "nir_builder.h"
/**
 * Some ALU operations may not be supported in hardware in specific bit-sizes.
 * This pass allows implementations to selectively lower such operations to
 * a bit-size that is supported natively and then converts the result back to
 * the original bit-size.
 */
34 lower_instr(nir_builder
*bld
, nir_alu_instr
*alu
, unsigned bit_size
)
36 const nir_op op
= alu
->op
;
37 unsigned dst_bit_size
= alu
->dest
.dest
.ssa
.bit_size
;
39 bld
->cursor
= nir_before_instr(&alu
->instr
);
41 /* Convert each source to the requested bit-size */
42 nir_ssa_def
*srcs
[NIR_MAX_VEC_COMPONENTS
] = { NULL
};
43 for (unsigned i
= 0; i
< nir_op_infos
[op
].num_inputs
; i
++) {
44 nir_ssa_def
*src
= nir_ssa_for_alu_src(bld
, alu
, i
);
46 nir_alu_type type
= nir_op_infos
[op
].input_types
[i
];
47 if (nir_alu_type_get_type_size(type
) == 0)
48 src
= nir_convert_to_bit_size(bld
, src
, type
, bit_size
);
50 if (i
== 1 && (op
== nir_op_ishl
|| op
== nir_op_ishr
|| op
== nir_op_ushr
)) {
51 assert(util_is_power_of_two_nonzero(dst_bit_size
));
52 src
= nir_iand(bld
, src
, nir_imm_int(bld
, dst_bit_size
- 1));
58 /* Emit the lowered ALU instruction */
59 nir_ssa_def
*lowered_dst
= NULL
;
60 if (op
== nir_op_imul_high
|| op
== nir_op_umul_high
) {
61 assert(dst_bit_size
* 2 <= bit_size
);
62 nir_ssa_def
*lowered_dst
= nir_imul(bld
, srcs
[0], srcs
[1]);
63 if (nir_op_infos
[op
].output_type
& nir_type_uint
)
64 lowered_dst
= nir_ushr_imm(bld
, lowered_dst
, dst_bit_size
);
66 lowered_dst
= nir_ishr_imm(bld
, lowered_dst
, dst_bit_size
);
68 lowered_dst
= nir_build_alu_src_arr(bld
, op
, srcs
);
72 /* Convert result back to the original bit-size */
73 if (dst_bit_size
!= bit_size
) {
74 nir_alu_type type
= nir_op_infos
[op
].output_type
;
75 nir_ssa_def
*dst
= nir_convert_to_bit_size(bld
, lowered_dst
, type
, dst_bit_size
);
76 nir_ssa_def_rewrite_uses(&alu
->dest
.dest
.ssa
, nir_src_for_ssa(dst
));
78 nir_ssa_def_rewrite_uses(&alu
->dest
.dest
.ssa
, nir_src_for_ssa(lowered_dst
));
83 lower_impl(nir_function_impl
*impl
,
84 nir_lower_bit_size_callback callback
,
88 nir_builder_init(&b
, impl
);
89 bool progress
= false;
91 nir_foreach_block(block
, impl
) {
92 nir_foreach_instr_safe(instr
, block
) {
93 if (instr
->type
!= nir_instr_type_alu
)
96 nir_alu_instr
*alu
= nir_instr_as_alu(instr
);
97 assert(alu
->dest
.dest
.is_ssa
);
99 unsigned lower_bit_size
= callback(alu
, callback_data
);
100 if (lower_bit_size
== 0)
103 lower_instr(&b
, alu
, lower_bit_size
);
109 nir_metadata_preserve(impl
, nir_metadata_block_index
|
110 nir_metadata_dominance
);
112 nir_metadata_preserve(impl
, nir_metadata_all
);
119 nir_lower_bit_size(nir_shader
*shader
,
120 nir_lower_bit_size_callback callback
,
123 bool progress
= false;
125 nir_foreach_function(function
, shader
) {
127 progress
|= lower_impl(function
->impl
, callback
, callback_data
);
134 split_phi(nir_builder
*b
, nir_phi_instr
*phi
)
136 nir_phi_instr
*lowered
[2] = {
137 nir_phi_instr_create(b
->shader
),
138 nir_phi_instr_create(b
->shader
)
140 int num_components
= phi
->dest
.ssa
.num_components
;
141 assert(phi
->dest
.ssa
.bit_size
== 64);
143 nir_foreach_phi_src(src
, phi
) {
144 assert(num_components
== src
->src
.ssa
->num_components
);
146 b
->cursor
= nir_before_src(&src
->src
, false);
148 nir_ssa_def
*x
= nir_unpack_64_2x32_split_x(b
, src
->src
.ssa
);
149 nir_ssa_def
*y
= nir_unpack_64_2x32_split_y(b
, src
->src
.ssa
);
151 nir_phi_src
*xsrc
= rzalloc(lowered
[0], nir_phi_src
);
152 xsrc
->pred
= src
->pred
;
153 xsrc
->src
= nir_src_for_ssa(x
);
154 exec_list_push_tail(&lowered
[0]->srcs
, &xsrc
->node
);
156 nir_phi_src
*ysrc
= rzalloc(lowered
[1], nir_phi_src
);
157 ysrc
->pred
= src
->pred
;
158 ysrc
->src
= nir_src_for_ssa(y
);
159 exec_list_push_tail(&lowered
[1]->srcs
, &ysrc
->node
);
162 nir_ssa_dest_init(&lowered
[0]->instr
, &lowered
[0]->dest
,
163 num_components
, 32, NULL
);
164 nir_ssa_dest_init(&lowered
[1]->instr
, &lowered
[1]->dest
,
165 num_components
, 32, NULL
);
167 b
->cursor
= nir_before_instr(&phi
->instr
);
168 nir_builder_instr_insert(b
, &lowered
[0]->instr
);
169 nir_builder_instr_insert(b
, &lowered
[1]->instr
);
171 b
->cursor
= nir_after_phis(nir_cursor_current_block(b
->cursor
));
172 nir_ssa_def
*merged
= nir_pack_64_2x32_split(b
, &lowered
[0]->dest
.ssa
, &lowered
[1]->dest
.ssa
);
173 nir_ssa_def_rewrite_uses(&phi
->dest
.ssa
, nir_src_for_ssa(merged
));
174 nir_instr_remove(&phi
->instr
);
178 lower_64bit_phi_impl(nir_function_impl
*impl
)
181 nir_builder_init(&b
, impl
);
182 bool progress
= false;
184 nir_foreach_block(block
, impl
) {
185 nir_foreach_instr_safe(instr
, block
) {
186 if (instr
->type
!= nir_instr_type_phi
)
189 nir_phi_instr
*phi
= nir_instr_as_phi(instr
);
190 assert(phi
->dest
.is_ssa
);
192 if (phi
->dest
.ssa
.bit_size
<= 32)
201 nir_metadata_preserve(impl
, nir_metadata_block_index
|
202 nir_metadata_dominance
);
204 nir_metadata_preserve(impl
, nir_metadata_all
);
211 nir_lower_64bit_phis(nir_shader
*shader
)
213 bool progress
= false;
215 nir_foreach_function(function
, shader
) {
217 progress
|= lower_64bit_phi_impl(function
->impl
);