Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] / src / compiler / nir / nir_lower_bit_size.c
1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir_builder.h"
25
26 /**
27 * Some ALU operations may not be supported in hardware in specific bit-sizes.
28 * This pass allows implementations to selectively lower such operations to
29 * a bit-size that is supported natively and then converts the result back to
30 * the original bit-size.
31 */
32
33 static void
34 lower_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)
35 {
36 const nir_op op = alu->op;
37 unsigned dst_bit_size = alu->dest.dest.ssa.bit_size;
38
39 bld->cursor = nir_before_instr(&alu->instr);
40
41 /* Convert each source to the requested bit-size */
42 nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS] = { NULL };
43 for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
44 nir_ssa_def *src = nir_ssa_for_alu_src(bld, alu, i);
45
46 nir_alu_type type = nir_op_infos[op].input_types[i];
47 if (nir_alu_type_get_type_size(type) == 0)
48 src = nir_convert_to_bit_size(bld, src, type, bit_size);
49
50 if (i == 1 && (op == nir_op_ishl || op == nir_op_ishr || op == nir_op_ushr)) {
51 assert(util_is_power_of_two_nonzero(dst_bit_size));
52 src = nir_iand(bld, src, nir_imm_int(bld, dst_bit_size - 1));
53 }
54
55 srcs[i] = src;
56 }
57
58 /* Emit the lowered ALU instruction */
59 nir_ssa_def *lowered_dst = NULL;
60 if (op == nir_op_imul_high || op == nir_op_umul_high) {
61 assert(dst_bit_size * 2 <= bit_size);
62 nir_ssa_def *lowered_dst = nir_imul(bld, srcs[0], srcs[1]);
63 if (nir_op_infos[op].output_type & nir_type_uint)
64 lowered_dst = nir_ushr_imm(bld, lowered_dst, dst_bit_size);
65 else
66 lowered_dst = nir_ishr_imm(bld, lowered_dst, dst_bit_size);
67 } else {
68 lowered_dst = nir_build_alu_src_arr(bld, op, srcs);
69 }
70
71
72 /* Convert result back to the original bit-size */
73 if (dst_bit_size != bit_size) {
74 nir_alu_type type = nir_op_infos[op].output_type;
75 nir_ssa_def *dst = nir_convert_to_bit_size(bld, lowered_dst, type, dst_bit_size);
76 nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(dst));
77 } else {
78 nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(lowered_dst));
79 }
80 }
81
82 static bool
83 lower_impl(nir_function_impl *impl,
84 nir_lower_bit_size_callback callback,
85 void *callback_data)
86 {
87 nir_builder b;
88 nir_builder_init(&b, impl);
89 bool progress = false;
90
91 nir_foreach_block(block, impl) {
92 nir_foreach_instr_safe(instr, block) {
93 if (instr->type != nir_instr_type_alu)
94 continue;
95
96 nir_alu_instr *alu = nir_instr_as_alu(instr);
97 assert(alu->dest.dest.is_ssa);
98
99 unsigned lower_bit_size = callback(alu, callback_data);
100 if (lower_bit_size == 0)
101 continue;
102
103 lower_instr(&b, alu, lower_bit_size);
104 progress = true;
105 }
106 }
107
108 if (progress) {
109 nir_metadata_preserve(impl, nir_metadata_block_index |
110 nir_metadata_dominance);
111 } else {
112 nir_metadata_preserve(impl, nir_metadata_all);
113 }
114
115 return progress;
116 }
117
118 bool
119 nir_lower_bit_size(nir_shader *shader,
120 nir_lower_bit_size_callback callback,
121 void *callback_data)
122 {
123 bool progress = false;
124
125 nir_foreach_function(function, shader) {
126 if (function->impl)
127 progress |= lower_impl(function->impl, callback, callback_data);
128 }
129
130 return progress;
131 }
132
133 static void
134 split_phi(nir_builder *b, nir_phi_instr *phi)
135 {
136 nir_phi_instr *lowered[2] = {
137 nir_phi_instr_create(b->shader),
138 nir_phi_instr_create(b->shader)
139 };
140 int num_components = phi->dest.ssa.num_components;
141 assert(phi->dest.ssa.bit_size == 64);
142
143 nir_foreach_phi_src(src, phi) {
144 assert(num_components == src->src.ssa->num_components);
145
146 b->cursor = nir_before_src(&src->src, false);
147
148 nir_ssa_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);
149 nir_ssa_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);
150
151 nir_phi_src *xsrc = rzalloc(lowered[0], nir_phi_src);
152 xsrc->pred = src->pred;
153 xsrc->src = nir_src_for_ssa(x);
154 exec_list_push_tail(&lowered[0]->srcs, &xsrc->node);
155
156 nir_phi_src *ysrc = rzalloc(lowered[1], nir_phi_src);
157 ysrc->pred = src->pred;
158 ysrc->src = nir_src_for_ssa(y);
159 exec_list_push_tail(&lowered[1]->srcs, &ysrc->node);
160 }
161
162 nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest,
163 num_components, 32, NULL);
164 nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest,
165 num_components, 32, NULL);
166
167 b->cursor = nir_before_instr(&phi->instr);
168 nir_builder_instr_insert(b, &lowered[0]->instr);
169 nir_builder_instr_insert(b, &lowered[1]->instr);
170
171 b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
172 nir_ssa_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa);
173 nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_src_for_ssa(merged));
174 nir_instr_remove(&phi->instr);
175 }
176
177 static bool
178 lower_64bit_phi_impl(nir_function_impl *impl)
179 {
180 nir_builder b;
181 nir_builder_init(&b, impl);
182 bool progress = false;
183
184 nir_foreach_block(block, impl) {
185 nir_foreach_instr_safe(instr, block) {
186 if (instr->type != nir_instr_type_phi)
187 break;
188
189 nir_phi_instr *phi = nir_instr_as_phi(instr);
190 assert(phi->dest.is_ssa);
191
192 if (phi->dest.ssa.bit_size <= 32)
193 continue;
194
195 split_phi(&b, phi);
196 progress = true;
197 }
198 }
199
200 if (progress) {
201 nir_metadata_preserve(impl, nir_metadata_block_index |
202 nir_metadata_dominance);
203 } else {
204 nir_metadata_preserve(impl, nir_metadata_all);
205 }
206
207 return progress;
208 }
209
210 bool
211 nir_lower_64bit_phis(nir_shader *shader)
212 {
213 bool progress = false;
214
215 nir_foreach_function(function, shader) {
216 if (function->impl)
217 progress |= lower_64bit_phi_impl(function->impl);
218 }
219
220 return progress;
221 }