src/compiler/nir/nir_lower_non_uniform_access.c
/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

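/* Builds a read_first_invocation intrinsic which broadcasts the value of x
 * from the first active invocation to the whole subgroup.
 */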
static nir_ssa_def *
read_first_invocation(nir_builder *b, nir_ssa_def *x)
{
   nir_intrinsic_instr *first =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_read_first_invocation);
   first->num_components = x->num_components;
   first->src[0] = nir_src_for_ssa(x);
   nir_ssa_dest_init(&first->instr, &first->dest,
                     x->num_components, x->bit_size, NULL);
   nir_builder_instr_insert(b, &first->instr);
   return &first->dest.ssa;
}

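/* Wraps a texture instruction whose texture and/or sampler index is
 * non-uniform in a read_first_invocation() loop.  Returns true if the
 * instruction was lowered.
 */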
static bool
lower_non_uniform_tex_access(nir_builder *b, nir_tex_instr *tex)
{
   if (!tex->texture_non_uniform && !tex->sampler_non_uniform)
      return false;

   /* We can have at most one texture and one sampler handle */
   nir_ssa_def *handles[2];
   nir_deref_instr *parent_derefs[2];
   int texture_deref_handle = -1;
   int sampler_deref_handle = -1;
   unsigned handle_count = 0;
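   /* Gather the SSA values which select the texture and sampler: bindless
    * handles, binding table offsets, or the array indices of derefs.
    */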
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_texture_offset:
      case nir_tex_src_texture_handle:
      case nir_tex_src_texture_deref:
         if (!tex->texture_non_uniform)
            continue;
         break;

      case nir_tex_src_sampler_offset:
      case nir_tex_src_sampler_handle:
      case nir_tex_src_sampler_deref:
         if (!tex->sampler_non_uniform)
            continue;
         break;

      default:
         continue;
      }

      assert(handle_count < 2);
      assert(tex->src[i].src.is_ssa);
      nir_ssa_def *handle = tex->src[i].src.ssa;
      if (handle->parent_instr->type == nir_instr_type_deref) {
         nir_deref_instr *deref = nir_instr_as_deref(handle->parent_instr);
         nir_deref_instr *parent = nir_deref_instr_parent(deref);
         if (deref->deref_type == nir_deref_type_var)
            continue;

         assert(parent->deref_type == nir_deref_type_var);
         assert(deref->deref_type == nir_deref_type_array);

         /* If it's constant, it's automatically uniform; don't bother. */
         if (nir_src_is_const(deref->arr.index))
            continue;

         handle = deref->arr.index.ssa;

         parent_derefs[handle_count] = parent;
         if (tex->src[i].src_type == nir_tex_src_texture_deref)
            texture_deref_handle = handle_count;
         else
            sampler_deref_handle = handle_count;
      }
      assert(handle->num_components == 1);

      handles[handle_count++] = handle;
   }

   if (handle_count == 0)
      return false;

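   /* Pull the instruction out of the program and rebuild it inside a loop:
    * each trip through the loop handles the subgroup invocations whose
    * handles match those of the first remaining invocation.
    */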
   b->cursor = nir_instr_remove(&tex->instr);

   nir_push_loop(b);

   nir_ssa_def *all_equal_first = nir_imm_true(b);
   nir_ssa_def *first[2];
   for (unsigned i = 0; i < handle_count; i++) {
      first[i] = read_first_invocation(b, handles[i]);
      nir_ssa_def *equal_first = nir_ieq(b, first[i], handles[i]);
      all_equal_first = nir_iand(b, all_equal_first, equal_first);
   }

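   /* Only the invocations whose handles all match the first invocation's
    * execute this iteration; within the if, the handles are subgroup
    * uniform.
    */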
   nir_push_if(b, all_equal_first);

   /* Replicate the derefs. */
   if (texture_deref_handle >= 0) {
      int src_idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_deref);
      nir_deref_instr *deref = parent_derefs[texture_deref_handle];
      deref = nir_build_deref_array(b, deref, first[texture_deref_handle]);
      tex->src[src_idx].src = nir_src_for_ssa(&deref->dest.ssa);
   }

   if (sampler_deref_handle >= 0) {
      int src_idx = nir_tex_instr_src_index(tex, nir_tex_src_sampler_deref);
      nir_deref_instr *deref = parent_derefs[sampler_deref_handle];
      deref = nir_build_deref_array(b, deref, first[sampler_deref_handle]);
      tex->src[src_idx].src = nir_src_for_ssa(&deref->dest.ssa);
   }

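   /* Re-insert the instruction inside the if and break out of the loop so
    * the invocations which just executed it don't run another iteration.
    */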
   nir_builder_instr_insert(b, &tex->instr);
   nir_jump(b, nir_jump_break);

   return true;
}

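/* Wraps an intrinsic whose resource index (source handle_src) is marked
 * ACCESS_NON_UNIFORM in a read_first_invocation() loop, using the same
 * construction as lower_non_uniform_tex_access().
 */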
static bool
lower_non_uniform_access_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
                                unsigned handle_src)
{
   if (!(nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM))
      return false;

   assert(intrin->src[handle_src].is_ssa);
   nir_ssa_def *handle = intrin->src[handle_src].ssa;
   nir_deref_instr *parent_deref = NULL;
   if (handle->parent_instr->type == nir_instr_type_deref) {
      nir_deref_instr *deref = nir_instr_as_deref(handle->parent_instr);
      parent_deref = nir_deref_instr_parent(deref);
      if (deref->deref_type == nir_deref_type_var)
         return false;

      assert(parent_deref->deref_type == nir_deref_type_var);
      assert(deref->deref_type == nir_deref_type_array);

      handle = deref->arr.index.ssa;
   }

   /* If it's constant, it's automatically uniform; don't bother. */
   if (handle->parent_instr->type == nir_instr_type_load_const)
      return false;

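   /* As in the texture case: pull the instruction out and re-emit it in a
    * loop where only the invocations matching the first invocation's handle
    * execute each iteration.
    */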
   b->cursor = nir_instr_remove(&intrin->instr);

   nir_push_loop(b);

   assert(handle->num_components == 1);

   nir_ssa_def *first = read_first_invocation(b, handle);
   nir_push_if(b, nir_ieq(b, first, handle));

   /* Replicate the deref. */
   if (parent_deref) {
      nir_deref_instr *deref = nir_build_deref_array(b, parent_deref, first);
      intrin->src[handle_src] = nir_src_for_ssa(&deref->dest.ssa);
   }

   nir_builder_instr_insert(b, &intrin->instr);
   nir_jump(b, nir_jump_break);

   return true;
}

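/* Walks the impl and lowers every non-uniform instruction matching the
 * requested access types.
 */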
static bool
nir_lower_non_uniform_access_impl(nir_function_impl *impl,
                                  enum nir_lower_non_uniform_access_type types)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block_safe(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_tex: {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            if ((types & nir_lower_non_uniform_texture_access) &&
                lower_non_uniform_tex_access(&b, tex))
               progress = true;
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_ubo:
               if ((types & nir_lower_non_uniform_ubo_access) &&
                   lower_non_uniform_access_intrin(&b, intrin, 0))
                  progress = true;
               break;

            case nir_intrinsic_load_ssbo:
            case nir_intrinsic_ssbo_atomic_add:
            case nir_intrinsic_ssbo_atomic_imin:
            case nir_intrinsic_ssbo_atomic_umin:
            case nir_intrinsic_ssbo_atomic_imax:
            case nir_intrinsic_ssbo_atomic_umax:
            case nir_intrinsic_ssbo_atomic_and:
            case nir_intrinsic_ssbo_atomic_or:
            case nir_intrinsic_ssbo_atomic_xor:
            case nir_intrinsic_ssbo_atomic_exchange:
            case nir_intrinsic_ssbo_atomic_comp_swap:
            case nir_intrinsic_ssbo_atomic_fadd:
            case nir_intrinsic_ssbo_atomic_fmin:
            case nir_intrinsic_ssbo_atomic_fmax:
            case nir_intrinsic_ssbo_atomic_fcomp_swap:
               if ((types & nir_lower_non_uniform_ssbo_access) &&
                   lower_non_uniform_access_intrin(&b, intrin, 0))
                  progress = true;
               break;

            case nir_intrinsic_store_ssbo:
               /* SSBO stores put the index in the second source */
               if ((types & nir_lower_non_uniform_ssbo_access) &&
                   lower_non_uniform_access_intrin(&b, intrin, 1))
                  progress = true;
               break;

            case nir_intrinsic_image_load:
            case nir_intrinsic_image_store:
            case nir_intrinsic_image_atomic_add:
            case nir_intrinsic_image_atomic_imin:
            case nir_intrinsic_image_atomic_umin:
            case nir_intrinsic_image_atomic_imax:
            case nir_intrinsic_image_atomic_umax:
            case nir_intrinsic_image_atomic_and:
            case nir_intrinsic_image_atomic_or:
            case nir_intrinsic_image_atomic_xor:
            case nir_intrinsic_image_atomic_exchange:
            case nir_intrinsic_image_atomic_comp_swap:
            case nir_intrinsic_image_atomic_fadd:
            case nir_intrinsic_image_size:
            case nir_intrinsic_image_samples:
            case nir_intrinsic_bindless_image_load:
            case nir_intrinsic_bindless_image_store:
            case nir_intrinsic_bindless_image_atomic_add:
            case nir_intrinsic_bindless_image_atomic_imin:
            case nir_intrinsic_bindless_image_atomic_umin:
            case nir_intrinsic_bindless_image_atomic_imax:
            case nir_intrinsic_bindless_image_atomic_umax:
            case nir_intrinsic_bindless_image_atomic_and:
            case nir_intrinsic_bindless_image_atomic_or:
            case nir_intrinsic_bindless_image_atomic_xor:
            case nir_intrinsic_bindless_image_atomic_exchange:
            case nir_intrinsic_bindless_image_atomic_comp_swap:
            case nir_intrinsic_bindless_image_atomic_fadd:
            case nir_intrinsic_bindless_image_size:
            case nir_intrinsic_bindless_image_samples:
            case nir_intrinsic_image_deref_load:
            case nir_intrinsic_image_deref_store:
            case nir_intrinsic_image_deref_atomic_add:
            case nir_intrinsic_image_deref_atomic_umin:
            case nir_intrinsic_image_deref_atomic_imin:
            case nir_intrinsic_image_deref_atomic_umax:
            case nir_intrinsic_image_deref_atomic_imax:
            case nir_intrinsic_image_deref_atomic_and:
            case nir_intrinsic_image_deref_atomic_or:
            case nir_intrinsic_image_deref_atomic_xor:
            case nir_intrinsic_image_deref_atomic_exchange:
            case nir_intrinsic_image_deref_atomic_comp_swap:
            case nir_intrinsic_image_deref_size:
            case nir_intrinsic_image_deref_samples:
               if ((types & nir_lower_non_uniform_image_access) &&
                   lower_non_uniform_access_intrin(&b, intrin, 0))
                  progress = true;
               break;

            default:
               /* Nothing to do */
               break;
            }
            break;
         }

         default:
            /* Nothing to do */
            break;
         }
      }
   }

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_none);

   return progress;
}

/**
 * Lowers non-uniform resource access by using a loop
 *
 * This pass lowers non-uniform resource access by using subgroup operations
 * and a loop.  Most hardware requires the resource used by operations such
 * as texture fetches and UBO loads to be dynamically uniform (or at least
 * subgroup uniform).  This pass allows for non-uniform access by placing
 * the texture instruction in a loop that looks something like this:
 *
 *    loop {
 *       bool tex_eq_first = readFirstInvocationARB(texture) == texture;
 *       bool smp_eq_first = readFirstInvocationARB(sampler) == sampler;
 *       if (tex_eq_first && smp_eq_first) {
 *          res = texture(texture, sampler, ...);
 *          break;
 *       }
 *    }
 *
 * Fortunately, because the instruction is immediately followed by the only
 * break in the loop, the block containing the instruction dominates the end
 * of the loop.  Therefore, it's safe to move the instruction into the loop
 * without fixing up SSA in any way.
 */
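/* A typical caller ORs together the access types its hardware cannot handle
 * natively.  For example (a hypothetical driver's call, shown only for
 * illustration):
 *
 *    nir_lower_non_uniform_access(shader,
 *                                 nir_lower_non_uniform_texture_access |
 *                                 nir_lower_non_uniform_ubo_access |
 *                                 nir_lower_non_uniform_ssbo_access |
 *                                 nir_lower_non_uniform_image_access);
 */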
bool
nir_lower_non_uniform_access(nir_shader *shader,
                             enum nir_lower_non_uniform_access_type types)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_lower_non_uniform_access_impl(function->impl, types))
         progress = true;
   }

   return progress;
}