/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

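/* Returns the value of x in the first active invocation of the subgroup,
 * which is by construction subgroup-uniform.
 */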
static nir_ssa_def *
read_first_invocation(nir_builder *b, nir_ssa_def *x)
{
   nir_intrinsic_instr *first =
      nir_intrinsic_instr_create(b->shader,
                                 nir_intrinsic_read_first_invocation);
   first->num_components = x->num_components;
   first->src[0] = nir_src_for_ssa(x);
   nir_ssa_dest_init(&first->instr, &first->dest,
                     x->num_components, x->bit_size, NULL);
   nir_builder_instr_insert(b, &first->instr);
   return &first->dest.ssa;
}

static bool
lower_non_uniform_tex_access(nir_builder *b, nir_tex_instr *tex)
{
   if (!tex->texture_non_uniform && !tex->sampler_non_uniform)
      return false;

   /* We can have at most one texture and one sampler handle */
   nir_ssa_def *handles[2];
   unsigned handle_count = 0;
   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_texture_offset:
      case nir_tex_src_texture_handle:
         if (!tex->texture_non_uniform)
            continue;
         break;

      case nir_tex_src_sampler_offset:
      case nir_tex_src_sampler_handle:
         if (!tex->sampler_non_uniform)
            continue;
         break;

      default:
         continue;
      }

      assert(tex->src[i].src.is_ssa);
      assert(tex->src[i].src.ssa->num_components == 1);
      assert(handle_count < 2);
      handles[handle_count++] = tex->src[i].src.ssa;
   }

   if (handle_count == 0)
      return false;

   b->cursor = nir_instr_remove(&tex->instr);

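   /* Build the waterfall loop: iterate until every non-uniform handle matches
    * the value in the first active invocation (i.e. the handles are subgroup
    * uniform for the invocations that take the branch), then re-emit the
    * texture instruction and break out.
    */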
   nir_push_loop(b);

   nir_ssa_def *all_equal_first = nir_imm_true(b);
   for (unsigned i = 0; i < handle_count; i++) {
      nir_ssa_def *equal_first =
         nir_ieq(b, read_first_invocation(b, handles[i]), handles[i]);
      all_equal_first = nir_iand(b, all_equal_first, equal_first);
   }

   nir_push_if(b, all_equal_first);

   nir_builder_instr_insert(b, &tex->instr);
   nir_jump(b, nir_jump_break);

   return true;
}

static bool
lower_non_uniform_access_intrin(nir_builder *b, nir_intrinsic_instr *intrin,
                                unsigned handle_src)
{
   if (!(nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM))
      return false;

   /* If it's constant, it's automatically uniform; don't bother. */
   if (nir_src_is_const(intrin->src[handle_src]))
      return false;

   b->cursor = nir_instr_remove(&intrin->instr);

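   /* Same waterfall trick as for textures, but with a single resource
    * handle.
    */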
   nir_push_loop(b);

   assert(intrin->src[handle_src].is_ssa);
   assert(intrin->src[handle_src].ssa->num_components == 1);
   nir_ssa_def *handle = intrin->src[handle_src].ssa;

   nir_push_if(b, nir_ieq(b, read_first_invocation(b, handle), handle));

   nir_builder_instr_insert(b, &intrin->instr);
   nir_jump(b, nir_jump_break);

   return true;
}

static bool
nir_lower_non_uniform_access_impl(nir_function_impl *impl,
                                  enum nir_lower_non_uniform_access_type types)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block_safe(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_tex: {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            if ((types & nir_lower_non_uniform_texture_access) &&
                lower_non_uniform_tex_access(&b, tex))
               progress = true;
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_ubo:
               if ((types & nir_lower_non_uniform_ubo_access) &&
                   lower_non_uniform_access_intrin(&b, intrin, 0))
                  progress = true;
               break;

            case nir_intrinsic_load_ssbo:
            case nir_intrinsic_ssbo_atomic_add:
            case nir_intrinsic_ssbo_atomic_imin:
            case nir_intrinsic_ssbo_atomic_umin:
            case nir_intrinsic_ssbo_atomic_imax:
            case nir_intrinsic_ssbo_atomic_umax:
            case nir_intrinsic_ssbo_atomic_and:
            case nir_intrinsic_ssbo_atomic_or:
            case nir_intrinsic_ssbo_atomic_xor:
            case nir_intrinsic_ssbo_atomic_exchange:
            case nir_intrinsic_ssbo_atomic_comp_swap:
            case nir_intrinsic_ssbo_atomic_fadd:
            case nir_intrinsic_ssbo_atomic_fmin:
            case nir_intrinsic_ssbo_atomic_fmax:
            case nir_intrinsic_ssbo_atomic_fcomp_swap:
               if ((types & nir_lower_non_uniform_ssbo_access) &&
                   lower_non_uniform_access_intrin(&b, intrin, 0))
                  progress = true;
               break;

            case nir_intrinsic_store_ssbo:
               /* SSBO Stores put the index in the second source */
               if ((types & nir_lower_non_uniform_ssbo_access) &&
                   lower_non_uniform_access_intrin(&b, intrin, 1))
                  progress = true;
               break;

            case nir_intrinsic_image_load:
            case nir_intrinsic_image_store:
            case nir_intrinsic_image_atomic_add:
            case nir_intrinsic_image_atomic_min:
            case nir_intrinsic_image_atomic_max:
            case nir_intrinsic_image_atomic_and:
            case nir_intrinsic_image_atomic_or:
            case nir_intrinsic_image_atomic_xor:
            case nir_intrinsic_image_atomic_exchange:
            case nir_intrinsic_image_atomic_comp_swap:
            case nir_intrinsic_image_atomic_fadd:
            case nir_intrinsic_image_size:
            case nir_intrinsic_image_samples:
            case nir_intrinsic_bindless_image_load:
            case nir_intrinsic_bindless_image_store:
            case nir_intrinsic_bindless_image_atomic_add:
            case nir_intrinsic_bindless_image_atomic_min:
            case nir_intrinsic_bindless_image_atomic_max:
            case nir_intrinsic_bindless_image_atomic_and:
            case nir_intrinsic_bindless_image_atomic_or:
            case nir_intrinsic_bindless_image_atomic_xor:
            case nir_intrinsic_bindless_image_atomic_exchange:
            case nir_intrinsic_bindless_image_atomic_comp_swap:
            case nir_intrinsic_bindless_image_atomic_fadd:
            case nir_intrinsic_bindless_image_size:
            case nir_intrinsic_bindless_image_samples:
               if ((types & nir_lower_non_uniform_image_access) &&
                   lower_non_uniform_access_intrin(&b, intrin, 0))
                  progress = true;
               break;

            default:
               /* Nothing to do */
               break;
            }
            break;
         }

         default:
            /* Nothing to do */
            break;
         }
      }
   }

   if (progress)
      nir_metadata_preserve(impl, nir_metadata_none);

   return progress;
}

/**
 * Lowers non-uniform resource access by using a loop
 *
 * This pass lowers non-uniform resource access by using subgroup operations
 * and a loop. Most hardware requires things like textures and UBO access
 * operations to happen on a dynamically uniform (or at least subgroup
 * uniform) resource. This pass allows for non-uniform access by placing the
 * texture instruction in a loop that looks something like this:
 *
 * loop {
 *    bool tex_eq_first = readFirstInvocationARB(texture) == texture;
 *    bool smp_eq_first = readFirstInvocationARB(sampler) == sampler;
 *    if (tex_eq_first && smp_eq_first) {
 *       res = texture(texture, sampler, ...);
 *       break;
 *    }
 * }
 *
 * Fortunately, because the instruction is immediately followed by the only
 * break in the loop, the block containing the instruction dominates the end
 * of the loop. Therefore, it's safe to move the instruction into the loop
 * without fixing up SSA in any way.
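 *
 * A typical invocation from a driver might look like the following sketch
 * (illustrative only; which types a driver needs to lower depends on its
 * hardware, not on anything in this file):
 *
 *    nir_lower_non_uniform_access(shader,
 *                                 nir_lower_non_uniform_texture_access |
 *                                 nir_lower_non_uniform_ubo_access |
 *                                 nir_lower_non_uniform_ssbo_access |
 *                                 nir_lower_non_uniform_image_access);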
 */
bool
nir_lower_non_uniform_access(nir_shader *shader,
                             enum nir_lower_non_uniform_access_type types)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_lower_non_uniform_access_impl(function->impl, types))
         progress = true;
   }

   return progress;
}