/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "nir_builder.h"
28 read_first_invocation(nir_builder
*b
, nir_ssa_def
*x
)
30 nir_intrinsic_instr
*first
=
31 nir_intrinsic_instr_create(b
->shader
,
32 nir_intrinsic_read_first_invocation
);
33 first
->num_components
= x
->num_components
;
34 first
->src
[0] = nir_src_for_ssa(x
);
35 nir_ssa_dest_init(&first
->instr
, &first
->dest
,
36 x
->num_components
, x
->bit_size
, NULL
);
37 nir_builder_instr_insert(b
, &first
->instr
);
38 return &first
->dest
.ssa
;
42 lower_non_uniform_tex_access(nir_builder
*b
, nir_tex_instr
*tex
)
44 if (!tex
->texture_non_uniform
&& !tex
->sampler_non_uniform
)
47 /* We can have at most one texture and one sampler handle */
48 nir_ssa_def
*handles
[2];
49 nir_deref_instr
*parent_derefs
[2];
50 int texture_deref_handle
= -1;
51 int sampler_deref_handle
= -1;
52 unsigned handle_count
= 0;
53 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
54 switch (tex
->src
[i
].src_type
) {
55 case nir_tex_src_texture_offset
:
56 case nir_tex_src_texture_handle
:
57 case nir_tex_src_texture_deref
:
58 if (!tex
->texture_non_uniform
)
62 case nir_tex_src_sampler_offset
:
63 case nir_tex_src_sampler_handle
:
64 case nir_tex_src_sampler_deref
:
65 if (!tex
->sampler_non_uniform
)
73 assert(handle_count
< 2);
74 assert(tex
->src
[i
].src
.is_ssa
);
75 nir_ssa_def
*handle
= tex
->src
[i
].src
.ssa
;
76 if (handle
->parent_instr
->type
== nir_instr_type_deref
) {
77 nir_deref_instr
*deref
= nir_instr_as_deref(handle
->parent_instr
);
78 nir_deref_instr
*parent
= nir_deref_instr_parent(deref
);
79 if (deref
->deref_type
== nir_deref_type_var
)
82 assert(parent
->deref_type
== nir_deref_type_var
);
83 assert(deref
->deref_type
== nir_deref_type_array
);
85 /* If it's constant, it's automatically uniform; don't bother. */
86 if (nir_src_is_const(deref
->arr
.index
))
89 handle
= deref
->arr
.index
.ssa
;
91 parent_derefs
[handle_count
] = parent
;
92 if (tex
->src
[i
].src_type
== nir_tex_src_texture_deref
)
93 texture_deref_handle
= handle_count
;
95 sampler_deref_handle
= handle_count
;
97 assert(handle
->num_components
== 1);
99 handles
[handle_count
++] = handle
;
102 if (handle_count
== 0)
105 b
->cursor
= nir_instr_remove(&tex
->instr
);
109 nir_ssa_def
*all_equal_first
= nir_imm_true(b
);
110 nir_ssa_def
*first
[2];
111 for (unsigned i
= 0; i
< handle_count
; i
++) {
112 first
[i
] = read_first_invocation(b
, handles
[i
]);
113 nir_ssa_def
*equal_first
= nir_ieq(b
, first
[i
], handles
[i
]);
114 all_equal_first
= nir_iand(b
, all_equal_first
, equal_first
);
117 nir_push_if(b
, all_equal_first
);
119 /* Replicate the derefs. */
120 if (texture_deref_handle
>= 0) {
121 int src_idx
= nir_tex_instr_src_index(tex
, nir_tex_src_texture_deref
);
122 nir_deref_instr
*deref
= parent_derefs
[texture_deref_handle
];
123 deref
= nir_build_deref_array(b
, deref
, first
[texture_deref_handle
]);
124 tex
->src
[src_idx
].src
= nir_src_for_ssa(&deref
->dest
.ssa
);
127 if (sampler_deref_handle
>= 0) {
128 int src_idx
= nir_tex_instr_src_index(tex
, nir_tex_src_sampler_deref
);
129 nir_deref_instr
*deref
= parent_derefs
[sampler_deref_handle
];
130 deref
= nir_build_deref_array(b
, deref
, first
[sampler_deref_handle
]);
131 tex
->src
[src_idx
].src
= nir_src_for_ssa(&deref
->dest
.ssa
);
134 nir_builder_instr_insert(b
, &tex
->instr
);
135 nir_jump(b
, nir_jump_break
);
141 lower_non_uniform_access_intrin(nir_builder
*b
, nir_intrinsic_instr
*intrin
,
144 if (!(nir_intrinsic_access(intrin
) & ACCESS_NON_UNIFORM
))
147 assert(intrin
->src
[handle_src
].is_ssa
);
148 nir_ssa_def
*handle
= intrin
->src
[handle_src
].ssa
;
149 nir_deref_instr
*parent_deref
= NULL
;
150 if (handle
->parent_instr
->type
== nir_instr_type_deref
) {
151 nir_deref_instr
*deref
= nir_instr_as_deref(handle
->parent_instr
);
152 parent_deref
= nir_deref_instr_parent(deref
);
153 if (deref
->deref_type
== nir_deref_type_var
)
156 assert(parent_deref
->deref_type
== nir_deref_type_var
);
157 assert(deref
->deref_type
== nir_deref_type_array
);
159 handle
= deref
->arr
.index
.ssa
;
162 /* If it's constant, it's automatically uniform; don't bother. */
163 if (handle
->parent_instr
->type
== nir_instr_type_load_const
)
166 b
->cursor
= nir_instr_remove(&intrin
->instr
);
170 assert(handle
->num_components
== 1);
172 nir_ssa_def
*first
= read_first_invocation(b
, handle
);
173 nir_push_if(b
, nir_ieq(b
, first
, handle
));
175 /* Replicate the deref. */
177 nir_deref_instr
*deref
= nir_build_deref_array(b
, parent_deref
, first
);
178 intrin
->src
[handle_src
] = nir_src_for_ssa(&deref
->dest
.ssa
);
181 nir_builder_instr_insert(b
, &intrin
->instr
);
182 nir_jump(b
, nir_jump_break
);
188 nir_lower_non_uniform_access_impl(nir_function_impl
*impl
,
189 enum nir_lower_non_uniform_access_type types
)
191 bool progress
= false;
194 nir_builder_init(&b
, impl
);
196 nir_foreach_block_safe(block
, impl
) {
197 nir_foreach_instr_safe(instr
, block
) {
198 switch (instr
->type
) {
199 case nir_instr_type_tex
: {
200 nir_tex_instr
*tex
= nir_instr_as_tex(instr
);
201 if ((types
& nir_lower_non_uniform_texture_access
) &&
202 lower_non_uniform_tex_access(&b
, tex
))
207 case nir_instr_type_intrinsic
: {
208 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
209 switch (intrin
->intrinsic
) {
210 case nir_intrinsic_load_ubo
:
211 if ((types
& nir_lower_non_uniform_ubo_access
) &&
212 lower_non_uniform_access_intrin(&b
, intrin
, 0))
216 case nir_intrinsic_load_ssbo
:
217 case nir_intrinsic_ssbo_atomic_add
:
218 case nir_intrinsic_ssbo_atomic_imin
:
219 case nir_intrinsic_ssbo_atomic_umin
:
220 case nir_intrinsic_ssbo_atomic_imax
:
221 case nir_intrinsic_ssbo_atomic_umax
:
222 case nir_intrinsic_ssbo_atomic_and
:
223 case nir_intrinsic_ssbo_atomic_or
:
224 case nir_intrinsic_ssbo_atomic_xor
:
225 case nir_intrinsic_ssbo_atomic_exchange
:
226 case nir_intrinsic_ssbo_atomic_comp_swap
:
227 case nir_intrinsic_ssbo_atomic_fadd
:
228 case nir_intrinsic_ssbo_atomic_fmin
:
229 case nir_intrinsic_ssbo_atomic_fmax
:
230 case nir_intrinsic_ssbo_atomic_fcomp_swap
:
231 if ((types
& nir_lower_non_uniform_ssbo_access
) &&
232 lower_non_uniform_access_intrin(&b
, intrin
, 0))
236 case nir_intrinsic_store_ssbo
:
237 /* SSBO Stores put the index in the second source */
238 if ((types
& nir_lower_non_uniform_ssbo_access
) &&
239 lower_non_uniform_access_intrin(&b
, intrin
, 1))
243 case nir_intrinsic_image_load
:
244 case nir_intrinsic_image_store
:
245 case nir_intrinsic_image_atomic_add
:
246 case nir_intrinsic_image_atomic_imin
:
247 case nir_intrinsic_image_atomic_umin
:
248 case nir_intrinsic_image_atomic_imax
:
249 case nir_intrinsic_image_atomic_umax
:
250 case nir_intrinsic_image_atomic_and
:
251 case nir_intrinsic_image_atomic_or
:
252 case nir_intrinsic_image_atomic_xor
:
253 case nir_intrinsic_image_atomic_exchange
:
254 case nir_intrinsic_image_atomic_comp_swap
:
255 case nir_intrinsic_image_atomic_fadd
:
256 case nir_intrinsic_image_size
:
257 case nir_intrinsic_image_samples
:
258 case nir_intrinsic_bindless_image_load
:
259 case nir_intrinsic_bindless_image_store
:
260 case nir_intrinsic_bindless_image_atomic_add
:
261 case nir_intrinsic_bindless_image_atomic_imin
:
262 case nir_intrinsic_bindless_image_atomic_umin
:
263 case nir_intrinsic_bindless_image_atomic_imax
:
264 case nir_intrinsic_bindless_image_atomic_umax
:
265 case nir_intrinsic_bindless_image_atomic_and
:
266 case nir_intrinsic_bindless_image_atomic_or
:
267 case nir_intrinsic_bindless_image_atomic_xor
:
268 case nir_intrinsic_bindless_image_atomic_exchange
:
269 case nir_intrinsic_bindless_image_atomic_comp_swap
:
270 case nir_intrinsic_bindless_image_atomic_fadd
:
271 case nir_intrinsic_bindless_image_size
:
272 case nir_intrinsic_bindless_image_samples
:
273 case nir_intrinsic_image_deref_load
:
274 case nir_intrinsic_image_deref_store
:
275 case nir_intrinsic_image_deref_atomic_add
:
276 case nir_intrinsic_image_deref_atomic_umin
:
277 case nir_intrinsic_image_deref_atomic_imin
:
278 case nir_intrinsic_image_deref_atomic_umax
:
279 case nir_intrinsic_image_deref_atomic_imax
:
280 case nir_intrinsic_image_deref_atomic_and
:
281 case nir_intrinsic_image_deref_atomic_or
:
282 case nir_intrinsic_image_deref_atomic_xor
:
283 case nir_intrinsic_image_deref_atomic_exchange
:
284 case nir_intrinsic_image_deref_atomic_comp_swap
:
285 case nir_intrinsic_image_deref_size
:
286 case nir_intrinsic_image_deref_samples
:
287 if ((types
& nir_lower_non_uniform_image_access
) &&
288 lower_non_uniform_access_intrin(&b
, intrin
, 0))
307 nir_metadata_preserve(impl
, nir_metadata_none
);
313 * Lowers non-uniform resource access by using a loop
315 * This pass lowers non-uniform resource access by using subgroup operations
316 * and a loop. Most hardware requires things like textures and UBO access
317 * operations to happen on a dynamically uniform (or at least subgroup
318 * uniform) resource. This pass allows for non-uniform access by placing the
319 * texture instruction in a loop that looks something like this:
322 * bool tex_eq_first = readFirstInvocationARB(texture) == texture;
323 * bool smp_eq_first = readFirstInvocationARB(sampler) == sampler;
324 * if (tex_eq_first && smp_eq_first) {
325 * res = texture(texture, sampler, ...);
330 * Fortunately, because the instruction is immediately followed by the only
331 * break in the loop, the block containing the instruction dominates the end
332 * of the loop. Therefore, it's safe to move the instruction into the loop
333 * without fixing up SSA in any way.
336 nir_lower_non_uniform_access(nir_shader
*shader
,
337 enum nir_lower_non_uniform_access_type types
)
339 bool progress
= false;
341 nir_foreach_function(function
, shader
) {
342 if (function
->impl
&&
343 nir_lower_non_uniform_access_impl(function
->impl
, types
))