/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "nir_builder.h"
28 read_first_invocation(nir_builder
*b
, nir_ssa_def
*x
)
30 nir_intrinsic_instr
*first
=
31 nir_intrinsic_instr_create(b
->shader
,
32 nir_intrinsic_read_first_invocation
);
33 first
->num_components
= x
->num_components
;
34 first
->src
[0] = nir_src_for_ssa(x
);
35 nir_ssa_dest_init(&first
->instr
, &first
->dest
,
36 x
->num_components
, x
->bit_size
, NULL
);
37 return &first
->dest
.ssa
;
41 lower_non_uniform_tex_access(nir_builder
*b
, nir_tex_instr
*tex
)
43 if (!tex
->texture_non_uniform
&& !tex
->sampler_non_uniform
)
46 /* We can have at most one texture and one sampler handle */
47 nir_ssa_def
*handles
[2];
48 unsigned handle_count
= 0;
49 for (unsigned i
= 0; i
< tex
->num_srcs
; i
++) {
50 switch (tex
->src
[i
].src_type
) {
51 case nir_tex_src_texture_offset
:
52 case nir_tex_src_texture_handle
:
53 if (!tex
->texture_non_uniform
)
57 case nir_tex_src_sampler_offset
:
58 case nir_tex_src_sampler_handle
:
59 if (!tex
->sampler_non_uniform
)
67 assert(tex
->src
[i
].src
.is_ssa
);
68 assert(tex
->src
[i
].src
.ssa
->num_components
== 1);
69 assert(handle_count
< 2);
70 handles
[handle_count
++] = tex
->src
[i
].src
.ssa
;
73 if (handle_count
== 0)
76 b
->cursor
= nir_instr_remove(&tex
->instr
);
80 nir_ssa_def
*all_equal_first
= nir_imm_true(b
);
81 for (unsigned i
= 0; i
< handle_count
; i
++) {
82 nir_ssa_def
*equal_first
=
83 nir_ieq(b
, read_first_invocation(b
, handles
[i
]), handles
[i
]);
84 all_equal_first
= nir_iand(b
, all_equal_first
, equal_first
);
87 nir_push_if(b
, all_equal_first
);
89 nir_builder_instr_insert(b
, &tex
->instr
);
90 nir_jump(b
, nir_jump_break
);
96 lower_non_uniform_access_intrin(nir_builder
*b
, nir_intrinsic_instr
*intrin
,
99 if (!(nir_intrinsic_access(intrin
) & ACCESS_NON_UNIFORM
))
102 /* If it's constant, it's automatically uniform; don't bother. */
103 if (nir_src_is_const(intrin
->src
[handle_src
]))
106 b
->cursor
= nir_instr_remove(&intrin
->instr
);
110 assert(intrin
->src
[handle_src
].is_ssa
);
111 assert(intrin
->src
[handle_src
].ssa
->num_components
== 1);
112 nir_ssa_def
*handle
= intrin
->src
[handle_src
].ssa
;
114 nir_push_if(b
, nir_ieq(b
, read_first_invocation(b
, handle
), handle
));
116 nir_builder_instr_insert(b
, &intrin
->instr
);
117 nir_jump(b
, nir_jump_break
);
123 nir_lower_non_uniform_access_impl(nir_function_impl
*impl
,
124 enum nir_lower_non_uniform_access_type types
)
126 bool progress
= false;
129 nir_builder_init(&b
, impl
);
131 nir_foreach_block(block
, impl
) {
132 nir_foreach_instr(instr
, block
) {
133 switch (instr
->type
) {
134 case nir_instr_type_tex
: {
135 nir_tex_instr
*tex
= nir_instr_as_tex(instr
);
136 if ((types
& nir_lower_non_uniform_texture_access
) &&
137 lower_non_uniform_tex_access(&b
, tex
))
142 case nir_instr_type_intrinsic
: {
143 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
144 switch (intrin
->intrinsic
) {
145 case nir_intrinsic_load_ubo
:
146 if ((types
& nir_lower_non_uniform_ubo_access
) &&
147 lower_non_uniform_access_intrin(&b
, intrin
, 0))
151 case nir_intrinsic_load_ssbo
:
152 case nir_intrinsic_ssbo_atomic_add
:
153 case nir_intrinsic_ssbo_atomic_imin
:
154 case nir_intrinsic_ssbo_atomic_umin
:
155 case nir_intrinsic_ssbo_atomic_imax
:
156 case nir_intrinsic_ssbo_atomic_umax
:
157 case nir_intrinsic_ssbo_atomic_and
:
158 case nir_intrinsic_ssbo_atomic_or
:
159 case nir_intrinsic_ssbo_atomic_xor
:
160 case nir_intrinsic_ssbo_atomic_exchange
:
161 case nir_intrinsic_ssbo_atomic_comp_swap
:
162 case nir_intrinsic_ssbo_atomic_fadd
:
163 case nir_intrinsic_ssbo_atomic_fmin
:
164 case nir_intrinsic_ssbo_atomic_fmax
:
165 case nir_intrinsic_ssbo_atomic_fcomp_swap
:
166 if ((types
& nir_lower_non_uniform_ssbo_access
) &&
167 lower_non_uniform_access_intrin(&b
, intrin
, 0))
171 case nir_intrinsic_store_ssbo
:
172 /* SSBO Stores put the index in the second source */
173 if ((types
& nir_lower_non_uniform_ssbo_access
) &&
174 lower_non_uniform_access_intrin(&b
, intrin
, 1))
178 case nir_intrinsic_image_load
:
179 case nir_intrinsic_image_store
:
180 case nir_intrinsic_image_atomic_add
:
181 case nir_intrinsic_image_atomic_min
:
182 case nir_intrinsic_image_atomic_max
:
183 case nir_intrinsic_image_atomic_and
:
184 case nir_intrinsic_image_atomic_or
:
185 case nir_intrinsic_image_atomic_xor
:
186 case nir_intrinsic_image_atomic_exchange
:
187 case nir_intrinsic_image_atomic_comp_swap
:
188 case nir_intrinsic_image_atomic_fadd
:
189 case nir_intrinsic_image_size
:
190 case nir_intrinsic_image_samples
:
191 case nir_intrinsic_bindless_image_load
:
192 case nir_intrinsic_bindless_image_store
:
193 case nir_intrinsic_bindless_image_atomic_add
:
194 case nir_intrinsic_bindless_image_atomic_min
:
195 case nir_intrinsic_bindless_image_atomic_max
:
196 case nir_intrinsic_bindless_image_atomic_and
:
197 case nir_intrinsic_bindless_image_atomic_or
:
198 case nir_intrinsic_bindless_image_atomic_xor
:
199 case nir_intrinsic_bindless_image_atomic_exchange
:
200 case nir_intrinsic_bindless_image_atomic_comp_swap
:
201 case nir_intrinsic_bindless_image_atomic_fadd
:
202 case nir_intrinsic_bindless_image_size
:
203 case nir_intrinsic_bindless_image_samples
:
204 if ((types
& nir_lower_non_uniform_image_access
) &&
205 lower_non_uniform_access_intrin(&b
, intrin
, 0))
224 nir_metadata_preserve(impl
, nir_metadata_none
);
230 * Lowers non-uniform resource access by using a loop
232 * This pass lowers non-uniform resource access by using subgroup operations
233 * and a loop. Most hardware requires things like textures and UBO access
234 * operations to happen on a dynamically uniform (or at least subgroup
235 * uniform) resource. This pass allows for non-uniform access by placing the
236 * texture instruction in a loop that looks something like this:
239 * bool tex_eq_first = readFirstInvocationARB(texture) == texture;
240 * bool smp_eq_first = readFirstInvocationARB(sampler) == sampler;
241 * if (tex_eq_first && smp_eq_first) {
242 * res = texture(texture, sampler, ...);
247 * Fortunately, because the instruction is immediately followed by the only
248 * break in the loop, the block containing the instruction dominates the end
249 * of the loop. Therefore, it's safe to move the instruction into the loop
250 * without fixing up SSA in any way.
253 nir_lower_non_uniform_access(nir_shader
*shader
,
254 enum nir_lower_non_uniform_access_type types
)
256 bool progress
= false;
258 nir_foreach_function(function
, shader
) {
259 if (function
->impl
&&
260 nir_lower_non_uniform_access_impl(function
->impl
, types
))