/*
 * Copyright © 2018-2019 Igalia S.L.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "ir3_nir.h"
#include "compiler/nir/nir_builder.h"

/**
 * This pass moves to NIR certain offset computations for different I/O
 * ops that are currently implemented on the IR3 backend compiler, to
 * give NIR a chance to optimize them:
 *
 * - Dword-offset for SSBO load, store and atomics: A new, similar intrinsic
 *   is emitted that replaces the original one, adding a new source that
 *   holds the result of the original byte-offset source divided by 4.
 */
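
/* As an illustrative sketch (not taken from an actual NIR dump), a load
 * whose byte-offset is ssa_2:
 *
 *    vec1 32 ssa_3 = intrinsic load_ssbo (ssa_1, ssa_2) (...)
 *
 * is rewritten so the ir3 variant also carries the dword-offset as its last
 * source:
 *
 *    vec1 32 ssa_4 = ushr ssa_2, 2
 *    vec1 32 ssa_3 = intrinsic load_ssbo_ir3 (ssa_1, ssa_2, ssa_4) (...)
 *
 * and the ushr is skipped entirely whenever the shift can be folded into the
 * instruction that defines ssa_2.
 */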

/* Returns the ir3-specific intrinsic opcode corresponding to an SSBO
 * instruction that is handled by this pass. It also conveniently returns
 * the offset source index in @offset_src_idx.
 *
 * If @intrinsic is not SSBO, or it is not handled by the pass, -1 is
 * returned.
 */
static int
get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
                                     uint8_t *offset_src_idx)
{
   debug_assert(offset_src_idx);

   *offset_src_idx = 1;

   switch (intrinsic) {
   case nir_intrinsic_store_ssbo:
      *offset_src_idx = 2;
      return nir_intrinsic_store_ssbo_ir3;
   case nir_intrinsic_load_ssbo:
      return nir_intrinsic_load_ssbo_ir3;
   case nir_intrinsic_ssbo_atomic_add:
      return nir_intrinsic_ssbo_atomic_add_ir3;
   case nir_intrinsic_ssbo_atomic_imin:
      return nir_intrinsic_ssbo_atomic_imin_ir3;
   case nir_intrinsic_ssbo_atomic_umin:
      return nir_intrinsic_ssbo_atomic_umin_ir3;
   case nir_intrinsic_ssbo_atomic_imax:
      return nir_intrinsic_ssbo_atomic_imax_ir3;
   case nir_intrinsic_ssbo_atomic_umax:
      return nir_intrinsic_ssbo_atomic_umax_ir3;
   case nir_intrinsic_ssbo_atomic_and:
      return nir_intrinsic_ssbo_atomic_and_ir3;
   case nir_intrinsic_ssbo_atomic_or:
      return nir_intrinsic_ssbo_atomic_or_ir3;
   case nir_intrinsic_ssbo_atomic_xor:
      return nir_intrinsic_ssbo_atomic_xor_ir3;
   case nir_intrinsic_ssbo_atomic_exchange:
      return nir_intrinsic_ssbo_atomic_exchange_ir3;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      return nir_intrinsic_ssbo_atomic_comp_swap_ir3;
   default:
      break;
   }

   return -1;
}

static nir_ssa_def *
check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
                                int32_t direction, int32_t shift)
{
   debug_assert(alu_instr->src[1].src.is_ssa);
   nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;

   /* Only propagate if the shift is a const value so we can check value range
    * statically.
    */
   nir_const_value *const_val = nir_src_as_const_value(alu_instr->src[1].src);
   if (!const_val)
      return NULL;

   int32_t current_shift = const_val[0].i32 * direction;
   int32_t new_shift = current_shift + shift;

   /* If the merge would reverse the direction, bail out.
    * e.g., 'x << 2' then 'x >> 4' is not 'x >> 2'.
    */
   if (current_shift * new_shift < 0)
      return NULL;

   /* If the propagation would overflow an int32_t, bail out too to be on the
    * safe side.
    */
   if (new_shift < -31 || new_shift > 31)
      return NULL;

   /* Add or subtract shift depending on the final direction (SHR vs. SHL). */
   if (shift * direction < 0)
      shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
   else
      shift_ssa = nir_iadd(b, shift_ssa, nir_imm_int(b, abs(shift)));

   return shift_ssa;
}
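
/* Tries to fold a constant bit-shift by @shift into the ALU instruction that
 * defines @offset, following the convention used throughout this pass that
 * negative means shift-right and positive means shift-left. For instance
 * (illustrative): folding @shift == -2 into 'x >> 3' yields 'x >> 5'.
 * Returns the new offset definition, or NULL if nothing could be folded.
 */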
nir_ssa_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shift)
{
   nir_instr *offset_instr = offset->parent_instr;
   if (offset_instr->type != nir_instr_type_alu)
      return NULL;

   nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
   nir_ssa_def *shift_ssa;
   nir_ssa_def *new_offset = NULL;

   b->cursor = nir_after_instr(&alu->instr);

   /* The first src could be something like ssa_18.x, but we only want
    * the single component. Otherwise the ishl/ishr/ushr could turn
    * into a vec4 operation.
    */
   nir_ssa_def *src0 = nir_mov_alu(b, alu->src[0], 1);

   switch (alu->op) {
   case nir_op_ishl:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
      if (shift_ssa)
         new_offset = nir_ishl(b, src0, shift_ssa);
      break;
   case nir_op_ishr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ishr(b, src0, shift_ssa);
      break;
   case nir_op_ushr:
      shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
      if (shift_ssa)
         new_offset = nir_ushr(b, src0, shift_ssa);
      break;
   default:
      return NULL;
   }

   return new_offset;
}

static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
                      unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
   unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;

   bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
   nir_ssa_def *new_dest = NULL;

   /* Here we create a new intrinsic and copy over all contents from the old one. */
   nir_intrinsic_instr *new_intrinsic;
   nir_src *target_src;

   /* 'offset_src_idx' holds the index of the source that represents the offset. */
   new_intrinsic =
      nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);

   debug_assert(intrinsic->src[offset_src_idx].is_ssa);
   nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;

   /* Since we don't have value range checking, we first try to propagate
    * the division by 4 ('offset >> 2') into another bit-shift instruction that
    * possibly defines the offset. If that's the case, we emit a similar
    * instruction adjusting (merging) the shift value.
    *
    * Here we use the convention that shifting right is negative while shifting
    * left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
    */
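   /* For instance (illustrative): if the offset is defined by 'i << 4',
    * folding the division by 4 turns it into 'i << 2' and the ushr emitted
    * below becomes unnecessary.
    */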
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -2);

   /* The new source that will hold the dword-offset is always the last
    * one for every intrinsic.
    */
   target_src = &new_intrinsic->src[num_srcs];
   *target_src = nir_src_for_ssa(offset);

   if (has_dest) {
      debug_assert(intrinsic->dest.is_ssa);
      nir_ssa_def *dest = &intrinsic->dest.ssa;
      nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
                        dest->num_components, dest->bit_size, NULL);
      new_dest = &new_intrinsic->dest.ssa;
   }

   for (unsigned i = 0; i < num_srcs; i++)
      new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);

   for (unsigned i = 0; i < NIR_INTRINSIC_MAX_CONST_INDEX; i++)
      new_intrinsic->const_index[i] = intrinsic->const_index[i];

   new_intrinsic->num_components = intrinsic->num_components;

   b->cursor = nir_before_instr(&intrinsic->instr);

   /* If we managed to propagate the division by 4, just use the new offset
    * register and don't emit the SHR.
    */
   if (new_offset)
      offset = new_offset;
   else
      offset = nir_ushr(b, offset, nir_imm_int(b, 2));

   /* Insert the new intrinsic right before the old one. */
   nir_builder_instr_insert(b, &new_intrinsic->instr);

   /* Replace the last source of the new intrinsic by the result of
    * the offset divided by 4.
    */
   nir_instr_rewrite_src(&new_intrinsic->instr, target_src,
                         nir_src_for_ssa(offset));

   if (has_dest) {
      /* Replace the uses of the original destination by that
       * of the new intrinsic.
       */
      nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa,
                               nir_src_for_ssa(new_dest));
   }

   /* Finally remove the original intrinsic. */
   nir_instr_remove(&intrinsic->instr);

   return true;
}

static bool
lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
{
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      /* SSBO */
      int ir3_intrinsic;
      uint8_t offset_src_idx;
      ir3_intrinsic = get_ir3_intrinsic_for_ssbo_intrinsic(intr->intrinsic,
                                                           &offset_src_idx);
      if (ir3_intrinsic != -1) {
         progress |= lower_offset_for_ssbo(intr, b, (unsigned) ir3_intrinsic,
                                           offset_src_idx);
      }
   }

   return progress;
}

static bool
lower_io_offsets_func(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_parent(impl);
   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_safe(block, impl) {
      progress |= lower_io_offsets_block(block, &b, mem_ctx);
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }

   return progress;
}

bool
ir3_nir_lower_io_offsets(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_io_offsets_func(function->impl);
   }

   return progress;
}
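
/* A minimal sketch of how the pass would be invoked from the ir3 NIR
 * pipeline (illustrative; 's' is assumed to be the shader being compiled):
 *
 *    bool progress = ir3_nir_lower_io_offsets(s);
 */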