2 * Copyright © 2018 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "nir_builder.h"
26 #include "nir_deref.h"
28 #include "util/u_dynarray.h"
31 * Elimination of dead writes based on derefs.
33 * Dead writes are stores and copies that write to a deref, which then gets
34 * another write before it was used (read or sourced for a copy). Those
35 * writes can be removed since they don't affect anything.
37 * For derefs that refer to a memory area that can be read after the program,
38 * the last write is considered used. The presence of certain instructions
39 * may also cause writes to be considered used, e.g. a memory barrier (in this
40 * case the value must be written, as another thread might use it).
42 * The write mask for store instructions is considered, so it is possible that
43 * a store is removed because of the combination of other stores overwritten
47 /* Entry for unused_writes arrays. */
49 /* If NULL indicates the entry is free to be reused. */
50 nir_intrinsic_instr
*intrin
;
51 nir_component_mask_t mask
;
56 clear_unused_for_modes(struct util_dynarray
*unused_writes
, nir_variable_mode modes
)
58 util_dynarray_foreach_reverse(unused_writes
, struct write_entry
, entry
) {
59 if (entry
->dst
->mode
& modes
)
60 *entry
= util_dynarray_pop(unused_writes
, struct write_entry
);
65 clear_unused_for_read(struct util_dynarray
*unused_writes
, nir_deref_instr
*src
)
67 util_dynarray_foreach_reverse(unused_writes
, struct write_entry
, entry
) {
68 if (nir_compare_derefs(src
, entry
->dst
) & nir_derefs_may_alias_bit
)
69 *entry
= util_dynarray_pop(unused_writes
, struct write_entry
);
74 update_unused_writes(struct util_dynarray
*unused_writes
,
75 nir_intrinsic_instr
*intrin
,
76 nir_deref_instr
*dst
, nir_component_mask_t mask
)
78 bool progress
= false;
80 /* This pass assumes that destination of copies and stores are derefs that
81 * end in a vector or scalar (it is OK to have wildcards or indirects for
84 assert(glsl_type_is_vector_or_scalar(dst
->type
));
86 /* Find writes that are unused and can be removed. */
87 util_dynarray_foreach_reverse(unused_writes
, struct write_entry
, entry
) {
88 nir_deref_compare_result comp
= nir_compare_derefs(dst
, entry
->dst
);
89 if (comp
& nir_derefs_a_contains_b_bit
) {
91 if (entry
->mask
== 0) {
92 nir_instr_remove(&entry
->intrin
->instr
);
93 *entry
= util_dynarray_pop(unused_writes
, struct write_entry
);
99 /* Add the new write to the unused array. */
100 struct write_entry new_entry
= {
106 util_dynarray_append(unused_writes
, struct write_entry
, new_entry
);
112 remove_dead_write_vars_local(void *mem_ctx
, nir_block
*block
)
114 bool progress
= false;
116 struct util_dynarray unused_writes
;
117 util_dynarray_init(&unused_writes
, mem_ctx
);
119 nir_foreach_instr_safe(instr
, block
) {
120 if (instr
->type
== nir_instr_type_call
) {
121 clear_unused_for_modes(&unused_writes
, nir_var_shader_out
|
122 nir_var_shader_temp
|
123 nir_var_function_temp
|
130 if (instr
->type
!= nir_instr_type_intrinsic
)
133 nir_intrinsic_instr
*intrin
= nir_instr_as_intrinsic(instr
);
134 switch (intrin
->intrinsic
) {
135 case nir_intrinsic_control_barrier
:
136 case nir_intrinsic_group_memory_barrier
:
137 case nir_intrinsic_memory_barrier
: {
138 clear_unused_for_modes(&unused_writes
, nir_var_shader_out
|
145 case nir_intrinsic_memory_barrier_buffer
:
146 clear_unused_for_modes(&unused_writes
, nir_var_mem_ssbo
|
150 case nir_intrinsic_memory_barrier_shared
:
151 clear_unused_for_modes(&unused_writes
, nir_var_mem_shared
);
154 case nir_intrinsic_memory_barrier_tcs_patch
:
155 clear_unused_for_modes(&unused_writes
, nir_var_shader_out
);
158 case nir_intrinsic_scoped_barrier
: {
159 if (nir_intrinsic_memory_semantics(intrin
) & NIR_MEMORY_RELEASE
) {
160 clear_unused_for_modes(&unused_writes
,
161 nir_intrinsic_memory_modes(intrin
));
166 case nir_intrinsic_emit_vertex
:
167 case nir_intrinsic_emit_vertex_with_counter
: {
168 clear_unused_for_modes(&unused_writes
, nir_var_shader_out
);
172 case nir_intrinsic_load_deref
: {
173 nir_deref_instr
*src
= nir_src_as_deref(intrin
->src
[0]);
174 clear_unused_for_read(&unused_writes
, src
);
178 case nir_intrinsic_store_deref
: {
179 nir_deref_instr
*dst
= nir_src_as_deref(intrin
->src
[0]);
181 if (nir_intrinsic_access(intrin
) & ACCESS_VOLATILE
) {
182 /* Consider a volatile write to also be a sort of read. This
183 * prevents us from deleting a non-volatile write just before a
184 * volatile write thanks to a non-volatile write afterwards. It's
185 * quite the corner case, but this should be safer and more
186 * predictable for the programmer than allowing two non-volatile
187 * writes to be combined with a volatile write between them.
189 clear_unused_for_read(&unused_writes
, dst
);
193 nir_component_mask_t mask
= nir_intrinsic_write_mask(intrin
);
194 progress
|= update_unused_writes(&unused_writes
, intrin
, dst
, mask
);
198 case nir_intrinsic_copy_deref
: {
199 nir_deref_instr
*src
= nir_src_as_deref(intrin
->src
[1]);
200 nir_deref_instr
*dst
= nir_src_as_deref(intrin
->src
[0]);
202 if (nir_intrinsic_dst_access(intrin
) & ACCESS_VOLATILE
) {
203 clear_unused_for_read(&unused_writes
, src
);
204 clear_unused_for_read(&unused_writes
, dst
);
208 /* Self-copy is removed. */
209 if (nir_compare_derefs(src
, dst
) & nir_derefs_equal_bit
) {
210 nir_instr_remove(instr
);
215 clear_unused_for_read(&unused_writes
, src
);
216 nir_component_mask_t mask
= (1 << glsl_get_vector_elements(dst
->type
)) - 1;
217 progress
|= update_unused_writes(&unused_writes
, intrin
, dst
, mask
);
226 /* All unused writes at the end of the block are kept, since we can't be
227 * sure they'll be overwritten or not with local analysis only.
234 remove_dead_write_vars_impl(void *mem_ctx
, nir_function_impl
*impl
)
236 bool progress
= false;
238 nir_metadata_require(impl
, nir_metadata_block_index
);
240 nir_foreach_block(block
, impl
)
241 progress
|= remove_dead_write_vars_local(mem_ctx
, block
);
244 nir_metadata_preserve(impl
, nir_metadata_block_index
|
245 nir_metadata_dominance
);
247 nir_metadata_preserve(impl
, nir_metadata_all
);
254 nir_opt_dead_write_vars(nir_shader
*shader
)
256 void *mem_ctx
= ralloc_context(NULL
);
257 bool progress
= false;
259 nir_foreach_function(function
, shader
) {
262 progress
|= remove_dead_write_vars_impl(mem_ctx
, function
->impl
);
265 ralloc_free(mem_ctx
);