/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/u_dynarray.h"

/**
 * Elimination of dead writes based on derefs.
 *
 * Dead writes are stores and copies whose destination deref is written
 * again before it is used (read or used as the source of a copy). Such
 * writes can be removed since they don't affect anything.
 *
 * For derefs that refer to memory that can still be read after the program
 * ends, the last write is considered used. The presence of certain
 * instructions may also cause writes to be considered used, e.g. a memory
 * barrier (in that case the value must be written, since another thread
 * might read it).
 *
 * The write mask of store instructions is taken into account, so a store
 * may be removed when a combination of later stores overwrites all of its
 * components.
 */
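
/* A small illustration of the mask handling described above, written as
 * hypothetical NIR-like pseudocode (names invented for the example, not
 * actual pass output):
 *
 *    store_deref v, a, wrmask=xyzw   <- removed: the xy and zw stores below
 *                                       combine to overwrite every component
 *                                       before anything reads it
 *    store_deref v, b, wrmask=xy
 *    store_deref v, c, wrmask=zw
 *    load_deref  v                   <- this read keeps the two stores above
 */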

/* Entry for the unused_writes array. */
struct write_entry {
   /* If NULL, the entry is free to be reused. */
   nir_intrinsic_instr *intrin;
   nir_component_mask_t mask;
   nir_deref_instr *dst;
};

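/* Any pending write to a deref in one of MODES may now be observed
 * elsewhere, so stop tracking it as unused (i.e. the write is kept). */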
static void
clear_unused_for_modes(struct util_dynarray *unused_writes, nir_variable_mode modes)
{
   util_dynarray_foreach_reverse(unused_writes, struct write_entry, entry) {
      if (entry->dst->mode & modes)
         *entry = util_dynarray_pop(unused_writes, struct write_entry);
   }
}

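/* A read through SRC makes every tracked write that may alias it used, so
 * stop tracking those writes. */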
static void
clear_unused_for_read(struct util_dynarray *unused_writes, nir_deref_instr *src)
{
   util_dynarray_foreach_reverse(unused_writes, struct write_entry, entry) {
      if (nir_compare_derefs(src, entry->dst) & nir_derefs_may_alias_bit)
         *entry = util_dynarray_pop(unused_writes, struct write_entry);
   }
}

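/* Record a new write and delete previously tracked writes whose components
 * are now completely overwritten. Returns true if an instruction was
 * removed. */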
static bool
update_unused_writes(struct util_dynarray *unused_writes,
                     nir_intrinsic_instr *intrin,
                     nir_deref_instr *dst, nir_component_mask_t mask)
{
   bool progress = false;

   /* This pass assumes that the destinations of copies and stores are
    * derefs that end in a vector or scalar (it is OK to have wildcards or
    * indirects for arrays).
    */
   assert(glsl_type_is_vector_or_scalar(dst->type));

   /* Find writes that are unused and can be removed. */
   util_dynarray_foreach_reverse(unused_writes, struct write_entry, entry) {
      nir_deref_compare_result comp = nir_compare_derefs(dst, entry->dst);
      if (comp & nir_derefs_a_contains_b_bit) {
         /* The new write covers this entry; clear the covered components. */
         entry->mask &= ~mask;
         if (entry->mask == 0) {
            /* Every component was overwritten before being read, so the
             * old write is dead. */
            nir_instr_remove(&entry->intrin->instr);
            *entry = util_dynarray_pop(unused_writes, struct write_entry);
            progress = true;
         }
      }
   }

   /* Add the new write to the unused array. */
   struct write_entry new_entry = {
      .intrin = intrin,
      .mask = mask,
      .dst = dst,
   };

   util_dynarray_append(unused_writes, struct write_entry, new_entry);

   return progress;
}

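/* Per-block analysis: tracked writes never survive past the end of a block,
 * so only rewrites within a single block are detected. */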
static bool
remove_dead_write_vars_local(void *mem_ctx, nir_block *block)
{
   bool progress = false;

   struct util_dynarray unused_writes;
   util_dynarray_init(&unused_writes, mem_ctx);

   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_call) {
         clear_unused_for_modes(&unused_writes, nir_var_shader_out |
                                                nir_var_shader_temp |
                                                nir_var_function_temp |
                                                nir_var_mem_ssbo |
                                                nir_var_mem_shared |
                                                nir_var_mem_global);
         continue;
      }

      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_control_barrier:
      case nir_intrinsic_group_memory_barrier:
      case nir_intrinsic_memory_barrier: {
         clear_unused_for_modes(&unused_writes, nir_var_shader_out |
                                                nir_var_mem_ssbo |
                                                nir_var_mem_shared |
                                                nir_var_mem_global);
         break;
      }

      case nir_intrinsic_memory_barrier_buffer:
         clear_unused_for_modes(&unused_writes, nir_var_mem_ssbo |
                                                nir_var_mem_global);
         break;

      case nir_intrinsic_memory_barrier_shared:
         clear_unused_for_modes(&unused_writes, nir_var_mem_shared);
         break;

      case nir_intrinsic_memory_barrier_tcs_patch:
         clear_unused_for_modes(&unused_writes, nir_var_shader_out);
         break;

      case nir_intrinsic_scoped_barrier: {
         if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
            clear_unused_for_modes(&unused_writes,
                                   nir_intrinsic_memory_modes(intrin));
         }
         break;
      }

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter: {
         clear_unused_for_modes(&unused_writes, nir_var_shader_out);
         break;
      }

      case nir_intrinsic_load_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
         clear_unused_for_read(&unused_writes, src);
         break;
      }

      case nir_intrinsic_store_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE) {
            /* Consider a volatile write to also be a sort of read. This
             * prevents us from deleting a non-volatile write just before a
             * volatile write thanks to a non-volatile write afterwards. It's
             * quite the corner case, but this should be safer and more
             * predictable for the programmer than allowing two non-volatile
             * writes to be combined with a volatile write between them.
             */
            clear_unused_for_read(&unused_writes, dst);
            break;
         }

         nir_component_mask_t mask = nir_intrinsic_write_mask(intrin);
         progress |= update_unused_writes(&unused_writes, intrin, dst, mask);
         break;
      }

      case nir_intrinsic_copy_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

         if (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE) {
            clear_unused_for_read(&unused_writes, src);
            clear_unused_for_read(&unused_writes, dst);
            break;
         }

         /* A self-copy is a no-op; remove it. */
         if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
            nir_instr_remove(instr);
            progress = true;
            break;
         }

         /* The copy reads the whole source and writes every component of
          * the destination. */
         clear_unused_for_read(&unused_writes, src);
         nir_component_mask_t mask = (1 << glsl_get_vector_elements(dst->type)) - 1;
         progress |= update_unused_writes(&unused_writes, intrin, dst, mask);
         break;
      }

      default:
         break;
      }
   }

   /* All writes still unused at the end of the block are kept: with local
    * analysis only, we can't be sure whether they will be overwritten.
    */

   return progress;
}

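/* Runs the per-block analysis on every block of the function, preserving
 * all metadata when nothing changed. */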
static bool
remove_dead_write_vars_impl(void *mem_ctx, nir_function_impl *impl)
{
   bool progress = false;

   nir_metadata_require(impl, nir_metadata_block_index);

   nir_foreach_block(block, impl)
      progress |= remove_dead_write_vars_local(mem_ctx, block);

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

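/* Public entry point: run the pass on every function in SHADER. The single
 * ralloc context owns the scratch arrays allocated per block. */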
bool
nir_opt_dead_write_vars(nir_shader *shader)
{
   void *mem_ctx = ralloc_context(NULL);
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;
      progress |= remove_dead_write_vars_impl(mem_ctx, function->impl);
   }

   ralloc_free(mem_ctx);
   return progress;
}
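
/* A minimal usage sketch, assuming the usual NIR_PASS helper and a driver
 * optimization loop (the surrounding loop is illustrative, not mandated by
 * this pass; nir_opt_dce is one common companion pass):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, shader, nir_opt_dead_write_vars);
 *       NIR_PASS(progress, shader, nir_opt_dce);
 *    } while (progress);
 */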