nir: rename nir_var_ssbo to nir_var_mem_ssbo
[mesa.git] src/compiler/nir/nir_opt_copy_prop_vars.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/bitscan.h"
#include "util/u_dynarray.h"

/**
 * Variable-based copy propagation
 *
 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
 * However, there are cases, especially when dealing with indirects, where SSA
 * won't help you.  This pass is for those times.  Specifically, it handles
 * the following things that the rest of NIR can't:
 *
 *  1) Copy-propagation on variables that have indirect access.  This includes
 *     propagating from indirect stores into indirect loads.
 *
 *  2) Removal of redundant load_deref intrinsics.  We can't trust regular CSE
 *     to do this because it isn't aware of variable writes that may alias the
 *     value and make the former load invalid.
 *
 * This pass uses an intermediate solution between being local / "per-block"
 * and a complete data-flow analysis.  It follows the control-flow graph and
 * propagates the available copy information forward, invalidating data at
 * each cf_node.
 *
 * Removal of dead writes to variables is handled by another pass.
 */
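
/* For example (an illustrative sketch, not taken from upstream), given
 * something like
 *
 *    copy_deref b, a          (b = a; nothing below writes a or b)
 *    ...
 *    use(load_deref b)
 *
 * this pass can rewrite the load to read from a instead (or reuse the SSA
 * value of an earlier store to a), and a second load_deref of the same deref
 * can simply reuse the first load's SSA def instead of reading memory again.
 */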

struct vars_written {
   nir_variable_mode modes;

   /* Keys are derefs; values are uintptr_t write masks. */
   struct hash_table *derefs;
};

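/* A source value for a copy entry: either up to 4 per-component SSA defs
 * (is_ssa) or a deref that still holds the value in memory.
 */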
struct value {
   bool is_ssa;
   union {
      nir_ssa_def *ssa[4];
      nir_deref_instr *deref;
   };
};

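/* One available copy: the contents of dst are currently known to equal src. */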
struct copy_entry {
   struct value src;

   nir_deref_instr *dst;
};

struct copy_prop_var_state {
   nir_function_impl *impl;

   void *mem_ctx;
   void *lin_ctx;

   /* Maps nodes to vars_written.  Used to invalidate copy entries when
    * visiting each node.
    */
   struct hash_table *vars_written_map;

   bool progress;
};

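/* Returns true if, for every component in the store's write mask, the
 * recorded value already refers to the store's source SSA def, i.e. the
 * store would be redundant.
 */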
static bool
value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
{
   assert(intrin->intrinsic == nir_intrinsic_store_deref);
   uintptr_t write_mask = nir_intrinsic_write_mask(intrin);

   for (unsigned i = 0; i < intrin->num_components; i++) {
      if ((write_mask & (1 << i)) &&
          value->ssa[i] != intrin->src[1].ssa)
         return false;
   }

   return true;
}

static struct vars_written *
create_vars_written(struct copy_prop_var_state *state)
{
   struct vars_written *written =
      linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
   written->derefs = _mesa_pointer_hash_table_create(state->mem_ctx);
   return written;
}

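/* Recursively record, for each if/loop node, which variable modes and which
 * derefs may be written anywhere inside it.  The result is stored in
 * state->vars_written_map and later used to invalidate copies when walking
 * past that control-flow node.
 */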
static void
gather_vars_written(struct copy_prop_var_state *state,
                    struct vars_written *written,
                    nir_cf_node *cf_node)
{
   struct vars_written *new_written = NULL;

   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);
      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         gather_vars_written(state, NULL, cf_node);
      break;
   }

   case nir_cf_node_block: {
      if (!written)
         break;

      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_call) {
            written->modes |= nir_var_shader_out |
                              nir_var_shader_temp |
                              nir_var_function_temp |
                              nir_var_mem_ssbo |
                              nir_var_shared;
            continue;
         }

         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_barrier:
         case nir_intrinsic_memory_barrier:
            written->modes |= nir_var_shader_out |
                              nir_var_mem_ssbo |
                              nir_var_shared;
            break;

         case nir_intrinsic_emit_vertex:
         case nir_intrinsic_emit_vertex_with_counter:
            written->modes = nir_var_shader_out;
            break;

         case nir_intrinsic_deref_atomic_add:
         case nir_intrinsic_deref_atomic_imin:
         case nir_intrinsic_deref_atomic_umin:
         case nir_intrinsic_deref_atomic_imax:
         case nir_intrinsic_deref_atomic_umax:
         case nir_intrinsic_deref_atomic_and:
         case nir_intrinsic_deref_atomic_or:
         case nir_intrinsic_deref_atomic_xor:
         case nir_intrinsic_deref_atomic_exchange:
         case nir_intrinsic_deref_atomic_comp_swap:
         case nir_intrinsic_store_deref:
         case nir_intrinsic_copy_deref: {
            /* Destination in all of store_deref, copy_deref and the atomics
             * is src[0].
             */
            nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

            uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
               nir_intrinsic_write_mask(intrin) :
               (1 << glsl_get_vector_elements(dst->type)) - 1;

            struct hash_entry *ht_entry =
               _mesa_hash_table_search(written->derefs, dst);
            if (ht_entry)
               ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
            else
               _mesa_hash_table_insert(written->derefs, dst, (void *)mask);

            break;
         }

         default:
            break;
         }
      }

      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         gather_vars_written(state, new_written, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }

   if (new_written) {
      /* Merge the new information into the parent control-flow node. */
      if (written) {
         written->modes |= new_written->modes;
         hash_table_foreach(new_written->derefs, new_entry) {
            struct hash_entry *old_entry =
               _mesa_hash_table_search_pre_hashed(written->derefs,
                                                  new_entry->hash,
                                                  new_entry->key);
            if (old_entry) {
               nir_component_mask_t merged = (uintptr_t)new_entry->data |
                                             (uintptr_t)old_entry->data;
               old_entry->data = (void *)(uintptr_t)merged;
            } else {
               _mesa_hash_table_insert_pre_hashed(written->derefs,
                                                  new_entry->hash,
                                                  new_entry->key,
                                                  new_entry->data);
            }
         }
      }
      _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
   }
}

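/* Append a new (empty-source) entry for dst_deref and return a pointer to it. */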
static struct copy_entry *
copy_entry_create(struct util_dynarray *copies,
                  nir_deref_instr *dst_deref)
{
   struct copy_entry new_entry = {
      .dst = dst_deref,
   };
   util_dynarray_append(copies, struct copy_entry, new_entry);
   return util_dynarray_top_ptr(copies, struct copy_entry);
}

/* Remove a copy entry by swapping it with the last element and reducing the
 * size.  If used inside an iteration over copies, it must be a reverse
 * (backwards) iteration.  It is safe to use in those cases because the swap
 * will not affect the rest of the iteration.
 */
static void
copy_entry_remove(struct util_dynarray *copies,
                  struct copy_entry *entry)
{
   /* This also works when removing the last element since pop doesn't shrink
    * the memory used by the array, so the swap is useless but not invalid.
    */
   *entry = util_dynarray_pop(copies, struct copy_entry);
}

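/* Find the first entry whose destination relates to deref in one of the
 * allowed ways (e.g. equal, or a-contains-b), or NULL if none does.
 */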
static struct copy_entry *
lookup_entry_for_deref(struct util_dynarray *copies,
                       nir_deref_instr *deref,
                       nir_deref_compare_result allowed_comparisons)
{
   util_dynarray_foreach(copies, struct copy_entry, iter) {
      if (nir_compare_derefs(iter->dst, deref) & allowed_comparisons)
         return iter;
   }

   return NULL;
}

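/* Invalidate every entry that may alias a write to deref and return the
 * entry whose destination is exactly deref, if one survives.
 */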
static struct copy_entry *
lookup_entry_and_kill_aliases(struct util_dynarray *copies,
                              nir_deref_instr *deref,
                              unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   nir_deref_instr *dst_match = NULL;
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      if (!iter->src.is_ssa) {
         /* If this write aliases the source of some entry, get rid of it */
         if (nir_compare_derefs(iter->src.deref, deref) & nir_derefs_may_alias_bit) {
            copy_entry_remove(copies, iter);
            continue;
         }
      }

      nir_deref_compare_result comp = nir_compare_derefs(iter->dst, deref);

      if (comp & nir_derefs_equal_bit) {
         /* Removing entries invalidates previous iter pointers, so we'll
          * collect the matching entry later.  Just make sure it is unique.
          */
         assert(!dst_match);
         dst_match = iter->dst;
      } else if (comp & nir_derefs_may_alias_bit) {
         copy_entry_remove(copies, iter);
      }
   }

   struct copy_entry *entry = NULL;
   if (dst_match) {
      util_dynarray_foreach(copies, struct copy_entry, iter) {
         if (iter->dst == dst_match) {
            entry = iter;
            break;
         }
      }
      assert(entry);
   }
   return entry;
}

static void
kill_aliases(struct util_dynarray *copies,
             nir_deref_instr *deref,
             unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);
   if (entry)
      copy_entry_remove(copies, entry);
}

static struct copy_entry *
get_entry_and_kill_aliases(struct util_dynarray *copies,
                           nir_deref_instr *deref,
                           unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);

   if (entry == NULL)
      entry = copy_entry_create(copies, deref);

   return entry;
}

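/* Drop every entry whose destination (or non-SSA source) lives in one of the
 * given variable modes.
 */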
static void
apply_barrier_for_modes(struct util_dynarray *copies,
                        nir_variable_mode modes)
{
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      if ((iter->dst->mode & modes) ||
          (!iter->src.is_ssa && (iter->src.deref->mode & modes)))
         copy_entry_remove(copies, iter);
   }
}

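/* Record a store into an entry: for SSA values only the components in
 * write_mask are updated; a deref source always replaces the whole value.
 */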
static void
store_to_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
               const struct value *value, unsigned write_mask)
{
   if (value->is_ssa) {
      /* Clear src if it was being used as non-SSA. */
      if (!entry->src.is_ssa)
         memset(entry->src.ssa, 0, sizeof(entry->src.ssa));
      entry->src.is_ssa = true;
      /* Only overwrite the written components */
      for (unsigned i = 0; i < 4; i++) {
         if (write_mask & (1 << i))
            entry->src.ssa[i] = value->ssa[i];
      }
   } else {
      /* Non-SSA stores always write everything */
      entry->src.is_ssa = false;
      entry->src.deref = value->deref;
   }
}

/* Do a "load" from an SSA-based entry and return it in "value" as a value
 * with a single SSA def.  Because an entry could reference up to 4 different
 * SSA defs, a vecN operation may be inserted to combine them into a single
 * SSA def before handing it back to the caller.  If the load instruction is
 * no longer needed, it is removed and nir_instr::block is set to NULL.  (It
 * is possible, in some cases, for the load to be used in the vecN operation
 * in which case it isn't deleted.)
 */
static bool
load_from_ssa_entry_value(struct copy_prop_var_state *state,
                          struct copy_entry *entry,
                          nir_builder *b, nir_intrinsic_instr *intrin,
                          struct value *value)
{
   *value = entry->src;
   assert(value->is_ssa);

   const struct glsl_type *type = entry->dst->type;
   unsigned num_components = glsl_get_vector_elements(type);

   nir_component_mask_t available = 0;
   bool all_same = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i])
         available |= (1 << i);

      if (value->ssa[i] != value->ssa[0])
         all_same = false;
   }

   if (all_same) {
      /* Our work here is done */
      b->cursor = nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
      return true;
   }

   if (available != (1 << num_components) - 1 &&
       intrin->intrinsic == nir_intrinsic_load_deref &&
       (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
      /* If none of the components read are available as SSA values, then we
       * should just bail.  Otherwise, we would end up replacing the uses of
       * the load_deref with a vecN() that just gathers up its components.
       */
      return false;
   }

   b->cursor = nir_after_instr(&intrin->instr);

   nir_ssa_def *load_def =
      intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;

   bool keep_intrin = false;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i]) {
         comps[i] = nir_channel(b, value->ssa[i], i);
      } else {
         /* We don't have anything for this component in our
          * list.  Just re-use a channel from the load.
          */
         if (load_def == NULL)
            load_def = nir_load_deref(b, entry->dst);

         if (load_def->parent_instr == &intrin->instr)
            keep_intrin = true;

         comps[i] = nir_channel(b, load_def, i);
      }
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);
   for (unsigned i = 0; i < num_components; i++)
      value->ssa[i] = vec;

   if (!keep_intrin) {
      /* Removing this instruction should not touch the cursor because we
       * created the cursor after the intrinsic and have added at least one
       * instruction (the vec) since then.
       */
      assert(b->cursor.instr != &intrin->instr);
      nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
   }

   return true;
}

/**
 * Specialize the wildcards in a deref chain
 *
 * This function returns a deref chain identical to \param deref except that
 * some of its wildcards are replaced with indices from \param specific.  The
 * process is guided by \param guide which references the same type as \param
 * specific but has the same wildcard array lengths as \param deref.
 */
static nir_deref_instr *
specialize_wildcards(nir_builder *b,
                     nir_deref_path *deref,
                     nir_deref_path *guide,
                     nir_deref_path *specific)
{
   nir_deref_instr **deref_p = &deref->path[1];
   nir_deref_instr **guide_p = &guide->path[1];
   nir_deref_instr **spec_p = &specific->path[1];
   nir_deref_instr *ret_tail = deref->path[0];
   for (; *deref_p; deref_p++) {
      if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
         /* This is where things get tricky.  We have to search through
          * the entry deref to find its corresponding wildcard and fill
          * this slot in with the value from the src.
          */
         while (*guide_p &&
                (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
            guide_p++;
            spec_p++;
         }
         assert(*guide_p && *spec_p);

         ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);

         guide_p++;
         spec_p++;
      } else {
         ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
      }
   }

   return ret_tail;
}

/* Do a "load" from a deref-based entry and return it in "value" as a value.
 * The deref returned in "value" will always be a fresh copy, so the caller
 * can steal it and assign it to the instruction directly without copying it
 * again.
 */
static bool
load_from_deref_entry_value(struct copy_prop_var_state *state,
                            struct copy_entry *entry,
                            nir_builder *b, nir_intrinsic_instr *intrin,
                            nir_deref_instr *src, struct value *value)
{
   *value = entry->src;

   b->cursor = nir_instr_remove(&intrin->instr);

   nir_deref_path entry_dst_path, src_path;
   nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
   nir_deref_path_init(&src_path, src, state->mem_ctx);

   bool need_to_specialize_wildcards = false;
   nir_deref_instr **entry_p = &entry_dst_path.path[1];
   nir_deref_instr **src_p = &src_path.path[1];
   while (*entry_p && *src_p) {
      nir_deref_instr *entry_tail = *entry_p++;
      nir_deref_instr *src_tail = *src_p++;

      if (src_tail->deref_type == nir_deref_type_array &&
          entry_tail->deref_type == nir_deref_type_array_wildcard)
         need_to_specialize_wildcards = true;
   }

   /* If the entry deref is longer than the source deref then it refers to a
    * smaller type and we can't source from it.
    */
   assert(*entry_p == NULL);

   if (need_to_specialize_wildcards) {
      /* The entry has some wildcards that are not in src.  This means we need
       * to construct a new deref based on the entry but using the wildcards
       * from the source and guided by the entry dst.  Oof.
       */
      nir_deref_path entry_src_path;
      nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
      value->deref = specialize_wildcards(b, &entry_src_path,
                                          &entry_dst_path, &src_path);
      nir_deref_path_finish(&entry_src_path);
   }

   /* If our source deref is longer than the entry deref, that's ok because
    * it just means the entry deref needs to be extended a bit.
    */
   while (*src_p) {
      nir_deref_instr *src_tail = *src_p++;
      value->deref = nir_build_deref_follower(b, value->deref, src_tail);
   }

   nir_deref_path_finish(&entry_dst_path);
   nir_deref_path_finish(&src_path);

   return true;
}

static bool
try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
                    nir_builder *b, nir_intrinsic_instr *intrin,
                    nir_deref_instr *src, struct value *value)
{
   if (entry == NULL)
      return false;

   if (entry->src.is_ssa) {
      return load_from_ssa_entry_value(state, entry, b, intrin, value);
   } else {
      return load_from_deref_entry_value(state, entry, b, intrin, src, value);
   }
}

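/* Invalidate copies using the vars_written information gathered earlier for
 * this control-flow node (used when entering loops and after leaving ifs).
 */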
static void
invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
                              struct util_dynarray *copies,
                              nir_cf_node *cf_node)
{
   struct hash_entry *ht_entry =
      _mesa_hash_table_search(state->vars_written_map, cf_node);
   assert(ht_entry);

   struct vars_written *written = ht_entry->data;
   if (written->modes) {
      util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
         if (entry->dst->mode & written->modes)
            copy_entry_remove(copies, entry);
      }
   }

   hash_table_foreach(written->derefs, entry) {
      nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
      kill_aliases(copies, deref_written, (uintptr_t)entry->data);
   }
}

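/* Walk a single block, using and updating the set of available copies:
 * loads are served from known copies when possible, stores and copies create
 * new entries, and barriers/atomics/calls kill aliasing entries.
 */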
static void
copy_prop_vars_block(struct copy_prop_var_state *state,
                     nir_builder *b, nir_block *block,
                     struct util_dynarray *copies)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_call) {
         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_shader_temp |
                                         nir_var_function_temp |
                                         nir_var_mem_ssbo |
                                         nir_var_shared);
         continue;
      }

      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_barrier:
      case nir_intrinsic_memory_barrier:
         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_mem_ssbo |
                                         nir_var_shared);
         break;

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter:
         apply_barrier_for_modes(copies, nir_var_shader_out);
         break;

      case nir_intrinsic_load_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               /* lookup_load has already ensured that we get a single SSA
                * value that has all of the channels.  We just have to do the
                * rewrite operation.
                */
               if (intrin->instr.block) {
                  /* The lookup left our instruction in-place.  This means it
                   * must have used it to vec up a bunch of different sources.
                   * We need to be careful when rewriting uses so we don't
                   * rewrite the vecN itself.
                   */
                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                                 nir_src_for_ssa(value.ssa[0]),
                                                 value.ssa[0]->parent_instr);
               } else {
                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                           nir_src_for_ssa(value.ssa[0]));
               }
            } else {
               /* We're turning it into a load of a different variable */
               intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);

               value.is_ssa = true;
               for (unsigned i = 0; i < intrin->num_components; i++)
                  value.ssa[i] = &intrin->dest.ssa;
            }
            state->progress = true;
         } else {
            value.is_ssa = true;
            for (unsigned i = 0; i < intrin->num_components; i++)
               value.ssa[i] = &intrin->dest.ssa;
         }

         /* Now that we have a value, we're going to store it back so that we
          * have the right value next time we come looking for it.  In order
          * to do this, we need an exact match, not just something that
          * contains what we're looking for.
          */
         struct copy_entry *store_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_equal_bit);
         if (!store_entry)
            store_entry = copy_entry_create(copies, src);

         /* Set up a store to this entry with the value of the load.  This
          * way we can potentially remove subsequent loads.  However, we use
          * a NULL instruction so we don't try and delete the load on a
          * subsequent store.
          */
         store_to_entry(state, store_entry, &value,
                        ((1 << intrin->num_components) - 1));
         break;
      }

      case nir_intrinsic_store_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         struct copy_entry *entry =
            lookup_entry_for_deref(copies, dst, nir_derefs_equal_bit);
         if (entry && value_equals_store_src(&entry->src, intrin)) {
            /* If we are storing the value from a load of the same var, the
             * store is redundant, so remove it.
             */
            nir_instr_remove(instr);
         } else {
            struct value value = {
               .is_ssa = true
            };

            for (unsigned i = 0; i < intrin->num_components; i++)
               value.ssa[i] = intrin->src[1].ssa;

            unsigned wrmask = nir_intrinsic_write_mask(intrin);
            struct copy_entry *entry =
               get_entry_and_kill_aliases(copies, dst, wrmask);
            store_to_entry(state, entry, &value, wrmask);
         }

         break;
      }

      case nir_intrinsic_copy_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);

         if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
            /* This is a no-op self-copy.  Get rid of it. */
            nir_instr_remove(instr);
            continue;
         }

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            /* If the load works, intrin (the copy_deref) is removed. */
            if (value.is_ssa) {
               nir_store_deref(b, dst, value.ssa[0], 0xf);
            } else {
               /* If this would be a no-op self-copy, don't bother. */
               if (nir_compare_derefs(value.deref, dst) & nir_derefs_equal_bit)
                  continue;

               /* Just turn it into a copy of a different deref */
               intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
            }

            state->progress = true;
         } else {
            value = (struct value) {
               .is_ssa = false,
               { .deref = src },
            };
         }

         struct copy_entry *dst_entry =
            get_entry_and_kill_aliases(copies, dst, 0xf);
         store_to_entry(state, dst_entry, &value, 0xf);
         break;
      }

      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
         kill_aliases(copies, nir_src_as_deref(intrin->src[0]), 0xf);
         break;

      default:
         break;
      }
   }
}

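/* Walk the control-flow tree.  Each branch of an if gets a clone of the
 * current copies; loops invalidate everything their body may write before
 * processing it, since the body can run more than once.
 */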
static void
copy_prop_vars_cf_node(struct copy_prop_var_state *state,
                       struct util_dynarray *copies,
                       nir_cf_node *cf_node)
{
   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);

      struct util_dynarray impl_copies;
      util_dynarray_init(&impl_copies, state->mem_ctx);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         copy_prop_vars_cf_node(state, &impl_copies, cf_node);

      break;
   }

   case nir_cf_node_block: {
      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_builder b;
      nir_builder_init(&b, state->impl);
      copy_prop_vars_block(state, &b, block, copies);
      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      /* Clone the copies for each branch of the if statement.  The idea is
       * that they both see the same state of available copies, but do not
       * interfere with each other.
       */

      struct util_dynarray then_copies;
      util_dynarray_clone(&then_copies, state->mem_ctx, copies);

      struct util_dynarray else_copies;
      util_dynarray_clone(&else_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         copy_prop_vars_cf_node(state, &then_copies, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         copy_prop_vars_cf_node(state, &else_copies, cf_node);

      /* Both branches' copies can be ignored, since the effect of running
       * both branches was captured in the first pass that collects
       * vars_written.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);

      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      /* Invalidate before cloning the copies for the loop, since the loop
       * body can be executed more than once.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);

      struct util_dynarray loop_copies;
      util_dynarray_clone(&loop_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         copy_prop_vars_cf_node(state, &loop_copies, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }
}

static bool
nir_copy_prop_vars_impl(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_context(NULL);

   struct copy_prop_var_state state = {
      .impl = impl,
      .mem_ctx = mem_ctx,
      .lin_ctx = linear_zalloc_parent(mem_ctx, 0),

      .vars_written_map = _mesa_pointer_hash_table_create(mem_ctx),
   };

   gather_vars_written(&state, NULL, &impl->cf_node);

   copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);

   if (state.progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
#ifndef NDEBUG
      impl->valid_metadata &= ~nir_metadata_not_properly_reset;
#endif
   }

   ralloc_free(mem_ctx);
   return state.progress;
}

bool
nir_opt_copy_prop_vars(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;
      progress |= nir_copy_prop_vars_impl(function->impl);
   }

   return progress;
}