/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/bitscan.h"
#include "util/u_dynarray.h"
/*
 * Variable-based copy propagation
 *
 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
 * However, there are cases, especially when dealing with indirects, where SSA
 * won't help you.  This pass is for those times.  Specifically, it handles
 * the following things that the rest of NIR can't:
 *
 *  1) Copy-propagation on variables that have indirect access.  This includes
 *     propagating from indirect stores into indirect loads.
 *
 *  2) Removal of redundant load_deref intrinsics.  We can't trust regular CSE
 *     to do this because it isn't aware of variable writes that may alias the
 *     value and make the former load invalid.
 *
 * This pass uses an intermediate solution between being local / "per-block"
 * and a complete data-flow analysis.  It follows the control flow graph and
 * propagates the available copy information forward, invalidating data at
 * each cf_node.
 *
 * Removal of dead writes to variables is handled by another pass.
 */
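/* As a rough illustration (GLSL-style pseudocode, not from the original
 * sources): given
 *
 *    arr[i] = x;        // indirect store
 *    y = arr[i];        // indirect load of the same index
 *
 * SSA-based copy propagation cannot see through arr[i], but this pass can
 * rewrite the second statement to `y = x;` and treat a later reload of
 * arr[i] as redundant, provided no aliasing write to arr happens in between.
 */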
struct vars_written {
   nir_variable_mode modes;

   /* Key is deref and value is the uintptr_t with the write mask. */
   struct hash_table *derefs;
};

struct value {
   bool is_ssa;
   union {
      nir_ssa_def *ssa[4];
      nir_deref_instr *deref;
   };
};

struct copy_entry {
   nir_deref_instr *dst;

   struct value src;
};
struct copy_prop_var_state {
   nir_function_impl *impl;

   void *mem_ctx;
   void *lin_ctx;

   /* Maps nodes to vars_written.  Used to invalidate copy entries when
    * visiting each node.
    */
   struct hash_table *vars_written_map;

   bool progress;
};

static bool
value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
{
   assert(intrin->intrinsic == nir_intrinsic_store_deref);
   uintptr_t write_mask = nir_intrinsic_write_mask(intrin);

   for (unsigned i = 0; i < intrin->num_components; i++) {
      if ((write_mask & (1 << i)) &&
          value->ssa[i] != intrin->src[1].ssa)
         return false;
   }

   return true;
}
static struct vars_written *
create_vars_written(struct copy_prop_var_state *state)
{
   struct vars_written *written =
      linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
   written->derefs = _mesa_pointer_hash_table_create(state->mem_ctx);
   return written;
}

static void
gather_vars_written(struct copy_prop_var_state *state,
                    struct vars_written *written,
                    nir_cf_node *cf_node)
{
   struct vars_written *new_written = NULL;

   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);
      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         gather_vars_written(state, NULL, cf_node);
      break;
   }

   case nir_cf_node_block: {
      if (!written)
         break;

      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_call) {
            written->modes |= nir_var_shader_out |
                              nir_var_shader_temp |
                              nir_var_function_temp |
                              nir_var_ssbo |
                              nir_var_shared;
            continue;
         }

         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_barrier:
         case nir_intrinsic_memory_barrier:
            written->modes |= nir_var_shader_out |
                              nir_var_ssbo |
                              nir_var_shared;
            break;

         case nir_intrinsic_emit_vertex:
         case nir_intrinsic_emit_vertex_with_counter:
            written->modes = nir_var_shader_out;
            break;

         case nir_intrinsic_deref_atomic_add:
         case nir_intrinsic_deref_atomic_imin:
         case nir_intrinsic_deref_atomic_umin:
         case nir_intrinsic_deref_atomic_imax:
         case nir_intrinsic_deref_atomic_umax:
         case nir_intrinsic_deref_atomic_and:
         case nir_intrinsic_deref_atomic_or:
         case nir_intrinsic_deref_atomic_xor:
         case nir_intrinsic_deref_atomic_exchange:
         case nir_intrinsic_deref_atomic_comp_swap:
         case nir_intrinsic_store_deref:
         case nir_intrinsic_copy_deref: {
            /* Destination in all of store_deref, copy_deref and the atomics
             * is src[0].
             */
            nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

            uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
               nir_intrinsic_write_mask(intrin) :
               (1 << glsl_get_vector_elements(dst->type)) - 1;

            struct hash_entry *ht_entry =
               _mesa_hash_table_search(written->derefs, dst);
            if (ht_entry)
               ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
            else
               _mesa_hash_table_insert(written->derefs, dst, (void *)mask);

            break;
         }

         default:
            break;
         }
      }
      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         gather_vars_written(state, new_written, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }

   if (new_written) {
      /* Merge new information to the parent control flow node. */
      if (written) {
         written->modes |= new_written->modes;
         hash_table_foreach(new_written->derefs, new_entry) {
            struct hash_entry *old_entry =
               _mesa_hash_table_search_pre_hashed(written->derefs,
                                                  new_entry->hash,
                                                  new_entry->key);
            if (old_entry) {
               nir_component_mask_t merged = (uintptr_t)new_entry->data |
                                             (uintptr_t)old_entry->data;
               old_entry->data = (void *)(uintptr_t)merged;
            } else {
               _mesa_hash_table_insert_pre_hashed(written->derefs,
                                                  new_entry->hash,
                                                  new_entry->key,
                                                  new_entry->data);
            }
         }
      }
      _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
   }
}
static struct copy_entry *
copy_entry_create(struct util_dynarray *copies,
                  nir_deref_instr *dst_deref)
{
   struct copy_entry new_entry = {
      .dst = dst_deref,
   };
   util_dynarray_append(copies, struct copy_entry, new_entry);
   return util_dynarray_top_ptr(copies, struct copy_entry);
}
/* Remove a copy entry by swapping it with the last element and reducing the
 * size.  If used inside an iteration on copies, it must be a reverse
 * (backwards) iteration.  It is safe to use in those cases because the swap
 * will not affect the rest of the iteration.
 */
static void
copy_entry_remove(struct util_dynarray *copies,
                  struct copy_entry *entry)
{
   /* This also works when removing the last element since pop doesn't shrink
    * the memory used by the array, so the swap is useless but not invalid.
    */
   *entry = util_dynarray_pop(copies, struct copy_entry);
}
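/* A minimal sketch of the safe removal pattern described above (the
 * should_remove() predicate is a placeholder, not a real helper in this
 * file):
 *
 *    util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
 *       if (should_remove(iter))
 *          copy_entry_remove(copies, iter);
 *    }
 *
 * Iterating backwards means the entry swapped into the removed slot has
 * already been visited, so nothing is skipped.
 */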
static struct copy_entry *
lookup_entry_for_deref(struct util_dynarray *copies,
                       nir_deref_instr *deref,
                       nir_deref_compare_result allowed_comparisons)
{
   util_dynarray_foreach(copies, struct copy_entry, iter) {
      if (nir_compare_derefs(iter->dst, deref) & allowed_comparisons)
         return iter;
   }

   return NULL;
}
static struct copy_entry *
lookup_entry_and_kill_aliases(struct util_dynarray *copies,
                              nir_deref_instr *deref,
                              unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   nir_deref_instr *dst_match = NULL;
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      if (!iter->src.is_ssa) {
         /* If this write aliases the source of some entry, get rid of it */
         if (nir_compare_derefs(iter->src.deref, deref) &
             nir_derefs_may_alias_bit) {
            copy_entry_remove(copies, iter);
            continue;
         }
      }

      nir_deref_compare_result comp = nir_compare_derefs(iter->dst, deref);

      if (comp & nir_derefs_equal_bit) {
         /* Removing entries invalidates previous iter pointers, so we'll
          * collect the matching entry later.  Just make sure it is unique.
          */
         assert(!dst_match);
         dst_match = iter->dst;
      } else if (comp & nir_derefs_may_alias_bit) {
         copy_entry_remove(copies, iter);
      }
   }

   struct copy_entry *entry = NULL;
   if (dst_match) {
      util_dynarray_foreach(copies, struct copy_entry, iter) {
         if (iter->dst == dst_match) {
            entry = iter;
            break;
         }
      }
      assert(entry);
   }
   return entry;
}

static void
kill_aliases(struct util_dynarray *copies,
             nir_deref_instr *deref,
             unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);
   if (entry)
      copy_entry_remove(copies, entry);
}
static struct copy_entry *
get_entry_and_kill_aliases(struct util_dynarray *copies,
                           nir_deref_instr *deref,
                           unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);

   if (entry == NULL)
      entry = copy_entry_create(copies, deref);

   return entry;
}

static void
apply_barrier_for_modes(struct util_dynarray *copies,
                        nir_variable_mode modes)
{
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      if ((iter->dst->mode & modes) ||
          (!iter->src.is_ssa && (iter->src.deref->mode & modes)))
         copy_entry_remove(copies, iter);
   }
}

static void
store_to_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
               const struct value *value, unsigned write_mask)
{
   if (value->is_ssa) {
      /* Clear src if it was being used as non-SSA. */
      if (!entry->src.is_ssa)
         memset(entry->src.ssa, 0, sizeof(entry->src.ssa));
      entry->src.is_ssa = true;
      /* Only overwrite the written components */
      for (unsigned i = 0; i < 4; i++) {
         if (write_mask & (1 << i))
            entry->src.ssa[i] = value->ssa[i];
      }
   } else {
      /* Non-ssa stores always write everything */
      entry->src.is_ssa = false;
      entry->src.deref = value->deref;
   }
}
/* Do a "load" from an SSA-based entry and return it in "value" as a value
 * with a single SSA def.  Because an entry could reference up to 4 different
 * SSA defs, a vecN operation may be inserted to combine them into a single
 * SSA def before handing it back to the caller.  If the load instruction is
 * no longer needed, it is removed and nir_instr::block is set to NULL.  (It
 * is possible, in some cases, for the load to be used in the vecN operation
 * in which case it isn't deleted.)
 */
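/* For instance (an illustrative sketch with made-up SSA names): if a vec3
 * entry holds ssa[0] = ssa_10, ssa[1] = ssa_10, ssa[2] = ssa_12, the values
 * are not all the same, so a combining instruction
 *
 *    vec3 ssa_20 = vec3(ssa_10.x, ssa_10.y, ssa_12.z)
 *
 * is emitted and all three slots of "value" are pointed at ssa_20.
 */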
static bool
load_from_ssa_entry_value(struct copy_prop_var_state *state,
                          struct copy_entry *entry,
                          nir_builder *b, nir_intrinsic_instr *intrin,
                          struct value *value)
{
   *value = entry->src;
   assert(value->is_ssa);

   const struct glsl_type *type = entry->dst->type;
   unsigned num_components = glsl_get_vector_elements(type);

   nir_component_mask_t available = 0;
   bool all_same = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i])
         available |= (1 << i);

      if (value->ssa[i] != value->ssa[0])
         all_same = false;
   }

   if (all_same) {
      /* Our work here is done */
      b->cursor = nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
      return true;
   }

   if (available != (1 << num_components) - 1 &&
       intrin->intrinsic == nir_intrinsic_load_deref &&
       (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
      /* If none of the components read are available as SSA values, then we
       * should just bail.  Otherwise, we would end up replacing the uses of
       * the load_deref with a vecN() that just gathers up its components.
       */
      return false;
   }

   b->cursor = nir_after_instr(&intrin->instr);

   nir_ssa_def *load_def =
      intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;

   bool keep_intrin = false;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i]) {
         comps[i] = nir_channel(b, value->ssa[i], i);
      } else {
         /* We don't have anything for this component in our
          * list.  Just re-use a channel from the load.
          */
         if (load_def == NULL)
            load_def = nir_load_deref(b, entry->dst);

         if (load_def->parent_instr == &intrin->instr)
            keep_intrin = true;

         comps[i] = nir_channel(b, load_def, i);
      }
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);
   for (unsigned i = 0; i < num_components; i++)
      value->ssa[i] = vec;

   if (!keep_intrin) {
      /* Removing this instruction should not touch the cursor because we
       * created the cursor after the intrinsic and have added at least one
       * instruction (the vec) since then.
       */
      assert(b->cursor.instr != &intrin->instr);
      nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
   }

   return true;
}
/* Specialize the wildcards in a deref chain
 *
 * This function returns a deref chain identical to \param deref except that
 * some of its wildcards are replaced with indices from \param specific.  The
 * process is guided by \param guide which references the same type as \param
 * specific but has the same wildcard array lengths as \param deref.
 */
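/* As an illustrative example (not from the original sources): after a
 * copy_deref of arr0[*] -> arr1[*], a load of arr1[3] can be served from
 * arr0.  Here the entry source arr0[*] is \param deref, the entry
 * destination arr1[*] is \param guide, and the load's deref arr1[3] is
 * \param specific; the result is arr0[3].
 */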
static nir_deref_instr *
specialize_wildcards(nir_builder *b,
                     nir_deref_path *deref,
                     nir_deref_path *guide,
                     nir_deref_path *specific)
{
   nir_deref_instr **deref_p = &deref->path[1];
   nir_deref_instr **guide_p = &guide->path[1];
   nir_deref_instr **spec_p = &specific->path[1];
   nir_deref_instr *ret_tail = deref->path[0];
   for (; *deref_p; deref_p++) {
      if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
         /* This is where things get tricky.  We have to search through
          * the entry deref to find its corresponding wildcard and fill
          * this slot in with the value from the src.
          */
         while (*guide_p &&
                (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
            guide_p++;
            spec_p++;
         }
         assert(*guide_p && *spec_p);

         ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);

         guide_p++;
         spec_p++;
      } else {
         ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
      }
   }

   return ret_tail;
}
/* Do a "load" from a deref-based entry and return it in "value" as a value.
 * The deref returned in "value" will always be a fresh copy so the caller
 * can steal it and assign it to the instruction directly without copying it
 * again.
 */
static bool
load_from_deref_entry_value(struct copy_prop_var_state *state,
                            struct copy_entry *entry,
                            nir_builder *b, nir_intrinsic_instr *intrin,
                            nir_deref_instr *src, struct value *value)
{
   *value = entry->src;

   b->cursor = nir_instr_remove(&intrin->instr);

   nir_deref_path entry_dst_path, src_path;
   nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
   nir_deref_path_init(&src_path, src, state->mem_ctx);

   bool need_to_specialize_wildcards = false;
   nir_deref_instr **entry_p = &entry_dst_path.path[1];
   nir_deref_instr **src_p = &src_path.path[1];
   while (*entry_p && *src_p) {
      nir_deref_instr *entry_tail = *entry_p++;
      nir_deref_instr *src_tail = *src_p++;

      if (src_tail->deref_type == nir_deref_type_array &&
          entry_tail->deref_type == nir_deref_type_array_wildcard)
         need_to_specialize_wildcards = true;
   }

   /* If the entry deref is longer than the source deref then it refers to a
    * smaller type and we can't source from it.
    */
   assert(*entry_p == NULL);

   if (need_to_specialize_wildcards) {
      /* The entry has some wildcards that are not in src.  This means we
       * need to construct a new deref based on the entry but using the
       * wildcards from the source and guided by the entry dst.  Oof.
       */
      nir_deref_path entry_src_path;
      nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
      value->deref = specialize_wildcards(b, &entry_src_path,
                                          &entry_dst_path, &src_path);
      nir_deref_path_finish(&entry_src_path);
   }

   /* If our source deref is longer than the entry deref, that's ok because
    * it just means the entry deref needs to be extended a bit.
    */
   while (*src_p) {
      nir_deref_instr *src_tail = *src_p++;
      value->deref = nir_build_deref_follower(b, value->deref, src_tail);
   }

   nir_deref_path_finish(&entry_dst_path);
   nir_deref_path_finish(&src_path);

   return true;
}
static bool
try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
                    nir_builder *b, nir_intrinsic_instr *intrin,
                    nir_deref_instr *src, struct value *value)
{
   if (entry == NULL)
      return false;

   if (entry->src.is_ssa) {
      return load_from_ssa_entry_value(state, entry, b, intrin, value);
   } else {
      return load_from_deref_entry_value(state, entry, b, intrin, src, value);
   }
}
static void
invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
                              struct util_dynarray *copies,
                              nir_cf_node *cf_node)
{
   struct hash_entry *ht_entry =
      _mesa_hash_table_search(state->vars_written_map, cf_node);
   assert(ht_entry);

   struct vars_written *written = ht_entry->data;
   if (written->modes) {
      util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
         if (entry->dst->mode & written->modes)
            copy_entry_remove(copies, entry);
      }
   }

   hash_table_foreach(written->derefs, entry) {
      nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
      kill_aliases(copies, deref_written, (uintptr_t)entry->data);
   }
}

static void
copy_prop_vars_block(struct copy_prop_var_state *state,
                     nir_builder *b, nir_block *block,
                     struct util_dynarray *copies)
{
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_call) {
         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_shader_temp |
                                         nir_var_function_temp |
                                         nir_var_ssbo |
                                         nir_var_shared);
         continue;
      }

      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_barrier:
      case nir_intrinsic_memory_barrier:
         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_ssbo |
                                         nir_var_shared);
         break;

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter:
         apply_barrier_for_modes(copies, nir_var_shader_out);
         break;

      case nir_intrinsic_load_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               /* lookup_load has already ensured that we get a single SSA
                * value that has all of the channels.  We just have to do the
                * rewrite operation.
                */
               if (intrin->instr.block) {
                  /* The lookup left our instruction in-place.  This means it
                   * must have used it to vec up a bunch of different sources.
                   * We need to be careful when rewriting uses so we don't
                   * rewrite the vecN itself.
                   */
                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                                 nir_src_for_ssa(value.ssa[0]),
                                                 value.ssa[0]->parent_instr);
               } else {
                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                           nir_src_for_ssa(value.ssa[0]));
               }
            } else {
               /* We're turning it into a load of a different variable */
               intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);

               value.is_ssa = true;
               for (unsigned i = 0; i < intrin->num_components; i++)
                  value.ssa[i] = &intrin->dest.ssa;
            }
            state->progress = true;
         } else {
            value.is_ssa = true;
            for (unsigned i = 0; i < intrin->num_components; i++)
               value.ssa[i] = &intrin->dest.ssa;
         }

         /* Now that we have a value, we're going to store it back so that we
          * have the right value next time we come looking for it.  In order
          * to do this, we need an exact match, not just something that
          * contains what we're looking for.
          */
         struct copy_entry *store_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_equal_bit);
         if (!store_entry)
            store_entry = copy_entry_create(copies, src);

         /* Set up a store to this entry with the value of the load.  This
          * way we can potentially remove subsequent loads.  However, we use
          * a NULL instruction so we don't try and delete the load on a
          * subsequent store.
          */
         store_to_entry(state, store_entry, &value,
                        ((1 << intrin->num_components) - 1));
         break;
      }

      case nir_intrinsic_store_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         struct copy_entry *entry =
            lookup_entry_for_deref(copies, dst, nir_derefs_equal_bit);
         if (entry && value_equals_store_src(&entry->src, intrin)) {
            /* If we are storing the value from a load of the same var the
             * store is redundant so remove it.
             */
            nir_instr_remove(instr);
         } else {
            struct value value = {
               .is_ssa = true
            };

            for (unsigned i = 0; i < intrin->num_components; i++)
               value.ssa[i] = intrin->src[1].ssa;

            unsigned wrmask = nir_intrinsic_write_mask(intrin);
            struct copy_entry *entry =
               get_entry_and_kill_aliases(copies, dst, wrmask);
            store_to_entry(state, entry, &value, wrmask);
         }

         break;
      }

      case nir_intrinsic_copy_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);

         if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
            /* This is a no-op self-copy.  Get rid of it */
            nir_instr_remove(instr);
            continue;
         }

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            /* If load works, intrin (the copy_deref) is removed. */
            if (value.is_ssa) {
               nir_store_deref(b, dst, value.ssa[0], 0xf);
            } else {
               /* If this would be a no-op self-copy, don't bother. */
               if (nir_compare_derefs(value.deref, dst) & nir_derefs_equal_bit)
                  continue;

               /* Just turn it into a copy of a different deref */
               intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
            }

            state->progress = true;
         } else {
            value = (struct value) {
               .is_ssa = false,
               { .deref = src },
            };
         }

         struct copy_entry *dst_entry =
            get_entry_and_kill_aliases(copies, dst, 0xf);
         store_to_entry(state, dst_entry, &value, 0xf);
         break;
      }

      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
         kill_aliases(copies, nir_src_as_deref(intrin->src[0]), 0xf);
         break;

      default:
         break;
      }
   }
}

static void
copy_prop_vars_cf_node(struct copy_prop_var_state *state,
                       struct util_dynarray *copies,
                       nir_cf_node *cf_node)
{
   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);

      struct util_dynarray impl_copies;
      util_dynarray_init(&impl_copies, state->mem_ctx);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         copy_prop_vars_cf_node(state, &impl_copies, cf_node);

      break;
   }

   case nir_cf_node_block: {
      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_builder b;
      nir_builder_init(&b, state->impl);
      copy_prop_vars_block(state, &b, block, copies);
      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      /* Clone the copies for each branch of the if statement.  The idea is
       * that they both see the same state of available copies, but do not
       * interfere with each other.
       */

      struct util_dynarray then_copies;
      util_dynarray_clone(&then_copies, state->mem_ctx, copies);

      struct util_dynarray else_copies;
      util_dynarray_clone(&else_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         copy_prop_vars_cf_node(state, &then_copies, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         copy_prop_vars_cf_node(state, &else_copies, cf_node);

      /* Both branches' copies can be ignored, since the effect of running
       * both branches was captured in the first pass that collects
       * vars_written.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);

      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      /* Invalidate before cloning the copies for the loop, since the loop
       * body can be executed more than once.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);

      struct util_dynarray loop_copies;
      util_dynarray_clone(&loop_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         copy_prop_vars_cf_node(state, &loop_copies, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }
}
static bool
nir_copy_prop_vars_impl(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_context(NULL);

   struct copy_prop_var_state state = {
      .impl = impl,
      .mem_ctx = mem_ctx,
      .lin_ctx = linear_zalloc_parent(mem_ctx, 0),

      .vars_written_map = _mesa_pointer_hash_table_create(mem_ctx),
   };

   gather_vars_written(&state, NULL, &impl->cf_node);

   copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);

   if (state.progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
#ifndef NDEBUG
      impl->valid_metadata &= ~nir_metadata_not_properly_reset;
#endif
   }

   ralloc_free(mem_ctx);
   return state.progress;
}
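/* A sketch of typical usage (the surrounding pass list is illustrative):
 * callers normally run this pass inside a fixed-point optimization loop,
 * e.g.
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_copy_prop_vars(shader);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 *
 * since each round of copy propagation can expose more work for other
 * passes, and vice versa.
 */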
bool
nir_opt_copy_prop_vars(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;
      progress |= nir_copy_prop_vars_impl(function->impl);
   }

   return progress;
}