/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "nir_builder.h"

#include "util/bitscan.h"
/*
 * Variable-based copy propagation
 *
 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
 * However, there are cases, especially when dealing with indirects, where SSA
 * won't help you.  This pass is for those times.  Specifically, it handles
 * the following things that the rest of NIR can't:
 *
 *  1) Copy-propagation on variables that have indirect access.  This includes
 *     propagating from indirect stores into indirect loads.
 *
 *  2) Dead code elimination of store_var and copy_var intrinsics based on
 *     killed destination values.
 *
 *  3) Removal of redundant load_var intrinsics.  We can't trust regular CSE
 *     to do this because it isn't aware of variable writes that may alias the
 *     value and make the former load invalid.
 *
 * Unfortunately, properly handling all of those cases makes this pass rather
 * complex.  In order to avoid additional complexity, this pass is entirely
 * block-local.  If we tried to make it global, the data-flow analysis would
 * rapidly get out of hand.  Fortunately, for anything that is only ever
 * accessed directly, we get SSA-based copy propagation, which is extremely
 * powerful, so this isn't that great a loss.
 */
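
/* As a concrete illustration (not part of the pass; the NIR syntax below is
 * approximate and the names ssa_5, ssa_6, and arr are made up), consider an
 * indirect store followed by an indirect load of the same location:
 *
 *    intrinsic store_var (ssa_5) (arr[i]) (0x1)
 *    vec1 32 ssa_6 = intrinsic load_var () (arr[i]) ()
 *
 * SSA-based copy propagation cannot connect ssa_5 to ssa_6 because the value
 * travels through variable storage.  This pass can: it rewrites all uses of
 * ssa_6 to ssa_5 and deletes the load.
 */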
struct value {
   bool is_ssa;
   union {
      nir_ssa_def *ssa[4];
      nir_deref_var *deref;
   };
};

struct copy_entry {
   struct list_head link;

   nir_instr *store_instr[4];

   unsigned comps_may_be_read;
   struct value src;

   nir_deref_var *dst;
};
struct copy_prop_var_state {
   nir_shader *shader;

   void *mem_ctx;

   struct list_head copies;

   /* We're going to be allocating and deleting a lot of copy entries so we'll
    * keep a free list to avoid thrashing malloc too badly.
    */
   struct list_head copy_free_list;

   bool progress;
};
static struct copy_entry *
copy_entry_create(struct copy_prop_var_state *state,
                  nir_deref_var *dst_deref)
{
   struct copy_entry *entry;
   if (!list_empty(&state->copy_free_list)) {
      struct list_head *item = state->copy_free_list.next;
      list_del(item);
      entry = LIST_ENTRY(struct copy_entry, item, link);
      memset(entry, 0, sizeof(*entry));
   } else {
      entry = rzalloc(state->mem_ctx, struct copy_entry);
   }

   entry->dst = dst_deref;
   list_add(&entry->link, &state->copies);

   return entry;
}
static void
copy_entry_remove(struct copy_prop_var_state *state, struct copy_entry *entry)
{
   list_del(&entry->link);
   list_add(&entry->link, &state->copy_free_list);
}
enum deref_compare_result {
   derefs_equal_bit = (1 << 0),
   derefs_may_alias_bit = (1 << 1),
   derefs_a_contains_b_bit = (1 << 2),
   derefs_b_contains_a_bit = (1 << 3),
};
/** Compares the storage referenced by two deref chains and returns a bitmask
 * of deref_compare_result flags: whether the derefs may alias, whether one
 * completely contains the other, and whether they are equal.
 *
 * NOTE: This is fairly general and could be moved to core NIR if someone
 * else ever needs it.
 */
static enum deref_compare_result
compare_derefs(nir_deref_var *a, nir_deref_var *b)
{
   if (a->var != b->var)
      return 0;

   /* Start off assuming they fully compare.  We ignore equality for now.  In
    * the end, we'll determine that by containment.
    */
   enum deref_compare_result result = derefs_may_alias_bit |
                                      derefs_a_contains_b_bit |
                                      derefs_b_contains_a_bit;

   nir_deref *a_tail = &a->deref;
   nir_deref *b_tail = &b->deref;
   while (a_tail->child && b_tail->child) {
      a_tail = a_tail->child;
      b_tail = b_tail->child;

      assert(a_tail->deref_type == b_tail->deref_type);
      switch (a_tail->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *a_arr = nir_deref_as_array(a_tail);
         nir_deref_array *b_arr = nir_deref_as_array(b_tail);

         if (a_arr->deref_array_type == nir_deref_array_type_direct &&
             b_arr->deref_array_type == nir_deref_array_type_direct) {
            /* If they're both direct and have different offsets, they
             * don't even alias, much less anything else.
             */
            if (a_arr->base_offset != b_arr->base_offset)
               return 0;
         } else if (a_arr->deref_array_type == nir_deref_array_type_wildcard) {
            if (b_arr->deref_array_type != nir_deref_array_type_wildcard)
               result &= ~derefs_b_contains_a_bit;
         } else if (b_arr->deref_array_type == nir_deref_array_type_wildcard) {
            if (a_arr->deref_array_type != nir_deref_array_type_wildcard)
               result &= ~derefs_a_contains_b_bit;
         } else if (a_arr->deref_array_type == nir_deref_array_type_indirect &&
                    b_arr->deref_array_type == nir_deref_array_type_indirect) {
            assert(a_arr->indirect.is_ssa && b_arr->indirect.is_ssa);
            if (a_arr->indirect.ssa == b_arr->indirect.ssa) {
               /* If they're different constant offsets from the same indirect
                * then they don't alias at all.
                */
               if (a_arr->base_offset != b_arr->base_offset)
                  return 0;
               /* Otherwise the indirect and base both match */
            } else {
               /* If they have different indirect offsets then we can't
                * prove anything about containment.
                */
               result &= ~(derefs_a_contains_b_bit | derefs_b_contains_a_bit);
            }
         } else {
            /* In this case, one is indirect and the other direct so we can't
             * prove anything about containment.
             */
            result &= ~(derefs_a_contains_b_bit | derefs_b_contains_a_bit);
         }
         break;
      }

      case nir_deref_type_struct: {
         nir_deref_struct *a_struct = nir_deref_as_struct(a_tail);
         nir_deref_struct *b_struct = nir_deref_as_struct(b_tail);

         /* If they're different struct members, they don't even alias */
         if (a_struct->index != b_struct->index)
            return 0;
         break;
      }

      default:
         unreachable("Invalid deref type");
      }
   }

   /* If a is longer than b, then it can't contain b */
   if (a_tail->child)
      result &= ~derefs_a_contains_b_bit;

   /* Likewise, if b is longer than a, it can't contain a */
   if (b_tail->child)
      result &= ~derefs_b_contains_a_bit;

   /* If a contains b and b contains a, they must be equal. */
   if ((result & derefs_a_contains_b_bit) && (result & derefs_b_contains_a_bit))
      result |= derefs_equal_bit;

   return result;
}
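
/* Some illustrative comparisons (informal deref syntax; i and j are distinct
 * non-constant SSA indices):
 *
 *    v->arr[3] vs. v->arr[3]  -> equal | may_alias | contains both ways
 *    v->arr[*] vs. v->arr[3]  -> may_alias | a_contains_b
 *    v->arr[i] vs. v->arr[j]  -> may_alias only; containment is unprovable
 *    v->arr[1] vs. v->arr[2]  -> 0, provably disjoint
 */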
static void
remove_dead_writes(struct copy_prop_var_state *state,
                   struct copy_entry *entry, unsigned write_mask)
{
   /* We're overwriting another entry.  Some of its components may not
    * have been read yet and, if that's the case, we may be able to delete
    * some instructions, but we have to be careful.
    */
   unsigned dead_comps = write_mask & ~entry->comps_may_be_read;

   for (unsigned mask = dead_comps; mask;) {
      unsigned i = u_bit_scan(&mask);

      nir_instr *instr = entry->store_instr[i];

      /* We may have already deleted it on a previous iteration */
      if (!instr)
         continue;

      /* See if this instr is used anywhere that it's not dead */
      bool keep = false;
      for (unsigned j = 0; j < 4; j++) {
         if (entry->store_instr[j] == instr) {
            if (dead_comps & (1 << j)) {
               entry->store_instr[j] = NULL;
            } else {
               keep = true;
               break;
            }
         }
      }

      if (!keep) {
         nir_instr_remove(instr);
         state->progress = true;
      }
   }
}
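
/* For example (illustrative): in a block containing
 *
 *    store_var (ssa_1) (v) (0xf)
 *    store_var (ssa_2) (v) (0xf)
 *
 * with no intervening read of v, every component written by the first store
 * is dead by the time of the second, so the first store is removed outright.
 * If the second store only wrote v.xy (write mask 0x3), only those components
 * would be dead and the first store would survive.
 */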
static struct copy_entry *
lookup_entry_for_deref(struct copy_prop_var_state *state,
                       nir_deref_var *deref,
                       enum deref_compare_result allowed_comparisons)
{
   list_for_each_entry(struct copy_entry, iter, &state->copies, link) {
      if (compare_derefs(iter->dst, deref) & allowed_comparisons)
         return iter;
   }

   return NULL;
}
static void
mark_aliased_entries_as_read(struct copy_prop_var_state *state,
                             nir_deref_var *deref, unsigned components)
{
   list_for_each_entry(struct copy_entry, iter, &state->copies, link) {
      if (compare_derefs(iter->dst, deref) & derefs_may_alias_bit)
         iter->comps_may_be_read |= components;
   }
}
static struct copy_entry *
get_entry_and_kill_aliases(struct copy_prop_var_state *state,
                           nir_deref_var *deref,
                           unsigned write_mask)
{
   struct copy_entry *entry = NULL;
   list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link) {
      if (!iter->src.is_ssa) {
         /* If this write aliases the source of some entry, get rid of it */
         if (compare_derefs(iter->src.deref, deref) & derefs_may_alias_bit) {
            copy_entry_remove(state, iter);
            continue;
         }
      }

      enum deref_compare_result comp = compare_derefs(iter->dst, deref);
      /* This is a store operation.  If we completely overwrite some value, we
       * want to delete any dead writes that may be present.
       */
      if (comp & derefs_b_contains_a_bit)
         remove_dead_writes(state, iter, write_mask);

      if (comp & derefs_equal_bit) {
         assert(entry == NULL);
         entry = iter;
      } else if (comp & derefs_may_alias_bit) {
         copy_entry_remove(state, iter);
      }
   }

   if (entry == NULL)
      entry = copy_entry_create(state, deref);

   return entry;
}
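
/* Illustrative behaviour for a store to v->arr[i] (i an SSA indirect): an
 * entry whose destination is v->arr[2] may alias the store and is dropped,
 * an entry for an unrelated variable u is untouched, and an entry whose
 * destination is exactly v->arr[i] is returned so the caller can overwrite
 * it with the newly stored value.
 */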
static void
apply_barrier_for_modes(struct copy_prop_var_state *state,
                        nir_variable_mode modes)
{
   list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link) {
      if ((iter->dst->var->data.mode & modes) ||
          (!iter->src.is_ssa && (iter->src.deref->var->data.mode & modes)))
         copy_entry_remove(state, iter);
   }
}
static void
store_to_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
               const struct value *value, unsigned write_mask,
               nir_instr *store_instr)
{
   entry->comps_may_be_read &= ~write_mask;
   if (value->is_ssa) {
      entry->src.is_ssa = true;
      /* Only overwrite the written components */
      for (unsigned i = 0; i < 4; i++) {
         if (write_mask & (1 << i)) {
            entry->store_instr[i] = store_instr;
            entry->src.ssa[i] = value->ssa[i];
         }
      }
   } else {
      /* Non-SSA stores always write everything */
      entry->src.is_ssa = false;
      entry->src.deref = value->deref;
      for (unsigned i = 0; i < 4; i++)
         entry->store_instr[i] = store_instr;
   }
}
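
/* For example (illustrative): after
 *
 *    store_var (ssa_1) (v) (0x3)    <- writes v.xy
 *    store_var (ssa_2) (v) (0x4)    <- writes v.z
 *
 * the entry for v holds src.ssa = { ssa_1, ssa_1, ssa_2, NULL }, with the
 * originating store recorded per component, so a later load can be stitched
 * together from both stores and dead components can be tracked per store.
 */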
/* Do a "load" from an SSA-based entry and return it in "value" as a value
 * with a single SSA def.  Because an entry could reference up to 4 different
 * SSA defs, a vecN operation may be inserted to combine them into a single
 * SSA def before handing it back to the caller.  If the load instruction is
 * no longer needed, it is removed and nir_instr::block is set to NULL.  (It
 * is possible, in some cases, for the load to be used in the vecN operation
 * in which case it isn't deleted.)
 */
static bool
load_from_ssa_entry_value(struct copy_prop_var_state *state,
                          struct copy_entry *entry,
                          nir_builder *b, nir_intrinsic_instr *intrin,
                          struct value *value)
{
   *value = entry->src;
   assert(value->is_ssa);

   const struct glsl_type *type = nir_deref_tail(&entry->dst->deref)->type;
   unsigned num_components = glsl_get_vector_elements(type);

   uint8_t available = 0;
   bool all_same = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i])
         available |= (1 << i);

      if (value->ssa[i] != value->ssa[0])
         all_same = false;
   }

   if (all_same) {
      /* Our work here is done */
      b->cursor = nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
      return true;
   }

   if (available != (1 << num_components) - 1 &&
       intrin->intrinsic == nir_intrinsic_load_var &&
       (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
      /* If none of the components read are available as SSA values, then we
       * should just bail.  Otherwise, we would end up replacing the uses of
       * the load_var with a vecN() that just gathers up its components.
       */
      return false;
   }

   b->cursor = nir_after_instr(&intrin->instr);

   nir_ssa_def *load_def =
      intrin->intrinsic == nir_intrinsic_load_var ? &intrin->dest.ssa : NULL;

   bool keep_intrin = false;
   nir_ssa_def *comps[4];
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i]) {
         comps[i] = nir_channel(b, value->ssa[i], i);
      } else {
         /* We don't have anything for this component in our
          * list.  Just re-use a channel from the load.
          */
         if (load_def == NULL)
            load_def = nir_load_deref_var(b, entry->dst);

         if (load_def->parent_instr == &intrin->instr)
            keep_intrin = true;

         comps[i] = nir_channel(b, load_def, i);
      }
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);
   for (unsigned i = 0; i < num_components; i++)
      value->ssa[i] = vec;

   if (!keep_intrin) {
      /* Removing this instruction should not touch the cursor because we
       * created the cursor after the intrinsic and have added at least one
       * instruction (the vec) since then.
       */
      assert(b->cursor.instr != &intrin->instr);
      nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
   }

   return true;
}
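
/* Continuing the per-component example (illustrative): if the entry holds
 * src.ssa = { ssa_1, ssa_1, ssa_2, NULL } and a vec4 load comes along, this
 * emits roughly
 *
 *    vec4(ssa_1.x, ssa_1.y, ssa_2.z, load.w)
 *
 * re-using a channel of the original load only for the unavailable .w
 * component, in which case the load is kept.
 */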
/**
 * Specialize the wildcards in a deref chain
 *
 * This function returns a deref chain identical to \param deref except that
 * some of its wildcards are replaced with indices from \param specific.  The
 * process is guided by \param guide which references the same type as \param
 * specific but has the same wildcard array lengths as \param deref.
 */
static nir_deref_var *
specialize_wildcards(nir_deref_var *deref,
                     nir_deref_var *guide,
                     nir_deref_var *specific,
                     void *mem_ctx)
{
   nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);

   nir_deref *deref_tail = deref->deref.child;
   nir_deref *guide_tail = &guide->deref;
   nir_deref *spec_tail = &specific->deref;
   nir_deref *ret_tail = &ret->deref;
   while (deref_tail) {
      switch (deref_tail->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *deref_arr = nir_deref_as_array(deref_tail);

         nir_deref_array *ret_arr = nir_deref_array_create(ret_tail);
         ret_arr->deref.type = deref_arr->deref.type;
         ret_arr->deref_array_type = deref_arr->deref_array_type;

         switch (deref_arr->deref_array_type) {
         case nir_deref_array_type_direct:
            ret_arr->base_offset = deref_arr->base_offset;
            break;

         case nir_deref_array_type_indirect:
            ret_arr->base_offset = deref_arr->base_offset;
            assert(deref_arr->indirect.is_ssa);
            ret_arr->indirect = deref_arr->indirect;
            break;

         case nir_deref_array_type_wildcard:
            /* This is where things get tricky.  We have to search through
             * the entry deref to find its corresponding wildcard and fill
             * this slot in with the value from the src.
             */
            while (guide_tail->child) {
               guide_tail = guide_tail->child;
               spec_tail = spec_tail->child;

               if (guide_tail->deref_type == nir_deref_type_array &&
                   nir_deref_as_array(guide_tail)->deref_array_type ==
                   nir_deref_array_type_wildcard)
                  break;
            }

            nir_deref_array *spec_arr = nir_deref_as_array(spec_tail);
            ret_arr->deref_array_type = spec_arr->deref_array_type;
            ret_arr->base_offset = spec_arr->base_offset;
            ret_arr->indirect = spec_arr->indirect;
            break;
         }

         ret_tail->child = &ret_arr->deref;
         break;
      }

      case nir_deref_type_struct: {
         nir_deref_struct *deref_struct = nir_deref_as_struct(deref_tail);

         nir_deref_struct *ret_struct =
            nir_deref_struct_create(ret_tail, deref_struct->index);
         ret_struct->deref.type = deref_struct->deref.type;

         ret_tail->child = &ret_struct->deref;
         break;
      }

      case nir_deref_type_var:
         unreachable("Invalid deref type");
      }

      deref_tail = deref_tail->child;
      ret_tail = ret_tail->child;
   }

   return ret;
}
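
/* Illustrative example: suppose an earlier copy recorded the entry
 *
 *    copy_var (a[*].f) (b[*].f)     <- dst = a[*].f, src = b[*].f
 *
 * and we now see a load of a[i].f.  Then b[*].f is the deref to specialize,
 * a[*].f is the guide, and a[i].f is the specific access; the result is
 * b[i].f, i.e. each wildcard is filled in with the index actually used.
 */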
/* Do a "load" from a deref-based entry and return it in "value".  The deref
 * returned in "value" will always be a fresh copy, so the caller can steal it
 * and assign it to the instruction directly without copying it again.
 */
*state
,
530 struct copy_entry
*entry
,
531 nir_builder
*b
, nir_intrinsic_instr
*intrin
,
532 nir_deref_var
*src
, struct value
*value
)
536 /* Walk the deref to get the two tails and also figure out if we need to
537 * specialize any wildcards.
539 bool need_to_specialize_wildcards
= false;
540 nir_deref
*entry_tail
= &entry
->dst
->deref
;
541 nir_deref
*src_tail
= &src
->deref
;
542 while (entry_tail
->child
&& src_tail
->child
) {
543 assert(src_tail
->child
->deref_type
== entry_tail
->child
->deref_type
);
544 if (src_tail
->child
->deref_type
== nir_deref_type_array
) {
545 nir_deref_array
*entry_arr
= nir_deref_as_array(entry_tail
->child
);
546 nir_deref_array
*src_arr
= nir_deref_as_array(src_tail
->child
);
548 if (src_arr
->deref_array_type
!= nir_deref_array_type_wildcard
&&
549 entry_arr
->deref_array_type
== nir_deref_array_type_wildcard
)
550 need_to_specialize_wildcards
= true;
553 entry_tail
= entry_tail
->child
;
554 src_tail
= src_tail
->child
;
557 /* If the entry deref is longer than the source deref then it refers to a
558 * smaller type and we can't source from it.
560 assert(entry_tail
->child
== NULL
);
562 if (need_to_specialize_wildcards
) {
563 /* The entry has some wildcards that are not in src. This means we need
564 * to construct a new deref based on the entry but using the wildcards
565 * from the source and guided by the entry dst. Oof.
567 value
->deref
= specialize_wildcards(entry
->src
.deref
, entry
->dst
, src
,
570 /* We're going to need to make a copy in case we modify it below */
571 value
->deref
= nir_deref_var_clone(value
->deref
, state
->mem_ctx
);
574 if (src_tail
->child
) {
575 /* If our source deref is longer than the entry deref, that's ok because
576 * it just means the entry deref needs to be extended a bit.
578 nir_deref
*value_tail
= nir_deref_tail(&value
->deref
->deref
);
579 value_tail
->child
= nir_deref_clone(src_tail
->child
, value_tail
);
582 b
->cursor
= nir_instr_remove(&intrin
->instr
);
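
/* Illustrative example of the extension case: if the entry records the
 * whole-struct copy s = t and we load s.f[3], the source deref is longer
 * than the entry deref, so the entry source t is extended with the trailing
 * .f[3] to yield t.f[3] as the replacement source.
 */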
static bool
try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
                    nir_builder *b, nir_intrinsic_instr *intrin,
                    nir_deref_var *src, struct value *value)
{
   if (entry == NULL)
      return false;

   if (entry->src.is_ssa) {
      return load_from_ssa_entry_value(state, entry, b, intrin, value);
   } else {
      return load_from_deref_entry_value(state, entry, b, intrin, src, value);
   }
}
static void
copy_prop_vars_block(struct copy_prop_var_state *state,
                     nir_builder *b, nir_block *block)
{
   /* Start each block with a blank slate */
   list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link)
      copy_entry_remove(state, iter);

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_barrier:
      case nir_intrinsic_memory_barrier:
         /* If we hit a barrier, we need to trash everything that may possibly
          * be accessible to another thread.  Locals, globals, and things of
          * the like are safe, however.
          */
         apply_barrier_for_modes(state, ~(nir_var_local | nir_var_global |
                                          nir_var_shader_in | nir_var_uniform));
         break;

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter:
         apply_barrier_for_modes(state, nir_var_shader_out);
         break;

      case nir_intrinsic_load_var: {
         nir_deref_var *src = intrin->variables[0];

         uint8_t comps_read = nir_ssa_def_components_read(&intrin->dest.ssa);
         mark_aliased_entries_as_read(state, src, comps_read);

         struct copy_entry *src_entry =
            lookup_entry_for_deref(state, src, derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               /* The load lookup has already ensured that we get a single SSA
                * value that has all of the channels.  We just have to do the
                * rewrite operation.
                */
               if (intrin->instr.block) {
                  /* The lookup left our instruction in-place.  This means it
                   * must have used it to vec up a bunch of different sources.
                   * We need to be careful when rewriting uses so we don't
                   * rewrite the vecN itself.
                   */
                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                                 nir_src_for_ssa(value.ssa[0]),
                                                 value.ssa[0]->parent_instr);
               } else {
                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                           nir_src_for_ssa(value.ssa[0]));
               }
            } else {
               /* We're turning it into a load of a different variable */
               ralloc_steal(intrin, value.deref);
               intrin->variables[0] = value.deref;

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);

               value.is_ssa = true;
               for (unsigned i = 0; i < intrin->num_components; i++)
                  value.ssa[i] = &intrin->dest.ssa;
            }
            state->progress = true;
         } else {
            value.is_ssa = true;
            for (unsigned i = 0; i < intrin->num_components; i++)
               value.ssa[i] = &intrin->dest.ssa;
         }

         /* Now that we have a value, we're going to store it back so that we
          * have the right value next time we come looking for it.  In order
          * to do this, we need an exact match, not just something that
          * contains what we're looking for.
          */
         struct copy_entry *store_entry =
            lookup_entry_for_deref(state, src, derefs_equal_bit);
         if (!store_entry)
            store_entry = copy_entry_create(state, src);

         /* Set up a store to this entry with the value of the load.  This way
          * we can potentially remove subsequent loads.  However, we use a
          * NULL instruction so we don't try and delete the load on a
          * subsequent store.
          */
         store_to_entry(state, store_entry, &value,
                        ((1 << intrin->num_components) - 1), NULL);
         break;
      }

      case nir_intrinsic_store_var: {
         struct value value = {
            .is_ssa = true
         };

         for (unsigned i = 0; i < intrin->num_components; i++)
            value.ssa[i] = intrin->src[0].ssa;

         nir_deref_var *dst = intrin->variables[0];
         unsigned wrmask = nir_intrinsic_write_mask(intrin);
         struct copy_entry *entry =
            get_entry_and_kill_aliases(state, dst, wrmask);
         store_to_entry(state, entry, &value, wrmask, &intrin->instr);
         break;
      }

      case nir_intrinsic_copy_var: {
         nir_deref_var *dst = intrin->variables[0];
         nir_deref_var *src = intrin->variables[1];

         if (compare_derefs(src, dst) & derefs_equal_bit) {
            /* This is a no-op self-copy.  Get rid of it. */
            nir_instr_remove(instr);
            continue;
         }

         mark_aliased_entries_as_read(state, src, 0xf);

         struct copy_entry *src_entry =
            lookup_entry_for_deref(state, src, derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               nir_store_deref_var(b, dst, value.ssa[0], 0xf);
               intrin = nir_instr_as_intrinsic(nir_builder_last_instr(b));
            } else {
               /* If this would be a no-op self-copy, don't bother. */
               if (compare_derefs(value.deref, dst) & derefs_equal_bit)
                  continue;

               /* Just turn it into a copy of a different deref */
               ralloc_steal(intrin, value.deref);
               intrin->variables[1] = value.deref;

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
            }

            state->progress = true;
         } else {
            value = (struct value) {
               .is_ssa = false,
               { .deref = src },
            };
         }

         struct copy_entry *dst_entry =
            get_entry_and_kill_aliases(state, dst, 0xf);
         store_to_entry(state, dst_entry, &value, 0xf, &intrin->instr);
         break;
      }

      default:
         break;
      }
   }
}
bool
nir_opt_copy_prop_vars(nir_shader *shader)
{
   struct copy_prop_var_state state;

   nir_assert_lowered_derefs(shader, nir_lower_load_store_derefs);

   state.shader = shader;
   state.mem_ctx = ralloc_context(NULL);
   list_inithead(&state.copies);
   list_inithead(&state.copy_free_list);

   bool global_progress = false;
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      state.progress = false;
      nir_foreach_block(block, function->impl)
         copy_prop_vars_block(&state, &b, block);

      if (state.progress) {
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
         global_progress = true;
      }
   }

   ralloc_free(state.mem_ctx);

   return global_progress;
}
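
/* Typical usage (an illustrative sketch, not taken from this file): drivers
 * run this pass inside their NIR optimization loop alongside other cleanup
 * passes until nothing makes progress any more, e.g.
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_copy_prop_vars(shader);
 *       progress |= nir_opt_dce(shader);
 *       progress |= nir_opt_cse(shader);
 *    } while (progress);
 */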