/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/bitscan.h"

/*
 * Variable-based copy propagation
 *
 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
 * However, there are cases, especially when dealing with indirects, where SSA
 * won't help you.  This pass is for those times.  Specifically, it handles
 * the following things that the rest of NIR can't:
 *
 *  1) Copy-propagation on variables that have indirect access.  This includes
 *     propagating from indirect stores into indirect loads.
 *
 *  2) Dead code elimination of store_var and copy_var intrinsics based on
 *     killed destination values.
 *
 *  3) Removal of redundant load_deref intrinsics.  We can't trust regular CSE
 *     to do this because it isn't aware of variable writes that may alias the
 *     value and make the former load invalid.
 *
 * Unfortunately, properly handling all of those cases makes this pass rather
 * complex.  In order to avoid additional complexity, this pass is entirely
 * block-local.  If we tried to make it global, the data-flow analysis would
 * rapidly get out of hand.  Fortunately, for anything that is only ever
 * accessed directly, we get SSA based copy-propagation which is extremely
 * powerful so this isn't that great a loss.
 */

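/* For illustration only (this example is not part of the original file):
 * given GLSL-like code such as
 *
 *    arr[i] = x;
 *    y = arr[i];
 *
 * SSA-based copy propagation cannot connect y to x because the access goes
 * through memory with an indirect index.  Within a single block, this pass
 * remembers that arr[i] currently holds x and rewrites the load so that y
 * uses x directly; the load (and, if nothing else reads it, the store) can
 * then be deleted.
 */
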
struct value {
   bool is_ssa;
   union {
      nir_ssa_def *ssa[4];
      nir_deref_instr *deref;
   };
};

struct copy_entry {
   struct list_head link;

   nir_instr *store_instr[4];

   unsigned comps_may_be_read;
   struct value src;

   nir_deref_instr *dst;
};

struct copy_prop_var_state {
   nir_shader *shader;

   void *mem_ctx;

   struct list_head copies;

   /* We're going to be allocating and deleting a lot of copy entries so we'll
    * keep a free list to avoid thrashing malloc too badly.
    */
   struct list_head copy_free_list;

   bool progress;
};

static struct copy_entry *
copy_entry_create(struct copy_prop_var_state *state,
                  nir_deref_instr *dst_deref)
{
   struct copy_entry *entry;
   if (!list_empty(&state->copy_free_list)) {
      struct list_head *item = state->copy_free_list.next;
      list_del(item);
      entry = LIST_ENTRY(struct copy_entry, item, link);
      memset(entry, 0, sizeof(*entry));
   } else {
      entry = rzalloc(state->mem_ctx, struct copy_entry);
   }

   entry->dst = dst_deref;
   list_add(&entry->link, &state->copies);

   return entry;
}

static void
copy_entry_remove(struct copy_prop_var_state *state, struct copy_entry *entry)
{
   list_del(&entry->link);
   list_add(&entry->link, &state->copy_free_list);
}

enum deref_compare_result {
   derefs_equal_bit = (1 << 0),
   derefs_may_alias_bit = (1 << 1),
   derefs_a_contains_b_bit = (1 << 2),
   derefs_b_contains_a_bit = (1 << 3),
};

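/* A sketch of how these bits combine in practice (illustrative, assuming
 * paths rooted at the same variable; "arr[*]" is an array wildcard and i/j
 * are distinct indirect indices):
 *
 *    arr[0] vs. arr[0]: equal | may_alias | a_contains_b | b_contains_a
 *    arr[*] vs. arr[0]: may_alias | a_contains_b
 *    arr[0] vs. arr[1]: 0 (provably disjoint)
 *    arr[i] vs. arr[j]: may_alias (containment is not provable)
 */
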
/** Compares two deref paths
 *
 * Returns a bitmask describing how the storage referenced by the two paths
 * relates: whether they may alias, whether one completely contains the
 * other, and whether they are equal.
 *
 * NOTE: This is fairly general and could be moved to core NIR if someone else
 * ever needs it.
 */
static enum deref_compare_result
compare_deref_paths(nir_deref_path *a_path,
                    nir_deref_path *b_path)
{
   if (a_path->path[0]->var != b_path->path[0]->var)
      return 0;

   /* Start off assuming they fully compare.  We ignore equality for now.  In
    * the end, we'll determine that by containment.
    */
   enum deref_compare_result result = derefs_may_alias_bit |
                                      derefs_a_contains_b_bit |
                                      derefs_b_contains_a_bit;

   nir_deref_instr **a_p = &a_path->path[1];
   nir_deref_instr **b_p = &b_path->path[1];
   while (*a_p != NULL && *b_p != NULL) {
      nir_deref_instr *a_tail = *(a_p++);
      nir_deref_instr *b_tail = *(b_p++);

      switch (a_tail->deref_type) {
      case nir_deref_type_array:
      case nir_deref_type_array_wildcard: {
         assert(b_tail->deref_type == nir_deref_type_array ||
                b_tail->deref_type == nir_deref_type_array_wildcard);

         if (a_tail->deref_type == nir_deref_type_array_wildcard) {
            if (b_tail->deref_type != nir_deref_type_array_wildcard)
               result &= ~derefs_b_contains_a_bit;
         } else if (b_tail->deref_type == nir_deref_type_array_wildcard) {
            if (a_tail->deref_type != nir_deref_type_array_wildcard)
               result &= ~derefs_a_contains_b_bit;
         } else {
            assert(a_tail->deref_type == nir_deref_type_array &&
                   b_tail->deref_type == nir_deref_type_array);
            assert(a_tail->arr.index.is_ssa && b_tail->arr.index.is_ssa);

            nir_const_value *a_index_const =
               nir_src_as_const_value(a_tail->arr.index);
            nir_const_value *b_index_const =
               nir_src_as_const_value(b_tail->arr.index);
            if (a_index_const && b_index_const) {
               /* If they're both direct and have different offsets, they
                * don't even alias much less anything else.
                */
               if (a_index_const->u32[0] != b_index_const->u32[0])
                  return 0;
            } else if (a_tail->arr.index.ssa == b_tail->arr.index.ssa) {
               /* They're the same indirect, continue on */
            } else {
               /* They're not the same index so we can't prove anything about
                * containment.
                */
               result &= ~(derefs_a_contains_b_bit | derefs_b_contains_a_bit);
            }
         }
         break;
      }

      case nir_deref_type_struct: {
         /* If they're different struct members, they don't even alias */
         if (a_tail->strct.index != b_tail->strct.index)
            return 0;
         break;
      }

      default:
         unreachable("Invalid deref type");
      }
   }

   /* If a is longer than b, then it can't contain b */
   if (*a_p != NULL)
      result &= ~derefs_a_contains_b_bit;
   if (*b_p != NULL)
      result &= ~derefs_b_contains_a_bit;

   /* If a contains b and b contains a they must be equal. */
   if ((result & derefs_a_contains_b_bit) && (result & derefs_b_contains_a_bit))
      result |= derefs_equal_bit;

   return result;
}

static enum deref_compare_result
compare_derefs(nir_deref_instr *a, nir_deref_instr *b)
{
   if (a == b) {
      return derefs_equal_bit | derefs_may_alias_bit |
             derefs_a_contains_b_bit | derefs_b_contains_a_bit;
   }

   nir_deref_path a_path, b_path;
   nir_deref_path_init(&a_path, a, NULL);
   nir_deref_path_init(&b_path, b, NULL);
   assert(a_path.path[0]->deref_type == nir_deref_type_var);
   assert(b_path.path[0]->deref_type == nir_deref_type_var);

   enum deref_compare_result result = compare_deref_paths(&a_path, &b_path);

   nir_deref_path_finish(&a_path);
   nir_deref_path_finish(&b_path);

   return result;
}

static void
remove_dead_writes(struct copy_prop_var_state *state,
                   struct copy_entry *entry, unsigned write_mask)
{
   /* We're overwriting another entry.  Some of its components may not
    * have been read yet and, if that's the case, we may be able to delete
    * some instructions but we have to be careful.
    */
   unsigned dead_comps = write_mask & ~entry->comps_may_be_read;

   for (unsigned mask = dead_comps; mask;) {
      unsigned i = u_bit_scan(&mask);

      nir_instr *instr = entry->store_instr[i];

      /* We may have already deleted it on a previous iteration */
      if (!instr)
         continue;

      /* See if this instr is used anywhere that it's not dead */
      bool keep = false;
      for (unsigned j = 0; j < 4; j++) {
         if (entry->store_instr[j] == instr) {
            if (dead_comps & (1 << j)) {
               entry->store_instr[j] = NULL;
            } else {
               keep = true;
            }
         }
      }

      if (!keep) {
         nir_instr_remove(instr);
         state->progress = true;
      }
   }
}

static struct copy_entry *
lookup_entry_for_deref(struct copy_prop_var_state *state,
                       nir_deref_instr *deref,
                       enum deref_compare_result allowed_comparisons)
{
   list_for_each_entry(struct copy_entry, iter, &state->copies, link) {
      if (compare_derefs(iter->dst, deref) & allowed_comparisons)
         return iter;
   }

   return NULL;
}

static void
mark_aliased_entries_as_read(struct copy_prop_var_state *state,
                             nir_deref_instr *deref, unsigned components)
{
   list_for_each_entry(struct copy_entry, iter, &state->copies, link) {
      if (compare_derefs(iter->dst, deref) & derefs_may_alias_bit)
         iter->comps_may_be_read |= components;
   }
}

static struct copy_entry *
get_entry_and_kill_aliases(struct copy_prop_var_state *state,
                           nir_deref_instr *deref,
                           unsigned write_mask)
{
   struct copy_entry *entry = NULL;
   list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link) {
      if (!iter->src.is_ssa) {
         /* If this write aliases the source of some entry, get rid of it */
         if (compare_derefs(iter->src.deref, deref) & derefs_may_alias_bit) {
            copy_entry_remove(state, iter);
            continue;
         }
      }

      enum deref_compare_result comp = compare_derefs(iter->dst, deref);
      /* This is a store operation.  If we completely overwrite some value, we
       * want to delete any dead writes that may be present.
       */
      if (comp & derefs_b_contains_a_bit)
         remove_dead_writes(state, iter, write_mask);

      if (comp & derefs_equal_bit) {
         assert(entry == NULL);
         entry = iter;
      } else if (comp & derefs_may_alias_bit) {
         copy_entry_remove(state, iter);
      }
   }

   if (entry == NULL)
      entry = copy_entry_create(state, deref);

   return entry;
}

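/* Illustration (not from the original source): with entries tracked for
 * foo[0] and foo[1], a store to foo[i] with an indirect index may alias
 * both, so both entries are killed and a fresh entry for foo[i] is
 * returned.  A store to exactly foo[0] instead reuses the foo[0] entry,
 * and remove_dead_writes() may delete an earlier store to foo[0] whose
 * components were never read.
 */
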
static void
apply_barrier_for_modes(struct copy_prop_var_state *state,
                        nir_variable_mode modes)
{
   list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link) {
      nir_variable *dst_var = nir_deref_instr_get_variable(iter->dst);
      nir_variable *src_var = iter->src.is_ssa ? NULL :
         nir_deref_instr_get_variable(iter->src.deref);

      if ((dst_var->data.mode & modes) ||
          (src_var && (src_var->data.mode & modes)))
         copy_entry_remove(state, iter);
   }
}

static void
store_to_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
               const struct value *value, unsigned write_mask,
               nir_instr *store_instr)
{
   entry->comps_may_be_read &= ~write_mask;
   if (value->is_ssa) {
      entry->src.is_ssa = true;
      /* Only overwrite the written components */
      for (unsigned i = 0; i < 4; i++) {
         if (write_mask & (1 << i)) {
            entry->store_instr[i] = store_instr;
            entry->src.ssa[i] = value->ssa[i];
         }
      }
   } else {
      /* Non-ssa stores always write everything */
      entry->src.is_ssa = false;
      entry->src.deref = value->deref;
      for (unsigned i = 0; i < 4; i++)
         entry->store_instr[i] = store_instr;
   }
}

/* Do a "load" from an SSA-based entry and return it in "value" as a value
 * with a single SSA def.  Because an entry could reference up to 4 different
 * SSA defs, a vecN operation may be inserted to combine them into a single
 * SSA def before handing it back to the caller.  If the load instruction is
 * no longer needed, it is removed and nir_instr::block is set to NULL.  (It
 * is possible, in some cases, for the load to be used in the vecN operation
 * in which case it isn't deleted.)
 */
static bool
load_from_ssa_entry_value(struct copy_prop_var_state *state,
                          struct copy_entry *entry,
                          nir_builder *b, nir_intrinsic_instr *intrin,
                          struct value *value)
{
   *value = entry->src;
   assert(value->is_ssa);

   const struct glsl_type *type = entry->dst->type;
   unsigned num_components = glsl_get_vector_elements(type);

   nir_component_mask_t available = 0;
   bool all_same = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i])
         available |= (1 << i);

      if (value->ssa[i] != value->ssa[0])
         all_same = false;
   }

   if (all_same) {
      /* Our work here is done */
      b->cursor = nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
      return true;
   }

   if (available != (1 << num_components) - 1 &&
       intrin->intrinsic == nir_intrinsic_load_deref &&
       (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
      /* If none of the components read are available as SSA values, then we
       * should just bail.  Otherwise, we would end up replacing the uses of
       * the load_deref with a vecN() that just gathers up its components.
       */
      return false;
   }

   b->cursor = nir_after_instr(&intrin->instr);

   nir_ssa_def *load_def =
      intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;

   bool keep_intrin = false;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa[i]) {
         comps[i] = nir_channel(b, value->ssa[i], i);
      } else {
         /* We don't have anything for this component in our
          * list.  Just re-use a channel from the load.
          */
         if (load_def == NULL)
            load_def = nir_load_deref(b, entry->dst);

         if (load_def->parent_instr == &intrin->instr)
            keep_intrin = true;

         comps[i] = nir_channel(b, load_def, i);
      }
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);
   for (unsigned i = 0; i < num_components; i++)
      value->ssa[i] = vec;

   if (!keep_intrin) {
      /* Removing this instruction should not touch the cursor because we
       * created the cursor after the intrinsic and have added at least one
       * instruction (the vec) since then.
       */
      assert(b->cursor.instr != &intrin->instr);
      nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
   }

   return true;
}

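/* Sketch of the vecN path above (illustrative, not from the original file):
 * if the entry knows SSA values for components x and y of a vec4 but z and
 * w were never written, the value handed back is effectively
 *
 *    vec4(known.x, known.y, load.z, load.w)
 *
 * where "load" is either the original load_deref (kept in place) or a
 * freshly built load of entry->dst.
 */
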
/** Specialize the wildcards in a deref chain
 *
 * This function returns a deref chain identical to \param deref except that
 * some of its wildcards are replaced with indices from \param specific.  The
 * process is guided by \param guide which references the same type as \param
 * specific but has the same wildcard array lengths as \param deref.
 */
static nir_deref_instr *
specialize_wildcards(nir_builder *b,
                     nir_deref_path *deref,
                     nir_deref_path *guide,
                     nir_deref_path *specific)
{
   nir_deref_instr **deref_p = &deref->path[1];
   nir_deref_instr **guide_p = &guide->path[1];
   nir_deref_instr **spec_p = &specific->path[1];
   nir_deref_instr *ret_tail = deref->path[0];
   for (; *deref_p; deref_p++) {
      if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
         /* This is where things get tricky.  We have to search through
          * the entry deref to find its corresponding wildcard and fill
          * this slot in with the value from the src.
          */
         while (*guide_p &&
                (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
            guide_p++;
            spec_p++;
         }
         assert(*guide_p && *spec_p);

         ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);

         guide_p++;
         spec_p++;
      } else {
         ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
      }
   }

   return ret_tail;
}

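/* Worked example (illustrative): suppose an entry records the copy
 * foo[*] = bar[*] and we then load from foo[3].  Here "deref" is the
 * entry source path bar[*], "guide" is the entry destination path foo[*],
 * and "specific" is the load path foo[3].  Walking the paths in lock-step
 * replaces the wildcard in bar[*] with the index 3 taken from foo[3],
 * yielding bar[3] as the deref to actually load from.
 */
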
/* Do a "load" from a deref-based entry and return it in "value" as a value.
 * The deref returned in "value" will always be a fresh copy so the caller
 * can steal it and assign it to the instruction directly without copying it
 * over again.
 */
static bool
load_from_deref_entry_value(struct copy_prop_var_state *state,
                            struct copy_entry *entry,
                            nir_builder *b, nir_intrinsic_instr *intrin,
                            nir_deref_instr *src, struct value *value)
{
   *value = entry->src;

   b->cursor = nir_instr_remove(&intrin->instr);

   nir_deref_path entry_dst_path, src_path;
   nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
   nir_deref_path_init(&src_path, src, state->mem_ctx);

   bool need_to_specialize_wildcards = false;
   nir_deref_instr **entry_p = &entry_dst_path.path[1];
   nir_deref_instr **src_p = &src_path.path[1];
   while (*entry_p && *src_p) {
      nir_deref_instr *entry_tail = *entry_p++;
      nir_deref_instr *src_tail = *src_p++;

      if (src_tail->deref_type == nir_deref_type_array &&
          entry_tail->deref_type == nir_deref_type_array_wildcard)
         need_to_specialize_wildcards = true;
   }

   /* If the entry deref is longer than the source deref then it refers to a
    * smaller type and we can't source from it.
    */
   assert(*entry_p == NULL);

   if (need_to_specialize_wildcards) {
      /* The entry has some wildcards that are not in src.  This means we need
       * to construct a new deref based on the entry but using the wildcards
       * from the source and guided by the entry dst.  Oof.
       */
      nir_deref_path entry_src_path;
      nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
      value->deref = specialize_wildcards(b, &entry_src_path,
                                          &entry_dst_path, &src_path);
      nir_deref_path_finish(&entry_src_path);
   }

   /* If our source deref is longer than the entry deref, that's ok because
    * it just means the entry deref needs to be extended a bit.
    */
   while (*src_p) {
      nir_deref_instr *src_tail = *src_p++;
      value->deref = nir_build_deref_follower(b, value->deref, src_tail);
   }

   nir_deref_path_finish(&entry_dst_path);
   nir_deref_path_finish(&src_path);

   return true;
}

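/* Illustration (assumed example): if the entry records the copy
 * foo.a = bar.a where both are structs, a load from foo.a.b matches the
 * entry for foo.a by containment.  The trailing while loop above extends
 * the copied-from deref the same way, turning bar.a into bar.a.b before
 * it is returned in "value".
 */
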
static bool
try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
                    nir_builder *b, nir_intrinsic_instr *intrin,
                    nir_deref_instr *src, struct value *value)
{
   if (entry == NULL)
      return false;

   if (entry->src.is_ssa) {
      return load_from_ssa_entry_value(state, entry, b, intrin, value);
   } else {
      return load_from_deref_entry_value(state, entry, b, intrin, src, value);
   }
}

static void
copy_prop_vars_block(struct copy_prop_var_state *state,
                     nir_builder *b, nir_block *block)
{
   /* Start each block with a blank slate */
   list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link)
      copy_entry_remove(state, iter);

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_barrier:
      case nir_intrinsic_memory_barrier:
         /* If we hit a barrier, we need to trash everything that may possibly
          * be accessible to another thread.  Locals, globals, and things of
          * the like are safe, however.
          */
         apply_barrier_for_modes(state, ~(nir_var_local | nir_var_global |
                                          nir_var_shader_in | nir_var_uniform));
         break;

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter:
         apply_barrier_for_modes(state, nir_var_shader_out);
         break;

      case nir_intrinsic_load_deref: {
         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);

         uint8_t comps_read = nir_ssa_def_components_read(&intrin->dest.ssa);
         mark_aliased_entries_as_read(state, src, comps_read);

         struct copy_entry *src_entry =
            lookup_entry_for_deref(state, src, derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               /* try_load_from_entry has already ensured that we get a
                * single SSA value that has all of the channels.  We just
                * have to do the rewrite operation.
                */
               if (intrin->instr.block) {
                  /* The lookup left our instruction in-place.  This means it
                   * must have used it to vec up a bunch of different sources.
                   * We need to be careful when rewriting uses so we don't
                   * rewrite the vecN itself.
                   */
                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                                 nir_src_for_ssa(value.ssa[0]),
                                                 value.ssa[0]->parent_instr);
               } else {
                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                           nir_src_for_ssa(value.ssa[0]));
               }
            } else {
               /* We're turning it into a load of a different variable */
               intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);

               value.is_ssa = true;
               for (unsigned i = 0; i < intrin->num_components; i++)
                  value.ssa[i] = &intrin->dest.ssa;
            }
            state->progress = true;
         } else {
            value.is_ssa = true;
            for (unsigned i = 0; i < intrin->num_components; i++)
               value.ssa[i] = &intrin->dest.ssa;
         }

         /* Now that we have a value, we're going to store it back so that we
          * have the right value next time we come looking for it.  In order
          * to do this, we need an exact match, not just something that
          * contains what we're looking for.
          */
         struct copy_entry *store_entry =
            lookup_entry_for_deref(state, src, derefs_equal_bit);
         if (!store_entry)
            store_entry = copy_entry_create(state, src);

         /* Set up a store to this entry with the value of the load.  This
          * way we can potentially remove subsequent loads.  However, we use
          * a NULL instruction so we don't try and delete the load on a
          * subsequent store.
          */
         store_to_entry(state, store_entry, &value,
                        ((1 << intrin->num_components) - 1), NULL);
         break;
      }

      case nir_intrinsic_store_deref: {
         struct value value = {
            .is_ssa = true
         };

         for (unsigned i = 0; i < intrin->num_components; i++)
            value.ssa[i] = intrin->src[1].ssa;

         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         unsigned wrmask = nir_intrinsic_write_mask(intrin);
         struct copy_entry *entry =
            get_entry_and_kill_aliases(state, dst, wrmask);
         store_to_entry(state, entry, &value, wrmask, &intrin->instr);
         break;
      }

      case nir_intrinsic_copy_deref: {
         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);

         if (compare_derefs(src, dst) & derefs_equal_bit) {
            /* This is a no-op self-copy.  Get rid of it */
            nir_instr_remove(instr);
            continue;
         }

         mark_aliased_entries_as_read(state, src, 0xf);

         struct copy_entry *src_entry =
            lookup_entry_for_deref(state, src, derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               nir_store_deref(b, dst, value.ssa[0], 0xf);
               intrin = nir_instr_as_intrinsic(nir_builder_last_instr(b));
            } else {
               /* If this would be a no-op self-copy, don't bother. */
               if (compare_derefs(value.deref, dst) & derefs_equal_bit)
                  continue;

               /* Just turn it into a copy of a different deref */
               intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
            }
            state->progress = true;
         } else {
            value = (struct value) {
               .is_ssa = false,
               { .deref = src },
            };
         }

         struct copy_entry *dst_entry =
            get_entry_and_kill_aliases(state, dst, 0xf);
         store_to_entry(state, dst_entry, &value, 0xf, &intrin->instr);
         break;
      }

      default:
         break;
      }
   }
}

bool
nir_opt_copy_prop_vars(nir_shader *shader)
{
   struct copy_prop_var_state state;

   state.shader = shader;
   state.mem_ctx = ralloc_context(NULL);
   list_inithead(&state.copies);
   list_inithead(&state.copy_free_list);

   bool global_progress = false;
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      state.progress = false;
      nir_foreach_block(block, function->impl)
         copy_prop_vars_block(&state, &b, block);

      if (state.progress) {
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
         global_progress = true;
      }
   }

   ralloc_free(state.mem_ctx);

   return global_progress;
}