/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/bitscan.h"
#include "util/u_dynarray.h"
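/* When set, the pass prints each visited instruction and the set of live
 * copy entries as it walks the shader (see dump_instr() and
 * dump_copy_entries() below).
 */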
static const bool debug = false;
/**
 * Variable-based copy propagation
 *
 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
 * However, there are cases, especially when dealing with indirects, where SSA
 * won't help you.  This pass is for those times.  Specifically, it handles
 * the following things that the rest of NIR can't:
 *
 *  1) Copy-propagation on variables that have indirect access.  This includes
 *     propagating from indirect stores into indirect loads.
 *
 *  2) Removal of redundant load_deref intrinsics.  We can't trust regular CSE
 *     to do this because it isn't aware of variable writes that may alias the
 *     value and make the former load invalid.
 *
 * This pass uses an intermediate solution between being local / "per-block"
 * and a complete data-flow analysis.  It follows the control flow graph and
 * propagates the available copy information forward, invalidating data at
 * each control flow node.
 *
 * Removal of dead writes to variables is handled by another pass.
 */
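/* As a rough illustration (hypothetical NIR-like snippet, not actual pass
 * output), given
 *
 *    store_deref &arr[ssa_0], ssa_1
 *    vec4 32 ssa_2 = load_deref &arr[ssa_0]
 *
 * the load can be satisfied from ssa_1 even though the index is indirect,
 * and a second identical load_deref afterwards would be dropped entirely.
 */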
struct vars_written {
   nir_variable_mode modes;

   /* Key is deref and value is the uintptr_t with the write mask. */
   struct hash_table *derefs;
};
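/* A value tracked by the pass: either a deref (for variable-to-variable
 * copies) or, in the SSA case, one (def, component) pair per vector channel,
 * so a single entry may mix channels coming from several different SSA defs.
 */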
struct value {
   bool is_ssa;
   union {
      struct {
         nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS];
         uint8_t component[NIR_MAX_VEC_COMPONENTS];
      } ssa;
      nir_deref_instr *deref;
   };
};
static void
value_set_ssa_components(struct value *value, nir_ssa_def *def,
                         unsigned num_components)
{
   if (!value->is_ssa)
      memset(&value->ssa, 0, sizeof(value->ssa));
   value->is_ssa = true;
   for (unsigned i = 0; i < num_components; i++) {
      value->ssa.def[i] = def;
      value->ssa.component[i] = i;
   }
}

struct copy_entry {
   struct value src;

   nir_deref_instr *dst;
};
struct copy_prop_var_state {
   nir_function_impl *impl;

   void *mem_ctx;
   void *lin_ctx;

   /* Maps nodes to vars_written.  Used to invalidate copy entries when
    * visiting each node.
    */
   struct hash_table *vars_written_map;

   bool progress;
};
static bool
value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
{
   assert(intrin->intrinsic == nir_intrinsic_store_deref);
   uintptr_t write_mask = nir_intrinsic_write_mask(intrin);

   for (unsigned i = 0; i < intrin->num_components; i++) {
      if ((write_mask & (1 << i)) &&
          (value->ssa.def[i] != intrin->src[1].ssa ||
           value->ssa.component[i] != i))
         return false;
   }

   return true;
}
static struct vars_written *
create_vars_written(struct copy_prop_var_state *state)
{
   struct vars_written *written =
      linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
   written->derefs = _mesa_pointer_hash_table_create(state->mem_ctx);
   return written;
}
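/* Pre-pass over the CF graph: for every if and loop node, record which
 * variable modes and which derefs may be written anywhere inside it.  The
 * main walk below consults this map (state->vars_written_map) to invalidate
 * copy entries without having to re-scan node bodies.
 */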
static void
gather_vars_written(struct copy_prop_var_state *state,
                    struct vars_written *written,
                    nir_cf_node *cf_node)
{
   struct vars_written *new_written = NULL;

   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);
      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         gather_vars_written(state, NULL, cf_node);
      break;
   }

   case nir_cf_node_block: {
      if (!written)
         break;

      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_call) {
            written->modes |= nir_var_shader_out |
                              nir_var_shader_temp |
                              nir_var_function_temp |
                              nir_var_mem_ssbo |
                              nir_var_mem_shared;
            continue;
         }

         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_control_barrier:
         case nir_intrinsic_memory_barrier:
            written->modes |= nir_var_shader_out |
                              nir_var_mem_ssbo |
                              nir_var_mem_shared;
            break;

         case nir_intrinsic_scoped_memory_barrier:
            if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
               written->modes |= nir_intrinsic_memory_modes(intrin);
            break;

         case nir_intrinsic_emit_vertex:
         case nir_intrinsic_emit_vertex_with_counter:
            written->modes = nir_var_shader_out;
            break;

         case nir_intrinsic_deref_atomic_add:
         case nir_intrinsic_deref_atomic_imin:
         case nir_intrinsic_deref_atomic_umin:
         case nir_intrinsic_deref_atomic_imax:
         case nir_intrinsic_deref_atomic_umax:
         case nir_intrinsic_deref_atomic_and:
         case nir_intrinsic_deref_atomic_or:
         case nir_intrinsic_deref_atomic_xor:
         case nir_intrinsic_deref_atomic_exchange:
         case nir_intrinsic_deref_atomic_comp_swap:
         case nir_intrinsic_store_deref:
         case nir_intrinsic_copy_deref: {
            /* Destination in all of store_deref, copy_deref and the atomics is src[0]. */
            nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);

            uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
               nir_intrinsic_write_mask(intrin) : (1 << glsl_get_vector_elements(dst->type)) - 1;

            struct hash_entry *ht_entry = _mesa_hash_table_search(written->derefs, dst);
            if (ht_entry)
               ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
            else
               _mesa_hash_table_insert(written->derefs, dst, (void *)mask);

            break;
         }

         default:
            break;
         }
      }

      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         gather_vars_written(state, new_written, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      new_written = create_vars_written(state);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         gather_vars_written(state, new_written, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }

   if (new_written) {
      /* Merge new information to the parent control flow node. */
      if (written) {
         written->modes |= new_written->modes;
         hash_table_foreach(new_written->derefs, new_entry) {
            struct hash_entry *old_entry =
               _mesa_hash_table_search_pre_hashed(written->derefs, new_entry->hash,
                                                  new_entry->key);
            if (old_entry) {
               nir_component_mask_t merged = (uintptr_t) new_entry->data |
                                             (uintptr_t) old_entry->data;
               old_entry->data = (void *) ((uintptr_t) merged);
            } else {
               _mesa_hash_table_insert_pre_hashed(written->derefs, new_entry->hash,
                                                  new_entry->key, new_entry->data);
            }
         }
      }
      _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
   }
}
static struct copy_entry *
copy_entry_create(struct util_dynarray *copies,
                  nir_deref_instr *dst_deref)
{
   struct copy_entry new_entry = {
      .dst = dst_deref,
   };
   util_dynarray_append(copies, struct copy_entry, new_entry);
   return util_dynarray_top_ptr(copies, struct copy_entry);
}
/* Remove copy entry by swapping it with the last element and reducing the
 * size.  If used inside an iteration on copies, it must be a reverse
 * (backwards) iteration.  It is safe to use in those cases because the swap
 * will not affect the rest of the iteration.
 */
static void
copy_entry_remove(struct util_dynarray *copies,
                  struct copy_entry *entry)
{
   /* This also works when removing the last element since pop doesn't shrink
    * the memory used by the array, so the swap is useless but not invalid.
    */
   *entry = util_dynarray_pop(copies, struct copy_entry);
}
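/* Returns true for derefs like v[i] where v is a vector (e.g. a vec4), i.e.
 * an array deref whose parent type is a vector rather than an actual array.
 */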
static bool
is_array_deref_of_vector(nir_deref_instr *deref)
{
   if (deref->deref_type != nir_deref_type_array)
      return false;
   nir_deref_instr *parent = nir_deref_instr_parent(deref);
   return glsl_type_is_vector(parent->type);
}
static struct copy_entry *
lookup_entry_for_deref(struct util_dynarray *copies,
                       nir_deref_instr *deref,
                       nir_deref_compare_result allowed_comparisons)
{
   struct copy_entry *entry = NULL;
   util_dynarray_foreach(copies, struct copy_entry, iter) {
      nir_deref_compare_result result = nir_compare_derefs(iter->dst, deref);
      if (result & allowed_comparisons) {
         entry = iter;
         if (result & nir_derefs_equal_bit)
            break;
         /* Keep looking in case we have an equal match later in the array. */
      }
   }
   return entry;
}
static struct copy_entry *
lookup_entry_and_kill_aliases(struct util_dynarray *copies,
                              nir_deref_instr *deref,
                              unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   nir_deref_instr *dst_match = NULL;
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      if (!iter->src.is_ssa) {
         /* If this write aliases the source of some entry, get rid of it */
         if (nir_compare_derefs(iter->src.deref, deref) & nir_derefs_may_alias_bit) {
            copy_entry_remove(copies, iter);
            continue;
         }
      }

      nir_deref_compare_result comp = nir_compare_derefs(iter->dst, deref);

      if (comp & nir_derefs_equal_bit) {
         /* Removing entries invalidates previous iter pointers, so we'll
          * collect the matching entry later.  Just make sure it is unique.
          */
         assert(!dst_match);
         dst_match = iter->dst;
      } else if (comp & nir_derefs_may_alias_bit) {
         copy_entry_remove(copies, iter);
      }
   }

   struct copy_entry *entry = NULL;
   if (dst_match) {
      util_dynarray_foreach(copies, struct copy_entry, iter) {
         if (iter->dst == dst_match) {
            entry = iter;
            break;
         }
      }
      assert(entry);
   }
   return entry;
}
static void
kill_aliases(struct util_dynarray *copies,
             nir_deref_instr *deref,
             unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);
   if (entry)
      copy_entry_remove(copies, entry);
}
static struct copy_entry *
get_entry_and_kill_aliases(struct util_dynarray *copies,
                           nir_deref_instr *deref,
                           unsigned write_mask)
{
   /* TODO: Take into account the write_mask. */

   struct copy_entry *entry =
      lookup_entry_and_kill_aliases(copies, deref, write_mask);

   if (entry == NULL)
      entry = copy_entry_create(copies, deref);

   return entry;
}
static void
apply_barrier_for_modes(struct util_dynarray *copies,
                        nir_variable_mode modes)
{
   util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
      if ((iter->dst->mode & modes) ||
          (!iter->src.is_ssa && (iter->src.deref->mode & modes)))
         copy_entry_remove(copies, iter);
   }
}
static void
value_set_from_value(struct value *value, const struct value *from,
                     unsigned base_index, unsigned write_mask)
{
   /* We can't have non-zero indexes with non-trivial write masks */
   assert(base_index == 0 || write_mask == 1);

   if (from->is_ssa) {
      /* Clear value if it was being used as non-SSA. */
      if (!value->is_ssa)
         memset(&value->ssa, 0, sizeof(value->ssa));
      value->is_ssa = true;
      /* Only overwrite the written components */
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
         if (write_mask & (1 << i)) {
            value->ssa.def[base_index + i] = from->ssa.def[i];
            value->ssa.component[base_index + i] = from->ssa.component[i];
         }
      }
   } else {
      /* Non-ssa stores always write everything */
      value->is_ssa = false;
      value->deref = from->deref;
   }
}
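/* For example, a store to the constant-indexed element v[2] of a vec4 is
 * recorded by calling this with base_index == 2 and write_mask == 1, landing
 * the scalar value in channel 2 of the vector's entry.
 */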
/* Try to load a single element of a vector from the copy_entry.  If the data
 * isn't available, just let the original intrinsic do the work.
 */
static bool
load_element_from_ssa_entry_value(struct copy_prop_var_state *state,
                                  struct copy_entry *entry,
                                  nir_builder *b, nir_intrinsic_instr *intrin,
                                  struct value *value, unsigned index)
{
   assert(index < glsl_get_vector_elements(entry->dst->type));

   /* We don't have the element available, so let the instruction do the work. */
   if (!entry->src.ssa.def[index])
      return false;

   b->cursor = nir_instr_remove(&intrin->instr);
   intrin->instr.block = NULL;

   assert(entry->src.ssa.component[index] <
          entry->src.ssa.def[index]->num_components);
   nir_ssa_def *def = nir_channel(b, entry->src.ssa.def[index],
                                  entry->src.ssa.component[index]);

   *value = (struct value) {
      .is_ssa = true,
      {
         .ssa = {
            .def = { def },
            .component = { 0 },
         },
      }
   };

   return true;
}
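/* Note the resulting value carries the element in channel 0 only: a
 * single-element load is treated as a scalar value from here on.
 */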
/* Do a "load" from an SSA-based entry and return it in "value" as a value
 * with a single SSA def.  Because an entry could reference multiple different
 * SSA defs, a vecN operation may be inserted to combine them into a single
 * SSA def before handing it back to the caller.  If the load instruction is
 * no longer needed, it is removed and nir_instr::block is set to NULL.  (It
 * is possible, in some cases, for the load to be used in the vecN operation
 * in which case it isn't deleted.)
 */
static bool
load_from_ssa_entry_value(struct copy_prop_var_state *state,
                          struct copy_entry *entry,
                          nir_builder *b, nir_intrinsic_instr *intrin,
                          nir_deref_instr *src, struct value *value)
{
   if (is_array_deref_of_vector(src)) {
      if (nir_src_is_const(src->arr.index)) {
         return load_element_from_ssa_entry_value(state, entry, b, intrin, value,
                                                  nir_src_as_uint(src->arr.index));
      }

      /* An SSA copy_entry for the vector won't help an indirect load. */
      if (glsl_type_is_vector(entry->dst->type)) {
         assert(entry->dst->type == nir_deref_instr_parent(src)->type);
         /* TODO: If all SSA entries are there, try an if-ladder. */
         return false;
      }
   }

   *value = entry->src;
   assert(value->is_ssa);

   const struct glsl_type *type = entry->dst->type;
   unsigned num_components = glsl_get_vector_elements(type);

   nir_component_mask_t available = 0;
   bool all_same = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa.def[i])
         available |= (1 << i);

      if (value->ssa.def[i] != value->ssa.def[0])
         all_same = false;

      if (value->ssa.component[i] != i)
         all_same = false;
   }

   if (all_same) {
      /* Our work here is done */
      b->cursor = nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
      return true;
   }

   if (available != (1 << num_components) - 1 &&
       intrin->intrinsic == nir_intrinsic_load_deref &&
       (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
      /* If none of the components read are available as SSA values, then we
       * should just bail.  Otherwise, we would end up replacing the uses of
       * the load_deref with a vecN() that just gathers up its components.
       */
      return false;
   }

   b->cursor = nir_after_instr(&intrin->instr);

   nir_ssa_def *load_def =
      intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;

   bool keep_intrin = false;
   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa.def[i]) {
         comps[i] = nir_channel(b, value->ssa.def[i], value->ssa.component[i]);
      } else {
         /* We don't have anything for this component in our
          * list.  Just re-use a channel from the load.
          */
         if (load_def == NULL)
            load_def = nir_load_deref(b, entry->dst);

         if (load_def->parent_instr == &intrin->instr)
            keep_intrin = true;

         comps[i] = nir_channel(b, load_def, i);
      }
   }

   nir_ssa_def *vec = nir_vec(b, comps, num_components);
   value_set_ssa_components(value, vec, num_components);

   if (!keep_intrin) {
      /* Removing this instruction should not touch the cursor because we
       * created the cursor after the intrinsic and have added at least one
       * instruction (the vec) since then.
       */
      assert(b->cursor.instr != &intrin->instr);
      nir_instr_remove(&intrin->instr);
      intrin->instr.block = NULL;
   }

   return true;
}
/** Specialize the wildcards in a deref chain
 *
 * This function returns a deref chain identical to \param deref except that
 * some of its wildcards are replaced with indices from \param specific.  The
 * process is guided by \param guide which references the same type as \param
 * specific but has the same wildcard array lengths as \param deref.
 */
static nir_deref_instr *
specialize_wildcards(nir_builder *b,
                     nir_deref_path *deref,
                     nir_deref_path *guide,
                     nir_deref_path *specific)
{
   nir_deref_instr **deref_p = &deref->path[1];
   nir_deref_instr **guide_p = &guide->path[1];
   nir_deref_instr **spec_p = &specific->path[1];
   nir_deref_instr *ret_tail = deref->path[0];
   for (; *deref_p; deref_p++) {
      if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
         /* This is where things get tricky.  We have to search through
          * the entry deref to find its corresponding wildcard and fill
          * this slot in with the value from the src.
          */
         while (*guide_p &&
                (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
            guide_p++;
            spec_p++;
         }
         assert(*guide_p && *spec_p);

         ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);

         guide_p++;
         spec_p++;
      } else {
         ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
      }
   }

   return ret_tail;
}
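/* Worked example (hypothetical derefs): for a copy entry with dst a[*].b[*]
 * and src c[*].d[*], a load from a[1].b[2] uses the load path as "specific"
 * and the entry dst path as "guide", specializing the entry src into
 * c[1].d[2].
 */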
/* Do a "load" from a deref-based entry and return it in "value" as a value.
 * The deref returned in "value" will always be a fresh copy so the caller can
 * steal it and assign it to the instruction directly without copying it
 * again.
 */
static bool
load_from_deref_entry_value(struct copy_prop_var_state *state,
                            struct copy_entry *entry,
                            nir_builder *b, nir_intrinsic_instr *intrin,
                            nir_deref_instr *src, struct value *value)
{
   *value = entry->src;

   b->cursor = nir_instr_remove(&intrin->instr);

   nir_deref_path entry_dst_path, src_path;
   nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
   nir_deref_path_init(&src_path, src, state->mem_ctx);

   bool need_to_specialize_wildcards = false;
   nir_deref_instr **entry_p = &entry_dst_path.path[1];
   nir_deref_instr **src_p = &src_path.path[1];
   while (*entry_p && *src_p) {
      nir_deref_instr *entry_tail = *entry_p++;
      nir_deref_instr *src_tail = *src_p++;

      if (src_tail->deref_type == nir_deref_type_array &&
          entry_tail->deref_type == nir_deref_type_array_wildcard)
         need_to_specialize_wildcards = true;
   }

   /* If the entry deref is longer than the source deref then it refers to a
    * smaller type and we can't source from it.
    */
   assert(*entry_p == NULL);

   if (need_to_specialize_wildcards) {
      /* The entry has some wildcards that are not in src.  This means we need
       * to construct a new deref based on the entry but using the wildcards
       * from the source and guided by the entry dst.  Oof.
       */
      nir_deref_path entry_src_path;
      nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
      value->deref = specialize_wildcards(b, &entry_src_path,
                                          &entry_dst_path, &src_path);
      nir_deref_path_finish(&entry_src_path);
   }

   /* If our source deref is longer than the entry deref, that's ok because
    * it just means the entry deref needs to be extended a bit.
    */
   while (*src_p) {
      nir_deref_instr *src_tail = *src_p++;
      value->deref = nir_build_deref_follower(b, value->deref, src_tail);
   }

   nir_deref_path_finish(&entry_dst_path);
   nir_deref_path_finish(&src_path);

   return true;
}
static bool
try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
                    nir_builder *b, nir_intrinsic_instr *intrin,
                    nir_deref_instr *src, struct value *value)
{
   if (entry == NULL)
      return false;

   if (entry->src.is_ssa) {
      return load_from_ssa_entry_value(state, entry, b, intrin, src, value);
   } else {
      return load_from_deref_entry_value(state, entry, b, intrin, src, value);
   }
}
static void
invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
                              struct util_dynarray *copies,
                              nir_cf_node *cf_node)
{
   struct hash_entry *ht_entry = _mesa_hash_table_search(state->vars_written_map, cf_node);
   assert(ht_entry);

   struct vars_written *written = ht_entry->data;
   if (written->modes) {
      util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
         if (entry->dst->mode & written->modes)
            copy_entry_remove(copies, entry);
      }
   }

   hash_table_foreach(written->derefs, entry) {
      nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
      kill_aliases(copies, deref_written, (uintptr_t)entry->data);
   }
}
static void
print_value(struct value *value, unsigned num_components)
{
   if (!value->is_ssa) {
      printf(" %s ", glsl_get_type_name(value->deref->type));
      nir_print_deref(value->deref, stdout);
      return;
   }

   bool same_ssa = true;
   for (unsigned i = 0; i < num_components; i++) {
      if (value->ssa.component[i] != i ||
          (i > 0 && value->ssa.def[i - 1] != value->ssa.def[i])) {
         same_ssa = false;
         break;
      }
   }
   if (same_ssa) {
      printf(" ssa_%d", value->ssa.def[0]->index);
   } else {
      for (int i = 0; i < num_components; i++) {
         if (value->ssa.def[i])
            printf(" ssa_%d[%u]", value->ssa.def[i]->index, value->ssa.component[i]);
         else
            printf(" _");
      }
   }
}
static void
print_copy_entry(struct copy_entry *entry)
{
   printf(" %s ", glsl_get_type_name(entry->dst->type));
   nir_print_deref(entry->dst, stdout);
   printf(":\t");

   unsigned num_components = glsl_get_vector_elements(entry->dst->type);
   print_value(&entry->src, num_components);
   printf("\n");
}
static void
dump_instr(nir_instr *instr)
{
   printf("  ");
   nir_print_instr(instr, stdout);
   printf("\n");
}
static void
dump_copy_entries(struct util_dynarray *copies)
{
   util_dynarray_foreach(copies, struct copy_entry, iter)
      print_copy_entry(iter);
   printf("\n");
}
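/* Process a single block: forward-propagate the currently-known copies into
 * loads, record new copies from stores and copy_derefs, and invalidate
 * entries as aliasing writes, atomics and barriers are encountered.
 */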
static void
copy_prop_vars_block(struct copy_prop_var_state *state,
                     nir_builder *b, nir_block *block,
                     struct util_dynarray *copies)
{
   if (debug) {
      printf("# block%d\n", block->index);
      dump_copy_entries(copies);
   }

   nir_foreach_instr_safe(instr, block) {
      if (debug && instr->type == nir_instr_type_deref)
         dump_instr(instr);

      if (instr->type == nir_instr_type_call) {
         if (debug) dump_instr(instr);
         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_shader_temp |
                                         nir_var_function_temp |
                                         nir_var_mem_ssbo |
                                         nir_var_mem_shared);
         if (debug) dump_copy_entries(copies);
         continue;
      }

      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_control_barrier:
      case nir_intrinsic_memory_barrier:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_shader_out |
                                         nir_var_mem_ssbo |
                                         nir_var_mem_shared);
         break;

      case nir_intrinsic_memory_barrier_buffer:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_mem_ssbo |
                                         nir_var_mem_global);
         break;

      case nir_intrinsic_memory_barrier_shared:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_mem_shared);
         break;

      case nir_intrinsic_memory_barrier_tcs_patch:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_shader_out);
         break;

      case nir_intrinsic_scoped_memory_barrier:
         if (debug) dump_instr(instr);

         if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
            apply_barrier_for_modes(copies, nir_intrinsic_memory_modes(intrin));
         break;

      case nir_intrinsic_emit_vertex:
      case nir_intrinsic_emit_vertex_with_counter:
         if (debug) dump_instr(instr);

         apply_barrier_for_modes(copies, nir_var_shader_out);
         break;

      case nir_intrinsic_load_deref: {
         if (debug) dump_instr(instr);

         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
            break;

         nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);

         /* Direct array_derefs of vectors operate on the vectors (the parent
          * deref).  Indirects will be handled like other derefs.
          */
         int vec_index = 0;
         nir_deref_instr *vec_src = src;
         if (is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) {
            vec_src = nir_deref_instr_parent(src);
            unsigned vec_comps = glsl_get_vector_elements(vec_src->type);
            vec_index = nir_src_as_uint(src->arr.index);

            /* Loading from an invalid index yields an undef */
            if (vec_index >= vec_comps) {
               b->cursor = nir_instr_remove(instr);
               nir_ssa_def *u = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);
               nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(u));
               break;
            }
         }

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value = {0};
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            if (value.is_ssa) {
               /* lookup_load has already ensured that we get a single SSA
                * value that has all of the channels.  We just have to do the
                * rewrite operation.  Note for array derefs of vectors, the
                * element value is in channel 0.
                */
               if (intrin->instr.block) {
                  /* The lookup left our instruction in-place.  This means it
                   * must have used it to vec up a bunch of different sources.
                   * We need to be careful when rewriting uses so we don't
                   * rewrite the vecN itself.
                   */
                  nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                                 nir_src_for_ssa(value.ssa.def[0]),
                                                 value.ssa.def[0]->parent_instr);
               } else {
                  nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                           nir_src_for_ssa(value.ssa.def[0]));
               }
            } else {
               /* We're turning it into a load of a different variable */
               intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
               value_set_ssa_components(&value, &intrin->dest.ssa,
                                        intrin->num_components);
            }
            state->progress = true;
         } else {
            value_set_ssa_components(&value, &intrin->dest.ssa,
                                     intrin->num_components);
         }

         /* Now that we have a value, we're going to store it back so that we
          * have the right value next time we come looking for it.  In order
          * to do this, we need an exact match, not just something that
          * contains what we're looking for.
          */
         struct copy_entry *entry =
            lookup_entry_for_deref(copies, vec_src, nir_derefs_equal_bit);
         if (!entry)
            entry = copy_entry_create(copies, vec_src);

         /* Update the entry with the value of the load.  This way
          * we can potentially remove subsequent loads.
          */
         value_set_from_value(&entry->src, &value, vec_index,
                              (1 << intrin->num_components) - 1);
         break;
      }

      case nir_intrinsic_store_deref: {
         if (debug) dump_instr(instr);

         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
            break;

         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         assert(glsl_type_is_vector_or_scalar(dst->type));

         /* Direct array_derefs of vectors operate on the vectors (the parent
          * deref).  Indirects will be handled like other derefs.
          */
         int vec_index = 0;
         nir_deref_instr *vec_dst = dst;
         if (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index)) {
            vec_dst = nir_deref_instr_parent(dst);
            unsigned vec_comps = glsl_get_vector_elements(vec_dst->type);

            vec_index = nir_src_as_uint(dst->arr.index);

            /* Storing to an invalid index is a no-op. */
            if (vec_index >= vec_comps) {
               nir_instr_remove(instr);
               break;
            }
         }

         struct copy_entry *entry =
            lookup_entry_for_deref(copies, dst, nir_derefs_equal_bit);
         if (entry && value_equals_store_src(&entry->src, intrin)) {
            /* If we are storing the value from a load of the same var the
             * store is redundant so remove it.
             */
            nir_instr_remove(instr);
         } else {
            struct value value = {0};
            value_set_ssa_components(&value, intrin->src[1].ssa,
                                     intrin->num_components);
            unsigned wrmask = nir_intrinsic_write_mask(intrin);
            struct copy_entry *entry =
               get_entry_and_kill_aliases(copies, vec_dst, wrmask);
            value_set_from_value(&entry->src, &value, vec_index, wrmask);
         }

         break;
      }

      case nir_intrinsic_copy_deref: {
         if (debug) dump_instr(instr);

         if ((nir_intrinsic_src_access(intrin) & ACCESS_VOLATILE) ||
             (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE))
            break;

         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);

         if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
            /* This is a no-op self-copy.  Get rid of it */
            nir_instr_remove(instr);
            state->progress = true;
            break;
         }

         /* The copy_deref intrinsic doesn't keep track of num_components, so
          * get it from the type of the destination.
          */
         unsigned num_components = glsl_get_vector_elements(dst->type);
         unsigned full_mask = (1 << num_components) - 1;

         /* Copies of direct array derefs of vectors are not handled.  Just
          * invalidate what's written and bail.
          */
         if ((is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) ||
             (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index))) {
            kill_aliases(copies, dst, full_mask);
            break;
         }

         struct copy_entry *src_entry =
            lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
         struct value value;
         if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
            /* If load works, intrin (the copy_deref) is removed. */
            if (value.is_ssa) {
               nir_store_deref(b, dst, value.ssa.def[0], full_mask);
            } else {
               /* If this would be a no-op self-copy, don't bother. */
               if (nir_compare_derefs(value.deref, dst) & nir_derefs_equal_bit)
                  break;

               /* Just turn it into a copy of a different deref */
               intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);

               /* Put it back in again. */
               nir_builder_instr_insert(b, instr);
            }

            state->progress = true;
         } else {
            value = (struct value) {
               .is_ssa = false,
               { .deref = src },
            };
         }

         nir_variable *src_var = nir_deref_instr_get_variable(src);
         if (src_var && src_var->data.cannot_coalesce) {
            /* The source cannot be coalesced, which means we can't propagate
             * this copy.
             */
            break;
         }

         struct copy_entry *dst_entry =
            get_entry_and_kill_aliases(copies, dst, full_mask);
         value_set_from_value(&dst_entry->src, &value, 0, full_mask);
         break;
      }

      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
         if (debug) dump_instr(instr);

         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
            break;

         nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
         unsigned num_components = glsl_get_vector_elements(dst->type);
         unsigned full_mask = (1 << num_components) - 1;
         kill_aliases(copies, dst, full_mask);
         break;

      default:
         continue; /* To skip the debug below. */
      }

      if (debug) dump_copy_entries(copies);
   }
}
static void
copy_prop_vars_cf_node(struct copy_prop_var_state *state,
                       struct util_dynarray *copies,
                       nir_cf_node *cf_node)
{
   switch (cf_node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(cf_node);

      struct util_dynarray impl_copies;
      util_dynarray_init(&impl_copies, state->mem_ctx);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
         copy_prop_vars_cf_node(state, &impl_copies, cf_node);

      break;
   }

   case nir_cf_node_block: {
      nir_block *block = nir_cf_node_as_block(cf_node);
      nir_builder b;
      nir_builder_init(&b, state->impl);
      copy_prop_vars_block(state, &b, block, copies);
      break;
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(cf_node);

      /* Clone the copies for each branch of the if statement.  The idea is
       * that they both see the same state of available copies, but do not
       * interfere with each other.
       */

      struct util_dynarray then_copies;
      util_dynarray_clone(&then_copies, state->mem_ctx, copies);

      struct util_dynarray else_copies;
      util_dynarray_clone(&else_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
         copy_prop_vars_cf_node(state, &then_copies, cf_node);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
         copy_prop_vars_cf_node(state, &else_copies, cf_node);

      /* Both branches' copies can be ignored, since the effect of running
       * both branches was captured in the first pass that collects
       * vars_written.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);
      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(cf_node);

      /* Invalidate before cloning the copies for the loop, since the loop
       * body can be executed more than once.
       */

      invalidate_copies_for_cf_node(state, copies, cf_node);

      struct util_dynarray loop_copies;
      util_dynarray_clone(&loop_copies, state->mem_ctx, copies);

      foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
         copy_prop_vars_cf_node(state, &loop_copies, cf_node);

      break;
   }

   default:
      unreachable("Invalid CF node type");
   }
}
static bool
nir_copy_prop_vars_impl(nir_function_impl *impl)
{
   void *mem_ctx = ralloc_context(NULL);

   if (debug) {
      nir_metadata_require(impl, nir_metadata_block_index);
      printf("## nir_copy_prop_vars_impl for %s\n", impl->function->name);
   }

   struct copy_prop_var_state state = {
      .impl = impl,
      .mem_ctx = mem_ctx,
      .lin_ctx = linear_zalloc_parent(mem_ctx, 0),

      .vars_written_map = _mesa_pointer_hash_table_create(mem_ctx),
   };

   gather_vars_written(&state, NULL, &impl->cf_node);

   copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);

   if (state.progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
#ifndef NDEBUG
      impl->valid_metadata &= ~nir_metadata_not_properly_reset;
#endif
   }

   ralloc_free(mem_ctx);
   return state.progress;
}
bool
nir_opt_copy_prop_vars(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;
      progress |= nir_copy_prop_vars_impl(function->impl);
   }

   return progress;
}
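/* Like most NIR optimizations, this pass is typically run in a loop together
 * with other passes until nothing makes progress any more, e.g. (sketch):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_copy_prop_vars(shader);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 */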