nir/copy_prop_vars: use NIR_MAX_VEC_COMPONENTS
src/compiler/nir/nir_opt_copy_prop_vars.c (mesa.git)
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26 #include "nir_deref.h"
27
28 #include "util/bitscan.h"
29 #include "util/u_dynarray.h"
30
31 static const bool debug = false;
32
33 /**
34 * Variable-based copy propagation
35 *
36 * Normally, NIR trusts in SSA form for most of its copy-propagation needs.
37 * However, there are cases, especially when dealing with indirects, where SSA
38 * won't help you. This pass is for those times. Specifically, it handles
39 * the following things that the rest of NIR can't:
40 *
41 * 1) Copy-propagation on variables that have indirect access. This includes
42 * propagating from indirect stores into indirect loads.
43 *
44 * 2) Removal of redundant load_deref intrinsics. We can't trust regular CSE
45 * to do this because it isn't aware of variable writes that may alias the
46 * value and make the former load invalid.
47 *
48 * This pass uses an intermediate approach between a purely local, per-block
49 * analysis and a complete data-flow analysis. It walks the control flow
50 * graph and propagates the available copy information forward, invalidating
51 * data at each cf_node.
52 *
53 * Removal of dead writes to variables is handled by another pass.
54 */
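
/* As a rough illustration (a hypothetical NIR-like snippet, names invented for
 * this comment), consider a store through an indirect deref followed by a load
 * of the same location:
 *
 *    vec4 32 ssa_2 = ...
 *    intrinsic store_deref (ssa_1, ssa_2) (15, 0)        // &arr[ssa_0], wrmask=xyzw
 *    vec4 32 ssa_3 = intrinsic load_deref (ssa_1) (0)    // &arr[ssa_0]
 *
 * This pass can rewrite all uses of ssa_3 to ssa_2 and drop the load, even
 * though SSA-based copy propagation and CSE cannot see through the indirect
 * deref.
 */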
55
56 struct vars_written {
57 nir_variable_mode modes;
58
59 /* Key is the deref and value is a uintptr_t holding the write mask. */
60 struct hash_table *derefs;
61 };
62
63 struct value {
64 bool is_ssa;
65 union {
66 struct {
67 nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS];
68 uint8_t component[NIR_MAX_VEC_COMPONENTS];
69 } ssa;
70 nir_deref_instr *deref;
71 };
72 };
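
/* Illustrative example (hypothetical SSA names): after
 * "store_deref var ssa_5 wrmask=xy" followed by "store_deref var ssa_7
 * wrmask=z" on a vec4 variable, the tracked value for "var" would be
 *
 *    ssa.def[]       = { ssa_5, ssa_5, ssa_7, NULL }
 *    ssa.component[] = { 0,     1,     2,     0    }
 *
 * i.e. each destination component records which SSA def (and which channel of
 * it) currently holds its value; component w was never written, so a full
 * load of "var" cannot be satisfied from SSA values alone.
 */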
73
74 static void
75 value_set_ssa_components(struct value *value, nir_ssa_def *def,
76 unsigned num_components)
77 {
78 if (!value->is_ssa)
79 memset(&value->ssa, 0, sizeof(value->ssa));
80 value->is_ssa = true;
81 for (unsigned i = 0; i < num_components; i++) {
82 value->ssa.def[i] = def;
83 value->ssa.component[i] = i;
84 }
85 }
86
87 struct copy_entry {
88 struct value src;
89
90 nir_deref_instr *dst;
91 };
92
93 struct copy_prop_var_state {
94 nir_function_impl *impl;
95
96 void *mem_ctx;
97 void *lin_ctx;
98
99 /* Maps nodes to vars_written. Used to invalidate copy entries when
100 * visiting each node.
101 */
102 struct hash_table *vars_written_map;
103
104 bool progress;
105 };
106
107 static bool
108 value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
109 {
110 assert(intrin->intrinsic == nir_intrinsic_store_deref);
111 uintptr_t write_mask = nir_intrinsic_write_mask(intrin);
112
113 for (unsigned i = 0; i < intrin->num_components; i++) {
114 if ((write_mask & (1 << i)) &&
115 (value->ssa.def[i] != intrin->src[1].ssa ||
116 value->ssa.component[i] != i))
117 return false;
118 }
119
120 return true;
121 }
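
/* This check is what lets the pass drop a redundant write-back such as
 * (sketch in NIR-like syntax):
 *
 *    vec4 32 ssa_3 = intrinsic load_deref (ssa_1) (0)
 *    intrinsic store_deref (ssa_1, ssa_3) (15, 0)
 *
 * After the load, the copy entry for the deref holds ssa_3 for every
 * component, so the store writes back exactly what is already known to be
 * there and can be removed (see the store_deref case in
 * copy_prop_vars_block()).
 */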
122
123 static struct vars_written *
124 create_vars_written(struct copy_prop_var_state *state)
125 {
126 struct vars_written *written =
127 linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
128 written->derefs = _mesa_pointer_hash_table_create(state->mem_ctx);
129 return written;
130 }
131
132 static void
133 gather_vars_written(struct copy_prop_var_state *state,
134 struct vars_written *written,
135 nir_cf_node *cf_node)
136 {
137 struct vars_written *new_written = NULL;
138
139 switch (cf_node->type) {
140 case nir_cf_node_function: {
141 nir_function_impl *impl = nir_cf_node_as_function(cf_node);
142 foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
143 gather_vars_written(state, NULL, cf_node);
144 break;
145 }
146
147 case nir_cf_node_block: {
148 if (!written)
149 break;
150
151 nir_block *block = nir_cf_node_as_block(cf_node);
152 nir_foreach_instr(instr, block) {
153 if (instr->type == nir_instr_type_call) {
154 written->modes |= nir_var_shader_out |
155 nir_var_shader_temp |
156 nir_var_function_temp |
157 nir_var_mem_ssbo |
158 nir_var_mem_shared;
159 continue;
160 }
161
162 if (instr->type != nir_instr_type_intrinsic)
163 continue;
164
165 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
166 switch (intrin->intrinsic) {
167 case nir_intrinsic_barrier:
168 case nir_intrinsic_memory_barrier:
169 written->modes |= nir_var_shader_out |
170 nir_var_mem_ssbo |
171 nir_var_mem_shared;
172 break;
173
174 case nir_intrinsic_emit_vertex:
175 case nir_intrinsic_emit_vertex_with_counter:
176 written->modes |= nir_var_shader_out;
177 break;
178
179 case nir_intrinsic_deref_atomic_add:
180 case nir_intrinsic_deref_atomic_imin:
181 case nir_intrinsic_deref_atomic_umin:
182 case nir_intrinsic_deref_atomic_imax:
183 case nir_intrinsic_deref_atomic_umax:
184 case nir_intrinsic_deref_atomic_and:
185 case nir_intrinsic_deref_atomic_or:
186 case nir_intrinsic_deref_atomic_xor:
187 case nir_intrinsic_deref_atomic_exchange:
188 case nir_intrinsic_deref_atomic_comp_swap:
189 case nir_intrinsic_store_deref:
190 case nir_intrinsic_copy_deref: {
191 /* Destination in all of store_deref, copy_deref and the atomics is src[0]. */
192 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
193
194 uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
195 nir_intrinsic_write_mask(intrin) : (1 << glsl_get_vector_elements(dst->type)) - 1;
196
197 struct hash_entry *ht_entry = _mesa_hash_table_search(written->derefs, dst);
198 if (ht_entry)
199 ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
200 else
201 _mesa_hash_table_insert(written->derefs, dst, (void *)mask);
202
203 break;
204 }
205
206 default:
207 break;
208 }
209 }
210
211 break;
212 }
213
214 case nir_cf_node_if: {
215 nir_if *if_stmt = nir_cf_node_as_if(cf_node);
216
217 new_written = create_vars_written(state);
218
219 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
220 gather_vars_written(state, new_written, cf_node);
221
222 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
223 gather_vars_written(state, new_written, cf_node);
224
225 break;
226 }
227
228 case nir_cf_node_loop: {
229 nir_loop *loop = nir_cf_node_as_loop(cf_node);
230
231 new_written = create_vars_written(state);
232
233 foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
234 gather_vars_written(state, new_written, cf_node);
235
236 break;
237 }
238
239 default:
240 unreachable("Invalid CF node type");
241 }
242
243 if (new_written) {
244 /* Merge the new information into the parent control flow node. */
245 if (written) {
246 written->modes |= new_written->modes;
247 hash_table_foreach(new_written->derefs, new_entry) {
248 struct hash_entry *old_entry =
249 _mesa_hash_table_search_pre_hashed(written->derefs, new_entry->hash,
250 new_entry->key);
251 if (old_entry) {
252 nir_component_mask_t merged = (uintptr_t) new_entry->data |
253 (uintptr_t) old_entry->data;
254 old_entry->data = (void *) ((uintptr_t) merged);
255 } else {
256 _mesa_hash_table_insert_pre_hashed(written->derefs, new_entry->hash,
257 new_entry->key, new_entry->data);
258 }
259 }
260 }
261 _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
262 }
263 }
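
/* As an illustration of what gets recorded (hypothetical example): for an if
 * whose then-branch does "store_deref &color ... wrmask=xy" and whose
 * else-branch does "copy_deref &color, &tmp" on a vec4, the if's vars_written
 * maps the &color deref to the merged mask 0x3 | 0xf = 0xf. When the walk in
 * copy_prop_vars_cf_node() later reaches that if, the mask is used to kill
 * any copy entries aliasing &color without re-scanning the branches.
 */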
264
265 static struct copy_entry *
266 copy_entry_create(struct util_dynarray *copies,
267 nir_deref_instr *dst_deref)
268 {
269 struct copy_entry new_entry = {
270 .dst = dst_deref,
271 };
272 util_dynarray_append(copies, struct copy_entry, new_entry);
273 return util_dynarray_top_ptr(copies, struct copy_entry);
274 }
275
276 /* Remove a copy entry by swapping it with the last element and reducing the
277 * size. If used inside an iteration over copies, it must be a reverse
278 * (backwards) iteration. It is safe to use in that case because the swap
279 * only touches entries that have already been visited.
280 */
281 static void
282 copy_entry_remove(struct util_dynarray *copies,
283 struct copy_entry *entry)
284 {
285 /* This also works when removing the last element, since pop doesn't shrink
286 * the memory used by the array, so the swap is a no-op but not invalid.
287 */
288 *entry = util_dynarray_pop(copies, struct copy_entry);
289 }
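
/* A minimal usage sketch (should_drop is a hypothetical predicate), mirroring
 * how this file uses it further down:
 *
 *    util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
 *       if (should_drop(iter))
 *          copy_entry_remove(copies, iter);
 *    }
 *
 * The reverse iteration is what makes the swap safe: the element swapped into
 * the removed slot has already been visited.
 */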
290
291 static struct copy_entry *
292 lookup_entry_for_deref(struct util_dynarray *copies,
293 nir_deref_instr *deref,
294 nir_deref_compare_result allowed_comparisons)
295 {
296 util_dynarray_foreach(copies, struct copy_entry, iter) {
297 if (nir_compare_derefs(iter->dst, deref) & allowed_comparisons)
298 return iter;
299 }
300
301 return NULL;
302 }
303
304 static struct copy_entry *
305 lookup_entry_and_kill_aliases(struct util_dynarray *copies,
306 nir_deref_instr *deref,
307 unsigned write_mask)
308 {
309 /* TODO: Take into account the write_mask. */
310
311 nir_deref_instr *dst_match = NULL;
312 util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
313 if (!iter->src.is_ssa) {
314 /* If this write aliases the source of some entry, get rid of it */
315 if (nir_compare_derefs(iter->src.deref, deref) & nir_derefs_may_alias_bit) {
316 copy_entry_remove(copies, iter);
317 continue;
318 }
319 }
320
321 nir_deref_compare_result comp = nir_compare_derefs(iter->dst, deref);
322
323 if (comp & nir_derefs_equal_bit) {
324 /* Removing entries invalidates previous iter pointers, so we'll
325 * collect the matching entry later. Just make sure it is unique.
326 */
327 assert(!dst_match);
328 dst_match = iter->dst;
329 } else if (comp & nir_derefs_may_alias_bit) {
330 copy_entry_remove(copies, iter);
331 }
332 }
333
334 struct copy_entry *entry = NULL;
335 if (dst_match) {
336 util_dynarray_foreach(copies, struct copy_entry, iter) {
337 if (iter->dst == dst_match) {
338 entry = iter;
339 break;
340 }
341 }
342 assert(entry);
343 }
344 return entry;
345 }
346
347 static void
348 kill_aliases(struct util_dynarray *copies,
349 nir_deref_instr *deref,
350 unsigned write_mask)
351 {
352 /* TODO: Take into account the write_mask. */
353
354 struct copy_entry *entry =
355 lookup_entry_and_kill_aliases(copies, deref, write_mask);
356 if (entry)
357 copy_entry_remove(copies, entry);
358 }
359
360 static struct copy_entry *
361 get_entry_and_kill_aliases(struct util_dynarray *copies,
362 nir_deref_instr *deref,
363 unsigned write_mask)
364 {
365 /* TODO: Take into account the write_mask. */
366
367 struct copy_entry *entry =
368 lookup_entry_and_kill_aliases(copies, deref, write_mask);
369
370 if (entry == NULL)
371 entry = copy_entry_create(copies, deref);
372
373 return entry;
374 }
375
376 static void
377 apply_barrier_for_modes(struct util_dynarray *copies,
378 nir_variable_mode modes)
379 {
380 util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
381 if ((iter->dst->mode & modes) ||
382 (!iter->src.is_ssa && (iter->src.deref->mode & modes)))
383 copy_entry_remove(copies, iter);
384 }
385 }
386
387 static void
388 value_set_from_value(struct value *value, const struct value *from,
389 unsigned write_mask)
390 {
391 if (from->is_ssa) {
392 /* Clear value if it was being used as non-SSA. */
393 if (!value->is_ssa)
394 memset(&value->ssa, 0, sizeof(value->ssa));
395 value->is_ssa = true;
396 /* Only overwrite the written components */
397 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
398 if (write_mask & (1 << i)) {
399 value->ssa.def[i] = from->ssa.def[i];
400 value->ssa.component[i] = from->ssa.component[i];
401 }
402 }
403 } else {
404 /* Non-ssa stores always write everything */
405 value->is_ssa = false;
406 value->deref = from->deref;
407 }
408 }
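
/* Worked example (illustrative values): if "value" currently has
 * def[] = { ssa_1, ssa_1, ssa_1, ssa_1 } and "from" has
 * def[] = { ssa_2, ssa_2, ssa_2, ssa_2 }, then write_mask = 0x3 (xy) yields
 * def[] = { ssa_2, ssa_2, ssa_1, ssa_1 }: only the written components are
 * overwritten and the others keep their previous value.
 */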
409
410 /* Do a "load" from an SSA-based entry return it in "value" as a value with a
411 * single SSA def. Because an entry could reference multiple different SSA
412 * defs, a vecN operation may be inserted to combine them into a single SSA
413 * def before handing it back to the caller. If the load instruction is no
414 * longer needed, it is removed and nir_instr::block is set to NULL. (It is
415 * possible, in some cases, for the load to be used in the vecN operation in
416 * which case it isn't deleted.)
417 */
418 static bool
419 load_from_ssa_entry_value(struct copy_prop_var_state *state,
420 struct copy_entry *entry,
421 nir_builder *b, nir_intrinsic_instr *intrin,
422 struct value *value)
423 {
424 *value = entry->src;
425 assert(value->is_ssa);
426
427 const struct glsl_type *type = entry->dst->type;
428 unsigned num_components = glsl_get_vector_elements(type);
429
430 nir_component_mask_t available = 0;
431 bool all_same = true;
432 for (unsigned i = 0; i < num_components; i++) {
433 if (value->ssa.def[i])
434 available |= (1 << i);
435
436 if (value->ssa.def[i] != value->ssa.def[0])
437 all_same = false;
438
439 if (value->ssa.component[i] != i)
440 all_same = false;
441 }
442
443 if (all_same) {
444 /* Our work here is done */
445 b->cursor = nir_instr_remove(&intrin->instr);
446 intrin->instr.block = NULL;
447 return true;
448 }
449
450 if (available != (1 << num_components) - 1 &&
451 intrin->intrinsic == nir_intrinsic_load_deref &&
452 (available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
453 /* If none of the components read are available as SSA values, then we
454 * should just bail. Otherwise, we would end up replacing the uses of
455 * the load_deref with a vecN() that just gathers up its components.
456 */
457 return false;
458 }
459
460 b->cursor = nir_after_instr(&intrin->instr);
461
462 nir_ssa_def *load_def =
463 intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
464
465 bool keep_intrin = false;
466 nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
467 for (unsigned i = 0; i < num_components; i++) {
468 if (value->ssa.def[i]) {
469 comps[i] = nir_channel(b, value->ssa.def[i], value->ssa.component[i]);
470 } else {
471 /* We don't have anything for this component in our
472 * list. Just re-use a channel from the load.
473 */
474 if (load_def == NULL)
475 load_def = nir_load_deref(b, entry->dst);
476
477 if (load_def->parent_instr == &intrin->instr)
478 keep_intrin = true;
479
480 comps[i] = nir_channel(b, load_def, i);
481 }
482 }
483
484 nir_ssa_def *vec = nir_vec(b, comps, num_components);
485 value_set_ssa_components(value, vec, num_components);
486
487 if (!keep_intrin) {
488 /* Removing this instruction should not touch the cursor because we
489 * created the cursor after the intrinsic and have added at least one
490 * instruction (the vec) since then.
491 */
492 assert(b->cursor.instr != &intrin->instr);
493 nir_instr_remove(&intrin->instr);
494 intrin->instr.block = NULL;
495 }
496
497 return true;
498 }
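
/* Sketch of the partially-available case (hypothetical SSA names): suppose
 * the entry knows components x and y of a vec4 (say, from ssa_4) but not z
 * and w. For
 *
 *    vec4 32 ssa_9 = intrinsic load_deref (ssa_0) (0)
 *
 * the load is kept, since its z and w channels are still needed, and the
 * value handed back is roughly
 *
 *    vec4 32 ssa_10 = vec4 ssa_4.x, ssa_4.y, ssa_9.z, ssa_9.w
 *
 * The caller can then rewrite the remaining uses of ssa_9 to ssa_10 and
 * record ssa_10 as the current value of the deref.
 */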
499
500 /**
501 * Specialize the wildcards in a deref chain
502 *
503 * This function returns a deref chain identical to \param deref except that
504 * some of its wildcards are replaced with indices from \param specific. The
505 * process is guided by \param guide which references the same type as \param
506 * specific but has the same wildcard array lengths as \param deref.
507 */
508 static nir_deref_instr *
509 specialize_wildcards(nir_builder *b,
510 nir_deref_path *deref,
511 nir_deref_path *guide,
512 nir_deref_path *specific)
513 {
514 nir_deref_instr **deref_p = &deref->path[1];
515 nir_deref_instr **guide_p = &guide->path[1];
516 nir_deref_instr **spec_p = &specific->path[1];
517 nir_deref_instr *ret_tail = deref->path[0];
518 for (; *deref_p; deref_p++) {
519 if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
520 /* This is where things get tricky. We have to search through
521 * the entry deref to find its corresponding wildcard and fill
522 * this slot in with the value from the src.
523 */
524 while (*guide_p &&
525 (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
526 guide_p++;
527 spec_p++;
528 }
529 assert(*guide_p && *spec_p);
530
531 ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);
532
533 guide_p++;
534 spec_p++;
535 } else {
536 ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
537 }
538 }
539
540 return ret_tail;
541 }
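
/* Rough example of the intent (paths written informally): if a copy entry
 * recorded "tmp[*]" as a copy of "arr[*].member" and we now load "tmp[3]",
 * the wildcard in the entry's source is specialized against the concrete
 * index from the load, guided by the entry's destination path, producing
 * "arr[3].member" as the deref to load from instead.
 */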
542
543 /* Do a "load" from an deref-based entry return it in "value" as a value. The
544 * deref returned in "value" will always be a fresh copy so the caller can
545 * steal it and assign it to the instruction directly without copying it
546 * again.
547 */
548 static bool
549 load_from_deref_entry_value(struct copy_prop_var_state *state,
550 struct copy_entry *entry,
551 nir_builder *b, nir_intrinsic_instr *intrin,
552 nir_deref_instr *src, struct value *value)
553 {
554 *value = entry->src;
555
556 b->cursor = nir_instr_remove(&intrin->instr);
557
558 nir_deref_path entry_dst_path, src_path;
559 nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
560 nir_deref_path_init(&src_path, src, state->mem_ctx);
561
562 bool need_to_specialize_wildcards = false;
563 nir_deref_instr **entry_p = &entry_dst_path.path[1];
564 nir_deref_instr **src_p = &src_path.path[1];
565 while (*entry_p && *src_p) {
566 nir_deref_instr *entry_tail = *entry_p++;
567 nir_deref_instr *src_tail = *src_p++;
568
569 if (src_tail->deref_type == nir_deref_type_array &&
570 entry_tail->deref_type == nir_deref_type_array_wildcard)
571 need_to_specialize_wildcards = true;
572 }
573
574 /* If the entry deref is longer than the source deref then it refers to a
575 * smaller type and we can't source from it.
576 */
577 assert(*entry_p == NULL);
578
579 if (need_to_specialize_wildcards) {
580 /* The entry has some wildcards that are not in src. This means we need
581 * to construct a new deref based on the entry but using the wildcards
582 * from the source and guided by the entry dst. Oof.
583 */
584 nir_deref_path entry_src_path;
585 nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
586 value->deref = specialize_wildcards(b, &entry_src_path,
587 &entry_dst_path, &src_path);
588 nir_deref_path_finish(&entry_src_path);
589 }
590
591 /* If our source deref is longer than the entry deref, that's ok because
592 * it just means the entry deref needs to be extended a bit.
593 */
594 while (*src_p) {
595 nir_deref_instr *src_tail = *src_p++;
596 value->deref = nir_build_deref_follower(b, value->deref, src_tail);
597 }
598
599 nir_deref_path_finish(&entry_dst_path);
600 nir_deref_path_finish(&src_path);
601
602 return true;
603 }
604
605 static bool
606 try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
607 nir_builder *b, nir_intrinsic_instr *intrin,
608 nir_deref_instr *src, struct value *value)
609 {
610 if (entry == NULL)
611 return false;
612
613 if (entry->src.is_ssa) {
614 return load_from_ssa_entry_value(state, entry, b, intrin, value);
615 } else {
616 return load_from_deref_entry_value(state, entry, b, intrin, src, value);
617 }
618 }
619
620 static void
621 invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
622 struct util_dynarray *copies,
623 nir_cf_node *cf_node)
624 {
625 struct hash_entry *ht_entry = _mesa_hash_table_search(state->vars_written_map, cf_node);
626 assert(ht_entry);
627
628 struct vars_written *written = ht_entry->data;
629 if (written->modes) {
630 util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
631 if (entry->dst->mode & written->modes)
632 copy_entry_remove(copies, entry);
633 }
634 }
635
636 hash_table_foreach (written->derefs, entry) {
637 nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
638 kill_aliases(copies, deref_written, (uintptr_t)entry->data);
639 }
640 }
641
642 static bool
643 is_array_deref_of_vector(nir_deref_instr *deref)
644 {
645 if (deref->deref_type != nir_deref_type_array)
646 return false;
647 nir_deref_instr *parent = nir_deref_instr_parent(deref);
648 return glsl_type_is_vector(parent->type);
649 }
650
651 static void
652 print_value(struct value *value, unsigned num_components)
653 {
654 if (!value->is_ssa) {
655 printf(" %s ", glsl_get_type_name(value->deref->type));
656 nir_print_deref(value->deref, stdout);
657 return;
658 }
659
660 bool same_ssa = true;
661 for (unsigned i = 0; i < num_components; i++) {
662 if (value->ssa.component[i] != i ||
663 (i > 0 && value->ssa.def[i - 1] != value->ssa.def[i])) {
664 same_ssa = false;
665 break;
666 }
667 }
668 if (same_ssa) {
669 printf(" ssa_%d", value->ssa.def[0]->index);
670 } else {
671 for (int i = 0; i < num_components; i++) {
672 if (value->ssa.def[i])
673 printf(" ssa_%d[%u]", value->ssa.def[i]->index, value->ssa.component[i]);
674 else
675 printf(" _");
676 }
677 }
678 }
679
680 static void
681 print_copy_entry(struct copy_entry *entry)
682 {
683 printf(" %s ", glsl_get_type_name(entry->dst->type));
684 nir_print_deref(entry->dst, stdout);
685 printf(":\t");
686
687 unsigned num_components = glsl_get_vector_elements(entry->dst->type);
688 print_value(&entry->src, num_components);
689 printf("\n");
690 }
691
692 static void
693 dump_instr(nir_instr *instr)
694 {
695 printf(" ");
696 nir_print_instr(instr, stdout);
697 printf("\n");
698 }
699
700 static void
701 dump_copy_entries(struct util_dynarray *copies)
702 {
703 util_dynarray_foreach(copies, struct copy_entry, iter)
704 print_copy_entry(iter);
705 printf("\n");
706 }
707
708 static void
709 copy_prop_vars_block(struct copy_prop_var_state *state,
710 nir_builder *b, nir_block *block,
711 struct util_dynarray *copies)
712 {
713 if (debug) {
714 printf("# block%d\n", block->index);
715 dump_copy_entries(copies);
716 }
717
718 nir_foreach_instr_safe(instr, block) {
719 if (debug && instr->type == nir_instr_type_deref)
720 dump_instr(instr);
721
722 if (instr->type == nir_instr_type_call) {
723 if (debug) dump_instr(instr);
724 apply_barrier_for_modes(copies, nir_var_shader_out |
725 nir_var_shader_temp |
726 nir_var_function_temp |
727 nir_var_mem_ssbo |
728 nir_var_mem_shared);
729 if (debug) dump_copy_entries(copies);
730 continue;
731 }
732
733 if (instr->type != nir_instr_type_intrinsic)
734 continue;
735
736 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
737 switch (intrin->intrinsic) {
738 case nir_intrinsic_barrier:
739 case nir_intrinsic_memory_barrier:
740 if (debug) dump_instr(instr);
741
742 apply_barrier_for_modes(copies, nir_var_shader_out |
743 nir_var_mem_ssbo |
744 nir_var_mem_shared);
745 break;
746
747 case nir_intrinsic_emit_vertex:
748 case nir_intrinsic_emit_vertex_with_counter:
749 if (debug) dump_instr(instr);
750
751 apply_barrier_for_modes(copies, nir_var_shader_out);
752 break;
753
754 case nir_intrinsic_load_deref: {
755 if (debug) dump_instr(instr);
756
757 nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
758
759 if (is_array_deref_of_vector(src)) {
760 /* Not handled yet. This load won't invalidate existing copies. */
761 break;
762 }
763
764 struct copy_entry *src_entry =
765 lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
766 struct value value = {0};
767 if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
768 if (value.is_ssa) {
769 /* try_load_from_entry() has already ensured that we get a single SSA
770 * value that has all of the channels. We just have to do the
771 * rewrite operation.
772 */
773 if (intrin->instr.block) {
774 /* The lookup left our instruction in-place. This means it
775 * must have used it to vec up a bunch of different sources.
776 * We need to be careful when rewriting uses so we don't
777 * rewrite the vecN itself.
778 */
779 nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
780 nir_src_for_ssa(value.ssa.def[0]),
781 value.ssa.def[0]->parent_instr);
782 } else {
783 nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
784 nir_src_for_ssa(value.ssa.def[0]));
785 }
786 } else {
787 /* We're turning it into a load of a different variable */
788 intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);
789
790 /* Put it back in again. */
791 nir_builder_instr_insert(b, instr);
792 value_set_ssa_components(&value, &intrin->dest.ssa,
793 intrin->num_components);
794 }
795 state->progress = true;
796 } else {
797 value_set_ssa_components(&value, &intrin->dest.ssa,
798 intrin->num_components);
799 }
800
801 /* Now that we have a value, we're going to store it back so that we
802 * have the right value next time we come looking for it. In order
803 * to do this, we need an exact match, not just something that
804 * contains what we're looking for.
805 */
806 struct copy_entry *entry =
807 lookup_entry_for_deref(copies, src, nir_derefs_equal_bit);
808 if (!entry)
809 entry = copy_entry_create(copies, src);
810
811 /* Update the entry with the value of the load. This way
812 * we can potentially remove subsequent loads.
813 */
814 value_set_from_value(&entry->src, &value,
815 (1 << intrin->num_components) - 1);
816 break;
817 }
818
819 case nir_intrinsic_store_deref: {
820 if (debug) dump_instr(instr);
821
822 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
823 struct copy_entry *entry =
824 lookup_entry_for_deref(copies, dst, nir_derefs_equal_bit);
825 if (entry && value_equals_store_src(&entry->src, intrin)) {
826 /* If we are storing the value from a load of the same var, the
827 * store is redundant, so remove it.
828 */
829 nir_instr_remove(instr);
830 } else if (is_array_deref_of_vector(dst)) {
831 /* Not handled yet. Writing into an element of 'dst' invalidates
832 * any related entries in copies.
833 */
834 nir_deref_instr *vector = nir_deref_instr_parent(dst);
835 unsigned vector_components = glsl_get_vector_elements(vector->type);
836 kill_aliases(copies, vector, (1 << vector_components) - 1);
837 } else {
838 struct value value = {0};
839 value_set_ssa_components(&value, intrin->src[1].ssa,
840 intrin->num_components);
841 unsigned wrmask = nir_intrinsic_write_mask(intrin);
842 struct copy_entry *entry =
843 get_entry_and_kill_aliases(copies, dst, wrmask);
844 value_set_from_value(&entry->src, &value, wrmask);
845 }
846
847 break;
848 }
849
850 case nir_intrinsic_copy_deref: {
851 if (debug) dump_instr(instr);
852
853 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
854 nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
855
856 if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
857 /* This is a no-op self-copy. Get rid of it */
858 nir_instr_remove(instr);
859 continue;
860 }
861
862 /* The copy_deref intrinsic doesn't keep track of num_components, so
863 * get it ourselves.
864 */
865 unsigned num_components = glsl_get_vector_elements(dst->type);
866 unsigned full_mask = (1 << num_components) - 1;
867
868 if (is_array_deref_of_vector(src) || is_array_deref_of_vector(dst)) {
869 /* Cases not handled yet. Writing into an element of 'dst'
870 * invalidates any related entries in copies. Reading from 'src'
871 * doesn't invalidate anything, so no action needed for it.
872 */
873 kill_aliases(copies, dst, full_mask);
874 break;
875 }
876
877 struct copy_entry *src_entry =
878 lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
879 struct value value;
880 if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
881 /* If load works, intrin (the copy_deref) is removed. */
882 if (value.is_ssa) {
883 nir_store_deref(b, dst, value.ssa.def[0], full_mask);
884 } else {
885 /* If this would be a no-op self-copy, don't bother. */
886 if (nir_compare_derefs(value.deref, dst) & nir_derefs_equal_bit)
887 continue;
888
889 /* Just turn it into a copy of a different deref */
890 intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);
891
892 /* Put it back in again. */
893 nir_builder_instr_insert(b, instr);
894 }
895
896 state->progress = true;
897 } else {
898 value = (struct value) {
899 .is_ssa = false,
900 { .deref = src },
901 };
902 }
903
904 struct copy_entry *dst_entry =
905 get_entry_and_kill_aliases(copies, dst, full_mask);
906 value_set_from_value(&dst_entry->src, &value, full_mask);
907 break;
908 }
909
910 case nir_intrinsic_deref_atomic_add:
911 case nir_intrinsic_deref_atomic_imin:
912 case nir_intrinsic_deref_atomic_umin:
913 case nir_intrinsic_deref_atomic_imax:
914 case nir_intrinsic_deref_atomic_umax:
915 case nir_intrinsic_deref_atomic_and:
916 case nir_intrinsic_deref_atomic_or:
917 case nir_intrinsic_deref_atomic_xor:
918 case nir_intrinsic_deref_atomic_exchange:
919 case nir_intrinsic_deref_atomic_comp_swap:
920 if (debug) dump_instr(instr);
921
922 nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
923 unsigned num_components = glsl_get_vector_elements(dst->type);
924 unsigned full_mask = (1 << num_components) - 1;
925 kill_aliases(copies, dst, full_mask);
926 break;
927
928 default:
929 continue; /* To skip the debug below. */
930 }
931
932 if (debug) dump_copy_entries(copies);
933 }
934 }
935
936 static void
937 copy_prop_vars_cf_node(struct copy_prop_var_state *state,
938 struct util_dynarray *copies,
939 nir_cf_node *cf_node)
940 {
941 switch (cf_node->type) {
942 case nir_cf_node_function: {
943 nir_function_impl *impl = nir_cf_node_as_function(cf_node);
944
945 struct util_dynarray impl_copies;
946 util_dynarray_init(&impl_copies, state->mem_ctx);
947
948 foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
949 copy_prop_vars_cf_node(state, &impl_copies, cf_node);
950
951 break;
952 }
953
954 case nir_cf_node_block: {
955 nir_block *block = nir_cf_node_as_block(cf_node);
956 nir_builder b;
957 nir_builder_init(&b, state->impl);
958 copy_prop_vars_block(state, &b, block, copies);
959 break;
960 }
961
962 case nir_cf_node_if: {
963 nir_if *if_stmt = nir_cf_node_as_if(cf_node);
964
965 /* Clone the copies for each branch of the if statement. The idea is
966 * that they both see the same state of available copies, but do not
967 * interfere with each other.
968 */
969
970 struct util_dynarray then_copies;
971 util_dynarray_clone(&then_copies, state->mem_ctx, copies);
972
973 struct util_dynarray else_copies;
974 util_dynarray_clone(&else_copies, state->mem_ctx, copies);
975
976 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
977 copy_prop_vars_cf_node(state, &then_copies, cf_node);
978
979 foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
980 copy_prop_vars_cf_node(state, &else_copies, cf_node);
981
982 /* Both branches' copies can be ignored, since the effect of running both
983 * branches was captured in the first pass that collects vars_written.
984 */
985
986 invalidate_copies_for_cf_node(state, copies, cf_node);
987
988 break;
989 }
990
991 case nir_cf_node_loop: {
992 nir_loop *loop = nir_cf_node_as_loop(cf_node);
993
994 /* Invalidate before cloning the copies for the loop, since the loop
995 * body can be executed more than once.
996 */
997
998 invalidate_copies_for_cf_node(state, copies, cf_node);
999
1000 struct util_dynarray loop_copies;
1001 util_dynarray_clone(&loop_copies, state->mem_ctx, copies);
1002
1003 foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
1004 copy_prop_vars_cf_node(state, &loop_copies, cf_node);
1005
1006 break;
1007 }
1008
1009 default:
1010 unreachable("Invalid CF node type");
1011 }
1012 }
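
/* Concretely (hypothetical shader fragment):
 *
 *    v = a;
 *    loop {
 *       use(v);
 *       v = b;
 *    }
 *
 * The copy "v = a" must not be propagated into the loop body, because on the
 * second iteration v holds b. Invalidating against the loop's vars_written
 * before walking its body is what prevents that.
 */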
1013
1014 static bool
1015 nir_copy_prop_vars_impl(nir_function_impl *impl)
1016 {
1017 void *mem_ctx = ralloc_context(NULL);
1018
1019 if (debug) {
1020 nir_metadata_require(impl, nir_metadata_block_index);
1021 printf("## nir_copy_prop_vars_impl for %s\n", impl->function->name);
1022 }
1023
1024 struct copy_prop_var_state state = {
1025 .impl = impl,
1026 .mem_ctx = mem_ctx,
1027 .lin_ctx = linear_zalloc_parent(mem_ctx, 0),
1028
1029 .vars_written_map = _mesa_pointer_hash_table_create(mem_ctx),
1030 };
1031
1032 gather_vars_written(&state, NULL, &impl->cf_node);
1033
1034 copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);
1035
1036 if (state.progress) {
1037 nir_metadata_preserve(impl, nir_metadata_block_index |
1038 nir_metadata_dominance);
1039 } else {
1040 #ifndef NDEBUG
1041 impl->valid_metadata &= ~nir_metadata_not_properly_reset;
1042 #endif
1043 }
1044
1045 ralloc_free(mem_ctx);
1046 return state.progress;
1047 }
1048
1049 bool
1050 nir_opt_copy_prop_vars(nir_shader *shader)
1051 {
1052 bool progress = false;
1053
1054 nir_foreach_function(function, shader) {
1055 if (!function->impl)
1056 continue;
1057 progress |= nir_copy_prop_vars_impl(function->impl);
1058 }
1059
1060 return progress;
1061 }
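
/* Typical usage, as a non-authoritative sketch: drivers generally run this
 * pass inside their NIR optimization loop together with dead-write and
 * dead-code elimination, e.g. (driver-specific loop, shown only for
 * illustration):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
 *       NIR_PASS(progress, shader, nir_opt_dead_write_vars);
 *       NIR_PASS(progress, shader, nir_opt_dce);
 *    } while (progress);
 */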